Files
BC-bak/bc-cleanup.sh
Malin c81f4c51fb fix: export AWS credentials so aws cli can locate them
The config file sets AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY as
shell variables, but aws cli needs them as environment variables.
Added explicit export statements in both bc-backup.sh and bc-cleanup.sh.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-16 19:41:07 +01:00

133 lines
3.8 KiB
Bash
Executable File

#!/bin/bash
#
# Business Central Backup S3 Cleanup
# Deletes expired backup objects from S3 (older than RETENTION_DAYS)
#
set -euo pipefail

# Resolve the directory this script lives in, so config/logs are found
# regardless of the caller's working directory.
SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
CONFIG_FILE="${SCRIPT_DIR}/bc-backup.conf"
LOG_DIR="${SCRIPT_DIR}/logs"

mkdir -p "$LOG_DIR"
# Emit a timestamped, CLEANUP-tagged message to stdout and append it to
# the shared backup log.
log() {
  local stamp
  stamp=$(date '+%Y-%m-%d %H:%M:%S')
  printf '[%s] [CLEANUP] %s\n' "${stamp}" "$*" | tee -a "${LOG_DIR}/backup.log"
}
# Same as log(), but tagged ERROR and routed to stderr; the log file is
# still appended (tee writes it before stdout is redirected).
log_error() {
  local stamp
  stamp=$(date '+%Y-%m-%d %H:%M:%S')
  printf '[%s] [CLEANUP] ERROR: %s\n' "${stamp}" "$*" | tee -a "${LOG_DIR}/backup.log" >&2
}
# Load configuration; bail out early if the config file is absent.
if [[ ! -f "${CONFIG_FILE}" ]]; then
  log_error "Configuration file not found: ${CONFIG_FILE}"
  exit 1
fi
# shellcheck source=/dev/null
source "${CONFIG_FILE}"

# Every variable named here must be non-empty after sourcing the config.
# ${!required} is indirect expansion: the value of the variable whose
# name is stored in $required.
for required in S3_BUCKET S3_ENDPOINT AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY; do
  if [[ -z "${!required:-}" ]]; then
    log_error "Required variable not set: ${required}"
    exit 1
  fi
done
# Retention window in days; default to 30 when the config leaves it unset.
RETENTION_DAYS="${RETENTION_DAYS:-30}"

# Export AWS credentials so aws cli can find them
export AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY
export AWS_DEFAULT_REGION="${AWS_DEFAULT_REGION:-us-east-1}"

# Compute the cutoff timestamp (UTC, ISO-8601). BSD date on macOS uses
# -v for relative offsets; GNU date everywhere else uses -d.
case "${OSTYPE}" in
  darwin*)
    CUTOFF_DATE=$(date -u -v-"${RETENTION_DAYS}"d '+%Y-%m-%dT%H:%M:%SZ')
    ;;
  *)
    CUTOFF_DATE=$(date -u -d "-${RETENTION_DAYS} days" '+%Y-%m-%dT%H:%M:%SZ')
    ;;
esac
# Run banner and effective parameters
banner="========================================="
log "${banner}"
log "S3 Backup Cleanup"
log "${banner}"
log "Bucket: ${S3_BUCKET}"
log "Retention: ${RETENTION_DAYS} days"
log "Cutoff date: ${CUTOFF_DATE}"
log "Deleting objects last modified before ${CUTOFF_DATE}"

# Tallies reported in the final summary
deleted_count=0 failed_count=0 skipped_count=0
# List all objects under the backups/ prefix, paging through results with
# list-objects-v2 continuation tokens, and delete anything last modified
# before CUTOFF_DATE.
continuation_token=""
while true; do
  list_args=(
    --bucket "${S3_BUCKET}"
    --prefix "backups/"
    --endpoint-url "${S3_ENDPOINT}"
    --output json
  )
  if [[ -n "${continuation_token}" ]]; then
    list_args+=(--continuation-token "${continuation_token}")
  fi
  # Listing errors are deliberately swallowed: an unreachable bucket yields
  # an empty page and the loop terminates instead of aborting mid-run.
  response=$(aws s3api list-objects-v2 "${list_args[@]}" 2>/dev/null || echo '{"Contents":[]}')
  # Flatten the page into "key|last_modified|size" lines for the shell loop.
  objects=$(echo "${response}" | python3 -c "
import json, sys
data = json.load(sys.stdin)
for obj in data.get('Contents', []):
    print(obj['Key'] + '|' + obj.get('LastModified', '') + '|' + str(obj.get('Size', 0)))
" 2>/dev/null || true)
  if [[ -z "${objects}" ]]; then
    break
  fi
  while IFS='|' read -r key last_modified size; do
    [[ -z "${key}" ]] && continue
    # ISO-8601 timestamps sort lexicographically, so a plain string
    # comparison against the cutoff is sufficient.
    if [[ "${last_modified}" < "${CUTOFF_DATE}" ]]; then
      log " Deleting: ${key} (modified: ${last_modified})"
      if aws s3api delete-object \
        --bucket "${S3_BUCKET}" \
        --key "${key}" \
        --endpoint-url "${S3_ENDPOINT}" 2>/dev/null; then
        # NOTE: ((deleted_count++)) returns status 1 when the pre-increment
        # value is 0, which aborts the script under `set -e` on the very
        # first object. Plain arithmetic expansion has no such trap.
        deleted_count=$((deleted_count + 1))
      else
        # May fail due to object lock - that's expected for COMPLIANCE mode
        log " Failed to delete ${key} (likely still under retention lock)"
        failed_count=$((failed_count + 1))
      fi
    else
      skipped_count=$((skipped_count + 1))
    fi
  done <<< "${objects}"
  # Check for more pages
  is_truncated=$(echo "${response}" | python3 -c "import json,sys; print(json.load(sys.stdin).get('IsTruncated', False))" 2>/dev/null || echo "False")
  if [[ "${is_truncated}" == "True" ]]; then
    continuation_token=$(echo "${response}" | python3 -c "import json,sys; print(json.load(sys.stdin).get('NextContinuationToken', ''))" 2>/dev/null || true)
    # Guard against an infinite loop: a truncated listing with no parseable
    # token would otherwise re-fetch the first page forever.
    [[ -n "${continuation_token}" ]] || break
  else
    break
  fi
done
# Final summary of what the run did, then exit successfully — objects that
# are still retention-locked count as "Failed" but are not an error.
for summary_line in \
  "=========================================" \
  "Cleanup completed" \
  "Deleted: ${deleted_count}" \
  "Failed (locked): ${failed_count}" \
  "Skipped (within retention): ${skipped_count}" \
  "========================================="; do
  log "${summary_line}"
done
exit 0