manifests/backups/daemonset+cronjob/cm-script.yaml
songtianlun 1e60ca2eb6 feat: add source size calculation to backup script
- Introduce SOURCE_SIZE variable in cm-script.yaml
- Remove redundant SOURCE_SIZE calculation

This change calculates the size of the data directory
prior to initiating the backup process. It ensures that
the backup script has accurate information about the size
of the source data, enhancing the logging and monitoring
of backup activities.
2025-04-22 22:19:16 +08:00
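
For context, the size calculation this commit introduces is a single du pipeline; a minimal sketch of what it yields (the path is from the script, the sizes are illustrative):

    $ du -sh /data/local-csi
    4.2G    /data/local-csi
    $ du -sh /data/local-csi | cut -f1
    4.2G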

apiVersion: v1
kind: ConfigMap
metadata:
  name: backup-script
  namespace: backup-system
data:
  backup.sh: |
    #!/bin/bash
    set -e
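    # set -e: abort immediately if any unhandled command fails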
    # Read configuration from environment variables
    S3_ENDPOINT="${S3_ENDPOINT}"
    S3_ACCESS_KEY="${S3_ACCESS_KEY}"
    S3_SECRET_KEY="${S3_SECRET_KEY}"
    S3_BUCKET="${S3_BUCKET}"
    S3_SUBPATH="${S3_SUBPATH:-backups}"
    BACKUPS_TO_KEEP="${BACKUPS_TO_KEEP:-7}"
    # Determine the host name, preferring NODE_NAME when it is set
    HOSTNAME=$(hostname)
    HOSTNAME="${NODE_NAME:-$HOSTNAME}"
    # Webhook URL for Feishu notifications, mounted as a file in the pod
    FEISHU_WEBHOOK_URL=$(cat /etc/feishu-webhook/url)
    # Capture the total size of the source tree before the backup starts
    SOURCE_SIZE=$(du -sh /data/local-csi 2>/dev/null | cut -f1)
    START_TIME=$(date +%s)
    # Write the s3cmd configuration
    cat > ~/.s3cfg << EOF
    [default]
    access_key = ${S3_ACCESS_KEY}
    secret_key = ${S3_SECRET_KEY}
    host_base = ${S3_ENDPOINT}
    host_bucket = ${S3_ENDPOINT}
    use_https = ${USE_HTTPS:-True}
    signature_v2 = ${SIGNATURE_V2:-False}
    EOF
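    # Note: with host_bucket identical to host_base, s3cmd addresses buckets
    # path-style (endpoint/bucket/key), which self-hosted S3-compatible
    # stores such as MinIO typically expect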
    # Skip the run entirely if the backup source path does not exist
    if [ ! -d "/data/local-csi" ]; then
      echo "Directory /data/local-csi does not exist, skipping backup"
      exit 0
    fi
    # Skip the run if the source contains no subdirectories to back up
    DIR_COUNT=$(find /data/local-csi -mindepth 1 -maxdepth 1 -type d | wc -l)
    if [ "$DIR_COUNT" -eq 0 ]; then
      echo "No subdirectories under /data/local-csi, skipping backup"
      exit 0
    fi
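    # Both guards exit 0 (success) so that nodes with nothing to back up
    # do not show up as failed runs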
    # Iterate over every top-level directory under the source path
    find /data/local-csi -mindepth 1 -maxdepth 1 -type d | while read -r dir; do
      DIR_NAME=$(basename "$dir")
      TIMESTAMP=$(date +%Y-%m-%d-%H-%M-%S)
      BACKUP_NAME="backup-${DIR_NAME}-${TIMESTAMP}.tar.gz"
      BACKUP_PATH="/tmp/${BACKUP_NAME}"
      echo "Backing up directory: $dir"
if [[ "$DIR_NAME" == *"pg"* ]] && [ -d "/data/local-csi/$DIR_NAME/pg_wal" ]; then
echo "检测到 PostgreSQL 数据目录,使用特殊处理..."
TEMP_DIR="/tmp/data/${TIMESTAMP}/"
mkdir -p "$TEMP_DIR/$DIR_NAME"
rsync -a "/data/local-csi/$DIR_NAME/" "$TEMP_DIR/$DIR_NAME/"
# 创建备份压缩包
tar -czf "$BACKUP_PATH" -C "$TEMP_DIR" "$DIR_NAME"
# 清理临时目录
rm -rf "$TEMP_DIR"
else
# tar -czf "$BACKUP_PATH" -C "/data/local-csi" "$DIR_NAME"
# 普通目录使用标准备份,但忽略文件变更警告
tar --warning=no-file-changed -czf "$BACKUP_PATH" -C "/data/local-csi" "$DIR_NAME" || {
echo "备份 $DIR_NAME 时出现警告,继续处理..."
}
fi
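      # Either branch leaves the finished archive at $BACKUP_PATH for upload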
      # Compute the upload destination
      BACKUP_TARGET_PATH="s3://${S3_BUCKET}/${S3_SUBPATH}/${HOSTNAME}/${DIR_NAME}/"
      echo "Uploading backup $BACKUP_PATH to S3 path: $BACKUP_TARGET_PATH"
      # Upload the backup archive
      s3cmd put "$BACKUP_PATH" "${BACKUP_TARGET_PATH}${BACKUP_NAME}"
      # Remove the local temporary archive
      rm "$BACKUP_PATH"
      # Prune old backups
      echo "Pruning old backups, keeping the newest $BACKUPS_TO_KEEP files"
      # List all backups oldest-first (columns 1-2 of s3cmd ls are date and time)
      BACKUP_FILES=$(s3cmd ls "${BACKUP_TARGET_PATH}" | sort -k1,2)
      BACKUP_COUNT=$(echo "$BACKUP_FILES" | grep -c "backup-${DIR_NAME}")
      if [ "$BACKUP_COUNT" -gt "$BACKUPS_TO_KEEP" ]; then
        # Number of files to delete, e.g. 9 backups with BACKUPS_TO_KEEP=7 deletes 2
        DELETE_COUNT=$((BACKUP_COUNT - BACKUPS_TO_KEEP))
        # Take the oldest entries; column 4 of s3cmd ls output is the object URL
        FILES_TO_DELETE=$(echo "$BACKUP_FILES" | head -n "$DELETE_COUNT" | awk '{print $4}')
        # Delete them
        for FILE in $FILES_TO_DELETE; do
          echo "Deleting old backup: $FILE"
          s3cmd rm "$FILE"
        done
      fi
    done
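    # At this point every subdirectory has been archived, uploaded, and pruned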
    END_TIME=$(date +%s)
    echo "Backup finished"
    DURATION=$((END_TIME - START_TIME))
    # HOSTNAME falls back to the node's hostname when NODE_NAME is unset
    MSG_TITLE="✅ [K3s Backup] $HOSTNAME Backup Success"
    MSG_TEXT="Host: $HOSTNAME, Source: /data/local-csi, Source Size: $SOURCE_SIZE, Duration: ${DURATION}s"
    # Build the Feishu "post" message payload
    JSON_PAYLOAD=$(jq -n \
      --arg title "$MSG_TITLE" \
      --arg text "$MSG_TEXT" \
      '{msg_type: "post", content: {post: {zh_cn: {title: $title, content: [[{tag: "text", text: $text}]]}}}}')
    echo "Sending notification to Feishu..."
    curl -X POST -H "Content-Type: application/json" -d "$JSON_PAYLOAD" "$FEISHU_WEBHOOK_URL"
    echo "Notification sent."
  s3cmd-trigger.sh: |
    #!/bin/bash
    set -e
    # Check for the trigger marker file
    if [ -f "/tmp/backup-triggered" ]; then
      echo "Backup trigger marker found, running backup script..."
      bash /scripts/backup.sh
      rm /tmp/backup-triggered
      echo "Backup finished"
    fi
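
A hypothetical way to kick off an ad-hoc run, assuming the DaemonSet pods invoke s3cmd-trigger.sh periodically and the scripts are mounted at /scripts (both assumptions, not confirmed by this file):

    # Create the marker file inside a backup pod; the next trigger pass runs backup.sh
    kubectl -n backup-system exec <backup-pod-name> -- touch /tmp/backup-triggered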