manifests/backups/daemonset+cronjob/cm-script.yaml
songtianlun e5e9bfd9f3 fix: update backup script for better directory handling
- Change TEMP_DIR to use a more structured temporary path
- Adjust rsync command to reflect the new directory structure
- Improve MSG_TEXT formatting for better clarity
- Add 'jq' to the dependencies for JSON processing

These changes address issues with the previous temporary directory location and enhance the output messages for a more informative backup success notification.
2025-04-22 15:48:17 +08:00

apiVersion: v1
kind: ConfigMap
metadata:
  name: backup-script
  namespace: backup-system
data:
  backup.sh: |
    #!/bin/bash
    set -e

    # Read configuration from environment variables
    S3_ENDPOINT="${S3_ENDPOINT}"
    S3_ACCESS_KEY="${S3_ACCESS_KEY}"
    S3_SECRET_KEY="${S3_SECRET_KEY}"
    S3_BUCKET="${S3_BUCKET}"
    S3_SUBPATH="${S3_SUBPATH:-backups}"
    BACKUPS_TO_KEEP="${BACKUPS_TO_KEEP:-7}"

    # Resolve the host name, preferring NODE_NAME when it is set
    HOSTNAME=$(hostname)
    HOSTNAME="${NODE_NAME:-$HOSTNAME}"
    FEISHU_WEBHOOK_URL=$(cat /etc/feishu-webhook/url)
    START_TIME=$(date +%s)

    # Configure s3cmd
    cat > ~/.s3cfg << EOF
    [default]
    access_key = ${S3_ACCESS_KEY}
    secret_key = ${S3_SECRET_KEY}
    host_base = ${S3_ENDPOINT}
    host_bucket = ${S3_ENDPOINT}
    use_https = ${USE_HTTPS:-True}
    signature_v2 = ${SIGNATURE_V2:-False}
    EOF
    # Skip the backup if the /data/local-csi path does not exist
    if [ ! -d "/data/local-csi" ]; then
      echo "Directory /data/local-csi does not exist, skipping backup"
      exit 0
    fi

    # Skip the backup if the directory has no subdirectories
    DIR_COUNT=$(find /data/local-csi -mindepth 1 -maxdepth 1 -type d | wc -l)
    if [ "$DIR_COUNT" -eq 0 ]; then
      echo "No subdirectories in /data/local-csi, skipping backup"
      exit 0
    fi
    # Measure the total source size up front: the piped while-loop below runs
    # in a subshell, so variables set inside it would not survive past "done"
    SOURCE_SIZE=$(du -sh /data/local-csi | cut -f1)

    # Iterate over every top-level directory under /data/local-csi
    find /data/local-csi -mindepth 1 -maxdepth 1 -type d | while read -r dir; do
      DIR_NAME=$(basename "$dir")
      TIMESTAMP=$(date +%Y-%m-%d-%H-%M-%S)
      BACKUP_NAME="backup-${DIR_NAME}-${TIMESTAMP}.tar.gz"
      TEMP_DIR="/tmp/data/${TIMESTAMP}/"
      BACKUP_PATH="/tmp/${BACKUP_NAME}"
      echo "Backing up directory: $dir"
mkdir -p "$TEMP_DIR/$DIR_NAME"
rsync -a "/data/local-csi/$DIR_NAME/" "$TEMP_DIR/$DIR_NAME/"
# 创建备份压缩包
# tar -czf "$BACKUP_PATH" -C "/data/local-csi" "$DIR_NAME"
tar -czf "$BACKUP_PATH" -C "$TEMP_DIR" "$DIR_NAME"
# 清理临时目录
rm -rf "$TEMP_DIR"
# 计算上传路径
BACKUP_TARGET_PATH="s3://${S3_BUCKET}/${S3_SUBPATH}/${HOSTNAME}/${DIR_NAME}/"
echo "上传备份 $BACKUP_PATH 到 S3 路径: $BACKUP_TARGET_PATH"
# 上传备份文件
s3cmd put "$BACKUP_PATH" "${BACKUP_TARGET_PATH}${BACKUP_NAME}"
# 删除本地临时备份
rm "$BACKUP_PATH"
      # Prune old backups
      echo "Pruning old backups, keeping the newest $BACKUPS_TO_KEEP files"
      # List all backup files, sorted oldest-first by their timestamp
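      # s3cmd ls lines begin with "DATE TIME", so sorting on fields 1-2 puts
      # the oldest backups first; head then picks the oldest for deletion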
      BACKUP_FILES=$(s3cmd ls "${BACKUP_TARGET_PATH}" | sort -k1,2)
      BACKUP_COUNT=$(echo "$BACKUP_FILES" | grep -c "backup-${DIR_NAME}")
      if [ "$BACKUP_COUNT" -gt "$BACKUPS_TO_KEEP" ]; then
        # Number of files that need to be deleted
        DELETE_COUNT=$((BACKUP_COUNT - BACKUPS_TO_KEEP))
        # The oldest DELETE_COUNT files; field 4 of s3cmd ls output is the URL
        FILES_TO_DELETE=$(echo "$BACKUP_FILES" | head -n "$DELETE_COUNT" | awk '{print $4}')
        # Delete them one by one
        for FILE in $FILES_TO_DELETE; do
          echo "Deleting old backup: $FILE"
          s3cmd rm "$FILE"
        done
      fi
    done
    END_TIME=$(date +%s)
    echo "Backup finished"
    DURATION=$((END_TIME - START_TIME))
    MSG_TITLE="✅ [K3s Backup] $HOSTNAME Backup Success"
    MSG_TEXT="Host: $HOSTNAME, Source: /data/local-csi, Source Size: $SOURCE_SIZE, Duration: ${DURATION}s"
    JSON_PAYLOAD=$(jq -n \
      --arg title "$MSG_TITLE" \
      --arg text "$MSG_TEXT" \
      '{msg_type: "post", content: {post: {zh_cn: {title: $title, content: [[{tag: "text", text: $text}]]}}}}')
    echo "Sending notification to Feishu..."
    curl -X POST -H "Content-Type: application/json" -d "$JSON_PAYLOAD" "$FEISHU_WEBHOOK_URL"
    echo "Notification sent."
  s3cmd-trigger.sh: |
    #!/bin/bash
    set -e
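    # Assumption based on the manifest path (daemonset+cronjob): the CronJob
    # side creates /tmp/backup-triggered to request a backup run on this node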
    # Check for the trigger marker file
    if [ -f "/tmp/backup-triggered" ]; then
      echo "Backup trigger marker found, running backup script..."
      bash /scripts/backup.sh
      rm /tmp/backup-triggered
      echo "Backup finished"
    fi