manifests/backups/daemonset+cronjob/cm-script.yaml
songtianlun 6d2acdcd1e feat: add backup system with cron and daemon
- Create a new namespace for the backup system
- Implement a cron job for scheduled backups
- Add a daemon set to handle backup tasks across nodes
- Introduce necessary service accounts, roles, and role bindings
- Include environment variable handling and configuration via secrets and config maps
- Wire up the marker-file trigger workflow that connects the scheduled job to the per-node daemon (see the sketch below)

This commit establishes a backup system that uses a cron job and a daemon set together to automate backups. It organizes the configuration and credentials needed for S3-compatible storage so that backups can be managed consistently across the selected nodes in the Kubernetes cluster.
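For context, a minimal sketch of how the trigger wiring might look. The service accounts, roles, and role bindings mentioned above suggest the cron job execs into each daemon set pod to drop the /tmp/backup-triggered marker that s3cmd-trigger.sh (in the ConfigMap below) checks for. The schedule, image, label selector, and ServiceAccount name here are illustrative assumptions, not part of this commit:

# Hypothetical companion CronJob -- schedule, image, pod label, and
# ServiceAccount name are assumptions; the RBAC objects from this commit
# would grant the pods/exec permission this needs.
apiVersion: batch/v1
kind: CronJob
metadata:
  name: backup-trigger
  namespace: backup-system
spec:
  schedule: "0 3 * * *"   # nightly, illustrative
  jobTemplate:
    spec:
      template:
        spec:
          serviceAccountName: backup-trigger
          restartPolicy: OnFailure
          containers:
            - name: trigger
              image: bitnami/kubectl:latest
              command:
                - /bin/sh
                - -c
                - |
                  # Drop the marker file in every backup DaemonSet pod
                  for pod in $(kubectl get pods -n backup-system \
                      -l app=backup-daemon -o name); do
                    kubectl exec -n backup-system "$pod" -- \
                      touch /tmp/backup-triggered
                  done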


apiVersion: v1
kind: ConfigMap
metadata:
  name: backup-script
  namespace: backup-system
data:
  backup.sh: |
    #!/bin/bash
    set -e
    # Read configuration from the environment
    S3_ENDPOINT="${S3_ENDPOINT}"
    S3_ACCESS_KEY="${S3_ACCESS_KEY}"
    S3_SECRET_KEY="${S3_SECRET_KEY}"
    S3_BUCKET="${S3_BUCKET}"
    S3_SUBPATH="${S3_SUBPATH:-backups}"
    BACKUPS_TO_KEEP="${BACKUPS_TO_KEEP:-7}"
    # Determine the host name, preferring NODE_NAME when set
    HOSTNAME="${NODE_NAME:-$(hostname)}"
    # Configure s3cmd
    cat > ~/.s3cfg << EOF
    [default]
    access_key = ${S3_ACCESS_KEY}
    secret_key = ${S3_SECRET_KEY}
    host_base = ${S3_ENDPOINT}
    host_bucket = ${S3_ENDPOINT}
    use_https = ${USE_HTTPS:-True}
    signature_v2 = ${SIGNATURE_V2:-False}
    EOF
    # Exit early if the backup source path does not exist
    if [ ! -d "/data/local-csi" ]; then
      echo "Directory /data/local-csi does not exist; skipping backup"
      exit 0
    fi
    # Exit early if the source has no subdirectories to back up
    DIR_COUNT=$(find /data/local-csi -mindepth 1 -maxdepth 1 -type d | wc -l)
    if [ "$DIR_COUNT" -eq 0 ]; then
      echo "Directory /data/local-csi contains no subdirectories; skipping backup"
      exit 0
    fi
    # Back up every top-level subdirectory
    find /data/local-csi -mindepth 1 -maxdepth 1 -type d | while read dir; do
      DIR_NAME=$(basename "$dir")
      TIMESTAMP=$(date +%Y-%m-%d-%H-%M-%S)
      BACKUP_NAME="backup-${DIR_NAME}-${TIMESTAMP}.tar.gz"
      TEMP_DIR=$(mktemp -d)
      BACKUP_PATH="/tmp/${BACKUP_NAME}"
      echo "Backing up directory: $dir"
      # Snapshot into a temporary directory so the archive is consistent
      rsync -a "/data/local-csi/$DIR_NAME/" "$TEMP_DIR/$DIR_NAME/"
      # Create the compressed archive from the snapshot, preserving the directory name
      tar -czf "$BACKUP_PATH" -C "$TEMP_DIR" "$DIR_NAME"
      # Clean up the temporary directory
      rm -rf "$TEMP_DIR"
      # Compute the upload path
      BACKUP_TARGET_PATH="s3://${S3_BUCKET}/${S3_SUBPATH}/${HOSTNAME}/${DIR_NAME}/"
      echo "Uploading backup $BACKUP_PATH to S3 path: $BACKUP_TARGET_PATH"
      # Upload the backup file
      s3cmd put "$BACKUP_PATH" "${BACKUP_TARGET_PATH}${BACKUP_NAME}"
      # Remove the local temporary archive
      rm "$BACKUP_PATH"
      # Prune old backups
      echo "Pruning old backups; keeping the newest $BACKUPS_TO_KEEP files"
      # List all backups for this directory, sorted oldest first
      BACKUP_FILES=$(s3cmd ls "${BACKUP_TARGET_PATH}" | sort -k1,2)
      BACKUP_COUNT=$(echo "$BACKUP_FILES" | grep -c "backup-${DIR_NAME}")
      if [ "$BACKUP_COUNT" -gt "$BACKUPS_TO_KEEP" ]; then
        # Number of files that must be removed
        DELETE_COUNT=$((BACKUP_COUNT - BACKUPS_TO_KEEP))
        # The oldest entries sit at the top of the sorted listing
        FILES_TO_DELETE=$(echo "$BACKUP_FILES" | head -n $DELETE_COUNT | awk '{print $4}')
        for FILE in $FILES_TO_DELETE; do
          echo "Deleting old backup: $FILE"
          s3cmd rm "$FILE"
        done
      fi
    done
    echo "Backup complete"
  s3cmd-trigger.sh: |
    #!/bin/bash
    set -e
    # Run the backup when the trigger marker file is present
    if [ -f "/tmp/backup-triggered" ]; then
      echo "Backup trigger marker found; running backup script..."
      bash /scripts/backup.sh
      rm /tmp/backup-triggered
      echo "Backup complete"
    fi
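
Since s3cmd-trigger.sh checks for the marker once and exits, the DaemonSet container presumably invokes it in a loop. A minimal sketch of such a pod template fragment follows; the image name, Secret name, and polling interval are illustrative assumptions, not part of this file:

# Hypothetical DaemonSet pod template fragment -- image, Secret name, and
# polling interval are assumptions.
spec:
  containers:
    - name: backup
      image: example/backup-tools:latest   # assumed to ship bash, rsync, and s3cmd
      command:
        - /bin/bash
        - -c
        - |
          # Poll for the marker dropped by the trigger CronJob
          while true; do
            bash /scripts/s3cmd-trigger.sh
            sleep 30
          done
      env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName   # lets backup.sh prefix upload paths with the node name
      envFrom:
        - secretRef:
            name: backup-s3-credentials   # assumed Secret holding the S3_* variables
      volumeMounts:
        - name: scripts
          mountPath: /scripts
        - name: data
          mountPath: /data/local-csi
  volumes:
    - name: scripts
      configMap:
        name: backup-script
    - name: data
      hostPath:
        path: /data/local-csi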