remove daemonset+cronjob-aliyun-gz

songtianlun 2025-04-26 11:08:23 +08:00
parent 31c0176813
commit 701bdfa398
7 changed files with 0 additions and 362 deletions


@@ -1,43 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: backup-config
  namespace: backup-system
data:
  subpath: "nodes"
  backups-to-keep: "3"
  use-https: "True"
  signature-v2: "False" # Set to "True" if the S3 service requires V2 signatures
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: backup-service-account
  namespace: backup-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: backup-role
  namespace: backup-system
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["pods/exec"]
    verbs: ["create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: backup-role-binding
  namespace: backup-system
subjects:
  - kind: ServiceAccount
    name: backup-service-account
    namespace: backup-system
roleRef:
  kind: Role
  name: backup-role
  apiGroup: rbac.authorization.k8s.io
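A quick way to verify the Role and RoleBinding above, using nothing but kubectl impersonation (a hedged check, not part of the manifests): both commands should print "yes" once the objects are applied.

  kubectl auth can-i list pods -n backup-system \
    --as=system:serviceaccount:backup-system:backup-service-account
  kubectl auth can-i create pods --subresource=exec -n backup-system \
    --as=system:serviceaccount:backup-system:backup-service-account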


@@ -1,138 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: backup-script
  namespace: backup-system
data:
  backup.sh: |
    #!/bin/bash
    set -e

    # Read configuration from environment variables
    S3_ENDPOINT="${S3_ENDPOINT}"
    S3_ACCESS_KEY="${S3_ACCESS_KEY}"
    S3_SECRET_KEY="${S3_SECRET_KEY}"
    S3_BUCKET="${S3_BUCKET}"
    S3_SUBPATH="${S3_SUBPATH:-backups}"
    BACKUPS_TO_KEEP="${BACKUPS_TO_KEEP:-7}"

    # Get the hostname, preferring the node name injected via the Downward API
    HOSTNAME=$(hostname)
    HOSTNAME="${NODE_NAME:-$HOSTNAME}"

    FEISHU_WEBHOOK_URL=$(cat /etc/feishu-webhook/url)
    SOURCE_SIZE=$(du -sh /data/local-csi | cut -f1)
    START_TIME=$(date +%s)

    # Configure s3cmd
    cat > ~/.s3cfg << EOF
    [default]
    access_key = ${S3_ACCESS_KEY}
    secret_key = ${S3_SECRET_KEY}
    host_base = ${S3_ENDPOINT}
    host_bucket = ${S3_ENDPOINT}
    use_https = ${USE_HTTPS:-True}
    signature_v2 = ${SIGNATURE_V2:-False}
    EOF

    # Exit early if /data/local-csi does not exist
    if [ ! -d "/data/local-csi" ]; then
      echo "Directory /data/local-csi does not exist, skipping backup"
      exit 0
    fi

    # Check that the directory contains at least one subdirectory
    DIR_COUNT=$(find /data/local-csi -mindepth 1 -maxdepth 1 -type d | wc -l)
    if [ "$DIR_COUNT" -eq 0 ]; then
      echo "No subdirectories under /data/local-csi, skipping backup"
      # exit 0
    else
      # Iterate over every subdirectory
      find /data/local-csi -mindepth 1 -maxdepth 1 -type d | while read dir; do
        DIR_NAME=$(basename "$dir")
        TIMESTAMP=$(date +%Y-%m-%d-%H-%M-%S)
        BACKUP_NAME="backup-${DIR_NAME}-${TIMESTAMP}.tar.gz"
        BACKUP_PATH="/tmp/${BACKUP_NAME}"
        echo "Backing up directory: $dir"
        if [[ "$DIR_NAME" == *"pg"* ]] && [ -d "/data/local-csi/$DIR_NAME/pg_wal" ]; then
          echo "PostgreSQL data directory detected, using special handling..."
          TEMP_DIR="/tmp/data/${TIMESTAMP}/"
          mkdir -p "$TEMP_DIR/$DIR_NAME"
          rsync -a "/data/local-csi/$DIR_NAME/" "$TEMP_DIR/$DIR_NAME/"
          # Create the backup archive from the consistent copy
          tar -czf "$BACKUP_PATH" -C "$TEMP_DIR" "$DIR_NAME"
          # Clean up the temporary copy
          rm -rf "$TEMP_DIR"
        else
          # tar -czf "$BACKUP_PATH" -C "/data/local-csi" "$DIR_NAME"
          # Regular directories get a standard backup, ignoring file-changed warnings
          tar --warning=no-file-changed -czf "$BACKUP_PATH" -C "/data/local-csi" "$DIR_NAME" || {
            echo "Warnings while backing up $DIR_NAME, continuing..."
          }
        fi

        # Build the upload path
        BACKUP_TARGET_PATH="s3://${S3_BUCKET}/${S3_SUBPATH}/${HOSTNAME}/${DIR_NAME}/"
        echo "Uploading backup $BACKUP_PATH to S3 path: $BACKUP_TARGET_PATH"
        # Upload the backup archive
        s3cmd put "$BACKUP_PATH" "${BACKUP_TARGET_PATH}${BACKUP_NAME}"
        # Remove the local temporary archive
        rm "$BACKUP_PATH"

        # Prune old backups
        echo "Pruning old backups, keeping the newest $BACKUPS_TO_KEEP files"
        # List all backup files for this directory, sorted by date and time
        BACKUP_FILES=$(s3cmd ls "${BACKUP_TARGET_PATH}" | sort -k1,2)
        BACKUP_COUNT=$(echo "$BACKUP_FILES" | grep -c "backup-${DIR_NAME}")
        if [ "$BACKUP_COUNT" -gt "$BACKUPS_TO_KEEP" ]; then
          # Work out how many files have to be deleted
          DELETE_COUNT=$((BACKUP_COUNT - BACKUPS_TO_KEEP))
          # Collect the oldest files
          FILES_TO_DELETE=$(echo "$BACKUP_FILES" | head -n $DELETE_COUNT | awk '{print $4}')
          # Delete them
          for FILE in $FILES_TO_DELETE; do
            echo "Deleting old backup: $FILE"
            s3cmd rm "$FILE"
          done
        fi
      done
    fi

    END_TIME=$(date +%s)
    echo "Backup finished"
    DURATION=$((END_TIME - START_TIME))

    MSG_TITLE="✅ [K3s Backup] $NODE_NAME Backup Success"
    MSG_TEXT="Host: $NODE_NAME, Source: /data, Source Size: $SOURCE_SIZE, Duration: ${DURATION}s"
    JSON_PAYLOAD=$(jq -n \
      --arg title "$MSG_TITLE" \
      --arg text "$MSG_TEXT" \
      '{msg_type: "post", content: {post: {zh_cn: {title: $title, content: [[{tag: "text", text: $text}]]}}}}')
    echo "Sending notification to Feishu..."
    curl -X POST -H "Content-Type: application/json" -d "$JSON_PAYLOAD" "$FEISHU_WEBHOOK_URL"
    echo "Notification sent."
  s3cmd-trigger.sh: |
    #!/bin/bash
    set -e
    # Run the backup when the trigger marker exists, then clear it
    if [ -f "/tmp/backup-triggered" ]; then
      echo "Backup trigger marker found, running backup script..."
      bash /scripts/backup.sh
      rm /tmp/backup-triggered
      echo "Backup finished"
    fi
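Both scripts are mounted into every DaemonSet pod at /scripts, so the backup logic can be exercised on a single node without waiting for the CronJob. A hedged one-off run, picking the first matching pod purely for illustration:

  POD=$(kubectl get pods -n backup-system -l app=node-backup \
    -o jsonpath='{.items[0].metadata.name}')
  kubectl exec -it "$POD" -n backup-system -- bash /scripts/backup.sh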


@@ -1,33 +0,0 @@
apiVersion: batch/v1
kind: CronJob
metadata:
  name: node-backup-job
  namespace: backup-system
spec:
  # Run every day at 02:00
  schedule: "0 2 * * *"
  concurrencyPolicy: Forbid
  jobTemplate:
    spec:
      ttlSecondsAfterFinished: 86400 # Delete finished jobs after 1 day
      template:
        spec:
          serviceAccountName: backup-service-account
          nodeSelector:
            #kubernetes.io/hostname: "vkvm-us1"
            region: us
          containers:
            - name: backup-trigger
              image: bitnami/kubectl:latest
              command:
                - /bin/sh
                - -c
                - |
                  # Drop the trigger file into every backup pod
                  for pod in $(kubectl get pods -n backup-system -l app=node-backup -o jsonpath='{.items[*].metadata.name}'); do
                    echo "Triggering backup on pod $pod"
                    sleep 60
                    kubectl exec $pod -n backup-system -- touch /tmp/backup-triggered
                  done
                  echo "Backups triggered on all nodes"
          restartPolicy: OnFailure
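Note that this CronJob copies no data itself: it only plants /tmp/backup-triggered in each pod (sleeping 60 seconds before each one), and the DaemonSet's polling loop runs the actual backup within the next minute. The same trigger can be issued by hand for a single node, for example (pod name is illustrative):

  kubectl exec <node-backup-pod> -n backup-system -- touch /tmp/backup-triggered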


@@ -1,120 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: node-backup-daemon
  namespace: backup-system
spec:
  selector:
    matchLabels:
      app: node-backup
  template:
    metadata:
      labels:
        app: node-backup
    spec:
      #nodeSelector:
      #  kubernetes.io/hostname: "vkvm-us1"
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/hostname
                    operator: NotIn
                    values:
                      - vkvm-us2
                      - aliyun-gz2
              # - matchExpressions:
              #     #- key: region
              #     #  operator: NotIn
              #     #  values:
              #     #    - cn
              #     - key: kubernetes.io/hostname
              #       operator: In
              #       values:
              #         - zgocloud-us1
      containers:
        - name: backup-container
          image: alpine:latest
          imagePullPolicy: IfNotPresent
          command:
            - /bin/sh
            - -c
            - |
              # Install the required tools
              apk add --no-cache bash s3cmd tar rsync curl jq
              # Poll for the trigger file once a minute
              while true; do
                bash /scripts/s3cmd-trigger.sh
                date
                sleep 60
              done
          env:
            - name: NODE_NAME # Node name of the Pod, injected via the Downward API
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: S3_ENDPOINT
              valueFrom:
                secretKeyRef:
                  name: s3-credentials
                  key: endpoint
            - name: S3_ACCESS_KEY
              valueFrom:
                secretKeyRef:
                  name: s3-credentials
                  key: access-key
            - name: S3_SECRET_KEY
              valueFrom:
                secretKeyRef:
                  name: s3-credentials
                  key: secret-key
            - name: S3_BUCKET
              valueFrom:
                secretKeyRef:
                  name: s3-credentials
                  key: bucket
            - name: S3_SUBPATH
              valueFrom:
                configMapKeyRef:
                  name: backup-config
                  key: subpath
                  optional: true
            - name: BACKUPS_TO_KEEP
              valueFrom:
                configMapKeyRef:
                  name: backup-config
                  key: backups-to-keep
                  optional: true
            - name: USE_HTTPS
              valueFrom:
                configMapKeyRef:
                  name: backup-config
                  key: use-https
                  optional: true
            - name: SIGNATURE_V2
              valueFrom:
                configMapKeyRef:
                  name: backup-config
                  key: signature-v2
                  optional: true
          volumeMounts:
            - name: host-data
              mountPath: /data
            - name: scripts
              mountPath: /scripts
            - name: feishu-webhook-volume
              mountPath: /etc/feishu-webhook # Mount for the Feishu webhook Secret
              readOnly: true
      volumes:
        - name: host-data
          hostPath:
            path: /data
        - name: scripts
          configMap:
            name: backup-script
            defaultMode: 0755
        - name: feishu-webhook-volume
          secret:
            secretName: feishu-webhook
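Each pod installs its tooling with apk at startup and then checks for the trigger file once a minute. A hedged health check that the DaemonSet landed on the intended nodes and is polling:

  kubectl get pods -n backup-system -l app=node-backup -o wide
  kubectl logs -n backup-system -l app=node-backup --tail=20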


@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: backup-system


@@ -1,9 +0,0 @@
apiVersion: v1
data:
  # https://open.feishu.cn/open-apis/bot/v2/hook/f2a8d634-6a90-4f86-ac2f-ef6a53dbd680
  url: aHR0cHM6Ly9vcGVuLmZlaXNodS5jbi9vcGVuLWFwaXMvYm90L3YyL2hvb2svZjJhOGQ2MzQtNmE5MC00Zjg2LWFjMmYtZWY2YTUzZGJkNjgw
kind: Secret
metadata:
  name: feishu-webhook
  namespace: backup-system
type: Opaque


@@ -1,15 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
  name: s3-credentials
  namespace: backup-system
type: Opaque
data:
  # Replace these values with their base64-encoded equivalents
  endpoint: aHR0cHM6Ly9hcGkubWluaW8uc2t5Ynl0ZS5tZQ== # https://api.minio.skybyte.me
  access-key: RVZuWFViR2xld2t0dFF0em9XUWs= # EVnXUbGlewkttQtzoWQk
  secret-key: THNxVFRmc0VEVzBFY3Buc09aOUxyTnhwc21zajdIMGxlR2R0WHBwRg== # LsqTTfsEDW0EcpnsOZ9LrNxpsmsj7H0leGdtXppF
  bucket: YmFja3Vwcw== # backups
  # https://open.feishu.cn/open-apis/bot/v2/hook/f2a8d634-6a90-4f86-ac2f-ef6a53dbd680
  #feishu-webhook: aHR0cHM6Ly9vcGVuLmZlaXNodS5jbi9vcGVuLWFwaXMvYm90L3YyL2hvb2svZjJhOGQ2MzQtNmE5MC00Zjg2LWFjMmYtZWY2YTUzZGJkNjgw
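The values in this Secret are only base64-encoded, not encrypted. Assuming the coreutils base64 tool is available, replacement values can be produced directly, or the whole Secret regenerated with kubectl; the access-key/secret-key placeholders below are illustrative:

  echo -n 'https://api.minio.skybyte.me' | base64
  kubectl create secret generic s3-credentials -n backup-system \
    --from-literal=endpoint='https://api.minio.skybyte.me' \
    --from-literal=access-key='<ACCESS_KEY>' \
    --from-literal=secret-key='<SECRET_KEY>' \
    --from-literal=bucket='backups' \
    --dry-run=client -o yaml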