first commit

songtianlun 2025-04-12 13:11:34 +08:00
commit 2bb098c6e3
254 changed files with 82627 additions and 0 deletions


@ -0,0 +1,14 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: alist-tgz1-pvc
namespace: alist
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2Gi
storageClassName: local-tgz1

26 alist/ingress.yaml Normal file

@ -0,0 +1,26 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: alist-ingress
namespace: alist
annotations:
cert-manager.io/cluster-issuer: "dnspod-cluster-issuer"
spec:
ingressClassName: nginx
tls:
- hosts:
- res.frytea.com
secretName: res-frytea-com-tls
rules:
- host: res.frytea.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: alist-svc
port:
name: web

52 alist/load.yaml Normal file

@ -0,0 +1,52 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: alist-sts
namespace: alist
labels:
app: alist
spec:
replicas: 1
selector:
matchLabels:
app: alist
template:
metadata:
annotations: {}
labels:
app: alist
spec:
nodeSelector:
# region: cn
kubernetes.io/hostname: claw-hk2
#affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/hostname
# operator: In
# values:
# - tencent-gz1
# - tencent-sh1
containers:
- name: alist
image: xhofe/alist:v3.42.0
ports:
- containerPort: 5244
name: web
env:
- name: PUID
value: "0"
- name: GUID
value: "0"
- name: UMASK
value: "022"
volumeMounts:
- name: alist-data
mountPath: /opt/alist/data
volumes:
- name: alist-data
persistentVolumeClaim:
claimName: alist-clhk2-pvc

14 alist/pvc-clhk2.yaml Normal file

@ -0,0 +1,14 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: alist-clhk2-pvc
namespace: alist
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2Gi
storageClassName: local-clhk2

14 alist/pvc.yaml Normal file

@ -0,0 +1,14 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: alist-longhorn-tcn-pvc
namespace: alist
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2Gi
storageClassName: longhorn-tcn

12 alist/service.yaml Normal file

@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: alist-svc
namespace: alist
spec:
selector:
app: alist
ports:
- name: web
port: 5244
targetPort: 5244
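
A quick way to reach this Service without going through the Ingress is a port-forward; a minimal sketch, assuming kubectl access to the cluster:
# forward local port 5244 to the alist Service, then browse http://localhost:5244
kubectl -n alist port-forward svc/alist-svc 5244:5244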

80 backups/cfg.yml Normal file

@ -0,0 +1,80 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: backup-script
namespace: backup-system
data:
backup.sh: |
#!/bin/bash
set -e
# Read the environment variables
MINIO_ENDPOINT="${MINIO_ENDPOINT}"
MINIO_ACCESS_KEY="${MINIO_ACCESS_KEY}"
MINIO_SECRET_KEY="${MINIO_SECRET_KEY}"
MINIO_BUCKET="${MINIO_BUCKET}"
MINIO_SUBPATH="${MINIO_SUBPATH:-backups}"
BACKUPS_TO_KEEP="${BACKUPS_TO_KEEP:-7}"
# Get the hostname
HOSTNAME=$(hostname)
# Check whether the /data/local-csi path exists
if [ ! -d "/data/local-csi" ]; then
echo "Directory /data/local-csi does not exist, skipping backup"
exit 0
fi
# Check that the directory contains at least one subdirectory
DIR_COUNT=$(find /data/local-csi -mindepth 1 -maxdepth 1 -type d | wc -l)
if [ "$DIR_COUNT" -eq 0 ]; then
echo "Directory /data/local-csi contains no subdirectories, skipping backup"
exit 0
fi
# Configure the MinIO client
mc alias set local-minio $MINIO_ENDPOINT $MINIO_ACCESS_KEY $MINIO_SECRET_KEY
# Iterate over all top-level directories
find /data/local-csi -mindepth 1 -maxdepth 1 -type d | while read dir; do
DIR_NAME=$(basename "$dir")
TIMESTAMP=$(date +%Y-%m-%d-%H-%M-%S)
BACKUP_NAME="backup-${DIR_NAME}-${TIMESTAMP}.tar.gz"
BACKUP_PATH="/tmp/${BACKUP_NAME}"
echo "正在备份目录: $dir"
# 创建备份压缩包
tar -czf "$BACKUP_PATH" -C "/data/local-csi" "$DIR_NAME"
# 上传到 MinIO
BACKUP_TARGET_PATH="${MINIO_SUBPATH}/${HOSTNAME}/${DIR_NAME}/"
echo "上传备份 $BACKUP_PATH 到 MinIO 路径: $BACKUP_TARGET_PATH"
# 确保目标目录存在
mc ls "local-minio/${MINIO_BUCKET}/${BACKUP_TARGET_PATH}" > /dev/null 2>&1 || \
mc mb -p "local-minio/${MINIO_BUCKET}/${BACKUP_TARGET_PATH}"
# Upload the backup file
mc cp "$BACKUP_PATH" "local-minio/${MINIO_BUCKET}/${BACKUP_TARGET_PATH}${BACKUP_NAME}"
# Remove the local temporary backup
rm "$BACKUP_PATH"
# Clean up old backups
echo "Cleaning up old backups, keeping the newest $BACKUPS_TO_KEEP files"
BACKUP_COUNT=$(mc ls "local-minio/${MINIO_BUCKET}/${BACKUP_TARGET_PATH}" | wc -l)
if [ "$BACKUP_COUNT" -gt "$BACKUPS_TO_KEEP" ]; then
# List the backups sorted by time and delete the oldest ones
mc ls --json "local-minio/${MINIO_BUCKET}/${BACKUP_TARGET_PATH}" | \
jq -r '. | select(.type=="file") | [.key, .lastModified] | @tsv' | \
sort -k2 | head -n $(($BACKUP_COUNT - $BACKUPS_TO_KEEP)) | \
while read file_info; do
FILE=$(echo "$file_info" | cut -f1)
echo "删除旧备份: $FILE"
mc rm "local-minio/${MINIO_BUCKET}/${BACKUP_TARGET_PATH}${FILE}"
done
fi
done
echo "备份完成"

173 backups/load.yml Normal file

@ -0,0 +1,173 @@
apiVersion: v1
kind: Namespace
metadata:
name: backup-system
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: node-backup-job
namespace: backup-system
spec:
# Run every day at 02:00
schedule: "0 2 * * *"
concurrencyPolicy: Forbid
jobTemplate:
spec:
ttlSecondsAfterFinished: 86400 # remove finished jobs after 1 day
template:
spec:
serviceAccountName: backup-service-account
nodeSelector:
kubernetes.io/hostname: "vkvm-us1"
containers:
- name: backup-trigger
image: bitnami/kubectl:latest
command:
- /bin/sh
- -c
- |
kubectl label daemonset/node-backup-daemon trigger-backup=true --overwrite -n backup-system && \
sleep 60 && \
kubectl label daemonset/node-backup-daemon trigger-backup- -n backup-system
restartPolicy: OnFailure
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: node-backup-daemon
namespace: backup-system
spec:
selector:
matchLabels:
app: node-backup
template:
metadata:
labels:
app: node-backup
spec:
nodeSelector:
kubernetes.io/hostname: "vkvm-us1"
containers:
- name: backup-container
image: minio/mc:latest
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
- |
# Install the required tools
apk add --no-cache jq bash findutils tar curl
# Wait for the backup trigger
while true; do
if [ "$(curl -s -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \
https://kubernetes.default.svc/apis/apps/v1/namespaces/backup-system/daemonsets/node-backup-daemon \
--cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \
-X GET | jq -r '.metadata.labels["trigger-backup"]')" = "true" ]; then
echo "备份触发,执行备份脚本..."
bash /scripts/backup.sh
echo "备份完成"
fi
# 每分钟检查一次
sleep 60
done
env:
- name: MINIO_ENDPOINT
valueFrom:
secretKeyRef:
name: minio-credentials
key: endpoint
- name: MINIO_ACCESS_KEY
valueFrom:
secretKeyRef:
name: minio-credentials
key: access-key
- name: MINIO_SECRET_KEY
valueFrom:
secretKeyRef:
name: minio-credentials
key: secret-key
- name: MINIO_BUCKET
valueFrom:
secretKeyRef:
name: minio-credentials
key: bucket
- name: MINIO_SUBPATH
valueFrom:
configMapKeyRef:
name: backup-config
key: subpath
optional: true
- name: BACKUPS_TO_KEEP
valueFrom:
configMapKeyRef:
name: backup-config
key: backups-to-keep
optional: true
volumeMounts:
- name: host-data
mountPath: /data
- name: scripts
mountPath: /scripts
volumes:
- name: host-data
hostPath:
path: /data
- name: scripts
configMap:
name: backup-script
defaultMode: 0755
---
apiVersion: v1
kind: Secret
metadata:
name: minio-credentials
namespace: backup-system
type: Opaque
data:
# These values must be replaced with their base64-encoded equivalents
endpoint: aHR0cHM6Ly9hcGkubWluaW8uc2t5Ynl0ZS5tZQ== # https://api.minio.skybyte.me
access-key: RVZuWFViR2xld2t0dFF0em9XUWs= # EVnXUbGlewkttQtzoWQk
secret-key: THNxVFRmc0VEVzBFY3Buc09aOUxyTnhwc21zajdIMGxlR2R0WHBwRg== # LsqTTfsEDW0EcpnsOZ9LrNxpsmsj7H0leGdtXppF
bucket: YmFja3Vwcw== # backups
---
apiVersion: v1
kind: ConfigMap
metadata:
name: backup-config
namespace: backup-system
data:
subpath: "backups"
backups-to-keep: "3"
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: backup-service-account
namespace: backup-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: backup-role
namespace: backup-system
rules:
- apiGroups: ["apps"]
resources: ["daemonsets"]
verbs: ["get", "patch", "update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: backup-role-binding
namespace: backup-system
subjects:
- kind: ServiceAccount
name: backup-service-account
namespace: backup-system
roleRef:
kind: Role
name: backup-role
apiGroup: rbac.authorization.k8s.io
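
The minio-credentials values above are base64-encoded, and a backup run can also be triggered outside the CronJob schedule with the same label the job applies; a minimal sketch, assuming a POSIX shell and kubectl access:
# encode a value for the Secret (-n avoids a trailing newline)
echo -n 'https://api.minio.skybyte.me' | base64
# trigger a backup run manually, then clear the label again
kubectl -n backup-system label daemonset/node-backup-daemon trigger-backup=true --overwrite
kubectl -n backup-system label daemonset/node-backup-daemon trigger-backup-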

25 bj-pfd2/ingress.yaml Normal file

@ -0,0 +1,25 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: app
namespace: bj-pfd2
annotations:
cert-manager.io/cluster-issuer: "cf-cluster-issuer"
spec:
ingressClassName: nginx
tls:
- hosts:
- bjpfd2.skybyte.me
secretName: bjpfd2-skybyte-me-tls
rules:
- host: bjpfd2.skybyte.me
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: app
port:
name: web

40 bj-pfd2/load.yaml Normal file

@ -0,0 +1,40 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: app
namespace: bj-pfd2
labels:
app: app
spec:
replicas: 1
selector:
matchLabels:
app: app
template:
metadata:
annotations: {}
labels:
app: app
spec:
#nodeSelector:
# region: us
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: region
operator: NotIn
values:
- cn
containers:
- name: app
image: songtianlun/bj-pfd2:latest
imagePullPolicy: Always
ports:
- containerPort: 6010
name: web
env:
- name: TZ
value: "Asia/Shanghai"

12 bj-pfd2/service.yaml Normal file

@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: app
namespace: bj-pfd2
spec:
selector:
app: app
ports:
- name: web
port: 6010
targetPort: 6010


@ -0,0 +1,8 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: ads-txt-config
namespace: blog
data:
ads.txt: |
google.com, pub-7296634171837358, DIRECT, f08c47fec0942fa0

14 blog/archive/pvc.yaml Normal file

@ -0,0 +1,14 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: typecho-nfs-aliyun-gz-pvc
namespace: blog
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
storageClassName: nfs-aliyun-gz


@ -0,0 +1,86 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: rss-data-local-agz2
namespace: blog
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Mi # adjust the storage size as needed
storageClassName: local-agz2
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: rss-to-listmonk
namespace: blog
spec:
schedule: "0 8 * * *" # 每天早上 8:00 执行
concurrencyPolicy: Forbid # 不允许并发执行
successfulJobsHistoryLimit: 3 # 保留 3 个成功的任务历史
failedJobsHistoryLimit: 1 # 保留 1 个失败的任务历史
jobTemplate:
spec:
template:
spec:
nodeSelector:
region: cn
dc: aliyun-gz
containers:
- name: rss-to-listmonk
image: alpine:latest
command:
- /bin/sh
- -c
- |
sed -i 's/dl-cdn.alpinelinux.org/mirrors.nju.edu.cn/g' /etc/apk/repositories
apk add --no-cache git make python3 py3-virtualenv
git config --global advice.detachedHead false
git clone https://${GITEA_TOKEN}@git.frytea.com/songtianlun/listmonk_RSS_to_mail.git /tmp/listmonk_RSS_to_mail
cd /tmp/listmonk_RSS_to_mail && make
env:
- name: RSS_URL
value: "https://frytea.com/feed/"
- name: LISTMONK_API_URL
value: "https://listmonk.frytea.com/api/campaigns"
- name: LISTMONK_TOKEN
valueFrom:
secretKeyRef:
name: listmonk-credentials
key: token
- name: LISTMONK_SEND_LIST_ID
value: "3"
- name: GITEA_TOKEN
valueFrom:
secretKeyRef:
name: gitea-credentials
key: access-token
volumeMounts:
- name: rss-data
mountPath: /data
volumes:
- name: rss-data
persistentVolumeClaim:
claimName: rss-data-local-agz2
restartPolicy: OnFailure
---
apiVersion: v1
kind: Secret
metadata:
name: gitea-credentials
namespace: blog
type: Opaque
data:
access-token: MTA0NmYzNTgwOTIzZTZmM2E4ZjkxNjA2ZWEwMTA4MzFlNDdjN2UxMA==
---
apiVersion: v1
kind: Secret
metadata:
name: listmonk-credentials
namespace: blog
type: Opaque
data:
token: Ym90Ok5qM3AyeXpGRnVjcFNSVDRNWk43TDV4MU9Ecm1RY0lp

48 blog/ingress.yaml Normal file

@ -0,0 +1,48 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: app
namespace: blog
annotations:
cert-manager.io/cluster-issuer: "dnspod-cluster-issuer"
spec:
ingressClassName: nginx
tls:
- hosts:
- frytea.com
- www.frytea.com
- blog.frytea.com
secretName: frytea-com-tls
rules:
- host: frytea.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: app
port:
name: web
- host: www.frytea.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: app
port:
name: web
- host: blog.frytea.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: app
port:
name: web

72 blog/load.yaml Normal file

@ -0,0 +1,72 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: app
namespace: blog
labels:
app: typecho
spec:
replicas: 1
selector:
matchLabels:
app: typecho
template:
metadata:
annotations: {}
labels:
app: typecho
spec:
nodeSelector:
#kubernetes.io/hostname: tencent-sh1
dc: aliyun-gz
containers:
- name: app
image: joyqi/typecho:1.2.1-php7.4-apache
ports:
- containerPort: 80
name: web
env:
- name: TYPECHO_DB_ADAPTER
value: "Pdo_Mysql"
- name: TYPECHO_DB_HOST
value: "172.26.12.22"
- name: TYPECHO_DB_PORT
value: "3306"
- name: TYPECHO_DB_USER
value: "frytea_com"
- name: TYPECHO_DB_PASSWORD
value: "XSKG27FynHZcQ4Xj"
- name: TYPECHO_DB_DATABASE
value: "frytea_com"
- name: TYPECHO_DB_CHARSET
value: "utf8mb4"
- name: TYPECHO_DB_PREFIX
value: "typecho_"
- name: TYPECHO_DB_ENGINE
value: "InnoDB"
- name: TYPECHO_DB_NEXT
value: "keep"
- name: TYPECHO_SITE_URL
value: "https://frytea.com"
- name: TYPECHO_INSTALL
value: "1"
#- name: PHP_MAX_EXECUTION_TIME
# value: "600"
#- name: PHP_TZ
# value: "Asia/Shanghai"
- name: TIMEZONE
value: "Asia/Shanghai"
volumeMounts:
- name: typecho-data
mountPath: /app/usr
- name: ads-txt
mountPath: /app/ads.txt
subPath: ads.txt
volumes:
- name: typecho-data
persistentVolumeClaim:
claimName: typecho-local-agz2-pvc
- name: ads-txt
configMap:
name: ads-txt-config

14 blog/pvc.yaml Normal file

@ -0,0 +1,14 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: typecho-local-agz2-pvc
namespace: blog
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
storageClassName: local-agz2

12 blog/service.yaml Normal file

@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: app
namespace: blog
spec:
selector:
app: typecho
ports:
- name: web
port: 80
targetPort: 80

26 busuanzi/ingress.yaml Normal file

@ -0,0 +1,26 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: busuanzi-ingress
namespace: busuanzi
annotations:
cert-manager.io/cluster-issuer: "dnspod-cluster-issuer"
spec:
ingressClassName: nginx
tls:
- hosts:
- busuanzi.frytea.com
secretName: busuanzi-frytea-com-tls
rules:
- host: busuanzi.frytea.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: app
port:
name: web

82 busuanzi/load.yaml Normal file

@ -0,0 +1,82 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: redis-sts
namespace: busuanzi
labels:
app: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
annotations: {}
labels:
app: redis
spec:
nodeSelector:
kubernetes.io/hostname: tencent-gz1
containers:
- name: redis
image: redis:alpine
ports:
- containerPort: 6379
name: redis
volumeMounts:
- name: busuanzi-redis-data
mountPath: /data
volumes:
- name: busuanzi-redis-data
persistentVolumeClaim:
claimName: busuanzi-redis-tgz1-pvc
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: app
namespace: busuanzi
labels:
app: busuanzi
spec:
replicas: 1
selector:
matchLabels:
app: busuanzi
template:
metadata:
annotations: {}
labels:
app: busuanzi
spec:
nodeSelector:
region: "cn"
# kubernetes.io/hostname: tencent-gz1
containers:
- name: app
image: xcsoft/busuanzi:latest
ports:
- containerPort: 8080
name: web
env:
- name: WEB_LOG
value: "true"
- name: WEB_DEBUG
value: "false"
- name: WEB_CORS
value: "*"
- name: BSZ_EXPIRE
value: "0"
- name: BSZ_SECRET
value: "Ivk7LQ5LTkRAZguEDn3IeVzvfv6pxO"
- name: API_SERVER
value: "https://busuanzi.frytea.com"
- name: REDIS_ADDRESS
value: "redis.busuanzi.svc.cluster.local:6379"
- name: BSZ_PATHSTYLE
value: "true"
- name: BSZ_ENCRYPT
value: "MD516"

14 busuanzi/pvc.yaml Normal file

@ -0,0 +1,14 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: busuanzi-redis-tgz1-pvc
namespace: busuanzi
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2Gi
storageClassName: local-tgz1

26 busuanzi/service.yaml Normal file

@ -0,0 +1,26 @@
apiVersion: v1
kind: Service
metadata:
name: redis
namespace: busuanzi
spec:
selector:
app: redis
ports:
- name: redis
port: 6379
targetPort: 6379
---
apiVersion: v1
kind: Service
metadata:
name: app
namespace: busuanzi
spec:
selector:
app: busuanzi
ports:
- name: web
port: 8080
targetPort: 8080


@ -0,0 +1,44 @@
apiVersion: v1
data:
app.conf: |
appname = casdoor
httpport = 8000
runmode = prod
copyrequestbody = true
driverName = mysql
#dataSourceName = casdoor:1LIHGK143gCqR3xd@tcp(172.26.12.22:3306)/
dataSourceName = casdoor:1LIHGK143gCqR3xd@tcp(8.134.120.243:3306)/
dbName = casdoor
tableNamePrefix =
showSql = false
redisEndpoint =
defaultStorageProvider =
isCloudIntranet = false
authState = "casdoor"
#socks5Proxy = "127.0.0.1:10808"
verificationCodeTimeout = 10
initScore = 0
logPostOnly = true
isUsernameLowered = false
origin = "https://casdoor.frytea.com"
originFrontend =
staticBaseUrl = "https://cdn.casbin.org"
isDemoMode = false
batchSize = 100
enableErrorMask = false
enableGzip = true
inactiveTimeoutMinutes =
ldapServerPort = 389
radiusServerPort = 1812
radiusSecret = "secret"
quota = {"organization": -1, "user": -1, "application": -1, "provider": -1}
logConfig = {"filename": "logs/casdoor.log", "maxdays":99999, "perm":"0770"}
initDataNewOnly = false
initDataFile = "./init_data.json"
frontendBaseDir = "../cc_0"
kind: ConfigMap
metadata:
labels:
io.kompose.service: casdoor
name: casdoor-cm0
namespace: casdoor


@ -0,0 +1,54 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
kompose.cmd: kompose convert -f compose.yaml -n casdoor --controller statefulset
kompose.version: 1.34.0 (cbf2835db)
labels:
io.kompose.service: casdoor
name: casdoor
namespace: casdoor
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: casdoor
#serviceName: casdoor
template:
metadata:
labels:
io.kompose.service: casdoor
spec:
nodeSelector:
region: cn
#dc: "aliyun-gz"
#kubernetes.io/hostname: aliyun-gz2
containers:
- command:
- /bin/sh
- -c
- ./server --createDatabase=true
env:
- name: RUNNING_IN_DOCKER
value: "true"
image: casbin/casdoor:latest
name: casdoor
ports:
- containerPort: 8000
protocol: TCP
volumeMounts:
- mountPath: /conf
name: casdoor-cm0
#- mountPath: /logs
# name: casdoor-logs
volumes:
- configMap:
items:
- key: app.conf
path: app.conf
name: casdoor-cm0
name: casdoor-cm0
#- name: casdoor-logs
# persistentVolumeClaim:
# claimName: logs-agz2-pvc
restartPolicy: Always


@ -0,0 +1,5 @@
apiVersion: v1
kind: Namespace
metadata:
name: casdoor
namespace: casdoor


@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
annotations:
kompose.cmd: kompose convert -f compose.yaml -n casdoor --controller statefulset
kompose.version: 1.34.0 (cbf2835db)
labels:
io.kompose.service: casdoor
name: casdoor
namespace: casdoor
spec:
clusterIP: None
ports:
- name: web
port: 8000
targetPort: 8000
selector:
io.kompose.service: casdoor
type: ClusterIP


@ -0,0 +1,53 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
annotations:
kompose.cmd: kompose convert -f compose.yaml -n casdoor --controller statefulset
kompose.version: 1.34.0 (cbf2835db)
labels:
io.kompose.service: casdoor
name: casdoor
namespace: casdoor
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: casdoor
serviceName: casdoor
template:
metadata:
labels:
io.kompose.service: casdoor
spec:
nodeSelector:
dc: "aliyun-gz"
kubernetes.io/hostname: aliyun-gz2
containers:
- command:
- /bin/sh
- -c
- ./server --createDatabase=true
env:
- name: RUNNING_IN_DOCKER
value: "true"
image: casbin/casdoor:latest
name: casdoor
ports:
- containerPort: 8000
protocol: TCP
volumeMounts:
- mountPath: /conf
name: casdoor-cm0
- mountPath: /logs
name: casdoor-logs
volumes:
- configMap:
items:
- key: app.conf
path: app.conf
name: casdoor-cm0
name: casdoor-cm0
- name: casdoor-logs
persistentVolumeClaim:
claimName: logs-agz2-pvc
restartPolicy: Always

25 casdoor/ingress.yaml Normal file

@ -0,0 +1,25 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: app
namespace: casdoor
annotations:
cert-manager.io/cluster-issuer: "dnspod-cluster-issuer"
spec:
ingressClassName: nginx
tls:
- hosts:
- casdoor.frytea.com
secretName: casdoor-frytea-com-tls
rules:
- host: casdoor.frytea.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: casdoor
port:
name: web

14 casdoor/pvc.yaml Normal file

@ -0,0 +1,14 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: logs-agz2-pvc
namespace: casdoor
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: local-agz2

@ -0,0 +1 @@
Subproject commit 83f14a4d2509476ce74ba3612ff16403fd59ff7d

13286 cert-manager/cert-manager.yaml Normal file

File diff suppressed because it is too large


@ -0,0 +1,20 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: cf-cluster-issuer
spec:
acme:
email: "ca@frytea.com"
preferredChain: ""
privateKeySecretRef:
name: cf-cluster-issuer-key
#server: https://acme-staging-v02.api.letsencrypt.org/directory
server: https://acme-v02.api.letsencrypt.org/directory
solvers:
- dns01:
cloudflare:
email: songtianlun@frytea.com
apiTokenSecretRef:
name: cf-api-token-secret
key: api-token


@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: cf-api-token-secret
namespace: cert-manager
type: Opaque
stringData:
api-token: FL28s_qAJsI0N9w6glURN_bxOeOCkX9-2jeLTtRt

12 cert-manager/demo.yaml Normal file

@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: test2-tsh1-frytea-com-tls
namespace: default
spec:
dnsNames:
- test2.tsh1.frytea.com # domain to issue the certificate for
issuerRef:
kind: ClusterIssuer
name: dnspod-cluster-issuer # reference the ClusterIssuer; validation is done via dns01
secretName: test2-tsh1-frytea-com-tls # the issued certificate is stored in this Secret
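
To check whether the test certificate was actually issued, something like the following should work, assuming kubectl access and cert-manager installed in the cluster:
kubectl -n default get certificate test2-tsh1-frytea-com-tls
kubectl -n default describe certificate test2-tsh1-frytea-com-tls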


@ -0,0 +1,23 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: dnspod-dev-cluster-issuer
spec:
acme:
email: "ca@frytea.com"
preferredChain: ""
privateKeySecretRef:
name: dnspod-cluster-issuer-key
server: https://acme-staging-v02.api.letsencrypt.org/directory
#server: https://acme-v02.api.letsencrypt.org/directory
solvers:
- dns01:
webhook:
config:
secretId: AKIDFMGwRjnjSyIPK1VlqkSwEKK7h4FLNSM3
secretKeyRef:
key: secret-key
name: dnspod-secret
ttl: 600
groupName: acme.imroc.cc
solverName: dnspod


@ -0,0 +1,23 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: dnspod-cluster-issuer
spec:
acme:
email: "ca@frytea.com"
preferredChain: ""
privateKeySecretRef:
name: dnspod-cluster-issuer-key
#server: https://acme-staging-v02.api.letsencrypt.org/directory
server: https://acme-v02.api.letsencrypt.org/directory
solvers:
- dns01:
webhook:
config:
secretId: AKIDFMGwRjnjSyIPK1VlqkSwEKK7h4FLNSM3
secretKeyRef:
key: secret-key
name: dnspod-secret
ttl: 600
groupName: acme.imroc.cc
solverName: dnspod


@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
name: dnspod-secret
namespace: cert-manager
type: Opaque
stringData:
secret-key: WozxhDVbWDgy2tWv87sPkstgFcljaHl3

1476 cert-manager/values.yaml Normal file

File diff suppressed because it is too large

92 cgs/ingress.yaml Normal file

@ -0,0 +1,92 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: app
namespace: cgs
annotations:
cert-manager.io/cluster-issuer: "cf-cluster-issuer"
spec:
ingressClassName: nginx
tls:
- hosts:
- cgs1.skybyte.me
- cgs2.skybyte.me
- chat1.vkus1.skybyte.me
- chat1.cchk1.skybyte.me
- chat1.lnf2.skybyte.me
- chat2.lnf2.skybyte.me
- chat1.clhk1.skybyte.me
secretName: cgs1-skybyte-me-tls
rules:
- host: cgs1.skybyte.me
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: app
port:
name: web
- host: cgs2.skybyte.me
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: app
port:
name: web
- host: chat1.vkus1.skybyte.me
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: app
port:
name: web
- host: chat1.cchk1.skybyte.me
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: app
port:
name: web
- host: chat1.lnf2.skybyte.me
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: app
port:
name: web
- host: chat2.lnf2.skybyte.me
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: app
port:
name: web
- host: chat1.clhk1.skybyte.me
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: app
port:
name: web

50 cgs/load.yaml Normal file

@ -0,0 +1,50 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: app
namespace: cgs
labels:
app: cgs
spec:
replicas: 5
selector:
matchLabels:
app: cgs
template:
metadata:
annotations: {}
labels:
app: cgs
spec:
#nodeSelector:
# region: us
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: region
operator: NotIn
values:
- cn
- key: kubernetes.io/hostname
operator: NotIn
values:
- claw-hk2
containers:
- name: app
image: songtianlun/chatgpt-web:latest
imagePullPolicy: Always
ports:
- containerPort: 3002
name: web
env:
- name: OPENAI_API_BASE_URL
value: "https://www.gptapi.us"
- name: OPENAI_API_KEY
value: "sk-ctwJWUefglo8FVeY54A7FeDe86834e728e0cCc3c0f5071D0"
- name: OPENAI_API_MODEL
value: "gpt-4o-mini"
- name: TZ
value: "Asia/Shanghai"

12 cgs/service.yaml Normal file

@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: app
namespace: cgs
spec:
selector:
app: cgs
ports:
- name: web
port: 3002
targetPort: 3002


@ -0,0 +1,25 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: app
namespace: chatgpt-web-midjourney-proxy
annotations:
cert-manager.io/cluster-issuer: "cf-cluster-issuer"
spec:
ingressClassName: nginx
tls:
- hosts:
- cmj.skybyte.me
secretName: cmj-skybyte-me-tls
rules:
- host: cmj.skybyte.me
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: app
port:
name: web


@ -0,0 +1,66 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: app
namespace: chatgpt-web-midjourney-proxy
labels:
app: app
spec:
replicas: 1
selector:
matchLabels:
app: app
template:
metadata:
annotations: {}
labels:
app: app
spec:
#nodeSelector:
# region: us
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: region
operator: NotIn
values:
- cn
containers:
- name: app
image: ydlhero/chatgpt-web-midjourney-proxy:v2.22.7
imagePullPolicy: Always
ports:
- containerPort: 3002
name: web
env:
- name: OPENAI_API_BASE_URL
value: "https://api.uniapi.io"
- name: OPENAI_API_KEY
value: "sk-NUeflgBYPEnMY0IpAc4a493d19D84628B91f4aF80672549e"
- name: OPENAI_API_MODEL
value: "claude-3-5-sonnet-latest"
- name: MJ_SERVER
value: "https://api.uniapi.io"
- name: MJ_API_SECRET
value: "sk-NUeflgBYPEnMY0IpAc4a493d19D84628B91f4aF80672549e"
- name: API_UPLOADER
value: "0"
- name: UPLOAD_TYPE
value: "R2"
- name: R2_DOMAIN
value: "https://209b775a76842f6f305193e41de86be1.r2.cloudflarestorage.com/chatgpt-web-midjourney-proxy"
- name: R2_KEY_ID
value: "e3a1364804a16bd8861409ca600907b4"
- name: R2_KEY_SECRET
value: "7833d3522e36117303b7ee4d435481eb8238ca6b2e9ae223d3837367dcccf7e7"
- name: AUTH_SECRET_KEY
value: "tianlun666"
- name: AUTH_SECRET_ERROR_COUNT
value: "3"
- name: AUTH_SECRET_ERROR_TIME
value: "10"
- name: TZ
value: "Asia/Shanghai"


@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: app
namespace: chatgpt-web-midjourney-proxy
spec:
selector:
app: app
ports:
- name: web
port: 3002
targetPort: 3002


@ -0,0 +1,17 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-aliyun-gz
provisioner: nfs.csi.k8s.io
parameters:
share: "/csi"
server: "28364f4a1fa-eok75.cn-guangzhou.nas.aliyuncs.com"
#server: "172.26.12.20"
subDir: "${pvc.metadata.namespace}/${pvc.metadata.name}"
reclaimPolicy: Delete
#volumeBindingMode: WaitForFirstConsumer
volumeBindingMode: Immediate
allowVolumeExpansion: true
mountOptions:
# - nolock,tcp,noresvport
- vers=3,nolock,proto=tcp,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport

20 csi-driver-nfs/pod.yaml Normal file

@ -0,0 +1,20 @@
apiVersion: v1
kind: Pod
metadata:
name: volume-test
spec:
nodeSelector:
dc: aliyun-gz
containers:
- name: volume-test
image: nginx:stable-alpine
imagePullPolicy: IfNotPresent
volumeMounts:
- name: volv
mountPath: /data
ports:
- containerPort: 80
volumes:
- name: volv
persistentVolumeClaim:
claimName: nfs-aliyun-pvc
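
Once the test pod is running, the NFS-backed volume can be checked from inside it; a minimal sketch, assuming kubectl access:
kubectl get pvc nfs-aliyun-pvc
kubectl exec volume-test -- df -h /data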

12 csi-driver-nfs/pvc.yaml Normal file

@ -0,0 +1,12 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-aliyun-pvc
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-aliyun-gz
resources:
requests:
storage: 128Mi

196 csi-driver-nfs/values.yaml Normal file

@ -0,0 +1,196 @@
customLabels: {}
image:
baseRepo: registry.k8s.io
nfs:
repository: registry.k8s.io/sig-storage/nfsplugin
tag: v4.10.0
pullPolicy: IfNotPresent
csiProvisioner:
repository: registry.k8s.io/sig-storage/csi-provisioner
tag: v5.2.0
pullPolicy: IfNotPresent
csiResizer:
repository: registry.k8s.io/sig-storage/csi-resizer
tag: v1.13.1
pullPolicy: IfNotPresent
csiSnapshotter:
repository: registry.k8s.io/sig-storage/csi-snapshotter
tag: v8.2.0
pullPolicy: IfNotPresent
livenessProbe:
repository: registry.k8s.io/sig-storage/livenessprobe
tag: v2.15.0
pullPolicy: IfNotPresent
nodeDriverRegistrar:
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
tag: v2.13.0
pullPolicy: IfNotPresent
externalSnapshotter:
repository: registry.k8s.io/sig-storage/snapshot-controller
tag: v8.2.0
pullPolicy: IfNotPresent
serviceAccount:
create: true # When true, service accounts will be created for you. Set to false if you want to use your own.
controller: csi-nfs-controller-sa # Name of Service Account to be created or used
node: csi-nfs-node-sa # Name of Service Account to be created or used
rbac:
create: true
name: nfs
driver:
name: nfs.csi.k8s.io
mountPermissions: 0
feature:
enableFSGroupPolicy: true
enableInlineVolume: false
propagateHostMountOptions: false
kubeletDir: /var/lib/kubelet
controller:
name: csi-nfs-controller
replicas: 1
strategyType: Recreate
runOnMaster: false
runOnControlPlane: true
enableSnapshotter: true
useTarCommandInSnapshot: false
livenessProbe:
healthPort: 29652
logLevel: 5
workingMountDir: /tmp
dnsPolicy: ClusterFirstWithHostNet # available values: Default, ClusterFirstWithHostNet, ClusterFirst
defaultOnDeletePolicy: delete # available values: delete, retain
affinity: {}
nodeSelector:
dc: aliyun-gz
priorityClassName: system-cluster-critical
tolerations:
- key: "node-role.kubernetes.io/master"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/controlplane"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
operator: "Exists"
effect: "NoSchedule"
- key: "CriticalAddonsOnly"
operator: "Exists"
effect: "NoSchedule"
resources:
csiProvisioner:
limits:
memory: 400Mi
requests:
cpu: 10m
memory: 20Mi
csiResizer:
limits:
memory: 400Mi
requests:
cpu: 10m
memory: 20Mi
csiSnapshotter:
limits:
memory: 200Mi
requests:
cpu: 10m
memory: 20Mi
livenessProbe:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
nfs:
limits:
memory: 200Mi
requests:
cpu: 10m
memory: 20Mi
node:
name: csi-nfs-node
dnsPolicy: ClusterFirstWithHostNet # available values: Default, ClusterFirstWithHostNet, ClusterFirst
maxUnavailable: 1
logLevel: 5
livenessProbe:
healthPort: 29653
affinity: {}
nodeSelector:
dc: aliyun-gz
priorityClassName: system-cluster-critical
tolerations:
- operator: "Exists"
resources:
livenessProbe:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
nodeDriverRegistrar:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
nfs:
limits:
memory: 300Mi
requests:
cpu: 10m
memory: 20Mi
externalSnapshotter:
enabled: false
name: snapshot-controller
priorityClassName: system-cluster-critical
deletionPolicy: Delete
controller:
replicas: 1
resources:
limits:
memory: 300Mi
requests:
cpu: 10m
memory: 20Mi
# Create volume snapshot CRDs.
customResourceDefinitions:
enabled: true #if set true, VolumeSnapshot, VolumeSnapshotContent and VolumeSnapshotClass CRDs will be created. Set it false, If they already exist in cluster.
## volumeSnapshotClass resource example:
volumeSnapshotClass:
create: false
name: csi-nfs-snapclass
deletionPolicy: Delete
## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
imagePullSecrets: []
# - name: "image-pull-secret"
## StorageClass resource example:
storageClass:
create: false
# name: nfs-csi
# annotations:
# storageclass.kubernetes.io/is-default-class: "true"
# parameters:
# server: nfs-server.default.svc.cluster.local
# share: /
# subDir:
# mountPermissions: "0"
# csi.storage.k8s.io/provisioner-secret is only needed for providing mountOptions in DeleteVolume
# csi.storage.k8s.io/provisioner-secret-name: "mount-options"
# csi.storage.k8s.io/provisioner-secret-namespace: "default"
# reclaimPolicy: Delete
# volumeBindingMode: Immediate
# mountOptions:
# - nfsvers=4.1
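
These values are intended for the upstream csi-driver-nfs Helm chart; a minimal install sketch, assuming the upstream chart repository is used:
helm repo add csi-driver-nfs https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts
helm upgrade --install csi-driver-nfs csi-driver-nfs/csi-driver-nfs -n kube-system -f csi-driver-nfs/values.yaml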

3166 dify-helm/values.yaml Normal file

File diff suppressed because it is too large

26 gitea/ingress.yaml Normal file

@ -0,0 +1,26 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: gitea-ingress
namespace: gitea
annotations:
cert-manager.io/cluster-issuer: "dnspod-cluster-issuer"
spec:
ingressClassName: nginx
tls:
- hosts:
- git.frytea.com
secretName: git-frytea-com-tls
rules:
- host: git.frytea.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: gitea
port:
name: web

41 gitea/load.yaml Normal file

@ -0,0 +1,41 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: app
namespace: gitea
labels:
app: gitea
spec:
replicas: 1
selector:
matchLabels:
app: gitea
template:
metadata:
annotations: {}
labels:
app: gitea
spec:
nodeSelector:
kubernetes.io/hostname: tencent-gz1
containers:
- name: app
image: gitea/gitea:1.22.4
ports:
- containerPort: 3000
name: web
env:
- name: USER_UID
value: "1000"
- name: USER_GID
value: "1000"
- name: TZ
value: "Asia/Shanghai"
volumeMounts:
- name: gitea-data
mountPath: /data
volumes:
- name: gitea-data
persistentVolumeClaim:
claimName: gitea-tgz1-pvc

14 gitea/pvc.yaml Normal file

@ -0,0 +1,14 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: gitea-tgz1-pvc
namespace: gitea
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: local-tgz1

12 gitea/service.yaml Normal file

@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: gitea
namespace: gitea
spec:
selector:
app: gitea
ports:
- name: web
port: 3000
targetPort: 3000

26 grok-mirror/ingress.yaml Normal file

@ -0,0 +1,26 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: app
namespace: grok-mirror
annotations:
cert-manager.io/cluster-issuer: "cf-cluster-issuer"
spec:
ingressClassName: nginx
tls:
- hosts:
- grok.skybyte.me
secretName: grok-skybyte-me-tls
rules:
- host: grok.skybyte.me
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: app
port:
name: web

51 grok-mirror/load.yaml Normal file

@ -0,0 +1,51 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: app
namespace: grok-mirror
labels:
app: gm
spec:
replicas: 1
selector:
matchLabels:
app: gm
template:
metadata:
annotations: {}
labels:
app: gm
spec:
nodeSelector:
region: us
dc: vkus
containers:
- name: app
image: dairoot/grok-gateway:latest
imagePullPolicy: Always
ports:
- containerPort: 8080
name: web
env:
#- name: ADMIN_PASSWORD
# value: "sotilu,GM2025"
- name: AUTHORIZATION
# value: ""
value: "eyJhbGciOiJIUzI1NiJ9.eyJzZXNzaW9uX2lkIjoiYjQwMGFkOGUtNDIyYi00M2M1LWJmOTUtN2FhYTkyMjQ1NzYyIn0.zq4dvwYQN-8iG0IKoEZWrtiqJHvfXmue_tN_zs82MfU"
# value: "xai-O66riftpH8YAJSk3lC6VkmSaeq9ZjlAog8zXz926aaDMhG1tIVvhwsWkZ4hNaCthRlZBbnYFdeESpzAT"
- name: ENABLE_MIRROR_API
value: "false"
- name: GOOGLEADS
value: "<script async src=\"https://pagead2.googlesyndication.com/pagead/js/adsbygoogle.js?client=ca-pub-7296634171837358\" crossorigin=\"anonymous\"></script></br><script defer data-domain=\"grok.skybyte.me\" src=\"https://plausible.frytea.com/js/script.js\"></script>"
- name: HOST
value: "https://grok.skybyte.me"
- name: TZ
value: "Asia/Shanghai"
volumeMounts:
- name: gm-data
mountPath: /app/.cache_data
volumes:
- name: gm-data
persistentVolumeClaim:
claimName: gm-longhorn-us-pvc

14 grok-mirror/pvc.yaml Normal file

@ -0,0 +1,14 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: gm-longhorn-us-pvc
namespace: grok-mirror
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
storageClassName: longhorn-us

12 grok-mirror/service.yaml Normal file

@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: app
namespace: grok-mirror
spec:
selector:
app: gm
ports:
- name: web
port: 8080
targetPort: 8080

1015 harbor-helm/values.yaml Normal file

File diff suppressed because it is too large


@ -0,0 +1,41 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: easyimages2-images-nfs-aliyun-gz-pvc
namespace: imagehost
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: nfs-aliyun-gz
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: easyimages2-i-nfs-aliyun-gz-pvc
namespace: imagehost
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
storageClassName: nfs-aliyun-gz
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: easyimages2-config-nfs-aliyun-gz-pvc
namespace: imagehost
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
storageClassName: nfs-aliyun-gz

48 imagehost/ingress.yaml Normal file

@ -0,0 +1,48 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: app
namespace: imagehost
annotations:
cert-manager.io/cluster-issuer: "dnspod-cluster-issuer"
spec:
ingressClassName: nginx
tls:
- hosts:
- image.frytea.com
- imagehost-cdn.frytea.com
- cdn-imagehost.frytea.com
secretName: image-frytea-com-tls
rules:
- host: image.frytea.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: app
port:
name: web
- host: imagehost-cdn.frytea.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: app
port:
name: web
- host: cdn-imagehost.frytea.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: app
port:
name: web

58 imagehost/load.yaml Normal file

@ -0,0 +1,58 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: app
namespace: imagehost
labels:
app: easyimages2
spec:
replicas: 1
selector:
matchLabels:
app: easyimages2
template:
metadata:
annotations: {}
labels:
app: easyimages2
spec:
nodeSelector:
#kubernetes.io/hostname: tencent-sh1
dc: aliyun-gz
containers:
- name: app
image: ddsderek/easyimage:latest
ports:
- containerPort: 80
name: web
env:
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: DEBUG
value: "false"
- name: TZ
value: "Asia/Shanghai"
volumeMounts:
- name: config
mountPath: /app/web/config
- name: i
mountPath: /app/web/i
- name: images
mountPath: /app/web/images
volumes:
- name: config
persistentVolumeClaim:
claimName: easyimages2-config-local-agz2-pvc
- name: i
persistentVolumeClaim:
claimName: easyimages2-i-local-agz2-pvc
- name: images
persistentVolumeClaim:
claimName: easyimages2-images-local-agz2-pvc

41 imagehost/pvc.yaml Normal file

@ -0,0 +1,41 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: easyimages2-images-local-agz2-pvc
namespace: imagehost
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: local-agz2
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: easyimages2-i-local-agz2-pvc
namespace: imagehost
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
storageClassName: local-agz2
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: easyimages2-config-local-agz2-pvc
namespace: imagehost
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
storageClassName: local-agz2

12 imagehost/service.yaml Normal file

@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: app
namespace: imagehost
spec:
selector:
app: easyimages2
ports:
- name: web
port: 80
targetPort: 80

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
labels:
io.kompose.service: app-claim0
name: app-claim0
namespace: listmonk
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: local-tgz1

35 listmonk/app-ingress.yaml Normal file

@ -0,0 +1,35 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
cert-manager.io/cluster-issuer: "dnspod-cluster-issuer"
kompose.cmd: kompose convert -f docker-compose.yml -n listmonk
kompose.controller.type: statefulset
kompose.service.expose: listmonk.frytea.com
kompose.service.expose.ingress-class-name: nginx
kompose.service.expose.tls-secret: listmonk-frytea-com
kompose.version: 1.34.0 (cbf2835db)
kompose.volume.size: 1Gi
kompose.volume.storage-class-name: local-tgz1
kompose.volume.type: persistentVolumeClaim
labels:
io.kompose.service: app
name: app
namespace: listmonk
spec:
ingressClassName: nginx
rules:
- host: listmonk.frytea.com
http:
paths:
- backend:
service:
name: app
port:
number: 9000
path: /
pathType: Prefix
tls:
- hosts:
- listmonk.frytea.com
secretName: listmonk-frytea-com

24 listmonk/app-service.yaml Normal file

@ -0,0 +1,24 @@
apiVersion: v1
kind: Service
metadata:
annotations:
kompose.cmd: kompose convert -f docker-compose.yml -n listmonk
kompose.controller.type: statefulset
kompose.service.expose: listmonk.frytea.com
kompose.service.expose.ingress-class-name: nginx
kompose.service.expose.tls-secret: listmonk-frytea-com
kompose.version: 1.34.0 (cbf2835db)
kompose.volume.size: 1Gi
kompose.volume.storage-class-name: local-tgz1
kompose.volume.type: persistentVolumeClaim
labels:
io.kompose.service: app
name: app
namespace: listmonk
spec:
ports:
- name: "9000"
port: 9000
targetPort: 9000
selector:
io.kompose.service: app


@ -0,0 +1,88 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
annotations:
kompose.cmd: kompose convert -f docker-compose.yml -n listmonk
kompose.controller.type: statefulset
kompose.service.expose: listmonk.frytea.com
kompose.service.expose.ingress-class-name: nginx
kompose.service.expose.tls-secret: listmonk-frytea-com
kompose.version: 1.34.0 (cbf2835db)
kompose.volume.size: 1Gi
kompose.volume.storage-class-name: local-tgz1
kompose.volume.type: persistentVolumeClaim
labels:
io.kompose.service: app
name: app
namespace: listmonk
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: app
serviceName: app
template:
metadata:
labels:
io.kompose.service: app
spec:
nodeSelector:
kubernetes.io/hostname: tencent-gz1
containers:
- args:
- sh
- -c
- ./listmonk --install --idempotent --yes --config '' && ./listmonk --upgrade --yes --config '' && ./listmonk --config ''
env:
- name: LISTMONK_ADMIN_PASSWORD
value: sotilu,LM2024
- name: LISTMONK_ADMIN_USER
value: songtianlun
- name: LISTMONK_app__address
value: 0.0.0.0:9000
- name: LISTMONK_db__database
value: listmonk
- name: LISTMONK_db__host
value: db.listmonk.svc.cluster.local
- name: LISTMONK_db__max_idle
value: "25"
- name: LISTMONK_db__max_lifetime
value: 300s
- name: LISTMONK_db__max_open
value: "25"
- name: LISTMONK_db__password
value: listmonk
- name: LISTMONK_db__port
value: "5432"
- name: LISTMONK_db__ssl_mode
value: disable
- name: LISTMONK_db__user
value: listmonk
- name: TZ
value: Etc/UTC
image: listmonk/listmonk:v4.1.0
name: listmonk-app
ports:
- containerPort: 9000
protocol: TCP
volumeMounts:
- mountPath: /listmonk/uploads
name: app-claim0
hostname: listmonk.frytea.com
restartPolicy: Always
volumes:
- name: app-claim0
persistentVolumeClaim:
claimName: app-claim0
volumeClaimTemplates:
- metadata:
labels:
io.kompose.service: app-claim0
name: app-claim0
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: local-tgz1


@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
labels:
io.kompose.service: db-claim0
name: db-claim0
namespace: listmonk
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
storageClassName: local-tgz1

21 listmonk/db-service.yaml Normal file

@ -0,0 +1,21 @@
apiVersion: v1
kind: Service
metadata:
annotations:
kompose.cmd: kompose convert -f docker-compose.yml -n listmonk
kompose.controller.type: statefulset
kompose.version: 1.34.0 (cbf2835db)
kompose.volume.size: 5Gi
kompose.volume.storage-class-name: local-tgz1
kompose.volume.type: persistentVolumeClaim
labels:
io.kompose.service: db
name: db
namespace: listmonk
spec:
ports:
- name: "5432"
port: 5432
targetPort: 5432
selector:
io.kompose.service: db


@ -0,0 +1,67 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
annotations:
kompose.cmd: kompose convert -f docker-compose.yml -n listmonk
kompose.controller.type: statefulset
kompose.version: 1.34.0 (cbf2835db)
kompose.volume.size: 5Gi
kompose.volume.storage-class-name: local-tgz1
kompose.volume.type: persistentVolumeClaim
labels:
io.kompose.service: db
name: db
namespace: listmonk
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: db
serviceName: db
template:
metadata:
labels:
io.kompose.service: db
spec:
nodeSelector:
kubernetes.io/hostname: tencent-gz1
containers:
- env:
- name: POSTGRES_DB
value: listmonk
- name: POSTGRES_PASSWORD
value: listmonk
- name: POSTGRES_USER
value: listmonk
image: postgres:17-alpine
livenessProbe:
exec:
command:
- sh
- -c
- pg_isready -U listmonk
failureThreshold: 6
periodSeconds: 10
timeoutSeconds: 5
name: listmonk-db
ports:
- containerPort: 5432
protocol: TCP
volumeMounts:
- mountPath: /var/lib/postgresql/data
name: db-claim0
restartPolicy: Always
volumes:
- name: db-claim0
persistentVolumeClaim:
claimName: db-claim0
volumeClaimTemplates:
- metadata:
labels:
io.kompose.service: db-claim0
name: db-claim0
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
storageClassName: local-tgz1


@ -0,0 +1,76 @@
x-db-credentials: &db-credentials # Use the default POSTGRES_ credentials if they're available or simply default to "listmonk"
POSTGRES_USER: &db-user listmonk # for database user, password, and database name
POSTGRES_PASSWORD: &db-password listmonk
POSTGRES_DB: &db-name listmonk
services:
# listmonk app
app:
image: listmonk/listmonk:v4.1.0
container_name: listmonk_app
restart: unless-stopped
labels:
kompose.controller.type: statefulset
kompose.volume.type: persistentVolumeClaim
kompose.volume.storage-class-name: local-tgz1
kompose.volume.size: 1Gi
kompose.service.expose: listmonk.frytea.com
kompose.service.expose.ingress-class-name: nginx
kompose.service.expose.tls-secret: listmonk-frytea-com
ports:
- "127.0.0.1:9000:9000" # To change the externally exposed port, change to: $custom_port:9000
networks:
- listmonk
hostname: listmonk.frytea.com # Recommend using FQDN for hostname
depends_on:
- db
command: [sh, -c, "./listmonk --install --idempotent --yes --config '' && ./listmonk --upgrade --yes --config '' && ./listmonk --config ''"]
# --config (file) param is set to empty so that listmonk only uses the env vars (below) for config.
# --install --idempotent ensures that DB installation happens only once on an empty DB, on the first ever start.
# --upgrade automatically runs any DB migrations when a new image is pulled.
environment: # The same params as in config.toml are passed as env vars here.
LISTMONK_app__address: 0.0.0.0:9000
LISTMONK_db__user: *db-user
LISTMONK_db__password: *db-password
LISTMONK_db__database: *db-name
LISTMONK_db__host: listmonk_db
LISTMONK_db__port: 5432
LISTMONK_db__ssl_mode: disable
LISTMONK_db__max_open: 25
LISTMONK_db__max_idle: 25
LISTMONK_db__max_lifetime: 300s
TZ: Etc/UTC
LISTMONK_ADMIN_USER: ${LISTMONK_ADMIN_USER:-songtianlun} # If these (optional) are set during the first `docker compose up`, then the Super Admin user is automatically created.
LISTMONK_ADMIN_PASSWORD: ${LISTMONK_ADMIN_PASSWORD:-sotilu,LM2024} # Otherwise, the user can be setup on the web app after the first visit to http://localhost:9000
volumes:
- ./data/uploads:/listmonk/uploads:rw # Mount an uploads directory on the host to /listmonk/uploads inside the container.
# To use this, change directory path in Admin -> Settings -> Media to /listmonk/uploads
# Postgres database
db:
image: postgres:17-alpine
container_name: listmonk_db
labels:
kompose.controller.type: statefulset
kompose.volume.type: persistentVolumeClaim
kompose.volume.size: 5Gi
kompose.volume.storage-class-name: local-tgz1
restart: unless-stopped
ports:
- "127.0.0.1:5433:5432"
networks:
- listmonk
environment:
<<: *db-credentials
healthcheck:
test: ["CMD-SHELL", "pg_isready -U listmonk"]
interval: 10s
timeout: 5s
retries: 6
volumes:
- ./data/db:/var/lib/postgresql/data
networks:
listmonk:
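
The kompose.* labels in this compose file drive the generated manifests above; per the kompose.cmd annotations, they were produced with roughly:
kompose convert -f docker-compose.yml -n listmonk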


@ -0,0 +1,5 @@
apiVersion: v1
kind: Namespace
metadata:
name: listmonk
namespace: listmonk

26 lobe-chat/.env Normal file

@ -0,0 +1,26 @@
TZ=Asia/Shanghai
APP_URL=https://lchat.frytea.com/
KEY_VAULTS_SECRET=Pgl3OONDcCJNh8owuuZH3F84Ho6qSGlK
DATABASE_URL=postgresql://lobechat_owner:yWxzhvX4Qj9m@ep-hidden-credit-a120sf7h.ap-southeast-1.aws.neon.tech/lobechat?sslmode=require
NEXT_AUTH_SECRET=NX2kaPE923dt6BL2U8e9oSre5RfoT7hg
NEXT_AUTH_SSO_PROVIDERS=casdoor
#NEXTAUTH_URL=https://lchat.frytea.com/api/auth
AUTH_URL=https://lchat.frytea.com/api/auth
#AUTH_LOGTO_ID=usq7qnlitbye6dtw8pcop
#AUTH_LOGTO_SECRET=Vku8NfdBfgzAYALioCRw1mnd7XBeNmSZ
#AUTH_LOGTO_ISSUER=https://logto.frytea.com/oidc
#NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY=pk_test_Z3JhdGVmdWwtdGhydXNoLTIwLmNsZXJrLmFjY291bnRzLmRldiQ
#CLERK_SECRET_KEY=sk_test_xeXIJuNWD9I7cxTy4SGKTifbN2j6tJV5CKcxLaLIUp
#CLERK_WEBHOOK_SECRET=whsec_y/wxY3hTj6gcSpQUkUt9wQ+6hizOH/za
AUTH_CASDOOR_ID=337128fd1dfba160a0ae
AUTH_CASDOOR_SECRET=f3c1952a4656871db25367ad7def32376df31cf3
AUTH_CASDOOR_ISSUER=https://casdoor.frytea.com
CASDOOR_WEBHOOK_SECRET=uQ7KgEfrB6QVJWxj8k3HYaTkQej5rI9W
S3_ACCESS_KEY_ID=LTAI5tEFwBwK7rk9HvFgF5LW
S3_SECRET_ACCESS_KEY=ZyT8NyOYnb64vHeCwNt4aPizxQmOyO
S3_ENDPOINT=https://oss-cn-guangzhou.aliyuncs.com
S3_BUCKET=frytea-lobecha
S3_PUBLIC_DOMAIN=https://frytea-lobechat.oss-cn-guangzhou.aliyuncs.com
S3_ENABLE_PATH_STYLE=0
S3_REGION=cn-guangzhou

12 lobe-chat/compose.yaml Normal file

@ -0,0 +1,12 @@
name: lobe-chat-database
services:
lobe:
image: lobehub/lobe-chat-database:v1.52.11
container_name: lobe-chat
ports:
- '127.0.0.1:3210:3210'
env_file:
- .env
# depends_on:
# - postgresql
restart: always
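
A minimal sketch for running this compose file directly, assuming Docker Compose v2 is installed:
docker compose -f compose.yaml up -d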


@ -0,0 +1,38 @@
apiVersion: v1
data:
APP_URL: https://lchat.frytea.com/
AUTH_CASDOOR_ID: 337128fd1dfba160a0ae
AUTH_CASDOOR_ISSUER: https://casdoor.frytea.com
AUTH_CASDOOR_SECRET: f3c1952a4656871db25367ad7def32376df31cf3
AUTH_URL: https://lchat.frytea.com/api/auth
CASDOOR_WEBHOOK_SECRET: uQ7KgEfrB6QVJWxj8k3HYaTkQej5rI9W
#DATABASE_URL: postgresql://lobechat_owner:yWxzhvX4Qj9m@ep-hidden-credit-a120sf7h.ap-southeast-1.aws.neon.tech/lobechat?sslmode=require
DATABASE_URL: postgresql://postgres:PEw9jLtFZ69CBAT2@pg.lobe-chat.svc.cluster.local:5432/lobechat
#DATABASE_URL: postgresql://lobechat-us_owner:npg_3v2GPnADLjFi@ep-wispy-glitter-a6109ylt-pooler.us-west-2.aws.neon.tech/lobechat-us?sslmode=require
#DATABASE_URL: postgresql://lobechat:fTjzEgByBmr7KYSp@pgm-7xv432722d038s47.pg.rds.aliyuncs.com/lobechat
#DATABASE_URL: postgresql://lobechat:fTjzEgByBmr7KYSp@172.26.12.25/lobechat
#DATABASE_URL: postgresql://postgres:iY8tr7SKuTqr&4qN@172.26.12.25/lobechat
KEY_VAULTS_SECRET: Pgl3OONDcCJNh8owuuZH3F84Ho6qSGlK
NEXT_AUTH_SECRET: NX2kaPE923dt6BL2U8e9oSre5RfoT7hg
NEXT_AUTH_SSO_PROVIDERS: casdoor
TZ: Asia/Shanghai
#S3_ACCESS_KEY_ID: LTAI5tK9oKaYL1hSaeyuMrAu
#S3_BUCKET: frytea-lobechat
#S3_ENABLE_PATH_STYLE: "0"
#S3_ENDPOINT: https://oss-cn-guangzhou.aliyuncs.com
#S3_PUBLIC_DOMAIN: https://frytea-lobechat.oss-cn-guangzhou.aliyuncs.com
#S3_REGION: cn-guangzhou
#S3_SECRET_ACCESS_KEY: ml1MUNZQLcQPRClp5ooDvLLCK8urZg
S3_ACCESS_KEY_ID: PhZp3EQKl4wmuRoGQf0q
S3_SECRET_ACCESS_KEY: hieux4bYCAjWi5Addlehos9k1WvUiV2U8uO4RmhI
S3_BUCKET: lobechat
S3_ENABLE_PATH_STYLE: "1"
S3_ENDPOINT: https://api.minio.skybyte.me
S3_PUBLIC_DOMAIN: https://api.minio.skybyte.me
S3_REGION: us-west-1
kind: ConfigMap
metadata:
labels:
io.kompose.service: lobe-env
name: env
namespace: lobe-chat

25 lobe-chat/ingress.yaml Normal file

@ -0,0 +1,25 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: lobe
namespace: lobe-chat
annotations:
cert-manager.io/cluster-issuer: "dnspod-cluster-issuer"
spec:
ingressClassName: nginx
tls:
- hosts:
- lchat.frytea.com
secretName: lchat-frytea-com-tls
rules:
- host: lchat.frytea.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: lobe
port:
name: web


@ -0,0 +1,5 @@
apiVersion: v1
kind: Namespace
metadata:
name: lobe-chat
namespace: lobe-chat


@ -0,0 +1,137 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
kompose.cmd: kompose convert -f compose.yaml -n lobe-chat
kompose.version: 1.34.0 (cbf2835db)
labels:
io.kompose.service: lobe
name: lobe
namespace: lobe-chat
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: lobe
template:
metadata:
annotations:
kompose.cmd: kompose convert -f compose.yaml -n lobe-chat
kompose.version: 1.34.0 (cbf2835db)
labels:
io.kompose.service: lobe
spec:
#nodeSelector:
# region: us
# #dc: aliyun-gz
# #kubernetes.io/hostname: aliyun-gz3
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- vkvm-us1
- vkvm-us2
containers:
- env:
- name: SEARXNG_URL
value: https://searxng.skybyte.me
- name: APP_URL
valueFrom:
configMapKeyRef:
key: APP_URL
name: env
- name: AUTH_CASDOOR_ID
valueFrom:
configMapKeyRef:
key: AUTH_CASDOOR_ID
name: env
- name: AUTH_CASDOOR_ISSUER
valueFrom:
configMapKeyRef:
key: AUTH_CASDOOR_ISSUER
name: env
- name: AUTH_CASDOOR_SECRET
valueFrom:
configMapKeyRef:
key: AUTH_CASDOOR_SECRET
name: env
- name: AUTH_URL
valueFrom:
configMapKeyRef:
key: AUTH_URL
name: env
- name: CASDOOR_WEBHOOK_SECRET
valueFrom:
configMapKeyRef:
key: CASDOOR_WEBHOOK_SECRET
name: env
- name: DATABASE_URL
valueFrom:
configMapKeyRef:
key: DATABASE_URL
name: env
- name: KEY_VAULTS_SECRET
valueFrom:
configMapKeyRef:
key: KEY_VAULTS_SECRET
name: env
- name: NEXT_AUTH_SECRET
valueFrom:
configMapKeyRef:
key: NEXT_AUTH_SECRET
name: env
- name: NEXT_AUTH_SSO_PROVIDERS
valueFrom:
configMapKeyRef:
key: NEXT_AUTH_SSO_PROVIDERS
name: env
- name: S3_ACCESS_KEY_ID
valueFrom:
configMapKeyRef:
key: S3_ACCESS_KEY_ID
name: env
- name: S3_BUCKET
valueFrom:
configMapKeyRef:
key: S3_BUCKET
name: env
- name: S3_ENABLE_PATH_STYLE
valueFrom:
configMapKeyRef:
key: S3_ENABLE_PATH_STYLE
name: env
- name: S3_ENDPOINT
valueFrom:
configMapKeyRef:
key: S3_ENDPOINT
name: env
- name: S3_PUBLIC_DOMAIN
valueFrom:
configMapKeyRef:
key: S3_PUBLIC_DOMAIN
name: env
- name: S3_REGION
valueFrom:
configMapKeyRef:
key: S3_REGION
name: env
- name: S3_SECRET_ACCESS_KEY
valueFrom:
configMapKeyRef:
key: S3_SECRET_ACCESS_KEY
name: env
- name: TZ
valueFrom:
configMapKeyRef:
key: TZ
name: env
image: lobehub/lobe-chat-database:1.74.10
name: lobe-chat
ports:
- containerPort: 3210
protocol: TCP
restartPolicy: Always


@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
annotations:
kompose.cmd: kompose convert -f compose.yaml -n lobe-chat
kompose.version: 1.34.0 (cbf2835db)
labels:
io.kompose.service: lobe
name: lobe
namespace: lobe-chat
spec:
ports:
- name: web
port: 3210
targetPort: 3210
selector:
io.kompose.service: lobe

45 lobe-chat/pg/load.yaml Normal file

@ -0,0 +1,45 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: pg
namespace: lobe-chat
labels:
app: pg
spec:
replicas: 1
selector:
matchLabels:
app: pg
template:
metadata:
annotations: {}
labels:
app: pg
spec:
nodeSelector:
region: us
kubernetes.io/hostname: vkvm-us1
containers:
- name: pg
#image: postgres:16
image: pgvector/pgvector:pg16
ports:
- containerPort: 5432
name: pg
env:
- name: POSTGRES_USER
value: "postgres"
- name: POSTGRES_PASSWORD
value: "PEw9jLtFZ69CBAT2"
- name: POSTGRES_DB
value: "lobechat"
- name: TZ
value: "Asia/Shanghai"
volumeMounts:
- name: pg-data
mountPath: /var/lib/postgresql/data
volumes:
- name: pg-data
persistentVolumeClaim:
claimName: pg-data-local-vkus1-pvc

14 lobe-chat/pg/pvc.yaml Normal file

@ -0,0 +1,14 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: pg-data-local-vkus1-pvc
namespace: lobe-chat
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: local-vkus1

lobe-chat/pg/service.yaml Normal file

@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: pg
namespace: lobe-chat
spec:
type: NodePort
selector:
app: pg
ports:
- name: pg
port: 5432
targetPort: 5432
nodePort: 32432
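With type NodePort, Postgres is also reachable on port 32432 of any node's address, which is handy for running migrations or ad-hoc queries from outside the cluster. A sketch, where <node-ip> stands in for one of the cluster nodes:

psql "postgresql://postgres:PEw9jLtFZ69CBAT2@<node-ip>:32432/lobechat"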

loki/archive/raw-values.yaml Normal file

File diff suppressed because it is too large.

loki/values.yaml Normal file

@ -0,0 +1,87 @@
loki:
commonConfig:
replication_factor: 1
schemaConfig:
configs:
- from: "2024-04-01"
store: tsdb
object_store: s3
schema: v13
index:
prefix: loki_index_
period: 24h
storage_config:
aws:
region: cn-shanghai
bucketnames: k3s-loki-dev-chunk
s3forcepathstyle: false
pattern_ingester:
enabled: true
limits_config:
allow_structured_metadata: true
volume_enabled: true
ruler:
enable_api: true
storage:
type: s3
bucketNames:
chunks: loki-chunk
ruler: loki-ruler
admin: loki-admin
s3:
      # An s3 URL can be used to specify the endpoint, access key, secret key, and bucket name in one string; this works well for S3-compatible storage or when hosting Loki on-premises with S3 as the storage backend. Either use the s3 URL or the individual fields below (endpoint, region, keys).
#s3: s3://access_key:secret_access_key@custom_endpoint/bucket_name
# AWS endpoint URL
endpoint: api.minio.skybyte.me
# AWS region where the S3 bucket is located
region: us-west-1
# AWS secret access key
secretAccessKey: 3QaYyFxyE974NRSFU37U
# AWS access key ID
accessKeyId: cORLoTgAzX0X8Tesqqaw7hHEpruerT5svnyDhtG5
# AWS signature version (e.g., v2 or v4)
#signatureVersion: <your-signature-version>
# Forces the path style for S3 (true/false)
s3ForcePathStyle: true
# Allows insecure (HTTP) connections (true/false)
insecure: false
# HTTP configuration settings
http_config: {}
nodeSelector:
region: us
minio:
enabled: false
deploymentMode: SingleBinary
singleBinary:
replicas: 1
# Zero out replica counts of other deployment modes
backend:
replicas: 0
read:
replicas: 0
write:
replicas: 0
ingester:
replicas: 0
querier:
replicas: 0
queryFrontend:
replicas: 0
queryScheduler:
replicas: 0
distributor:
replicas: 0
compactor:
replicas: 0
indexGateway:
replicas: 0
bloomCompactor:
replicas: 0
bloomGateway:
replicas: 0
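These values target the upstream Grafana Loki chart in SingleBinary mode with an external MinIO as the object store. One way to apply them, assuming the release is installed into a namespace called loki (a sketch):

helm repo add grafana https://grafana.github.io/helm-charts
helm upgrade --install loki grafana/loki -n loki --create-namespace -f loki/values.yaml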

longhorn/auth Normal file

@ -0,0 +1 @@
songtianlun:$apr1$cyDcdunW$0MOlFOY4OnRXcOQgOcXve/

longhorn/create_auth.sh Normal file

@ -0,0 +1,3 @@
# Append an htpasswd-style (APR1) entry for the Longhorn UI user to the local auth file.
USER=songtianlun; PASSWORD=sotilu,LH2025; echo "${USER}:$(openssl passwd -stdin -apr1 <<< ${PASSWORD})" >> auth
# Create the secret referenced by the nginx.ingress.kubernetes.io/auth-secret annotation on the Longhorn ingress.
kubectl -n longhorn-system create secret generic basic-auth --from-file=auth


@ -0,0 +1,34 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: longhorn-ingress
namespace: longhorn-system
annotations:
# type of authentication
nginx.ingress.kubernetes.io/auth-type: basic
    # redirect (308) plain-HTTP requests to HTTPS, since TLS is terminated at the ingress
    nginx.ingress.kubernetes.io/ssl-redirect: 'true'
# name of the secret that contains the user/password definitions
nginx.ingress.kubernetes.io/auth-secret: basic-auth
# message to display with an appropriate context why the authentication is required
nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required '
# custom max body size for file uploading like backing image uploading
nginx.ingress.kubernetes.io/proxy-body-size: 10000m
cert-manager.io/cluster-issuer: "cf-cluster-issuer"
spec:
ingressClassName: nginx
tls:
- hosts:
- longhorn.skybyte.me
secretName: longhorn-skybyte-me-tls
rules:
- host: longhorn.skybyte.me
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: longhorn-frontend
port:
number: 80
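Once the basic-auth secret from create_auth.sh exists and the certificate has been issued, a quick way to confirm that authentication is actually enforced (credentials are the ones from create_auth.sh):

curl -I https://longhorn.skybyte.me                               # expect 401 without credentials
curl -I -u songtianlun:sotilu,LH2025 https://longhorn.skybyte.me  # expect 200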


@ -0,0 +1,91 @@
apiVersion: v1
kind: Secret
metadata:
name: minio-secret
namespace: default
type: Opaque
data:
AWS_ACCESS_KEY_ID: bG9uZ2hvcm4tdGVzdC1hY2Nlc3Mta2V5 # longhorn-test-access-key
AWS_SECRET_ACCESS_KEY: bG9uZ2hvcm4tdGVzdC1zZWNyZXQta2V5 # longhorn-test-secret-key
AWS_ENDPOINTS: aHR0cHM6Ly9taW5pby1zZXJ2aWNlLmRlZmF1bHQ6OTAwMA== # https://minio-service.default:9000
AWS_CERT: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMRENDQWhTZ0F3SUJBZ0lSQU1kbzQycGhUZXlrMTcvYkxyWjVZRHN3RFFZSktvWklodmNOQVFFTEJRQXcKR2pFWU1CWUdBMVVFQ2hNUFRHOXVaMmh2Y200Z0xTQlVaWE4wTUNBWERUSXdNRFF5TnpJek1EQXhNVm9ZRHpJeApNakF3TkRBek1qTXdNREV4V2pBYU1SZ3dGZ1lEVlFRS0V3OU1iMjVuYUc5eWJpQXRJRlJsYzNRd2dnRWlNQTBHCkNTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFEWHpVdXJnUFpEZ3pUM0RZdWFlYmdld3Fvd2RlQUQKODRWWWF6ZlN1USs3K21Oa2lpUVBvelVVMmZvUWFGL1BxekJiUW1lZ29hT3l5NVhqM1VFeG1GcmV0eDBaRjVOVgpKTi85ZWFJNWRXRk9teHhpMElPUGI2T0RpbE1qcXVEbUVPSXljdjRTaCsvSWo5Zk1nS0tXUDdJZGxDNUJPeThkCncwOVdkckxxaE9WY3BKamNxYjN6K3hISHd5Q05YeGhoRm9tb2xQVnpJbnlUUEJTZkRuSDBuS0lHUXl2bGhCMGsKVHBHSzYxc2prZnFTK3hpNTlJeHVrbHZIRXNQcjFXblRzYU9oaVh6N3lQSlorcTNBMWZoVzBVa1JaRFlnWnNFbQovZ05KM3JwOFhZdURna2kzZ0UrOElXQWRBWHExeWhqRDdSSkI4VFNJYTV0SGpKUUtqZ0NlSG5HekFnTUJBQUdqCmF6QnBNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUIKQWY4RUJUQURBUUgvTURFR0ExVWRFUVFxTUNpQ0NXeHZZMkZzYUc5emRJSVZiV2x1YVc4dGMyVnlkbWxqWlM1awpaV1poZFd4MGh3Ui9BQUFCTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFDbUZMMzlNSHVZMzFhMTFEajRwMjVjCnFQRUM0RHZJUWozTk9kU0dWMmQrZjZzZ3pGejFXTDhWcnF2QjFCMVM2cjRKYjJQRXVJQkQ4NFlwVXJIT1JNU2MKd3ViTEppSEtEa0Jmb2U5QWI1cC9VakpyS0tuajM0RGx2c1cvR3AwWTZYc1BWaVdpVWorb1JLbUdWSTI0Q0JIdgpnK0JtVzNDeU5RR1RLajk0eE02czNBV2xHRW95YXFXUGU1eHllVWUzZjFBWkY5N3RDaklKUmVWbENtaENGK0JtCmFUY1RSUWN3cVdvQ3AwYmJZcHlERFlwUmxxOEdQbElFOW8yWjZBc05mTHJVcGFtZ3FYMmtYa2gxa3lzSlEralAKelFadHJSMG1tdHVyM0RuRW0yYmk0TktIQVFIcFc5TXUxNkdRakUxTmJYcVF0VEI4OGpLNzZjdEg5MzRDYWw2VgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t
AWS_CERT_KEY: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRRFh6VXVyZ1BaRGd6VDMKRFl1YWViZ2V3cW93ZGVBRDg0VllhemZTdVErNyttTmtpaVFQb3pVVTJmb1FhRi9QcXpCYlFtZWdvYU95eTVYagozVUV4bUZyZXR4MFpGNU5WSk4vOWVhSTVkV0ZPbXh4aTBJT1BiNk9EaWxNanF1RG1FT0l5Y3Y0U2grL0lqOWZNCmdLS1dQN0lkbEM1Qk95OGR3MDlXZHJMcWhPVmNwSmpjcWIzeit4SEh3eUNOWHhoaEZvbW9sUFZ6SW55VFBCU2YKRG5IMG5LSUdReXZsaEIwa1RwR0s2MXNqa2ZxUyt4aTU5SXh1a2x2SEVzUHIxV25Uc2FPaGlYejd5UEpaK3EzQQoxZmhXMFVrUlpEWWdac0VtL2dOSjNycDhYWXVEZ2tpM2dFKzhJV0FkQVhxMXloakQ3UkpCOFRTSWE1dEhqSlFLCmpnQ2VIbkd6QWdNQkFBRUNnZ0VBZlVyQ1hrYTN0Q2JmZjNpcnp2cFFmZnVEbURNMzV0TmlYaDJTQVpSVW9FMFYKbSsvZ1UvdnIrN2s2eUgvdzhMOXhpZXFhQTljVkZkL0JuTlIrMzI2WGc2dEpCNko2ZGZxODJZdmZOZ0VDaUFMaQpqalNGemFlQmhnT3ZsWXZHbTR5OTU1Q0FGdjQ1cDNac1VsMTFDRXJlL1BGbGtaWHRHeGlrWFl6NC85UTgzblhZCnM2eDdPYTgyUjdwT2lraWh3Q0FvVTU3Rjc4ZWFKOG1xTmkwRlF2bHlxSk9QMTFCbVp4dm54ZU11S2poQjlPTnAKTFNwMWpzZXk5bDZNR2pVbjBGTG53RHZkVWRiK0ZlUEkxTjdWYUNBd3hJK3JHa3JTWkhnekhWWE92VUpON2t2QQpqNUZPNW9uNGgvK3hXbkYzM3lxZ0VvWWZ0MFFJL2pXS2NOV1d1a2pCd1FLQmdRRGVFNlJGRUpsT2Q1aVcxeW1qCm45RENnczVFbXFtRXN3WU95bkN3U2RhK1lNNnZVYmlac1k4WW9wMVRmVWN4cUh2NkFQWGpVd2NBUG1QVE9KRW8KMlJtS0xTYkhsTnc4bFNOMWJsWDBEL3Mzamc1R3VlVW9nbW5TVnhMa0h1OFhKR0o3VzFReEUzZG9IUHRrcTNpagpoa09QTnJpZFM0UmxqNTJwYkhscjUvQzRjUUtCZ1FENHhFYmpuck1heFV2b0xxVTRvT2xiOVc5UytSUllTc0cxCmxJUmgzNzZTV0ZuTTlSdGoyMTI0M1hkaE4zUFBtSTNNeiswYjdyMnZSUi9LMS9Cc1JUQnlrTi9kbkVuNVUxQkEKYm90cGZIS1Jvc1FUR1hIQkEvM0JrNC9qOWplU3RmVXgzZ2x3eUI0L2hORy9KM1ZVV2FXeURTRm5qZFEvcGJsRwp6VWlsSVBmK1l3S0JnUUNwMkdYYmVJMTN5TnBJQ3psS2JqRlFncEJWUWVDQ29CVHkvUHRncUtoM3BEeVBNN1kyCnZla09VMWgyQVN1UkhDWHRtQXgzRndvVXNxTFFhY1FEZEw4bXdjK1Y5eERWdU02TXdwMDBjNENVQmE1L2d5OXoKWXdLaUgzeFFRaVJrRTZ6S1laZ3JqSkxYYXNzT1BHS2cxbEFYV1NlckRaV3R3MEEyMHNLdXQ0NlEwUUtCZ0hGZQpxZHZVR0ZXcjhvTDJ0dzlPcmVyZHVJVTh4RnZVZmVFdHRRTVJ2N3pjRE5qT0gxUnJ4Wk9aUW0ySW92dkp6MTIyCnFKMWhPUXJtV3EzTHFXTCtTU3o4L3pqMG4vWERWVUIzNElzTFR2ODJDVnVXN2ZPRHlTSnVDRlpnZ0VVWkxZd3oKWDJRSm4xZGRSV1Z6S3hKczVJbDNXSERqL3dXZWxnaEJSOGtSZEZOM0FvR0FJNldDdjJQQ1lUS1ZZNjAwOFYwbgpyTDQ3YTlPanZ0Yy81S2ZxSjFpMkpKTUgyQi9jbU1WRSs4M2dpODFIU1FqMWErNnBjektmQVppZWcwRk9nL015ClB6VlZRYmpKTnY0QzM5KzdxSDg1WGdZTXZhcTJ0aDFEZWUvQ3NsMlM4QlV0cW5mc0VuMUYwcWhlWUJZb2RibHAKV3RUaE5oRi9oRVhzbkJROURyWkJKT1U9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
---
# same secret for longhorn-system namespace
apiVersion: v1
kind: Secret
metadata:
name: minio-secret
namespace: longhorn-system
type: Opaque
data:
AWS_ACCESS_KEY_ID: bG9uZ2hvcm4tdGVzdC1hY2Nlc3Mta2V5 # longhorn-test-access-key
AWS_SECRET_ACCESS_KEY: bG9uZ2hvcm4tdGVzdC1zZWNyZXQta2V5 # longhorn-test-secret-key
AWS_ENDPOINTS: aHR0cHM6Ly9taW5pby1zZXJ2aWNlLmRlZmF1bHQ6OTAwMA== # https://minio-service.default:9000
AWS_CERT: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMRENDQWhTZ0F3SUJBZ0lSQU1kbzQycGhUZXlrMTcvYkxyWjVZRHN3RFFZSktvWklodmNOQVFFTEJRQXcKR2pFWU1CWUdBMVVFQ2hNUFRHOXVaMmh2Y200Z0xTQlVaWE4wTUNBWERUSXdNRFF5TnpJek1EQXhNVm9ZRHpJeApNakF3TkRBek1qTXdNREV4V2pBYU1SZ3dGZ1lEVlFRS0V3OU1iMjVuYUc5eWJpQXRJRlJsYzNRd2dnRWlNQTBHCkNTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFEWHpVdXJnUFpEZ3pUM0RZdWFlYmdld3Fvd2RlQUQKODRWWWF6ZlN1USs3K21Oa2lpUVBvelVVMmZvUWFGL1BxekJiUW1lZ29hT3l5NVhqM1VFeG1GcmV0eDBaRjVOVgpKTi85ZWFJNWRXRk9teHhpMElPUGI2T0RpbE1qcXVEbUVPSXljdjRTaCsvSWo5Zk1nS0tXUDdJZGxDNUJPeThkCncwOVdkckxxaE9WY3BKamNxYjN6K3hISHd5Q05YeGhoRm9tb2xQVnpJbnlUUEJTZkRuSDBuS0lHUXl2bGhCMGsKVHBHSzYxc2prZnFTK3hpNTlJeHVrbHZIRXNQcjFXblRzYU9oaVh6N3lQSlorcTNBMWZoVzBVa1JaRFlnWnNFbQovZ05KM3JwOFhZdURna2kzZ0UrOElXQWRBWHExeWhqRDdSSkI4VFNJYTV0SGpKUUtqZ0NlSG5HekFnTUJBQUdqCmF6QnBNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUIKQWY4RUJUQURBUUgvTURFR0ExVWRFUVFxTUNpQ0NXeHZZMkZzYUc5emRJSVZiV2x1YVc4dGMyVnlkbWxqWlM1awpaV1poZFd4MGh3Ui9BQUFCTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFDbUZMMzlNSHVZMzFhMTFEajRwMjVjCnFQRUM0RHZJUWozTk9kU0dWMmQrZjZzZ3pGejFXTDhWcnF2QjFCMVM2cjRKYjJQRXVJQkQ4NFlwVXJIT1JNU2MKd3ViTEppSEtEa0Jmb2U5QWI1cC9VakpyS0tuajM0RGx2c1cvR3AwWTZYc1BWaVdpVWorb1JLbUdWSTI0Q0JIdgpnK0JtVzNDeU5RR1RLajk0eE02czNBV2xHRW95YXFXUGU1eHllVWUzZjFBWkY5N3RDaklKUmVWbENtaENGK0JtCmFUY1RSUWN3cVdvQ3AwYmJZcHlERFlwUmxxOEdQbElFOW8yWjZBc05mTHJVcGFtZ3FYMmtYa2gxa3lzSlEralAKelFadHJSMG1tdHVyM0RuRW0yYmk0TktIQVFIcFc5TXUxNkdRakUxTmJYcVF0VEI4OGpLNzZjdEg5MzRDYWw2VgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: longhorn-test-minio
namespace: default
labels:
app: longhorn-test-minio
spec:
replicas: 1
selector:
matchLabels:
app: longhorn-test-minio
template:
metadata:
labels:
app: longhorn-test-minio
spec:
volumes:
- name: minio-volume
emptyDir: {}
- name: minio-certificates
secret:
secretName: minio-secret
items:
- key: AWS_CERT
path: public.crt
- key: AWS_CERT_KEY
path: private.key
containers:
- name: minio
image: minio/minio:RELEASE.2022-02-01T18-00-14Z
command: ["sh", "-c", "mkdir -p /storage/backupbucket && mkdir -p /root/.minio/certs && ln -s /root/certs/private.key /root/.minio/certs/private.key && ln -s /root/certs/public.crt /root/.minio/certs/public.crt && exec minio server /storage"]
env:
- name: MINIO_ROOT_USER
valueFrom:
secretKeyRef:
name: minio-secret
key: AWS_ACCESS_KEY_ID
- name: MINIO_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: minio-secret
key: AWS_SECRET_ACCESS_KEY
ports:
- containerPort: 9000
volumeMounts:
- name: minio-volume
mountPath: "/storage"
- name: minio-certificates
mountPath: "/root/certs"
readOnly: true
---
apiVersion: v1
kind: Service
metadata:
name: minio-service
namespace: default
spec:
selector:
app: longhorn-test-minio
ports:
- port: 9000
targetPort: 9000
protocol: TCP
sessionAffinity: ClientIP
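This is the example MinIO backupstore from the Longhorn documentation (the keys decode to longhorn-test-access-key / longhorn-test-secret-key). After it is deployed, the default backup target is typically pointed at the backupbucket created by the container command, with minio-secret as the credential secret; the resulting state can be inspected with (a sketch):

kubectl -n longhorn-system get backuptargets.longhorn.io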


@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
name: minio-secret
namespace: longhorn-system
type: Opaque
data:
AWS_ACCESS_KEY_ID: YVZOZDRlNzNJRFBBaU5heUZFb08= # aVNd4e73IDPAiNayFEoO # longhorn-test-access-key
AWS_SECRET_ACCESS_KEY: emM4NzlCVklpMzFzOFdCM2lmNjRrTFFFRmRDWFU2Q2JENDRXRDJFaQ== # zc879BVIi31s8WB3if64kLQEFdCXU6CbD44WD2Ei # longhorn-test-secret-key
AWS_ENDPOINTS: aHR0cHM6Ly9hcGkubWluaW8uc2t5Ynl0ZS5tZQ== # https://api.minio.skybyte.me


@ -0,0 +1,11 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: longhorn-agz
provisioner: driver.longhorn.io
volumeBindingMode: Immediate
parameters:
numberOfReplicas: "1"
staleReplicaTimeout: "480" # 8 hours in minutes
# diskSelector: ""
nodeSelector: "agz"
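The nodeSelector parameter in these Longhorn StorageClasses matches Longhorn node tags, not Kubernetes labels. Tags can be set from the Longhorn UI or by patching the Longhorn Node CR, for example (node name is a placeholder):

kubectl -n longhorn-system patch nodes.longhorn.io <node-name> --type merge -p '{"spec":{"tags":["agz"]}}'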


@ -0,0 +1,11 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: longhorn-cn
provisioner: driver.longhorn.io
volumeBindingMode: Immediate
parameters:
numberOfReplicas: "2"
staleReplicaTimeout: "480" # 8 hours in minutes
# diskSelector: ""
nodeSelector: "cn"


@ -0,0 +1,11 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: longhorn-tcn
provisioner: driver.longhorn.io
volumeBindingMode: Immediate
parameters:
numberOfReplicas: "2"
staleReplicaTimeout: "480" # 8 hours in minutes
# diskSelector: ""
nodeSelector: "tcn"


@ -0,0 +1,11 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: longhorn-us
provisioner: driver.longhorn.io
parameters:
numberOfReplicas: "2"
staleReplicaTimeout: "480" # 8 hours in minutes
# diskSelector: ""
nodeSelector: "us"


@ -0,0 +1,11 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: longhorn-vkus
provisioner: driver.longhorn.io
parameters:
numberOfReplicas: "2"
staleReplicaTimeout: "480" # 8 hours in minutes
# diskSelector: ""
nodeSelector: "vkus"

longhorn/values.yaml Normal file

@ -0,0 +1,518 @@
# Default values for longhorn.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
# -- Toleration for nodes allowed to run user-deployed components such as Longhorn Manager, Longhorn UI, and Longhorn Driver Deployer.
tolerations: []
# -- Node selector for nodes allowed to run user-deployed components such as Longhorn Manager, Longhorn UI, and Longhorn Driver Deployer.
nodeSelector:
longhorn-ready: "true"
cattle:
# -- Default system registry.
systemDefaultRegistry: ""
windowsCluster:
# -- Setting that allows Longhorn to run on a Rancher Windows cluster.
enabled: false
# -- Toleration for Linux nodes that can run user-deployed Longhorn components.
tolerations:
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
# -- Node selector for Linux nodes that can run user-deployed Longhorn components.
nodeSelector:
kubernetes.io/os: "linux"
longhorn-ready: "true"
defaultSetting:
# -- Toleration for system-managed Longhorn components.
taintToleration: cattle.io/os=linux:NoSchedule
# -- Node selector for system-managed Longhorn components.
systemManagedComponentsNodeSelector: kubernetes.io/os:linux
networkPolicies:
# -- Setting that allows you to enable network policies that control access to Longhorn pods.
enabled: false
# -- Distribution that determines the policy for allowing access for an ingress. (Options: "k3s", "rke2", "rke1")
type: "k3s"
image:
longhorn:
engine:
# -- Repository for the Longhorn Engine image.
repository: longhornio/longhorn-engine
# -- Tag for the Longhorn Engine image.
tag: v1.8.1
manager:
# -- Repository for the Longhorn Manager image.
repository: longhornio/longhorn-manager
# -- Tag for the Longhorn Manager image.
tag: v1.8.1
ui:
# -- Repository for the Longhorn UI image.
repository: longhornio/longhorn-ui
# -- Tag for the Longhorn UI image.
tag: v1.8.1
instanceManager:
# -- Repository for the Longhorn Instance Manager image.
repository: longhornio/longhorn-instance-manager
# -- Tag for the Longhorn Instance Manager image.
tag: v1.8.1
shareManager:
# -- Repository for the Longhorn Share Manager image.
repository: longhornio/longhorn-share-manager
# -- Tag for the Longhorn Share Manager image.
tag: v1.8.1
backingImageManager:
# -- Repository for the Backing Image Manager image. When unspecified, Longhorn uses the default value.
repository: longhornio/backing-image-manager
# -- Tag for the Backing Image Manager image. When unspecified, Longhorn uses the default value.
tag: v1.8.1
supportBundleKit:
# -- Repository for the Longhorn Support Bundle Manager image.
repository: longhornio/support-bundle-kit
# -- Tag for the Longhorn Support Bundle Manager image.
tag: v0.0.52
csi:
attacher:
# -- Repository for the CSI attacher image. When unspecified, Longhorn uses the default value.
repository: longhornio/csi-attacher
# -- Tag for the CSI attacher image. When unspecified, Longhorn uses the default value.
tag: v4.8.1
provisioner:
# -- Repository for the CSI Provisioner image. When unspecified, Longhorn uses the default value.
repository: longhornio/csi-provisioner
# -- Tag for the CSI Provisioner image. When unspecified, Longhorn uses the default value.
tag: v5.2.0
nodeDriverRegistrar:
# -- Repository for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value.
repository: longhornio/csi-node-driver-registrar
# -- Tag for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value.
tag: v2.13.0
resizer:
# -- Repository for the CSI Resizer image. When unspecified, Longhorn uses the default value.
repository: longhornio/csi-resizer
# -- Tag for the CSI Resizer image. When unspecified, Longhorn uses the default value.
tag: v1.13.2
snapshotter:
# -- Repository for the CSI Snapshotter image. When unspecified, Longhorn uses the default value.
repository: longhornio/csi-snapshotter
# -- Tag for the CSI Snapshotter image. When unspecified, Longhorn uses the default value.
tag: v8.2.0
livenessProbe:
# -- Repository for the CSI liveness probe image. When unspecified, Longhorn uses the default value.
repository: longhornio/livenessprobe
# -- Tag for the CSI liveness probe image. When unspecified, Longhorn uses the default value.
tag: v2.15.0
openshift:
oauthProxy:
# -- Repository for the OAuth Proxy image. Specify the upstream image (for example, "quay.io/openshift/origin-oauth-proxy"). This setting applies only to OpenShift users.
repository: ""
# -- Tag for the OAuth Proxy image. Specify OCP/OKD version 4.1 or later (including version 4.15, which is available at quay.io/openshift/origin-oauth-proxy:4.15). This setting applies only to OpenShift users.
tag: ""
# -- Image pull policy that applies to all user-deployed Longhorn components, such as Longhorn Manager, Longhorn driver, and Longhorn UI.
pullPolicy: IfNotPresent
service:
ui:
# -- Service type for Longhorn UI. (Options: "ClusterIP", "NodePort", "LoadBalancer", "Rancher-Proxy")
type: ClusterIP
# -- NodePort port number for Longhorn UI. When unspecified, Longhorn selects a free port between 30000 and 32767.
nodePort: null
manager:
# -- Service type for Longhorn Manager.
type: ClusterIP
# -- NodePort port number for Longhorn Manager. When unspecified, Longhorn selects a free port between 30000 and 32767.
nodePort: ""
persistence:
# -- Setting that allows you to specify the default Longhorn StorageClass.
defaultClass: true
# -- Filesystem type of the default Longhorn StorageClass.
defaultFsType: ext4
# -- mkfs parameters of the default Longhorn StorageClass.
defaultMkfsParams: ""
# -- Replica count of the default Longhorn StorageClass.
defaultClassReplicaCount: 3
# -- Data locality of the default Longhorn StorageClass. (Options: "disabled", "best-effort")
defaultDataLocality: disabled
# -- Reclaim policy that provides instructions for handling of a volume after its claim is released. (Options: "Retain", "Delete")
reclaimPolicy: Delete
# -- Setting that allows you to enable live migration of a Longhorn volume from one node to another.
migratable: false
# -- Setting that disables the revision counter and thereby prevents Longhorn from tracking all write operations to a volume. When salvaging a volume, Longhorn uses properties of the volume-head-xxx.img file (the last file size and the last time the file was modified) to select the replica to be used for volume recovery.
disableRevisionCounter: "true"
# -- Set NFS mount options for Longhorn StorageClass for RWX volumes
nfsOptions: ""
recurringJobSelector:
# -- Setting that allows you to enable the recurring job selector for a Longhorn StorageClass.
enable: false
# -- Recurring job selector for a Longhorn StorageClass. Ensure that quotes are used correctly when specifying job parameters. (Example: `[{"name":"backup", "isGroup":true}]`)
jobList: []
backingImage:
# -- Setting that allows you to use a backing image in a Longhorn StorageClass.
enable: false
# -- Backing image to be used for creating and restoring volumes in a Longhorn StorageClass. When no backing images are available, specify the data source type and parameters that Longhorn can use to create a backing image.
name: ~
# -- Data source type of a backing image used in a Longhorn StorageClass.
# If the backing image exists in the cluster, Longhorn uses this setting to verify the image.
# If the backing image does not exist, Longhorn creates one using the specified data source type.
dataSourceType: ~
# -- Data source parameters of a backing image used in a Longhorn StorageClass.
# You can specify a JSON string of a map. (Example: `'{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'`)
dataSourceParameters: ~
# -- Expected SHA-512 checksum of a backing image used in a Longhorn StorageClass.
expectedChecksum: ~
defaultDiskSelector:
# -- Setting that allows you to enable the disk selector for the default Longhorn StorageClass.
enable: false
# -- Disk selector for the default Longhorn StorageClass. Longhorn uses only disks with the specified tags for storing volume data. (Examples: "nvme,sata")
selector: ""
defaultNodeSelector:
# -- Setting that allows you to enable the node selector for the default Longhorn StorageClass.
enable: false
# -- Node selector for the default Longhorn StorageClass. Longhorn uses only nodes with the specified tags for storing volume data. (Examples: "storage,fast")
selector: ""
# -- Setting that allows you to enable automatic snapshot removal during filesystem trim for a Longhorn StorageClass. (Options: "ignored", "enabled", "disabled")
removeSnapshotsDuringFilesystemTrim: ignored
# -- Setting that allows you to specify the data engine version for the default Longhorn StorageClass. (Options: "v1", "v2")
dataEngine: v1
# -- Setting that allows you to specify the backup target for the default Longhorn StorageClass.
backupTargetName: default
preUpgradeChecker:
# -- Setting that allows Longhorn to perform pre-upgrade checks. Disable this setting when installing Longhorn using Argo CD or other GitOps solutions.
jobEnabled: true
# -- Setting that allows Longhorn to perform upgrade version checks after starting the Longhorn Manager DaemonSet Pods. Disabling this setting also disables `preUpgradeChecker.jobEnabled`. Longhorn recommends keeping this setting enabled.
upgradeVersionCheck: true
csi:
# -- kubelet root directory. When unspecified, Longhorn uses the default value.
kubeletRootDir: ~
# -- Replica count of the CSI Attacher. When unspecified, Longhorn uses the default value ("3").
attacherReplicaCount: ~
# -- Replica count of the CSI Provisioner. When unspecified, Longhorn uses the default value ("3").
provisionerReplicaCount: ~
# -- Replica count of the CSI Resizer. When unspecified, Longhorn uses the default value ("3").
resizerReplicaCount: ~
# -- Replica count of the CSI Snapshotter. When unspecified, Longhorn uses the default value ("3").
snapshotterReplicaCount: ~
defaultSettings:
# -- Setting that allows Longhorn to automatically attach a volume and create snapshots or backups when recurring jobs are run.
allowRecurringJobWhileVolumeDetached: ~
# -- Setting that allows Longhorn to automatically create a default disk only on nodes with the label "node.longhorn.io/create-default-disk=true" (if no other disks exist). When this setting is disabled, Longhorn creates a default disk on each node that is added to the cluster.
createDefaultDiskLabeledNodes: ~
# -- Default path for storing data on a host. The default value is "/var/lib/longhorn/".
defaultDataPath: ~
# -- Default data locality. A Longhorn volume has data locality if a local replica of the volume exists on the same node as the pod that is using the volume.
defaultDataLocality: ~
# -- Setting that allows scheduling on nodes with healthy replicas of the same volume. This setting is disabled by default.
replicaSoftAntiAffinity: ~
# -- Setting that automatically rebalances replicas when an available node is discovered.
replicaAutoBalance: ~
# -- Percentage of storage that can be allocated relative to hard drive capacity. The default value is "100".
storageOverProvisioningPercentage: ~
# -- Percentage of minimum available disk capacity. When the minimum available capacity exceeds the total available capacity, the disk becomes unschedulable until more space is made available for use. The default value is "25".
storageMinimalAvailablePercentage: ~
# -- Percentage of disk space that is not allocated to the default disk on each new Longhorn node.
storageReservedPercentageForDefaultDisk: ~
# -- Upgrade Checker that periodically checks for new Longhorn versions. When a new version is available, a notification appears on the Longhorn UI. This setting is enabled by default
upgradeChecker: ~
# -- The Upgrade Responder sends a notification whenever a new Longhorn version that you can upgrade to becomes available. The default value is https://longhorn-upgrade-responder.rancher.io/v1/checkupgrade.
upgradeResponderURL: ~
# -- Default number of replicas for volumes created using the Longhorn UI. For Kubernetes configuration, modify the `numberOfReplicas` field in the StorageClass. The default value is "3".
defaultReplicaCount: ~
# -- Default name of Longhorn static StorageClass. "storageClassName" is assigned to PVs and PVCs that are created for an existing Longhorn volume. "storageClassName" can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. "storageClassName" needs to be an existing StorageClass. The default value is "longhorn-static".
defaultLonghornStaticStorageClass: ~
# -- Number of minutes that Longhorn keeps a failed backup resource. When the value is "0", automatic deletion is disabled.
failedBackupTTL: ~
# -- Number of minutes that Longhorn allows for the backup execution. The default value is "1".
backupExecutionTimeout: ~
# -- Setting that restores recurring jobs from a backup volume on a backup target and creates recurring jobs if none exist during backup restoration.
restoreVolumeRecurringJobs: ~
# -- Maximum number of successful recurring backup and snapshot jobs to be retained. When the value is "0", a history of successful recurring jobs is not retained.
recurringSuccessfulJobsHistoryLimit: ~
# -- Maximum number of failed recurring backup and snapshot jobs to be retained. When the value is "0", a history of failed recurring jobs is not retained.
recurringFailedJobsHistoryLimit: ~
# -- Maximum number of snapshots or backups to be retained.
recurringJobMaxRetention: ~
# -- Maximum number of failed support bundles that can exist in the cluster. When the value is "0", Longhorn automatically purges all failed support bundles.
supportBundleFailedHistoryLimit: ~
# -- Taint or toleration for system-managed Longhorn components.
# Specify values using a semicolon-separated list in `kubectl taint` syntax (Example: key1=value1:effect; key2=value2:effect).
taintToleration: ~
# -- Node selector for system-managed Longhorn components.
systemManagedComponentsNodeSelector: "longhorn-ready:true"
# -- PriorityClass for system-managed Longhorn components.
# This setting can help prevent Longhorn components from being evicted under Node Pressure.
# Notice that this will be applied to Longhorn user-deployed components by default if there are no priority class values set yet, such as `longhornManager.priorityClass`.
priorityClass: &defaultPriorityClassNameRef "longhorn-critical"
# -- Setting that allows Longhorn to automatically salvage volumes when all replicas become faulty (for example, when the network connection is interrupted). Longhorn determines which replicas are usable and then uses these replicas for the volume. This setting is enabled by default.
autoSalvage: ~
# -- Setting that allows Longhorn to automatically delete a workload pod that is managed by a controller (for example, daemonset) whenever a Longhorn volume is detached unexpectedly (for example, during Kubernetes upgrades). After deletion, the controller restarts the pod and then Kubernetes handles volume reattachment and remounting.
autoDeletePodWhenVolumeDetachedUnexpectedly: ~
# -- Setting that prevents Longhorn Manager from scheduling replicas on a cordoned Kubernetes node. This setting is enabled by default.
disableSchedulingOnCordonedNode: ~
# -- Setting that allows Longhorn to schedule new replicas of a volume to nodes in the same zone as existing healthy replicas. Nodes that do not belong to any zone are treated as existing in the zone that contains healthy replicas. When identifying zones, Longhorn relies on the label "topology.kubernetes.io/zone=<Zone name of the node>" in the Kubernetes node object.
replicaZoneSoftAntiAffinity: ~
# -- Setting that allows scheduling on disks with existing healthy replicas of the same volume. This setting is enabled by default.
replicaDiskSoftAntiAffinity: ~
# -- Policy that defines the action Longhorn takes when a volume is stuck with a StatefulSet or Deployment pod on a node that failed.
nodeDownPodDeletionPolicy: ~
# -- Policy that defines the action Longhorn takes when a node with the last healthy replica of a volume is drained.
nodeDrainPolicy: ~
# -- Setting that allows automatic detaching of manually-attached volumes when a node is cordoned.
detachManuallyAttachedVolumesWhenCordoned: ~
# -- Number of seconds that Longhorn waits before reusing existing data on a failed replica instead of creating a new replica of a degraded volume.
replicaReplenishmentWaitInterval: ~
# -- Maximum number of replicas that can be concurrently rebuilt on each node.
concurrentReplicaRebuildPerNodeLimit: ~
# -- Maximum number of volumes that can be concurrently restored on each node using a backup. When the value is "0", restoration of volumes using a backup is disabled.
concurrentVolumeBackupRestorePerNodeLimit: ~
# -- Setting that disables the revision counter and thereby prevents Longhorn from tracking all write operations to a volume. When salvaging a volume, Longhorn uses properties of the "volume-head-xxx.img" file (the last file size and the last time the file was modified) to select the replica to be used for volume recovery. This setting applies only to volumes created using the Longhorn UI.
disableRevisionCounter: "true"
# -- Image pull policy for system-managed pods, such as Instance Manager, engine images, and CSI Driver. Changes to the image pull policy are applied only after the system-managed pods restart.
systemManagedPodsImagePullPolicy: ~
# -- Setting that allows you to create and attach a volume without having all replicas scheduled at the time of creation.
allowVolumeCreationWithDegradedAvailability: ~
# -- Setting that allows Longhorn to automatically clean up the system-generated snapshot after replica rebuilding is completed.
autoCleanupSystemGeneratedSnapshot: ~
# -- Setting that allows Longhorn to automatically clean up the snapshot generated by a recurring backup job.
autoCleanupRecurringJobBackupSnapshot: ~
# -- Maximum number of engines that are allowed to concurrently upgrade on each node after Longhorn Manager is upgraded. When the value is "0", Longhorn does not automatically upgrade volume engines to the new default engine image version.
concurrentAutomaticEngineUpgradePerNodeLimit: ~
# -- Number of minutes that Longhorn waits before cleaning up the backing image file when no replicas in the disk are using it.
backingImageCleanupWaitInterval: ~
# -- Number of seconds that Longhorn waits before downloading a backing image file again when the status of all image disk files changes to "failed" or "unknown".
backingImageRecoveryWaitInterval: ~
# -- Percentage of the total allocatable CPU resources on each node to be reserved for each instance manager pod when the V1 Data Engine is enabled. The default value is "12".
guaranteedInstanceManagerCPU: ~
# -- Setting that notifies Longhorn that the cluster is using the Kubernetes Cluster Autoscaler.
kubernetesClusterAutoscalerEnabled: ~
# -- Setting that allows Longhorn to automatically delete an orphaned resource and the corresponding data (for example, stale replicas). Orphaned resources on failed or unknown nodes are not automatically cleaned up.
orphanAutoDeletion: ~
# -- Storage network for in-cluster traffic. When unspecified, Longhorn uses the Kubernetes cluster network.
storageNetwork: ~
# -- Flag that prevents accidental uninstallation of Longhorn.
deletingConfirmationFlag: ~
# -- Timeout between the Longhorn Engine and replicas. Specify a value between "8" and "30" seconds. The default value is "8".
engineReplicaTimeout: ~
# -- Setting that allows you to enable and disable snapshot hashing and data integrity checks.
snapshotDataIntegrity: ~
# -- Setting that allows disabling of snapshot hashing after snapshot creation to minimize impact on system performance.
snapshotDataIntegrityImmediateCheckAfterSnapshotCreation: ~
# -- Setting that defines when Longhorn checks the integrity of data in snapshot disk files. You must use the Unix cron expression format.
snapshotDataIntegrityCronjob: ~
# -- Setting that allows Longhorn to automatically mark the latest snapshot and its parent files as removed during a filesystem trim. Longhorn does not remove snapshots containing multiple child files.
removeSnapshotsDuringFilesystemTrim: ~
# -- Setting that allows fast rebuilding of replicas using the checksum of snapshot disk files. Before enabling this setting, you must set the snapshot-data-integrity value to "enable" or "fast-check".
fastReplicaRebuildEnabled: ~
# -- Number of seconds that an HTTP client waits for a response from a File Sync server before considering the connection to have failed.
replicaFileSyncHttpClientTimeout: ~
# -- Number of seconds that Longhorn allows for the completion of replica rebuilding and snapshot cloning operations.
longGRPCTimeOut: ~
# -- Log levels that indicate the type and severity of logs in Longhorn Manager. The default value is "Info". (Options: "Panic", "Fatal", "Error", "Warn", "Info", "Debug", "Trace")
logLevel: ~
# -- Setting that allows you to specify a backup compression method.
backupCompressionMethod: ~
# -- Maximum number of worker threads that can concurrently run for each backup.
backupConcurrentLimit: ~
# -- Maximum number of worker threads that can concurrently run for each restore operation.
restoreConcurrentLimit: ~
# -- Setting that allows you to enable the V1 Data Engine.
v1DataEngine: ~
# -- Setting that allows you to enable the V2 Data Engine, which is based on the Storage Performance Development Kit (SPDK). The V2 Data Engine is an experimental feature and should not be used in production environments.
v2DataEngine: ~
# -- Setting that allows you to configure maximum huge page size (in MiB) for the V2 Data Engine.
v2DataEngineHugepageLimit: ~
# -- Number of millicpus on each node to be reserved for each Instance Manager pod when the V2 Data Engine is enabled. The default value is "1250".
v2DataEngineGuaranteedInstanceManagerCPU: ~
# -- CPU cores on which the Storage Performance Development Kit (SPDK) target daemon should run. The SPDK target daemon is located in each Instance Manager pod. Ensure that the number of cores is less than or equal to the guaranteed Instance Manager CPUs for the V2 Data Engine. The default value is "0x1".
v2DataEngineCPUMask: ~
# -- Setting that allows scheduling of empty node selector volumes to any node.
allowEmptyNodeSelectorVolume: ~
# -- Setting that allows scheduling of empty disk selector volumes to any disk.
allowEmptyDiskSelectorVolume: ~
# -- Setting that allows Longhorn to periodically collect anonymous usage data for product improvement purposes. Longhorn sends collected data to the [Upgrade Responder](https://github.com/longhorn/upgrade-responder) server, which is the data source of the Longhorn Public Metrics Dashboard (https://metrics.longhorn.io). The Upgrade Responder server does not store data that can be used to identify clients, including IP addresses.
allowCollectingLonghornUsageMetrics: ~
# -- Setting that temporarily prevents all attempts to purge volume snapshots.
disableSnapshotPurge: ~
# -- Maximum snapshot count for a volume. The value should be between 2 to 250
snapshotMaxCount: ~
# -- Setting that allows you to configure the log level of the SPDK target daemon (spdk_tgt) of the V2 Data Engine.
v2DataEngineLogLevel: ~
# -- Setting that allows you to configure the log flags of the SPDK target daemon (spdk_tgt) of the V2 Data Engine.
v2DataEngineLogFlags: ~
# -- Setting that freezes the filesystem on the root partition before a snapshot is created.
freezeFilesystemForSnapshot: ~
# -- Setting that automatically cleans up the snapshot when the backup is deleted.
autoCleanupSnapshotWhenDeleteBackup: ~
# -- Setting that allows Longhorn to detect node failure and immediately migrate affected RWX volumes.
rwxVolumeFastFailover: ~
# -- Setting that allows you to update the default backupstore.
defaultBackupStore:
# -- Endpoint used to access the default backupstore. (Options: "NFS", "CIFS", "AWS", "GCP", "AZURE")
backupTarget: ~
# -- Name of the Kubernetes secret associated with the default backup target.
backupTargetCredentialSecret: ~
# -- Number of seconds that Longhorn waits before checking the default backupstore for new backups. The default value is "300". When the value is "0", polling is disabled.
pollInterval: ~
privateRegistry:
# -- Setting that allows you to create a private registry secret.
createSecret: ~
# -- URL of a private registry. When unspecified, Longhorn uses the default system registry.
registryUrl: ~
# -- User account used for authenticating with a private registry.
registryUser: ~
# -- Password for authenticating with a private registry.
registryPasswd: ~
# -- Kubernetes secret that allows you to pull images from a private registry. This setting applies only when creation of private registry secrets is enabled. You must include the private registry name in the secret name.
registrySecret: ~
longhornManager:
log:
# -- Format of Longhorn Manager logs. (Options: "plain", "json")
format: plain
# -- PriorityClass for Longhorn Manager.
priorityClass: *defaultPriorityClassNameRef
# -- Toleration for Longhorn Manager on nodes allowed to run Longhorn components.
tolerations: []
## If you want to set tolerations for Longhorn Manager DaemonSet, delete the `[]` in the line above
## and uncomment this example block
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
# -- Node selector for Longhorn Manager. Specify the nodes allowed to run Longhorn Manager.
nodeSelector:
longhorn-ready: "true"
## If you want to set node selector for Longhorn Manager DaemonSet, delete the `{}` in the line above
## and uncomment this example block
# label-key1: "label-value1"
# label-key2: "label-value2"
# -- Annotation for the Longhorn Manager service.
serviceAnnotations: {}
## If you want to set annotations for the Longhorn Manager service, delete the `{}` in the line above
## and uncomment this example block
# annotation-key1: "annotation-value1"
# annotation-key2: "annotation-value2"
longhornDriver:
log:
# -- Format of longhorn-driver logs. (Options: "plain", "json")
format: plain
# -- PriorityClass for Longhorn Driver.
priorityClass: *defaultPriorityClassNameRef
# -- Toleration for Longhorn Driver on nodes allowed to run Longhorn components.
tolerations: []
## If you want to set tolerations for Longhorn Driver Deployer Deployment, delete the `[]` in the line above
## and uncomment this example block
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
# -- Node selector for Longhorn Driver. Specify the nodes allowed to run Longhorn Driver.
nodeSelector:
longhorn-ready: "true"
## If you want to set node selector for Longhorn Driver Deployer Deployment, delete the `{}` in the line above
## and uncomment this example block
# label-key1: "label-value1"
# label-key2: "label-value2"
longhornUI:
# -- Replica count for Longhorn UI.
replicas: 2
# -- PriorityClass for Longhorn UI.
priorityClass: *defaultPriorityClassNameRef
# -- Toleration for Longhorn UI on nodes allowed to run Longhorn components.
tolerations: []
## If you want to set tolerations for Longhorn UI Deployment, delete the `[]` in the line above
## and uncomment this example block
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
# -- Node selector for Longhorn UI. Specify the nodes allowed to run Longhorn UI.
nodeSelector:
longhorn-ready: "true"
## If you want to set node selector for Longhorn UI Deployment, delete the `{}` in the line above
## and uncomment this example block
# label-key1: "label-value1"
# label-key2: "label-value2"
ingress:
# -- Setting that allows Longhorn to generate ingress records for the Longhorn UI service.
enabled: false
# -- IngressClass resource that contains ingress configuration, including the name of the Ingress controller.
# ingressClassName can replace the kubernetes.io/ingress.class annotation used in earlier Kubernetes releases.
ingressClassName: ~
# -- Hostname of the Layer 7 load balancer.
host: sslip.io
# -- Setting that allows you to enable TLS on ingress records.
tls: false
# -- Setting that allows you to enable secure connections to the Longhorn UI service via port 443.
secureBackends: false
# -- TLS secret that contains the private key and certificate to be used for TLS. This setting applies only when TLS is enabled on ingress records.
tlsSecret: longhorn.local-tls
# -- Default ingress path. You can access the Longhorn UI by following the full ingress path {{host}}+{{path}}.
path: /
# -- Ingress path type. To maintain backward compatibility, the default value is "ImplementationSpecific".
pathType: ImplementationSpecific
## If you're using kube-lego, you will want to add:
## kubernetes.io/tls-acme: true
##
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md
##
## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
# -- Ingress annotations in the form of key-value pairs.
annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: true
# -- Secret that contains a TLS private key and certificate. Use secrets if you want to use your own certificates to secure ingresses.
secrets:
## If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
## -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
# - name: longhorn.local-tls
# key:
# certificate:
# -- Setting that allows you to enable pod security policies (PSPs) that allow privileged Longhorn pods to start. This setting applies only to clusters running Kubernetes 1.25 and earlier, and with the built-in Pod Security admission controller enabled.
enablePSP: false
# -- Specify override namespace, specifically this is useful for using longhorn as sub-chart and its release namespace is not the `longhorn-system`.
namespaceOverride: ""
# -- Annotation for the Longhorn Manager DaemonSet pods. This setting is optional.
annotations: {}
serviceAccount:
# -- Annotations to add to the service account
annotations: {}
metrics:
serviceMonitor:
# -- Setting that allows the creation of a Prometheus ServiceMonitor resource for Longhorn Manager components.
enabled: false
# -- Additional labels for the Prometheus ServiceMonitor resource.
additionalLabels: {}
# -- Annotations for the Prometheus ServiceMonitor resource.
annotations: {}
# -- Interval at which Prometheus scrapes the metrics from the target.
interval: ""
# -- Timeout after which Prometheus considers the scrape to be failed.
scrapeTimeout: ""
# -- Configures the relabeling rules to apply the targets metadata labels. See the [Prometheus Operator
# documentation](https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.Endpoint) for
# formatting details.
relabelings: []
# -- Configures the relabeling rules to apply to the samples before ingestion. See the [Prometheus Operator
# documentation](https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.Endpoint) for
# formatting details.
metricRelabelings: []
## openshift settings
openshift:
# -- Setting that allows Longhorn to integrate with OpenShift.
enabled: false
ui:
# -- Route for connections between Longhorn and the OpenShift web console.
route: "longhorn-ui"
# -- Port for accessing the OpenShift web console.
port: 443
# -- Port for proxy that provides access to the OpenShift web console.
proxy: 8443
# -- Setting that allows Longhorn to generate code coverage profiles.
enableGoCoverDir: false
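These values pin all Longhorn components to nodes labeled longhorn-ready=true, so nodes need that label before the chart is installed. A sketch of labeling a node and applying the values with the upstream chart:

kubectl label node <node-name> longhorn-ready=true
helm repo add longhorn https://charts.longhorn.io
helm upgrade --install longhorn longhorn/longhorn -n longhorn-system --create-namespace -f longhorn/values.yaml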

memos/ingress.yaml Normal file

@ -0,0 +1,36 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: memos-ingress
namespace: memos
annotations:
cert-manager.io/cluster-issuer: "dnspod-cluster-issuer"
spec:
ingressClassName: nginx
tls:
- hosts:
- memos.frytea.com
- memos.tsh1.frytea.com
secretName: memos-tgz1-frytea-com-tls
rules:
- host: memos.frytea.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: memos-svc
port:
name: web
- host: memos.tsh1.frytea.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: memos-svc
port:
name: web

memos/load-new.yaml Normal file

@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: memos-sts-new
namespace: memos
labels:
app: memos
spec:
replicas: 1
selector:
matchLabels:
app: memos
template:
metadata:
annotations: {}
labels:
app: memos
spec:
nodeSelector:
kubernetes.io/hostname: tencent-gz1
containers:
- name: memos
image: neosmemo/memos:0.23.0
ports:
- containerPort: 5230
name: web
volumeMounts:
- name: memos-data
mountPath: /var/opt/memos
volumes:
- name: memos-data
persistentVolumeClaim:
claimName: memos-tgz1-pvc-new
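The PersistentVolumeClaim referenced above (memos-tgz1-pvc-new) is not included in this section, so it has to exist in the memos namespace before the StatefulSet can schedule. A quick existence check:

kubectl -n memos get pvc memos-tgz1-pvc-new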

Some files were not shown because too many files have changed in this diff.