# manifests/n8n-helm/values.yaml
# 2025-04-12 13:11:34 +08:00
# README
# High level values structure, overview and explanation of the values.yaml file.
# 1. Global and chart wide values, like the image repository, image tag, etc.
# 2. Ingress, (default is nginx, but you can change it to your own ingress controller)
# 3. Main n8n app configuration + kubernetes specific settings
# 4. Worker related settings + kubernetes specific settings
# 5. Webhook related settings + kubernetes specific settings
# 6. Raw Resources to pass through your own manifests like GatewayAPI, ServiceMonitor etc.
# 7. Redis related settings + kubernetes specific settings
#
# General Config
#
# default .Chart.Name
nameOverride:
# default .Chart.Name or .Values.nameOverride
fullnameOverride:
#
# Common Kubernetes Config Settings for this entire n8n deployment
#
image:
  repository: n8nio/n8n
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""
imagePullSecrets: []
#
# Ingress
#
ingress:
  enabled: true
  annotations:
    cert-manager.io/cluster-issuer: "cf-cluster-issuer"
  # define a custom ingress class Name, like "traefik" or "nginx"
  className: "nginx"
  hosts:
    - host: n8n.skybyte.me
      # paths must be a list — the chart template iterates over it
      paths:
        - /
  tls:
    - hosts:
        - n8n.skybyte.me
      secretName: n8n-skybyte-me-tls
# the main (n8n) application related configuration + Kubernetes specific settings
# The config: {} dictionary is converted to environmental variables in the ConfigMap.
main:
  # See https://docs.n8n.io/hosting/configuration/environment-variables/ for all values.
  config: {}
  # n8n:
  #   db:
  #     type: postgresdb
  #     postgresdb:
  #       host: 192.168.0.52
  # Dictionary for secrets, unlike config:, the values here will end up in the secret file.
  # The YAML entry db.postgresdb.password: my_secret is transformed DB_POSTGRESDB_password=bXlfc2VjcmV0
  # See https://docs.n8n.io/hosting/configuration/environment-variables/
  secret: {}
  # n8n:
  #   # if you run n8n stateless, you should provide an encryption key here.
  #   encryption_key:
  #
  #   database:
  #     postgresdb:
  #       password: 'big secret'
  # Extra environmental variables, so you can reference other configmaps and secrets into n8n as env vars.
  extraEnv:
  # N8N_DB_POSTGRESDB_NAME:
  #   valueFrom:
  #     secretKeyRef:
  #       name: db-app
  #       key: dbname
  #
  # N8n Kubernetes specific settings
  #
  persistence:
    # If true, use a Persistent Volume Claim, If false, use emptyDir
    enabled: true
    # what type volume, possible options are [existing, emptyDir, dynamic] dynamic for Dynamic Volume Provisioning, existing for using an existing Claim
    type: dynamic
    # Persistent Volume Storage Class
    # If defined, storageClassName: <storageClass>
    # If set to "-", storageClassName: "", which disables dynamic provisioning
    # If undefined (the default) or set to null, no storageClassName spec is
    # set, choosing the default provisioner. (gp2 on AWS, standard on
    # GKE, AWS & OpenStack)
    #
    storageClass: "local-vkus2"
    # PVC annotations
    #
    # If you need this annotation include it under `values.yml` file and pvc.yml template will add it.
    # This is not maintained at Helm v3 anymore.
    # https://github.com/8gears/n8n-helm-chart/issues/8
    #
    # annotations:
    #   helm.sh/resource-policy: keep
    # Persistent Volume Access Mode
    #
    accessModes:
      - ReadWriteOnce
    # Persistent Volume size
    size: 1Gi
    # Use an existing PVC
    # existingClaim:
  extraVolumes: []
  # - name: db-ca-cert
  #   secret:
  #     secretName: db-ca
  #     items:
  #       - key: ca.crt
  #         path: ca.crt
  extraVolumeMounts: []
  # - name: db-ca-cert
  #   mountPath: /etc/ssl/certs/postgresql
  #   readOnly: true
  # Number of desired pods. More than one pod is supported in n8n enterprise.
  replicaCount: 1
  # here you can specify the deployment strategy as Recreate or RollingUpdate with optional maxSurge and maxUnavailable
  # If these options are not set, default values are 25%
  # deploymentStrategy:
  #   type: Recreate | RollingUpdate
  #   maxSurge: "50%"
  #   maxUnavailable: "50%"
  deploymentStrategy:
    type: "Recreate"
    # maxSurge: "50%"
    # maxUnavailable: "50%"
  serviceAccount:
    # Specifies whether a service account should be created
    create: true
    # Annotations to add to the service account
    annotations: {}
    # The name of the service account to use.
    # If not set and create is true, a name is generated using the fullname template
    name: ""
  podAnnotations: {}
  podLabels: {}
  podSecurityContext:
    runAsNonRoot: true
    runAsUser: 1000
    runAsGroup: 1000
    fsGroup: 1000
  securityContext: {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000
  # here you can specify lifecycle hooks - it can be used e.g., to easily add packages to the container without building
  # your own docker image
  # see https://github.com/8gears/n8n-helm-chart/pull/30
  lifecycle: {}
  # here's the sample configuration to add mysql-client to the container
  # lifecycle:
  #   postStart:
  #     exec:
  #       command: ["/bin/sh", "-c", "apk add mysql-client"]
  # here you can override a command for main container
  # it may be used to override a starting script (e.g., to resolve issues like https://github.com/n8n-io/n8n/issues/6412) or run additional preparation steps (e.g., installing additional software)
  command: []
  # sample configuration that overrides starting script and solves above issue (also it runs n8n as root, so be careful):
  # command:
  #   - tini
  #   - --
  #   - /bin/sh
  #   - -c
  #   - chmod o+rx /root; chown -R node /root/.n8n || true; chown -R node /root/.n8n; ln -s /root/.n8n /home/node; chown -R node /home/node || true; node /usr/local/bin/n8n
  # here you can override the livenessProbe for the main container
  # it may be used to increase the timeout for the livenessProbe (e.g., to resolve issues like
  livenessProbe:
    httpGet:
      path: /healthz
      port: http
    # initialDelaySeconds: 30
    # periodSeconds: 10
    # timeoutSeconds: 5
    # failureThreshold: 6
    # successThreshold: 1
  # here you can override the readinessProbe for the main container
  # it may be used to increase the timeout for the readinessProbe (e.g., to resolve issues like
  readinessProbe:
    httpGet:
      path: /healthz
      port: http
    # initialDelaySeconds: 30
    # periodSeconds: 10
    # timeoutSeconds: 5
    # failureThreshold: 6
    # successThreshold: 1
  # List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started.
  # See https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
  initContainers: []
  # - name: init-data-dir
  #   image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
  #   command: [ "/bin/sh", "-c", "mkdir -p /home/node/.n8n/" ]
  #   volumeMounts:
  #     - name: data
  #       mountPath: /home/node/.n8n
  service:
    annotations: {}
    # -- Service types allow you to specify what kind of Service you want.
    # E.g., ClusterIP, NodePort, LoadBalancer, ExternalName
    type: ClusterIP
    # -- Service port
    port: 80
  resources: {}
  # We usually recommend not specifying default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
  autoscaling:
    enabled: false
    minReplicas: 1
    maxReplicas: 100
    targetCPUUtilizationPercentage: 80
    # targetMemoryUtilizationPercentage: 80
  nodeSelector: {}
  tolerations: []
  affinity: {}
# # # # # # # # # # # # # # # #
#
# Worker related settings
#
worker:
  enabled: false
  count: 2
  # You can define the number of jobs a worker can run in parallel by using the concurrency flag. It defaults to 10. To change it:
  concurrency: 10
  #
  # Worker Kubernetes specific settings
  #
  persistence:
    # If true, use a Persistent Volume Claim, If false, use emptyDir
    enabled: false
    # what type volume, possible options are [existing, emptyDir, dynamic] dynamic for Dynamic Volume Provisioning, existing for using an existing Claim
    type: emptyDir
    # Persistent Volume Storage Class
    # If defined, storageClassName: <storageClass>
    # If set to "-", storageClassName: "", which disables dynamic provisioning
    # If undefined (the default) or set to null, no storageClassName spec is
    # set, choosing the default provisioner. (gp2 on AWS, standard on
    # GKE, AWS & OpenStack)
    #
    # storageClass: "-"
    # PVC annotations
    #
    # If you need this annotation include it under `values.yml` file and pvc.yml template will add it.
    # This is not maintained at Helm v3 anymore.
    # https://github.com/8gears/n8n-helm-chart/issues/8
    #
    # annotations:
    #   helm.sh/resource-policy: keep
    # Persistent Volume Access Mode
    accessModes:
      - ReadWriteOnce
    # Persistent Volume size
    size: 1Gi
    # Use an existing PVC
    # existingClaim:
  # Number of desired pods.
  replicaCount: 1
  # here you can specify the deployment strategy as Recreate or RollingUpdate with optional maxSurge and maxUnavailable
  # If these options are not set, default values are 25%
  # deploymentStrategy:
  #   type: RollingUpdate
  #   maxSurge: "50%"
  #   maxUnavailable: "50%"
  deploymentStrategy:
    type: "Recreate"
    # maxSurge: "50%"
    # maxUnavailable: "50%"
  serviceAccount:
    # Specifies whether a service account should be created
    create: true
    # Annotations to add to the service account
    annotations: {}
    # The name of the service account to use.
    # If not set and create is true, a name is generated using the fullname template
    name: ""
  podAnnotations: {}
  podLabels: {}
  podSecurityContext:
    runAsNonRoot: true
    runAsUser: 1000
    runAsGroup: 1000
    fsGroup: 1000
  securityContext: {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000
  # here you can specify lifecycle hooks - it can be used e.g., to easily add packages to the container without building
  # your own docker image
  # see https://github.com/8gears/n8n-helm-chart/pull/30
  lifecycle: {}
  # here's the sample configuration to add mysql-client to the container
  # lifecycle:
  #   postStart:
  #     exec:
  #       command: ["/bin/sh", "-c", "apk add mysql-client"]
  # here you can override a command for worker container
  # it may be used to override a starting script (e.g., to resolve issues like https://github.com/n8n-io/n8n/issues/6412) or
  # run additional preparation steps (e.g., installing additional software)
  command: []
  # sample configuration that overrides starting script and solves above issue (also it runs n8n as root, so be careful):
  # command:
  #   - tini
  #   - --
  #   - /bin/sh
  #   - -c
  #   - chmod o+rx /root; chown -R node /root/.n8n || true; chown -R node /root/.n8n; ln -s /root/.n8n /home/node; chown -R node /home/node || true; node /usr/local/bin/n8n
  # command args
  commandArgs: []
  # here you can override the livenessProbe for the worker container
  # it may be used to increase the timeout for the livenessProbe (e.g., to resolve issues like
  livenessProbe:
    httpGet:
      path: /healthz
      port: http
    # initialDelaySeconds: 30
    # periodSeconds: 10
    # timeoutSeconds: 5
    # failureThreshold: 6
    # successThreshold: 1
  # here you can override the readinessProbe for the worker container
  # it may be used to increase the timeout for the readinessProbe (e.g., to resolve issues like
  readinessProbe:
    httpGet:
      path: /healthz
      port: http
    # initialDelaySeconds: 30
    # periodSeconds: 10
    # timeoutSeconds: 5
    # failureThreshold: 6
    # successThreshold: 1
  # List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started.
  # See https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
  initContainers: []
  service:
    annotations: {}
    # -- Service types allow you to specify what kind of Service you want.
    # E.g., ClusterIP, NodePort, LoadBalancer, ExternalName
    type: ClusterIP
    # -- Service port
    port: 80
  resources: {}
  # We usually recommend not specifying default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
  autoscaling:
    enabled: false
    minReplicas: 1
    maxReplicas: 100
    targetCPUUtilizationPercentage: 80
    # targetMemoryUtilizationPercentage: 80
  nodeSelector: {}
  tolerations: []
  affinity: {}
# Webhook related settings
# With .Values.scaling.webhook.enabled=true you disable Webhooks from the main process, but you enable the processing on a different Webhook instance.
# See https://github.com/8gears/n8n-helm-chart/issues/39#issuecomment-1579991754 for the full explanation.
# Webhook processes rely on Redis too.
webhook:
  enabled: false
  # additional (to main) config for webhook
  config: {}
  # additional (to main) secret for webhook
  secret: {}
  # Extra environmental variables, so you can reference other configmaps and secrets into n8n as env vars.
  extraEnv: {}
  # WEBHOOK_URL:
  #   value: "http://webhook.domain.tld"
  #
  # Webhook Kubernetes specific settings
  #
  persistence:
    # If true, use a Persistent Volume Claim, If false, use emptyDir
    enabled: false
    # what type volume, possible options are [existing, emptyDir, dynamic] dynamic for Dynamic Volume Provisioning, existing for using an existing Claim
    type: emptyDir
    # Persistent Volume Storage Class
    # If defined, storageClassName: <storageClass>
    # If set to "-", storageClassName: "", which disables dynamic provisioning
    # If undefined (the default) or set to null, no storageClassName spec is
    # set, choosing the default provisioner. (gp2 on AWS, standard on
    # GKE, AWS & OpenStack)
    #
    # storageClass: "-"
    # PVC annotations
    #
    # If you need this annotation include it under `values.yml` file and pvc.yml template will add it.
    # This is not maintained at Helm v3 anymore.
    # https://github.com/8gears/n8n-helm-chart/issues/8
    #
    # annotations:
    #   helm.sh/resource-policy: keep
    # Persistent Volume Access Mode
    #
    accessModes:
      - ReadWriteOnce
    # Persistent Volume size
    #
    size: 1Gi
    # Use an existing PVC
    #
    # existingClaim:
  # Number of desired pods.
  replicaCount: 1
  # here you can specify the deployment strategy as Recreate or RollingUpdate with optional maxSurge and maxUnavailable
  # If these options are not set, default values are 25%
  # deploymentStrategy:
  #   type: RollingUpdate
  #   maxSurge: "50%"
  #   maxUnavailable: "50%"
  deploymentStrategy:
    type: "Recreate"
  # NOTE(review): nameOverride/fullnameOverride here shadow the chart-level keys and look
  # misplaced — confirm against the chart templates whether webhook actually reads them.
  nameOverride: ""
  fullnameOverride: ""
  serviceAccount:
    # Specifies whether a service account should be created
    create: true
    # Annotations to add to the service account
    annotations: {}
    # The name of the service account to use.
    # If not set and create is true, a name is generated using the fullname template
    name: ""
  podAnnotations: {}
  podLabels: {}
  podSecurityContext:
    runAsNonRoot: true
    runAsUser: 1000
    runAsGroup: 1000
    fsGroup: 1000
  securityContext: {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000
  # here you can specify lifecycle hooks - it can be used e.g., to easily add packages to the container without building
  # your own docker image
  # see https://github.com/8gears/n8n-helm-chart/pull/30
  lifecycle: {}
  # here's the sample configuration to add mysql-client to the container
  # lifecycle:
  #   postStart:
  #     exec:
  #       command: ["/bin/sh", "-c", "apk add mysql-client"]
  # here you can override a command for webhook container
  # it may be used to override a starting script (e.g., to resolve issues like https://github.com/n8n-io/n8n/issues/6412) or
  # run additional preparation steps (e.g., installing additional software)
  command: []
  # sample configuration that overrides starting script and solves above issue (also it runs n8n as root, so be careful):
  # command:
  #   - tini
  #   - --
  #   - /bin/sh
  #   - -c
  #   - chmod o+rx /root; chown -R node /root/.n8n || true; chown -R node /root/.n8n; ln -s /root/.n8n /home/node; chown -R node /home/node || true; node /usr/local/bin/n8n
  # Command Arguments
  commandArgs: []
  # here you can override the livenessProbe for the webhook container
  # it may be used to increase the timeout for the livenessProbe (e.g., to resolve issues like
  livenessProbe:
    httpGet:
      path: /healthz
      port: http
    # initialDelaySeconds: 30
    # periodSeconds: 10
    # timeoutSeconds: 5
    # failureThreshold: 6
    # successThreshold: 1
  # here you can override the readinessProbe for the webhook container
  # it may be used to increase the timeout for the readinessProbe (e.g., to resolve issues like
  readinessProbe:
    httpGet:
      path: /healthz
      port: http
    # initialDelaySeconds: 30
    # periodSeconds: 10
    # timeoutSeconds: 5
    # failureThreshold: 6
    # successThreshold: 1
  # List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started.
  # See https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
  initContainers: []
  service:
    annotations: {}
    # -- Service types allow you to specify what kind of Service you want.
    # E.g., ClusterIP, NodePort, LoadBalancer, ExternalName
    type: ClusterIP
    # -- Service port
    port: 80
  resources: {}
  # We usually recommend not specifying default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
  autoscaling:
    enabled: false
    minReplicas: 1
    maxReplicas: 100
    targetCPUUtilizationPercentage: 80
    # targetMemoryUtilizationPercentage: 80
  nodeSelector: {}
  tolerations: []
  affinity: {}
#
# Additional resources
#
# Takes a list of Kubernetes resources and merges each resource with a default metadata.labels map and
# installs the result.
# Use this to add any arbitrary Kubernetes manifests alongside this chart instead of kubectl and scripts.
resources: []
# - apiVersion: v1
#   kind: ConfigMap
#   metadata:
#     name: example-config
#   data:
#     example.property.1: "value1"
#     example.property.2: "value2"
# As an alternative to the above, you can also use a string as the value of the data field.
# - |
#   apiVersion: v1
#   kind: ConfigMap
#   metadata:
#     name: example-config-string
#   data:
#     example.property.1: "value1"
#     example.property.2: "value2"
# Add additional templates.
# In contrast to the resources field, these templates are not merged with the default metadata.labels map.
# The templates are rendered with the values.yaml file as the context.
templates: []
# - |
#   apiVersion: v1
#   kind: ConfigMap
#   metadata:
#     name: my-config
#   stringData:
#     image_name: {{ .Values.image.repository }}
# Bitnami Valkey configuration
# https://artifacthub.io/packages/helm/bitnami/valkey
redis:
  enabled: false
  architecture: standalone
  primary:
    persistence:
      enabled: false
      existingClaim: ""
      size: 2Gi