# -- the initial number of nodes in the CouchDB cluster.
clusterSize: 1

# -- If allowAdminParty is enabled the cluster will start up without any database
# administrator account; i.e., all users will be granted administrative
# access. Otherwise, the system will look for a Secret called
# <ReleaseName>-couchdb containing `adminUsername`, `adminPassword` and
# `cookieAuthSecret` keys. See the `createAdminSecret` flag.
# ref: https://kubernetes.io/docs/concepts/configuration/secret/
allowAdminParty: false

# Set this to true to automatically enable the cluster after installation.
# It will create a post-install job that sends the {"action": "finish_cluster"}
# message to CouchDB to finalize the cluster and adds the defaultDatabases listed.
# Note that this job needs service.enabled to be set to true and, if you use adminHash,
# a valid adminPassword in the secret. Also set the --wait flag when you install to
# avoid the first job failing (helm install --wait ...).
autoSetup:
  enabled: false
  image:
    repository: curlimages/curl
    tag: latest
    pullPolicy: Always
  defaultDatabases:
    - _global_changes
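  # For orientation only: the post-install job essentially performs a request of
  # this shape against CouchDB's cluster setup endpoint (host, credentials and
  # namespace below are placeholders, not values rendered by this chart):
  #   curl -X POST http://admin:password@<release>-couchdb.<namespace>.svc:5984/_cluster_setup \
  #     -H 'Content-Type: application/json' \
  #     -d '{"action": "finish_cluster"}'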

# -- If createAdminSecret is enabled a Secret called <ReleaseName>-couchdb will
# be created containing auto-generated credentials. Users who prefer to set
# these values themselves have a couple of options:
#
# 1) The `adminUsername`, `adminPassword`, `adminHash`, and `cookieAuthSecret`
#    can be defined directly in the chart's values. Note that all of a chart's
#    values are currently stored in plaintext in a ConfigMap in the tiller
#    namespace.
#
# 2) This flag can be disabled and a Secret with the required keys can be
#    created ahead of time.
createAdminSecret: true

adminUsername: admin
adminPassword: sotiluOC2024
# adminHash: -pbkdf2-this_is_not_necessarily_secure_either
# cookieAuthSecret: neither_is_this
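
# If createAdminSecret is disabled (option 2 above), a Secret with the expected
# keys can be created ahead of time; a rough sketch with placeholder values:
#   kubectl create secret generic <ReleaseName>-couchdb \
#     --from-literal=adminUsername=admin \
#     --from-literal=adminPassword='choose-a-strong-password' \
#     --from-literal=cookieAuthSecret='choose-a-random-string'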

## When enabled, a NetworkPolicy is deployed that allows CouchDB pods to
## communicate with each other for clustering and allows ingress on port 5984.
networkPolicy:
  enabled: true

## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:

# Use a service account
serviceAccount:
  enabled: true
  create: true
  # name:

# imagePullSecrets:
# - name: myimagepullsecret

# -- The storage volume used by each Pod in the StatefulSet. If a
# persistentVolume is not enabled, the Pods will use `emptyDir` ephemeral
# local storage. Setting the storageClass attribute to "-" disables dynamic
# provisioning of Persistent Volumes; leaving it unset will invoke the default
# provisioner.
persistentVolume:
  enabled: true
  # NOTE: the number of existing claims must match the cluster size
  # (see the commented illustration at the end of this block)
  existingClaims: []
  annotations: {}
  accessModes:
    - ReadWriteOnce
  size: 5Gi
  storageClass: "local-tgz1"
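  # Illustration only (the claim names below are placeholders, not created by
  # this chart): with clusterSize set to 3, existingClaims would need three
  # entries, for example:
  #   existingClaims:
  #     - couchdb-data-0
  #     - couchdb-data-1
  #     - couchdb-data-2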

# Experimental - FEATURE STATE: Kubernetes v1.27 [beta]
# Field controls if and how PVCs are deleted during the lifecycle
# of a StatefulSet
# ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
persistentVolumeClaimRetentionPolicy:
  enabled: false
  whenScaled: Retain
  whenDeleted: Retain

## The CouchDB image
image:
  repository: couchdb
  tag: 3.3.3
  pullPolicy: IfNotPresent

## Experimental integration with Lucene-powered fulltext search
searchImage:
  repository: kocolosk/couchdb-search
  tag: 0.2.0
  pullPolicy: IfNotPresent

# -- Flip this flag to true to include the Search container in each Pod
enableSearch: false

initImage:
  repository: busybox
  tag: latest
  pullPolicy: Always

## CouchDB is happy to spin up cluster nodes in parallel, but if you encounter
## problems you can try setting podManagementPolicy to the StatefulSet default
## `OrderedReady`
podManagementPolicy: Parallel

## To better tolerate Node failures, we can prevent the Kubernetes scheduler from
## assigning more than one Pod of the CouchDB StatefulSet per Node using podAntiAffinity.
nodeSelector:
  kubernetes.io/hostname: tencent-gz1
affinity: {}
# podAntiAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     - labelSelector:
#         matchExpressions:
#           - key: "app"
#             operator: In
#             values:
#               - couchdb
#       topologyKey: "kubernetes.io/hostname"

## To control how Pods are spread across your cluster among failure-domains such as regions,
## zones, nodes, and other user-defined topology domains, use topologySpreadConstraints.
topologySpreadConstraints: {}
# topologySpreadConstraints:
#   - maxSkew: 1
#     topologyKey: "topology.kubernetes.io/zone"
#     whenUnsatisfiable: ScheduleAnyway
#     labelSelector:
#       matchLabels:
#         app: couchdb

## Optional pod labels
labels: {}

## Optional pod annotations
annotations: {}

## Optional tolerations
tolerations: []

## A StatefulSet requires a headless Service to establish the stable network
## identities of the Pods, and that Service is created automatically by this
## chart without any additional configuration. The Service block below refers
## to a second Service that governs how clients connect to the CouchDB cluster.
service:
  annotations: {}
  enabled: true
  type: ClusterIP
  externalPort: 5984
  targetPort: 5984
  labels: {}
  extraPorts: []
  # - name: sqs
  #   port: 4984
  #   targetPort: 4984
  #   protocol: TCP

## If you need to expose any additional ports on the CouchDB container, for example
## if you're running the CouchDB container with additional processes that need to
## be accessible outside of the pod, you can define them here.
extraPorts: []
# - name: sqs
#   containerPort: 4984

## An Ingress resource can provide name-based virtual hosting and TLS
## termination among other things for CouchDB deployments which are accessed
## from outside the Kubernetes cluster.
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
  enabled: true
  className: nginx
  hosts:
    - ocdb.frytea.com
  path: /
  annotations:
    cert-manager.io/cluster-issuer: "dnspod-cluster-issuer"
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  tls:
    # Secrets must be manually created in the namespace.
    - secretName: ocdb-frytea-com-tls
      hosts:
        - ocdb.frytea.com
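  # If cert-manager (configured via the cluster-issuer annotation above) is not
  # issuing this certificate, the TLS Secret can be created by hand; a sketch
  # with placeholder certificate and key file paths:
  #   kubectl create secret tls ocdb-frytea-com-tls --cert=tls.crt --key=tls.key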

## Optional resource requests and limits for the CouchDB container
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
resources: {}
#  requests:
#    cpu: 100m
#    memory: 128Mi
#  limits:
#    cpu: 56
#    memory: 256Gi

## Optional resource requests and limits for the CouchDB init container
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
initResources: {}
#  requests:
#    cpu: 100m
#    memory: 128Mi
#  limits:
#    cpu: 500m
#    memory: 128Mi

# -- erlangFlags is a map that is passed to the Erlang VM as flags using the
# ERL_FLAGS environment variable. The `name` flag is required to establish
# connectivity between cluster nodes.
# ref: http://erlang.org/doc/man/erl.html#init_flags
erlangFlags:
  name: couchdb
  # Older versions of the official CouchDB image (anything prior to 3.2.1)
  # do not act on the COUCHDB_ERLANG_COOKIE environment variable, so if you
  # want to cluster these deployments it's necessary to pass in a cookie here
  # setcookie: make-something-up
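  # For illustration: with the values above, the container environment would carry
  # something along the lines of ERL_FLAGS="-name couchdb" (plus "-setcookie <value>"
  # when setcookie is set); the exact rendering is done by the chart's templates.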

# -- couchdbConfig will override default CouchDB configuration settings.
# The contents of this map are reformatted into a .ini file laid down
# by a ConfigMap object.
# ref: http://docs.couchdb.org/en/latest/config/index.html
couchdbConfig:
  couchdb:
    uuid: 3412fa1da6497409260d596e6301de91 # Unique identifier for this CouchDB server instance
  # cluster:
  #   q: 8 # Create 8 shards for each database
  chttpd:
    bind_address: any
    # chttpd.require_valid_user disables all anonymous requests to port
    # 5984 when it is set to true.
    require_valid_user: true
    max_document_size: 50000000
  # required to use Fauxton if chttpd.require_valid_user is set to true
  httpd:
    WWW-Authenticate: "Basic realm=\"administrator\""
    enable_cors: true
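  # Roughly, the map above corresponds to an ini file of this shape (the exact
  # rendering is handled by the chart's ConfigMap template):
  #   [couchdb]
  #   uuid = 3412fa1da6497409260d596e6301de91
  #   [chttpd]
  #   bind_address = any
  #   require_valid_user = true
  #   max_document_size = 50000000
  #   [httpd]
  #   WWW-Authenticate = Basic realm="administrator"
  #   enable_cors = true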

# Kubernetes local cluster domain.
# This is used to generate FQDNs for peers when joining the CouchDB cluster.
dns:
  clusterDomainSuffix: cluster.local
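  # With this suffix, peer FQDNs typically follow the StatefulSet pattern
  # <pod-name>.<headless-service>.<namespace>.svc.cluster.local (names here are
  # placeholders for the values the chart generates).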

## Configure liveness and readiness probe values
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
livenessProbe:
  enabled: true
  failureThreshold: 3
  initialDelaySeconds: 0
  periodSeconds: 10
  successThreshold: 1
  timeoutSeconds: 1
readinessProbe:
  enabled: true
  failureThreshold: 3
  initialDelaySeconds: 0
  periodSeconds: 10
  successThreshold: 1
  timeoutSeconds: 1

# Control an optional pod disruption budget
podDisruptionBudget:
  # toggle creation of pod disruption budget, disabled by default
  enabled: false
  # minAvailable: 1
  maxUnavailable: 1

# CouchDB 3.2.0 adds in a metrics endpoint on the path `/_node/_local/_prometheus`.
# Optionally, a standalone, unauthenticated port can be exposed for these metrics.
prometheusPort:
  enabled: false
  bind_address: "0.0.0.0"
  port: 17986
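  # When enabled, the metrics can be fetched without authentication, e.g.
  # (the address is a placeholder for a pod or service IP):
  #   curl http://<pod-ip>:17986/_node/_local/_prometheus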

# Configure arbitrary sidecar containers for CouchDB pods created by the
# StatefulSet
sidecars: {}
#   - name: foo
#     image: "busybox"
#     imagePullPolicy: IfNotPresent
#     resources:
#       requests:
#         cpu: "0.1"
#         memory: 10Mi
#     command: ['echo "foo";']
#     volumeMounts:
#       - name: database-storage
#         mountPath: /opt/couchdb/data/

# Placement manager to annotate each document in the nodes DB with a "zone" attribute
# recording the zone where the node has been scheduled
# Ref: https://docs.couchdb.org/en/stable/cluster/sharding.html#specifying-database-placement
placementConfig:
  enabled: false
  image:
    repository: caligrafix/couchdb-autoscaler-placement-manager
    tag: 0.1.0