chart_name
stringlengths
3
30
templates
list
values
stringlengths
104
39.6k
gce-ingress
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"gce-ingress.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because som...
# Default values for gce-ingress. # This is a YAML-formatted file. # Declare variables to be passed into your templates. nameOverride: "" fullnameOverride: "" rbac: # Specifies whether RBAC resources should be created create: true serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: # gce-ingress needs credentials to log into GCE. Create a secret with the key # of key.json with the contents of a GCE service account that has permissions to create # and modify load balancers. The key should be in the JSON format. # Example: # Your secret should look like: # apiVersion: v1 # kind: Secret # metadata: # name: gce-key # type: Opaque # data: # key.json: < base64 encoded JSON service account key> secret: ~ ## If the google auth file is saved in a different secret key you can specify it here # secretKey: key.json # gce config, replace values to match your environment config: projectID: network: subnetwork: nodeInstancePrefix: nodeTags: # tokenUrl should probably be left as nil tokenUrl: "nil" controller: replicaCount: 1 image: repository: k8s.gcr.io/ingress-gce-glbc-amd64 tag: v1.4.0 pullPolicy: IfNotPresent resources: {} # requests: # cpu: 10m # memory: 50Mi nodeSelector: {} tolerations: [] affinity: {} defaultBackend: replicaCount: 1 image: repository: k8s.gcr.io/defaultbackend tag: "1.4" pullPolicy: IfNotPresent resources: {} # limits: # cpu: 10m # memory: 20Mi # requests: # cpu: 10m # memory: 20Mi nodeSelector: {} tolerations: [] affinity: {} service: type: NodePort port: 80
prometheus-blackbox-exporter
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"prometheus-blackbox-exporter.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 ...
restartPolicy: Always kind: Deployment podDisruptionBudget: {} # maxUnavailable: 0 ## Enable pod security policy pspEnabled: true strategy: rollingUpdate: maxSurge: 1 maxUnavailable: 0 type: RollingUpdate image: repository: prom/blackbox-exporter tag: v0.16.0 pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistrKeySecretName ## User to run blackbox-exporter container as runAsUser: 1000 readOnlyRootFilesystem: true runAsNonRoot: true livenessProbe: httpGet: path: /health port: http readinessProbe: httpGet: path: /health port: http nodeSelector: {} tolerations: [] affinity: {} secretConfig: false config: modules: http_2xx: prober: http timeout: 5s http: valid_http_versions: ["HTTP/1.1", "HTTP/2"] no_follow_redirects: false preferred_ip_protocol: "ip4" extraConfigmapMounts: [] # - name: certs-configmap # mountPath: /etc/secrets/ssl/ # subPath: certificates.crt # (optional) # configMap: certs-configmap # readOnly: true # defaultMode: 420 ## Additional secret mounts # Defines additional mounts with secrets. Secrets must be manually created in the namespace. extraSecretMounts: [] # - name: secret-files # mountPath: /etc/secrets # secretName: blackbox-secret-files # readOnly: true # defaultMode: 420 allowIcmp: false resources: {} # limits: # memory: 300Mi # requests: # memory: 50Mi priorityClassName: "" service: annotations: {} type: ClusterIP port: 9115 serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: annotations: {} ## An Ingress resource can provide name-based virtual hosting and TLS ## termination among other things for CouchDB deployments which are accessed ## from outside the Kubernetes cluster. 
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ ingress: enabled: false hosts: [] # - chart-example.local path: '/' annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" tls: [] # Secrets must be manually created in the namespace. # - secretName: chart-example-tls # hosts: # - chart-example.local podAnnotations: {} extraArgs: [] # --history.limit=1000 replicas: 1 serviceMonitor: ## If true, a ServiceMonitor CRD is created for a prometheus operator ## https://github.com/coreos/prometheus-operator ## enabled: false # Default values that will be used for all ServiceMonitors created by `targets` defaults: labels: {} interval: 30s scrapeTimeout: 30s module: http_2xx targets: # - name: example # Human readable URL that will appear in Prometheus / AlertManager # url: http://example.com/healthz # The URL that blackbox will scrape # labels: {} # List of labels for ServiceMonitor. Overrides value set in `defaults` # interval: 60s # Scraping interval. Overrides value set in `defaults` # scrapeTimeout: 60s # Scrape timeout. Overrides value set in `defaults` # module: http_2xx # Module used for scraping. Overrides value set in `defaults` ## Custom PrometheusRules to be defined ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions prometheusRule: enabled: false additionalLabels: {} namespace: "" rules: [] ## Network policy for chart networkPolicy: # Enable network policy and allow access from anywhere enabled: false # Limit access only from monitoring namespace allowMonitoringNamespace: false
gangway
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"gangway.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Ku...
replicaCount: 1 image: repository: gcr.io/heptio-images/gangway tag: v3.2.0 pullPolicy: IfNotPresent ## Optional array of imagePullSecrets containing private registry credentials ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ pullSecrets: [] # - name: secretName nameOverride: "" fullnameOverride: "" # Specify a CA cert to trust for self signed certificates at the Oauth2 URLs. Be careful to indent one level beyond the # trustedCACert key: # trustedCACert: |- # -----BEGIN CERTIFICATE----- # ... # -----END CERTIFICATE----- # Add Env Variables to pod env: {} # Add annotations to the pod podAnnotations: {} gangway: # The address to listen on. Defaults to 0.0.0.0 to listen on all interfaces. # Env var: GANGWAY_HOST # host: 0.0.0.0 serviceAccountName: "" # The port to listen on. Defaults to 8080. # Env var: GANGWAY_PORT port: 8080 # Should Gangway serve TLS vs. plain HTTP? Default: false # Env var: GANGWAY_SERVE_TLS # serveTLS: false # The public cert file (including root and intermediates) to use when serving TLS. # Env var: GANGWAY_CERT_FILE # certFile: /etc/gangway/tls/tls.crt # The private key file when serving TLS. # Env var: GANGWAY_KEY_FILE # keyFile: /etc/gangway/tls/tls.key # The cluster name. Used in UI and kubectl config instructions. # Env var: GANGWAY_CLUSTER_NAME clusterName: "${GANGWAY_CLUSTER_NAME}" # OAuth2 URL to start authorization flow. # Env var: GANGWAY_AUTHORIZE_URL authorizeURL: "https://${DNS_NAME}/authorize" # OAuth2 URL to obtain access tokens. # Env var: GANGWAY_TOKEN_URL tokenURL: "https://${DNS_NAME}/oauth/token" # Endpoint that provides user profile information [optional]. Not all providers # will require this. # Env var: GANGWAY_AUDIENCE audience: "https://${DNS_NAME}/userinfo" # Used to specify the scope of the requested Oauth authorization. scopes: ["openid", "profile", "email", "offline_access"] # Where to redirect back to. This should be a URL where gangway is reachable. 
# Typically this also needs to be registered as part of the oauth application # with the oAuth provider. # Env var: GANGWAY_REDIRECT_URL redirectURL: "https://${GANGWAY_REDIRECT_URL}/callback" # API client ID as indicated by the identity provider # Env var: GANGWAY_CLIENT_ID clientID: "${GANGWAY_CLIENT_ID}" # API client secret as indicated by the identity provider # Env var: GANGWAY_CLIENT_SECRET clientSecret: "${GANGWAY_CLIENT_SECRET}" # Some identity providers accept an empty client secret, this # is not generally considered a good idea. If you have to use an # empty secret and accept the risks that come with that then you can # set this to true. # allowEmptyClientSecret: false # The JWT claim to use as the username. This is used in UI. # Default is "nickname". This is combined with the clusterName # for the "user" portion of the kubeconfig. # Env var: GANGWAY_USERNAME_CLAIM usernameClaim: "sub" # The API server endpoint used to configure kubectl # Env var: GANGWAY_APISERVER_URL apiServerURL: "https://${GANGWAY_APISERVER_URL}" # The path to find the CA bundle for the API server. Used to configure kubectl. # This is typically mounted into the default location for workloads running on # a Kubernetes cluster and doesn't need to be set. # Env var: GANGWAY_CLUSTER_CA_PATH # cluster_ca_path: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" # The path gangway uses to create urls (defaults to "") # Env var: GANGWAY_HTTP_PATH # httpPath: "https://${GANGWAY_HTTP_PATH}" # The key to use when encrypting the contents of cookies. # You can leave this blank and the chart will generate a random key, however # you must use that with caution. Subsequent upgrades to the deployment will # regenerate this key which will cause Gangway to error when attempting to # decrypt cookies stored in users' browsers which were encrypted with the old # key. # TL;DR: Safe to use the auto generation in test environments, provide your # own in procution. 
# sessionKey: tls: {} # certData: | # -----BEGIN CERTIFICATE----- # ... # -----END CERTIFICATE----- # keyData: | # -----BEGIN ENCRYPTED PRIVATE KEY----- # ... # -----END ENCRYPTED PRIVATE KEY----- # Name of an existing secret containing `tls.cert` and `tls.key`. # Mounted on the default tls path `/etc/gangway/tls` # existingSecret: "" extraVolumes: [] extraVolumeMounts: [] livenessProbe: # HTTP or HTTPS scheme: HTTP readinessProbe: # HTTP or HTTPS scheme: HTTP service: type: ClusterIP port: 80 # Specifies a loadBalancerIP when using LoadBalancer service type # loadBalancerIP: 192.168.0.51 annotations: {} ingress: enabled: false annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" path: / hosts: - chart-example.local tls: [] # - secretName: chart-example-tls # hosts: # - chart-example.local resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi nodeSelector: {} tolerations: [] affinity: {}
kubernetes-dashboard
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"kubernetes-dashboard.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars be...
# Default values for kubernetes-dashboard # This is a YAML-formatted file. # Declare name/value pairs to be passed into your templates. # name: value image: repository: k8s.gcr.io/kubernetes-dashboard-amd64 tag: v1.10.1 pullPolicy: IfNotPresent pullSecrets: [] replicaCount: 1 ## Here annotations can be added to the kubernetes dashboard deployment annotations: {} ## Here labels can be added to the kubernetes dashboard deployment ## labels: {} # kubernetes.io/name: "Kubernetes Dashboard" ## Enable possibility to skip login enableSkipLogin: false ## Serve application over HTTP without TLS enableInsecureLogin: false ## Additional container arguments ## # extraArgs: # - --enable-skip-login # - --enable-insecure-login # - --system-banner="Welcome to Kubernetes" ## Additional container environment variables ## extraEnv: [] # - name: SOME_VAR # value: 'some value' # Annotations to be added to kubernetes dashboard pods ## Recommended value # podAnnotations: # seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' podAnnotations: {} # Add custom labels to pods podLabels: {} ## SecurityContext for the kubernetes dashboard container ## Recommended values # dashboardContainerSecurityContext: # allowPrivilegeEscalation: false # readOnlyRootFilesystem: true ## The two values below can be set here or at podLevel (using variable .securityContext) # runAsUser: 1001 # runAsGroup: 2001 dashboardContainerSecurityContext: {} ## Node labels for pod assignment ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ ## nodeSelector: {} ## List of node taints to tolerate (requires Kubernetes >= 1.6) tolerations: [] # - key: "key" # operator: "Equal|Exists" # value: "value" # effect: "NoSchedule|PreferNoSchedule|NoExecute" ## Affinity ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity affinity: {} # priorityClassName: "" service: type: ClusterIP externalPort: 443 ## This allows an override of the heapster service name ## Default: 
{{ .Chart.Name }} ## # nameOverride: # LoadBalancerSourcesRange is a list of allowed CIDR values, which are combined with ServicePort to # set allowed inbound rules on the security group assigned to the master load balancer # loadBalancerSourceRanges: [] ## Kubernetes Dashboard Service annotations ## ## For GCE ingress, the following annotation is required: ## service.alpha.kubernetes.io/app-protocols: '{"https":"HTTPS"}' if enableInsecureLogin=false ## or ## service.alpha.kubernetes.io/app-protocols: '{"http":"HTTP"}' if enableInsecureLogin=true annotations: {} ## Here labels can be added to the Kubernetes Dashboard service ## labels: {} # kubernetes.io/name: "Kubernetes Dashboard" resources: limits: cpu: 100m memory: 100Mi requests: cpu: 100m memory: 100Mi ingress: ## If true, Kubernetes Dashboard Ingress will be created. ## enabled: false ## Kubernetes Dashboard Ingress annotations ## ## Add custom labels # labels: # key: value # annotations: # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: 'true' ## If you plan to use TLS backend with enableInsecureLogin set to false ## (default), you need to uncomment the below. ## If you use ingress-nginx < 0.21.0 # nginx.ingress.kubernetes.io/secure-backends: "true" ## if you use ingress-nginx >= 0.21.0 # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" ## Kubernetes Dashboard Ingress paths ## paths: - / # - /* ## Kubernetes Dashboard Ingress hostnames ## Must be provided if Ingress is enabled ## # hosts: # - kubernetes-dashboard.domain.com ## Kubernetes Dashboard Ingress TLS configuration ## Secrets must be manually created in the namespace ## # tls: # - secretName: kubernetes-dashboard-tls # hosts: # - kubernetes-dashboard.domain.com rbac: # Specifies whether RBAC resources should be created create: true # Specifies whether cluster-admin ClusterRole will be used for dashboard # ServiceAccount (NOT RECOMMENDED). clusterAdminRole: false # Start in ReadOnly mode. 
# Only dashboard-related Secrets and ConfigMaps will still be available for writing. # # Turn OFF clusterAdminRole to use clusterReadOnlyRole. # # The basic idea of the clusterReadOnlyRole comparing to the clusterAdminRole # is not to hide all the secrets and sensitive data but more # to avoid accidental changes in the cluster outside the standard CI/CD. # # Same as for clusterAdminRole, it is NOT RECOMMENDED to use this version in production. # Instead you should review the role and remove all potentially sensitive parts such as # access to persistentvolumes, pods/log etc. clusterReadOnlyRole: false serviceAccount: # Specifies whether a service account should be created create: true # The name of the service account to use. # If not set and create is true, a name is generated using the fullname template name: livenessProbe: # Number of seconds to wait before sending first probe initialDelaySeconds: 30 # Number of seconds to wait for probe response timeoutSeconds: 30 podDisruptionBudget: # https://kubernetes.io/docs/tasks/run-application/configure-pdb/ enabled: false minAvailable: maxUnavailable: ## PodSecurityContext for pod level securityContext ## # securityContext: # runAsUser: 1001 # runAsGroup: 2001 securityContext: {} networkPolicy: false
nginx-lego
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"nginx-lego.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 24 -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 24 chars because some Kubernetes name fi...
## nginx-lego spins up a scalable ingress provider that can also provision SSL certs ## See https://github.com/jetstack/kube-lego/tree/master/examples/nginx for more information on implementation ## Nginx configuration ## ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx#automated-certificate-management-with-kube-lego ## nginx: replicaCount: 1 image: repository: k8s.gcr.io/nginx-ingress-controller tag: "0.8.3" pullPolicy: IfNotPresent service: type: LoadBalancer monitoring: false resources: limits: cpu: 1 memory: 2Gi requests: cpu: 1 memory: 128Mi configmap: proxy_connect_timeout: "30" proxy_read_timeout: "600" proxy_send_imeout: "600" hsts_include_subdomains: "false" body_size: "64m" server_name_hash_bucket_size: "256" # TODO: figure out how to expose `{nginx_addr}:8080/nginx_status`, on existing service or create new one? enable_vts_status: "false" ## Default Backend configuration ## To run a different 404 page for the managed domains please see the documentation below ## ref: https://github.com/kubernetes/contrib/tree/master/404-server ## default: replicaCount: 1 image: repository: k8s.gcr.io/defaultbackend tag: "1.0" pullPolicy: IfNotPresent resources: limits: cpu: 1 memory: 2Gi requests: cpu: 1 memory: 128Mi ## kube-lego configuration ## ref: https://github.com/jetstack/kube-lego ## lego: enabled: false replicaCount: 1 image: repository: jetstack/kube-lego tag: "0.1.3" pullPolicy: IfNotPresent configmap: email: "my@email.tld" # Production Let's Encrypt server # url: "https://acme-v01.api.letsencrypt.org/directory" # Test Let's Encrypt server url: "https://acme-staging.api.letsencrypt.org/directory " resources: limits: cpu: 1 memory: 2Gi requests: cpu: 1 memory: 128Mi
wavefront
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"wavefront.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"wav...
## Default values for Wavefront ## This is a unique name for the cluster ## All metrics will receive a `cluster` tag with this value ## Required clusterName: KUBERNETES_CLUSTER_NAME ## Wavefront URL (cluster) and API Token ## Required wavefront: url: https://YOUR_CLUSTER.wavefront.com token: YOUR_API_TOKEN ## Wavefront Collector is responsible to get all Kubernetes metrics from your cluster. ## It will capture Kubernetes resources metrics available from the kubelets, ## as well as auto-discovery capabilities. collector: enabled: true image: repository: wavefronthq/wavefront-kubernetes-collector tag: 1.0.3 pullPolicy: IfNotPresent ## If set to true, DaemonSet will be used for the collector. ## If set to false, Deployment will be used for the collector. ## Setting this to true is strongly recommended useDaemonset: true ## max number of CPUs that can be used simultaneously. Less than 1 for default (number of cores) # maxProcs: 0 ## log level one of: info, debug, or trace. (default info) # logLevel: info ## The resolution at which the collector will retain metrics. (default 60s) # interval: 60s ## How often collected data is flushed (default 10s) # flushInterval: 10s ## Timeout for exporting data (default 20s) # sinkDelay: 20s ## If set to true, will use the unauthenticated real only port for the kubelet ## If set to false, will use the encrypted full access port for the kubelet (default false) # useReadOnlyPort: false ## If set to true, metrics will be sent to Wavefront via a Wavefront Proxy. 
## When true you must either specify a value for `collector.proxyAddress` or set `proxy.enabled` to true ## If set to false, metrics will be sent to Wavefront via the Direct Ingestion API useProxy: true ## Can be used to specify a specific address for the Wavefront Proxy ## The proxy can be anywhere network reachable including outside of the cluster ## Required if `collector.useProxy` is true and `proxy.enabled` is false # proxyAddress: wavefront-proxy:2878 ## If set to true Kubernetes API Server will also be scraped for metrics (default false) # apiServerMetrics: false ## Map of tags to apply to all metrics collected by the collector (default empty) # tags: ## sample tags to include (env, region) # env: production # region: us-west-2 ## Rules based discovery configuration ## Ref: https://github.com/wavefrontHQ/wavefront-kubernetes-collector/blob/master/docs/discovery.md discovery: enabled: true ## When specified, this replaces `prometheus.io` as the prefix for annotations used to ## auto-discover Prometheus endpoints # annotationPrefix: "wavefront.com" ## Can be used to add additional discovery rules # config: ## auto-discover a sample prometheus application # - name: prom-example # type: prometheus # selectors: # labels: # k8s-app: # - prom-example # port: 8080 # path: /metrics # prefix: kube.prom-example. 
# tags: # alt_name: sample-app ## auto-discover mongodb pods (replace USER:PASSWORD) # - name: mongodb # type: telegraf/mongodb # selectors: # images: # - '*mongodb:*' # port: 27017 # conf: | # servers = ["mongodb://USER:PASSWORD${host}:${port}"] # gather_perdb_stats = true # filters: # metricBlacklist: # - 'mongodb.member.status' # - 'mongodb.state' # - 'mongodb.db.stats.type' ## auto-discover rabbitmq pods (replace USER and PASSWORD) # - name: rabbitmq # type: telegraf/rabbitmq # selectors: # images: # - '*rabbitmq:*' # port: 15672 # conf: | # url = "http://${host}:${port}" # username = "USER" # password = "PASSWORD" ## Wavefront Collector resource requests and limits ## Make sure to keep requests and limits equal to keep the pods in the Guaranteed QoS class ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ resources: requests: cpu: 200m memory: 256Mi limits: cpu: 200m memory: 256Mi ## Wavefront Proxy is a metrics forwarder that is used to relay metrics to the Wavefront SaaS service. ## It can receive metrics from the Wavefront Collector as well as other metrics collection services ## within your cluster. The proxy also supports preprocessor rules to allow you to further filter ## and enhance your metric names, and tags. Should network connectivity fall between the proxy and ## Wavefront SaaS service, the proxy will buffer metrics, which will be flushed when connectivity resumes. ## Ref: https://docs.wavefront.com/proxies.html proxy: enabled: true image: repository: wavefronthq/proxy tag: 5.7 pullPolicy: IfNotPresent ## The port number the proxy will listen on for metrics in Wavefront data format. ## This is usually 2878 port: 2878 ## The port nubmer the proxy will listen on for tracing spans in Wavefront trace data format. ## This is usually 30000 # tracePort: 30000 ## The port nubmer the proxy will listen on for tracing spans in Jaeger data format. 
## This is usually 30001 # jaegerPort: 30001 ## The port nubmer the proxy will listen on for tracing spans in Zipkin data format. ## This is usually 9411 # zipkinPort: 9411 ## Sampling rate to apply to tracing spans sent to the proxy. ## This rate is applied to all data formats the proxy is listening on. ## Value should be between 0.0 and 1.0. Default is 1.0 # traceSamplingRate: 0.25 ## When this is set to a value greater than 0, ## spans that are greater than or equal to this value will be sampled. # traceSamplingDuration: 500 ## Any configuration property can be passed to the proxy via command line args in ## in the format: `--<property_name> <value>`. Multiple properties can be specified ## separated by whitespace. ## Ref: https://docs.wavefront.com/proxies_configuring.html # args: ## Proxy is a Java application. By default Java will consume upto 4G of heap memory. ## This can be used to override the default. Uses the `-Xmx` command line option for java # heap: 1024m ## Preprocessor rules is a powerful way to apply filtering or to enhance metrics as they flow ## through the proxy. You can configure the rules here. By default a rule to drop Kubernetes ## generated labels is applied to remove unecessary and often noisy tags. ## Ref: https://docs.wavefront.com/proxies_preprocessor_rules.html # preprocessor: # rules.yaml: | # '2878': # # fix %2F to be a / instead. May be required on EKS. # - rule : fix-forward-slash # action : replaceRegex # scope : pointLine # search : "%2F" # replace : "/" # # replace bad characters ("&", "$", "!", "@") with underscores in the entire point line string # - rule : replace-badchars # action : replaceRegex # scope : pointLine # search : "[&\\$!@]" # replace : "_" ## Specifies whether RBAC resources should be created rbac: create: true ## Specifies whether a ServiceAccount should be created serviceAccount: create: true ## The name of the ServiceAccount to use. 
## If not set and create is true, a name is generated using the fullname template name: ## kube-state-metrics are used to get metrics about the state of the Kubernetes scheduler ## If enabled the kube-state-metrics chart will be installed as a subchart and the collector ## will be configured to capture metrics. kubeStateMetrics: enabled: true
ghost
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"ghost.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kube...
## Global Docker image parameters ## Please, note that this will override the image parameters, including dependencies, configured to use the global value ## Current available global Docker image parameters: imageRegistry and imagePullSecrets ## # global: # imageRegistry: myRegistryName # imagePullSecrets: # - myRegistryKeySecretName # storageClass: myStorageClass ## Bitnami Ghost image version ## ref: https://hub.docker.com/r/bitnami/ghost/tags/ ## image: registry: docker.io repository: bitnami/ghost tag: 3.9.0-debian-10-r0 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## String to partially override ghost.fullname template (will maintain the release name) ## # nameOverride: ## String to fully override ghost.fullname template ## # fullnameOverride: ## Init containers parameters: ## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup ## volumePermissions: image: registry: docker.io repository: bitnami/minideb tag: buster pullPolicy: Always ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. 
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## Ghost protocol, host, port and path to create application URLs ## ref: https://github.com/bitnami/bitnami-docker-ghost#configuration ## ghostProtocol: http # ghostHost: # ghostPort: ghostPath: / ## User of the application ## ref: https://github.com/bitnami/bitnami-docker-ghost#configuration ## ghostUsername: user@example.com ## Application password ## Defaults to a random 10-character alphanumeric string if not set ## ref: https://github.com/bitnami/bitnami-docker-ghost#configuration ## # ghostPassword: ## Admin email ## ref: https://github.com/bitnami/bitnami-docker-ghost#configuration ## ghostEmail: user@example.com ## Ghost Blog name ## ref: https://github.com/bitnami/bitnami-docker-ghost#environment-variables ## ghostBlogTitle: User's Blog ## Set to `yes` to allow the container to be started with blank passwords ## ref: https://github.com/bitnami/bitnami-docker-wordpress#environment-variables allowEmptyPassword: "yes" ## SMTP mail delivery configuration ## ref: https://github.com/bitnami/bitnami-docker-redmine/#smtp-configuration ## # smtpHost: # smtpPort: # smtpUser: # smtpPassword: # smtpFromAddress # smtpService: ## Configure extra options for liveness and readiness probes ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) ## livenessProbe: enabled: true initialDelaySeconds: 120 periodSeconds: 10 timeoutSeconds: 5 failureThreshold: 6 successThreshold: 1 readinessProbe: enabled: true initialDelaySeconds: 30 periodSeconds: 5 timeoutSeconds: 3 failureThreshold: 6 successThreshold: 1 ## ## External database configuration ## externalDatabase: ## All of these values are only used when mariadb.enabled is set to false ## Database host host: localhost ## non-root Username for Wordpress Database user: bn_ghost ## Database password password: "" ## Database name 
database: bitnami_ghost ## Database port number port: 3306 ## ## MariaDB chart configuration ## ## https://github.com/helm/charts/blob/master/stable/mariadb/values.yaml ## mariadb: ## Whether to deploy a mariadb server to satisfy the applications database requirements. To use an external database set this to false and configure the externalDatabase parameters enabled: true ## Disable MariaDB replication replication: enabled: false ## Create a database and a database user ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run ## db: name: bitnami_ghost user: bn_ghost ## If the password is not specified, mariadb will generates a random password ## # password: ## MariaDB admin password ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run ## # rootUser: # password: ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## master: persistence: enabled: true ## mariadb data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. 
(gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi ## Kubernetes configuration ## For minikube, set this to NodePort, elsewhere use LoadBalancer ## service: type: LoadBalancer # HTTP Port port: 80 ## loadBalancerIP: ## ## nodePorts: ## http: <to set explicitly, choose port between 30000-32767> nodePorts: http: "" ## Enable client source IP preservation ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip ## externalTrafficPolicy: Cluster ## Service annotations done as key:value pairs annotations: ## Pod Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ ## securityContext: enabled: true fsGroup: 1001 runAsUser: 1001 ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## persistence: enabled: true ## ghost data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi path: /bitnami ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: requests: memory: 512Mi cpu: 300m ## Configure the ingress resource that allows you to access the ## Ghost installation. 
Set up the URL ## ref: http://kubernetes.io/docs/user-guide/ingress/ ## ingress: ## Set to true to enable ingress record generation enabled: false ## Set this to true in order to add the corresponding annotations for cert-manager certManager: false ## Ingress annotations done as key:value pairs ## For a full list of possible ingress annotations, please see ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md ## ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set annotations: # kubernetes.io/ingress.class: nginx ## The list of hostnames to be covered with this ingress record. ## Most likely this will be just one host, but in the event more hosts are needed, this is an array hosts: - name: ghost.local path: / ## Set this to true in order to enable TLS on the ingress record tls: false ## Optionally specify the TLS hosts for the ingress record ## Useful when the Ingress controller supports www-redirection ## If not specified, the above host name will be used # tlsHosts: # - www.ghost.local # - ghost.local ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS tlsSecret: ghost.local-tls secrets: ## If you're providing your own certificates, please use this to add the certificates as secrets ## key and certificate should start with -----BEGIN CERTIFICATE----- or ## -----BEGIN RSA PRIVATE KEY----- ## ## name should line up with a tlsSecret set further up ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set ## ## It is also possible to create and manage the certificates outside of this helm chart ## Please see README.md for more information # - name: ghost.local-tls # key: # certificate: ## Node selector for pod assignment ## Ref: 
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector ## nodeSelector: {} ## Affinity for pod assignment ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## affinity: {}
fluentd
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"fluentd.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Ku...
# Default values for fluentd. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 image: repository: gcr.io/google-containers/fluentd-elasticsearch tag: v2.4.0 pullPolicy: IfNotPresent # pullSecrets: # - secret1 # - secret2 output: host: elasticsearch-client.default.svc.cluster.local port: 9200 scheme: http sslVersion: TLSv1 buffer_chunk_limit: 2M buffer_queue_limit: 8 env: {} # Extra Environment Values - allows yaml definitions extraEnvVars: # - name: VALUE_FROM_SECRET # valueFrom: # secretKeyRef: # name: secret_name # key: secret_key # extraVolumes: # - name: es-certs # secret: # defaultMode: 420 # secretName: es-certs # extraVolumeMounts: # - name: es-certs # mountPath: /certs # readOnly: true plugins: enabled: false pluginsList: [] service: annotations: {} type: ClusterIP # loadBalancerIP: # type: NodePort # nodePort: # Used to create Service records ports: - name: "monitor-agent" protocol: TCP containerPort: 24220 metrics: enabled: false service: port: 24231 serviceMonitor: enabled: false additionalLabels: {} # namespace: monitoring # interval: 30s # scrapeTimeout: 10s annotations: {} # prometheus.io/scrape: "true" # prometheus.io/port: "24231" # Pod Labels deployment: labels: {} ingress: enabled: false annotations: kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" # # Depending on which version of ingress controller you may need to configure properly - https://kubernetes.github.io/ingress-nginx/examples/rewrite/#rewrite-target # nginx.ingress.kubernetes.io/rewrite-target: / labels: [] # If doing TCP or UDP ingress rule don't forget to update your Ingress Controller to accept TCP connections - https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/ hosts: # - name: "http-input.local" # protocol: TCP # servicePort: 9880 # path: / tls: {} # Secrets must be manually created in the namespace. 
# - secretName: http-input-tls # hosts: # - http-input.local configMaps: general.conf: | # Prevent fluentd from handling records containing its own logs. Otherwise # it can lead to an infinite loop, when error in sending one message generates # another message which also fails to be sent and so on. <match fluentd.**> @type null </match> # Used for health checking <source> @type http port 9880 bind 0.0.0.0 </source> # Emits internal metrics to every minute, and also exposes them on port # 24220. Useful for determining if an output plugin is retryring/erroring, # or determining the buffer queue length. <source> @type monitor_agent bind 0.0.0.0 port 24220 tag fluentd.monitor.metrics </source> system.conf: |- <system> root_dir /tmp/fluentd-buffers/ </system> forward-input.conf: | <source> @type forward port 24224 bind 0.0.0.0 </source> output.conf: | <match **> @id elasticsearch @type elasticsearch @log_level info include_tag_key true # Replace with the host/port to your Elasticsearch cluster. host "#{ENV['OUTPUT_HOST']}" port "#{ENV['OUTPUT_PORT']}" scheme "#{ENV['OUTPUT_SCHEME']}" ssl_version "#{ENV['OUTPUT_SSL_VERSION']}" logstash_format true <buffer> @type file path /var/log/fluentd-buffers/kubernetes.system.buffer flush_mode interval retry_type exponential_backoff flush_thread_count 2 flush_interval 5s retry_forever retry_max_interval 30 chunk_limit_size "#{ENV['OUTPUT_BUFFER_CHUNK_LIMIT']}" queue_limit_length "#{ENV['OUTPUT_BUFFER_QUEUE_LIMIT']}" overflow_action block </buffer> </match> resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
# limits: # cpu: 500m # memory: 200Mi # requests: # cpu: 500m # memory: 200Mi rbac: # Specifies whether RBAC resources should be created create: true serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: ## Persist data to a persistent volume persistence: enabled: false ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" # annotations: {} accessMode: ReadWriteOnce size: 10Gi nodeSelector: {} tolerations: [] affinity: {} # Enable autoscaling using HorizontalPodAutoscaler autoscaling: enabled: false minReplicas: 2 maxReplicas: 5 metrics: - type: Resource resource: name: cpu target: type: Utilization averageUtilization: 60 - type: Resource resource: name: memory target: type: Utilization averageUtilization: 60 # Consider to set higher value when using in conjuction with autoscaling # Full description about this field: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.15/#pod-v1-core terminationGracePeriodSeconds: 30
opa
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"opa.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubern...
# Default values for opa. # ----------------------- # # The 'opa' key embeds an OPA configuration file. See https://www.openpolicyagent.org/docs/configuration.html for more details. # Use 'opa: false' to disable the OPA configuration and rely on configmaps for policy loading. # See https://www.openpolicyagent.org/docs/latest/kubernetes-admission-control/#3-deploy-opa-on-top-of-kubernetes and the `mgmt.configmapPolicies` section below for more details. opa: services: controller: url: 'https://www.openpolicyagent.org' bundles: quickstart: service: controller resource: /bundles/helm-kubernetes-quickstart default_decision: /helm_kubernetes_quickstart/main # Setup the webhook using cert-manager certManager: enabled: false # Expose the prometheus scraping endpoint prometheus: enabled: false ## ServiceMonitor consumed by prometheus-operator serviceMonitor: ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry enabled: false interval: "15s" ## Namespace in which the service monitor is created # namespace: monitoring # Added to the ServiceMonitor object so that prometheus-operator is able to discover it ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec additionalLabels: {} # Annotations in the deployment template annotations: {} # Bootstrap policies to load upon startup # Define policies in the form of: # <policyName> : |- # <regoBody> # For example, to mask the entire input body in the decision logs: # bootstrapPolicies: # log: |- # package system.log # mask["/input"] bootstrapPolicies: {} # To enforce mutating policies, change to MutatingWebhookConfiguration. admissionControllerKind: ValidatingWebhookConfiguration # To _fail closed_ on failures, change to Fail. During initial testing, we # recommend leaving the failure policy as Ignore. 
admissionControllerFailurePolicy: Ignore # Adds a namespace selector to the admission controller webhook admissionControllerNamespaceSelector: matchExpressions: - {key: openpolicyagent.org/webhook, operator: NotIn, values: [ignore]} # SideEffectClass for the webhook, setting to None enables dry-run admissionControllerSideEffect: Unknown # To restrict the kinds of operations and resources that are subject to OPA # policy checks, see the settings below. By default, all resources and # operations are subject to OPA policy checks. admissionControllerRules: - operations: ["*"] apiGroups: ["*"] apiVersions: ["*"] resources: ["*"] # Controls a PodDisruptionBudget for the OPA pod. Suggested use if having opa # always running for admission control is important podDisruptionBudget: enabled: false minAvailable: 1 # maxUnavailable: 1 # The helm Chart will automatically generate a CA and server certificate for # the OPA. If you want to supply your own certificates, set the field below to # false and add the PEM encoded CA certificate and server key pair below. # # WARNING: The common name name in the server certificate MUST match the # hostname of the service that exposes the OPA to the apiserver. For example. # if the service name is created in the "default" nanamespace with name "opa" # the common name MUST be set to "opa.default.svc". # # If the common name is not set correctly, the apiserver will refuse to # communicate with the OPA. generateAdmissionControllerCerts: true admissionControllerCA: "" admissionControllerCert: "" admissionControllerKey: "" authz: # Disable if you don't want authorization. # Mostly useful for debugging. enabled: true # Use hostNetwork setting on OPA pod hostNetwork: enabled: false # Docker image and tag to deploy. 
image: openpolicyagent/opa imageTag: 0.15.1 imagePullPolicy: IfNotPresent # One or more secrets to be used when pulling images imagePullSecrets: [] # - registrySecretName # Port to which the opa pod will bind itself # NOTE IF you use a different port make sure it maches the ones in the readinessProbe # and livenessProbe port: 443 extraArgs: [] mgmt: enabled: true image: openpolicyagent/kube-mgmt imageTag: "0.10" imagePullPolicy: IfNotPresent # NOTE insecure http port conjointly used for mgmt access and prometheus metrics export port: 8181 extraArgs: [] resources: {} data: enabled: false configmapPolicies: # NOTE IF you use these, remember to update the RBAC rules below to allow # permissions to get, list, watch, patch and update configmaps enabled: false namespaces: [opa, kube-federation-scheduling-policy] requireLabel: true replicate: # NOTE IF you use these, remember to update the RBAC rules below to allow # permissions to replicate these things cluster: [] # - [group/]version/resource namespace: [] # - [group/]version/resource path: kubernetes # Log level for OPA ('debug', 'info', 'error') (app default=info) logLevel: info # Log format for OPA ('text', 'json') (app default=text) logFormat: text # Number of OPA replicas to deploy. OPA maintains an eventually consistent # cache of policies and data. If you want high availability you can deploy two # or more replicas. replicas: 1 # To control how the OPA is scheduled on the cluster, set the affinity, # tolerations and nodeSelector values below. 
For example, to deploy OPA onto # the master nodes, 1 replica per node: # # affinity: # podAntiAffinity: # requiredDuringSchedulingIgnoredDuringExecution: # - labelSelector: # matchExpressions: # - key: "app" # operator: In # values: # - opa # topologyKey: "kubernetes.io/hostname" # tolerations: # - key: "node-role.kubernetes.io/master" # effect: NoSchedule # operator: Exists # nodeSelector: # kubernetes.io/role: "master" affinity: {} tolerations: [] nodeSelector: {} # To control the CPU and memory resource limits and requests for OPA, set the # field below. resources: {} rbac: # If true, create & use RBAC resources # create: true rules: cluster: [] # - apiGroups: # - "" # resources: # - namespaces # verbs: # - get # - list # - watch serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: # This proxy allows opa to make Kubernetes SubjectAccessReview checks against the # Kubernetes API. You can get a rego function at github.com/open-policy-agent/library sar: enabled: false image: lachlanevenson/k8s-kubectl imageTag: latest imagePullPolicy: IfNotPresent resources: {} # To control the liveness and readiness probes change the fields below. readinessProbe: httpGet: path: /health scheme: HTTPS port: 443 initialDelaySeconds: 3 periodSeconds: 5 livenessProbe: httpGet: path: /health scheme: HTTPS port: 443 initialDelaySeconds: 3 periodSeconds: 5 # Set a priorityClass using priorityClassName # priorityClassName: # Timeout for a webhook call in seconds. # Starting in kubernetes 1.14 you can set the timeout and it is # encouraged to use a small timeout for webhooks. If the webhook call times out, the request # the request is handled according to the webhook'sfailure policy. 
# timeoutSeconds: 20 securityContext: enabled: false runAsNonRoot: true runAsUser: 1 deploymentStrategy: {} # rollingUpdate: # maxSurge: 1 # maxUnavailable: 0 # type: RollingUpdate extraContainers: [] ## Additional containers to be added to the opa pod. # - name: example-app # image: example/example-app:latest # args: # - "run" # - "--port=11811" # - "--config=/etc/example-app-conf/config.yaml" # - "--opa-endpoint=https://localhost:443" # ports: # - name: http # containerPort: 11811 # protocol: TCP # volumeMounts: # - name: example-app-auth-config # mountPath: /etc/example-app-conf extraVolumes: [] ## Additional volumes to the opa pod. # - name: example-app-auth-config # secret: # secretName: example-app-auth-config extraPorts: [] ## Additional ports to the opa services. Useful to expose extra container ports. # - port: 11811 # protocol: TCP # name: http # targetPort: http
sematext-agent
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"sematext-agent.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because ...
agent: image: repository: sematext/agent tag: latest pullPolicy: Always service: port: 80 type: ClusterIP config: PIPELINE_CONSOLE_OUTPUT: false PIPELINE_NULL_OUTPUT: false API_SERVER_HOST: 0.0.0.0 LOGGING_WRITE_EVENTS: false LOGGING_REQUEST_TRACKING: false AUTODISCO_ALLOWED_NAMESPACES: "default" LOGGING_LEVEL: info resources: {} logagent: image: repository: sematext/logagent tag: latest pullPolicy: Always config: LOGSENE_BULK_SIZE: "1000" LOGSENE_LOG_INTERVAL: "10000" # Refer to logagent docs for custom config at https://sematext.com/docs/logagent/config-file/ customConfigs: [] # logagent.conf: |- # options: # printStats: 60 # suppress: true # geoipEnabled: true # diskBufferDir: /tmp/sematext-logagent # parser: # patternFiles: # - /etc/logagent/patterns.yml # output: # logsene: # module: elasticsearch # url: ${LOGSENE_RECEIVER_URL} resources: {} extraHostVolumeMounts: {} # - name: <mountName> # hostPath: <hostPath> # mountPath: <mountPath> priorityClassName: rbac: # Specifies whether RBAC resources should be created create: true serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: # represents the infra token where most metrics, packages, processes, etc. are shipped infraToken: null # determines the token for the container app (container metrics are delivered here) containerToken: null # logs token to send logs logsToken: null # for private images # imagePullSecrets: region: US # support for custom URLs serverBaseUrl: null eventsReceiverUrl: null logsReceiverUrl: null tolerations: [] # Node labels for pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ nodeSelector: {}
rookout
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"rookout.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Ku...
image: repository: rookout/agent tag: 0.2.3 pullPolicy: IfNotPresent service: type: ClusterIP listenAll: True tags: '' rookout: token: '' nodeSelector: {} tolerations: [] affinity: {} replicaCount: 1
atlantis
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"atlantis.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some K...
## -------------------------- ## # Values to override for your instance. ## -------------------------- ## ## An option to override the atlantis url, ## if not using an ingress, set it to the external IP. # atlantisUrl: http://10.0.0.0 # Replace this with your own repo whitelist: orgWhitelist: <replace-me> # logLevel: "debug" # If using GitHub, specify like the following: # github: # user: foo # token: bar # secret: baz # GitHub Enterprise only: # hostname: github.your.org # (The chart will perform the base64 encoding for you for values that are stored in secrets.) # If using GitLab, specify like the following: # gitlab: # user: foo # token: bar # secret: baz # GitLab Enterprise only: # hostname: gitlab.your.org # (The chart will perform the base64 encoding for you for values that are stored in secrets.) # If using Bitbucket, specify like the following: # bitbucket: # user: foo # token: bar # Bitbucket Server only: # secret: baz # baseURL: https://bitbucket.yourorganization.com # (The chart will perform the base64 encoding for you for values that are stored in secrets.) # If managing secrets outside the chart for the webhook, use this variable to reference the secret name # vcsSecretName: 'mysecret' # When referencing Terraform modules in private repositories, it may be helpful # (necessary?) 
to use redirection in a .gitconfig like so: # gitconfig: | # [url "https://YOUR_GH_TOKEN@github.com"] # insteadOf = https://github.com # [url "https://YOUR_GH_TOKEN@github.com"] # insteadOf = ssh://git@github.com # [url "https://oauth2:YOUR_GITLAB_TOKEN@gitlab.com"] # insteadOf = https://gitlab.com # [url "https://oauth2:YOUR_GITLAB_TOKEN@gitlab.com"] # insteadOf = ssh://git@gitlab.com # Source: https://stackoverflow.com/questions/42148841/github-clone-with-oauth-access-token # If managing secrets outside the chart for the gitconfig, use this variable to reference the secret name # gitconfigSecretName: 'mygitconfigsecret' # To specify AWS credentials to be mapped to ~/.aws: # aws: # credentials: | # [default] # aws_access_key_id=YOUR_ACCESS_KEY_ID # aws_secret_access_key=YOUR_SECRET_ACCESS_KEY # region=us-east-1 # config: | # [profile a_role_to_assume] # role_arn = arn:aws:iam::123456789:role/service-role/roleToAssume # source_profile = default # To reference an already existing Secret object with AWS credentials # awsSecretName: 'mysecretwithawscreds' ## To be used for mounting credential files (when using google provider). serviceAccountSecrets: # credentials: <json file as base64 encoded string> # credentials-staging: <json file as base64 encoded string> ## -------------------------- ## # Default values for atlantis (override as needed). ## -------------------------- ## image: repository: runatlantis/atlantis tag: v0.14.0 pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. 
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # imagePullSecrets: # - myRegistryKeySecretName ## Use Server Side Repo Config, ## ref: https://www.runatlantis.io/docs/server-side-repo-config.html ## Example default configuration # repoConfig: | # --- # repos: # - id: /.*/ # apply_requirements: [] # workflow: default # allowed_overrides: [] # allow_custom_workflows: false # workflows: # default: # plan: # steps: [init, plan] # apply: # steps: [apply] # allowForkPRs enables atlantis to run on a fork Pull Requests allowForkPRs: false ## defaultTFVersion set the default terraform version to be used in atlantis server # defaultTFVersion: 0.12.0 # disableApplyAll disables running `atlantis apply` without any flags disableApplyAll: false # We only need to check every 60s since Atlantis is not a high-throughput service. livenessProbe: enabled: true periodSeconds: 60 initialDelaySeconds: 5 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 scheme: HTTP readinessProbe: enabled: true periodSeconds: 60 initialDelaySeconds: 5 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 scheme: HTTP service: type: NodePort port: 80 podTemplate: annotations: {} # kube2iam example: # iam.amazonaws.com/role: role-arn labels: {} statefulSet: annotations: {} labels: {} ingress: enabled: true annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" path: / host: chart-example.local tls: [] # - secretName: chart-example-tls # hosts: # - chart-example.local labels: {} resources: requests: memory: 1Gi cpu: 100m limits: memory: 1Gi cpu: 100m # Disk space for Atlantis to check out repositories dataStorage: 5Gi replicaCount: 1 ## test container details test: enabled: true image: lachlanevenson/k8s-kubectl imageTag: v1.4.8-bash nodeSelector: {} tolerations: [] affinity: {} serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. 
# If not set and create is true, a name is generated using the fullname template name: # Annotations for the Service Account # Example: # # annotations: # annotation1: value # annotation2: value annotations: {} # tlsSecretName: tls # Optionally specify additional environment variables to be populated from Kubernetes secrets. # Useful for passing in TF_VAR_foo or other secret environment variables from Kubernetes secrets. environmentSecrets: [] # environmentSecrets: # - name: THE_ENV_VAR # secretKeyRef: # name: the_k8s_secret_name # key: the_key_of_the_value_in_the_secret # Optionally specify additional Kubernetes secrets to load environment variables from. # All key-value pairs within these secrets will be set as environment variables. # Note that any variables set here will be ignored if also defined in the env block of the atlantis statefulset. # For example, providing ATLANTIS_GH_USER here and defining a value for github.user will result in the github.user value being used. loadEnvFromSecrets: [] # loadEnvFromSecrets: # - secret_one # - secret_two # Optionally specify google service account credentials as Kubernetes secrets. If you are using the terraform google provider you can specify the credentials as "${file("/var/secrets/some-secret-name/key.json")}". googleServiceAccountSecrets: [] # googleServiceAccountSecrets: # - name: some-secret-name # secretName: the_k8s_secret_name # Optionally specify additional volumes for the pod. extraVolumes: [] # extraVolumes: # - name: some-volume-name # emptyDir: {} # Optionally specify additional volume mounts for the container. extraVolumeMounts: [] # extraVolumeMounts: # - name: some-volume-name # mountPath: /path/in/container extraManifests: [] # extraManifests: # - apiVersion: cloud.google.com/v1beta1 # kind: BackendConfig # metadata: # name: "{{ .Release.Name }}-test" # spec: # securityPolicy: # name: "gcp-cloud-armor-policy-test"
prometheus-cloudwatch-exporter
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"prometheus-cloudwatch-exporter.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 6...
# Default values for prometheus-cloudwatch-exporter. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 image: repository: prom/cloudwatch-exporter tag: cloudwatch_exporter-0.8.0 pullPolicy: IfNotPresent # Example proxy configuration: # command: # - 'java' # - '-Dhttp.proxyHost=proxy.example.com' # - '-Dhttp.proxyPort=3128' # - '-Dhttps.proxyHost=proxy.example.com' # - '-Dhttps.proxyPort=3128' # - '-jar' # - '/cloudwatch_exporter.jar' # - '9106' # - '/config/config.yml' command: [] containerPort: 9106 service: type: ClusterIP port: 9106 portName: http annotations: {} labels: {} resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi aws: role: # The name of a pre-created secret in which AWS credentials are stored. When # set, aws_access_key_id is assumed to be in a field called access_key, # aws_secret_access_key is assumed to be in a field called secret_key, and the # session token, if it exists, is assumed to be in a field called # security_token secret: name: includesSessionToken: false # Note: Do not specify the aws_access_key_id and aws_secret_access_key if you specified role or secret.name before aws_access_key_id: aws_secret_access_key: serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: # annotations: # Will add the provided map to the annotations for the crated serviceAccount # e.g. 
# annotations: # eks.amazonaws.com/role-arn: arn:aws:iam::1234567890:role/prom-cloudwatch-exporter-oidc rbac: # Specifies whether RBAC resources should be created create: true config: |- # This is the default configuration for prometheus-cloudwatch-exporter region: eu-west-1 period_seconds: 240 metrics: - aws_namespace: AWS/ELB aws_metric_name: HealthyHostCount aws_dimensions: [AvailabilityZone, LoadBalancerName] aws_statistics: [Average] - aws_namespace: AWS/ELB aws_metric_name: UnHealthyHostCount aws_dimensions: [AvailabilityZone, LoadBalancerName] aws_statistics: [Average] - aws_namespace: AWS/ELB aws_metric_name: RequestCount aws_dimensions: [AvailabilityZone, LoadBalancerName] aws_statistics: [Sum] - aws_namespace: AWS/ELB aws_metric_name: Latency aws_dimensions: [AvailabilityZone, LoadBalancerName] aws_statistics: [Average] - aws_namespace: AWS/ELB aws_metric_name: SurgeQueueLength aws_dimensions: [AvailabilityZone, LoadBalancerName] aws_statistics: [Maximum, Sum] nodeSelector: {} tolerations: [] affinity: {} # Configurable health checks against the /healthy and /ready endpoints livenessProbe: path: /-/healthy initialDelaySeconds: 30 periodSeconds: 5 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 3 readinessProbe: path: /-/ready initialDelaySeconds: 30 periodSeconds: 5 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 3 serviceMonitor: # When set true then use a ServiceMonitor to configure scraping enabled: false # Set the namespace the ServiceMonitor should be deployed # namespace: monitoring # Set how frequently Prometheus should scrape # interval: 30s # Set path to cloudwatch-exporter telemtery-path # telemetryPath: /metrics # Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator # labels: # Set timeout for scrape # timeout: 10s # Set relabelings for the ServiceMonitor, use to apply to samples before scraping # relabelings: [] # Set metricRelabelings for the ServiceMonitor, use to apply to samples 
for ingestion # metricRelabelings: [] # # Example - note the Kubernetes convention of camelCase instead of Prometheus' snake_case # metricRelabelings: # - sourceLabels: [dbinstance_identifier] # action: replace # replacement: mydbname # targetLabel: dbname prometheusRule: # Specifies whether a PrometheusRule should be created enabled: false # Set the namespace the PrometheusRule should be deployed # namespace: monitoring # Set labels for the PrometheusRule, use this to define your scrape label for Prometheus Operator # labels: # Example - note the Kubernetes convention of camelCase instead of Prometheus' # rules: # - alert: ELB-Low-BurstBalance # annotations: # message: The ELB BurstBalance during the last 10 minutes is lower than 80%. # expr: aws_ebs_burst_balance_average < 80 # for: 10m # labels: # severity: warning # - alert: ELB-Low-BurstBalance # annotations: # message: The ELB BurstBalance during the last 10 minutes is lower than 50%. # expr: aws_ebs_burst_balance_average < 50 # for: 10m # labels: # severity: warning # - alert: ELB-Low-BurstBalance # annotations: # message: The ELB BurstBalance during the last 10 minutes is lower than 30%. # expr: aws_ebs_burst_balance_average < 30 # for: 10m # labels: # severity: critical ingress: enabled: false annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" labels: {} path: / hosts: - chart-example.local tls: [] # - secretName: chart-example-tls # hosts: # - chart-example.local securityContext: runAsUser: 65534 # run as nobody user instead of root
kuberhealthy
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nSetup a chart name\n*/}}\n{{- define \"kuberhealthy.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for RBAC APIs.\n*/}}\n{{- define \"rbac.apiVersion\" -}...
# Default values for kuberhealthy. # This is a YAML-formatted file. # Declare variables to be passed into your templates. prometheus: enabled: false name: "prometheus" enableScraping: true serviceMonitor: false enableAlerting: true image: repository: quay.io/comcast/kuberhealthy tag: v1.0.2 resources: requests: cpu: 100m memory: 80Mi limits: cpu: 400m memory: 200Mi tolerations: # change to true to tolerate and deploy to masters master: false deployment: replicas: 2 maxSurge: 0 maxUnavailable: 1 imagePullPolicy: IfNotPresent podAnnotations: {} command: - /app/kuberhealthy # use this to override location of the test-image, see: https://github.com/Comcast/kuberhealthy/blob/master/docs/FLAGS.md # args: # - -dsPauseContainerImageOverride # - your-repo/google_containers/pause:0.8.0 securityContext: runAsNonRoot: true runAsUser: 999 allowPrivilegeEscalation: false # Please remember that changing the service type to LoadBalancer # will expose Kuberhealthy to the internet, which could cause # error messages shown by Kuberhealthy to be exposed to the # public internet. It is recommended to create the service # with ClusterIP, then to manually edit the service in order to # securely expose the port in an appropriate way for your # specific environment. service: externalPort: 80 type: ClusterIP annotations: {}
heartbeat
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"heartbeat.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some ...
image: repository: docker.elastic.co/beats/heartbeat tag: 6.7.0 pullPolicy: IfNotPresent config: # See https://www.elastic.co/guide/en/beats/heartbeat/current/heartbeat-reference-yml.html for reference heartbeat.monitors: - type: icmp schedule: '*/5 * * * * * *' hosts: ["localhost"] ipv4: true timeout: 16s wait: 1s processors: - add_cloud_metadata: output.file: path: "/usr/share/heartbeat/data" filename: heartbeat rotate_every_kb: 10000 number_of_files: 5 # output.elasticsearch: # hosts: ["elasticsearch:9200"] # protocol: "https" # username: "elastic" # password: "changeme" # List of beat plugins plugins: [] # - kinesis.so hostNetwork: false # A map of additional environment variables extraVars: {} # test1: "test2" # Add additional volumes and mounts, for example to read other log files on the host extraVolumes: [] # - hostPath: # path: /var/log # name: varlog extraVolumeMounts: [] # - name: varlog # mountPath: /host/var/log # readOnly: true resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 200Mi # requests: # cpu: 100m # memory: 100Mi priorityClassName: "" nodeSelector: {} tolerations: [] rbac: # Specifies whether RBAC resources should be created create: true serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name:
distributed-jmeter
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"distributed-jmeter.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars beca...
# Default values for distributed-jmeter. # This is a YAML-formatted file. # Declare variables to be passed into your templates. master: ## The number of pods in the master deployment replicaCount: 1 server: ## The number of pods in the server deployment replicaCount: 3 image: ## Specify an imagePullPolicy ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images pullPolicy: IfNotPresent ## The repository and image ## ref: https://hub.docker.com/r/pedrocesarti/jmeter-docker/ repository: "pedrocesarti/jmeter-docker" ## The tag for the image ## ref: https://hub.docker.com/r/pedrocesarti/jmeter-docker/tags/ tag: 3.3
gocd
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"gocd.name\" -}}\n{{- default .Chart.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are...
# Default values for gocd. # This is a YAML-formatted file. # Declare variables to be passed into your templates. rbac: # Specifies whether rbac resources must be created. create: true # The API version to use while creating the rbac resources. Use `kubectl api-versions | grep rbac` to find which abi versions are supported for your cluster. apiVersion: v1 # Create a cluster role binding with the existing role, do not create a new one. If left blank, a new cluster role is created. roleRef: serviceAccount: # Specifies whether a service account should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template # If create is false and a name is not specified, the default service account is used for the cluster role binding. name: server: # server.enabled is the toggle to run GoCD Server. Change to false for Agent Only Deployment. enabled: true # server.annotations is the annotations for the GoCD Server Deployment and Pod spec. annotations: deployment: # iam.amazonaws.com/role: arn:aws:iam::xxx:role/my-custom-role pod: # iam.amazonaws.com/role: arn:aws:iam::xxx:role/my-custom-role # Specify security settings for GoCD Server Pod securityContext: # Specify the container user for the GoCD server pod runAsUser: 1000 # Specify the container group for the GoCD server pod runAsGroup: 0 # Specify the container supplementary group for the GoCD server pod fsGroup: 0 # server.shouldPreconfigure is used to invoke a script to pre configure the elastic agent profile and the plugin settings in the GoCD server. # Note: If this value is set to true, then, the serviceAccount.name is configured for the GoCD server pod. The service account token is mounted as a secret and is used in the lifecycle hook. # Note: An attempt to preconfigure the GoCD server is made. There are cases where the pre-configuration can fail and the GoCD server starts with an empty config. 
shouldPreconfigure: true preconfigureCommand: - "/bin/bash" - "/preconfigure_server.sh" # server.preStop - array of commands to use in the server pre-stop lifecycle hook # preStop: # - "/bin/bash" # - "/backup_and_stop.sh" # server.terminationGracePeriodSeconds is the optional duration in seconds the gocd server pod needs to terminate gracefully. # Note: SIGTERM is issued immediately after the pod deletion request is sent. If the pod doesn't terminate, k8s waits for terminationGracePeriodSeconds before issuing SIGKILL. # server.terminationGracePeriodSeconds: 60 image: # server.image.repository is the GoCD Server image name repository: "gocd/gocd-server" # server.image.tag is the GoCD Server image's tag tag: # server.image.pullPolicy is the GoCD Server image's pull policy pullPolicy: "IfNotPresent" ## Configure GoCD server resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: {} # requests: # memory: 512Mi # cpu: 300m # limits: # cpu: 100m # memory: 1024Mi # Sidecar containers that runs alongside GoCD server. # https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/ sidecarContainers: [] # - name: sidecar-container # image: sidecar-image:latest # volumeMounts: # - name: goserver-vol # mountPath: /godata # specify init containers, e.g. 
to prepopulate home directories etc initContainers: [] # - name: download-kubectl # image: "ellerbrock/alpine-bash-curl-ssl:latest" # imagePullPolicy: "IfNotPresent" # volumeMounts: # - name: kubectl # mountPath: /download # workingDir: /download # command: ["/bin/bash"] # args: # - "-c" # - 'curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && chmod +x ./kubectl' # specify restart policy for server restartPolicy: Always ## Additional GoCD server pod labels ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ nodeSelector: {} ## Affinity for assigning pods to specific nodes ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ affinity: {} ## Tolerations for allowing pods to be scheduled on nodes with matching taints ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ tolerations: {} healthCheck: # server.healthCheck.initialDelaySeconds is the initial delays in seconds to start the health checks initialDelaySeconds: 90 # server.healthCheck.periodSeconds is the health check interval duration periodSeconds: 15 # server.healthCheck.failureThreshold is the number of unsuccessful attempts made to the GoCD server health check endpoint before the container is restarted (for liveness) or marked as unready (for readiness) failureThreshold: 10 env: # server.env.goServerJvmOpts is a list of JVM options, which needs to be provided to the GoCD Server, typically prefixed with -D unless otherwise stated. 
# Example: "-Xmx4096mb -Dfoo=bar" goServerJvmOpts: # server.env.extraEnvVars is the list of environment variables passed to GoCD Server extraEnvVars: - name: GOCD_PLUGIN_INSTALL_kubernetes-elastic-agents value: https://github.com/gocd/kubernetes-elastic-agents/releases/download/v3.4.0-196/kubernetes-elastic-agent-3.4.0-196.jar - name: GOCD_PLUGIN_INSTALL_docker-registry-artifact-plugin value: https://github.com/gocd/docker-registry-artifact-plugin/releases/download/v1.1.0-104/docker-registry-artifact-plugin-1.1.0-104.jar service: # server.service.type is the GoCD Server service type type: "NodePort" # server.service.httpPort is the GoCD Server HTTP port httpPort: 8153 # Provide the nodeHttpPort and nodeHttpsPort if you want the service to be exposed on specific ports. Without this, random node ports will be assigned. # server.service.nodeHttpPort is the GoCD Server Service Node HTTP port nodeHttpPort: # server.service.nodeHttpPort is the GoCD Server Service Node HTTPS port nodeHttpsPort: annotations: ## When using LoadBalancer service type, use the following AWS certificate from ACM ## https://aws.amazon.com/documentation/acm/ # service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "arn:aws:acm:eu-west-1:123456789:certificate/abc123-abc123-abc123-abc123" # service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "https" # service.beta.kubernetes.io/aws-load-balancer-backend-port: "https" ## When using LoadBalancer service type, whitelist these source IP ranges ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/ # loadBalancerSourceRanges: # - 192.168.1.10/32 ingress: # server.ingress.enabled is the toggle to enable/disable GoCD Server Ingress enabled: true # server.ingress.hosts is used to create an Ingress record. 
# hosts: # - ci.example.com annotations: # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" tls: # - secretName: ci-example-tls # hosts: # - ci.example.com persistence: # server.persistence.enabled is the toggle for server volume persistence. enabled: true accessMode: "ReadWriteOnce" # The storage space that should be claimed from the persistent volume size: 2Gi # If defined, storageClassName: <storageClass> # If set to "-", storageClassName: "", which disables dynamic provisioning # If undefined (the default) or set to null, no storageClassName spec is # set, choosing 'standard' storage class available with the default provisioner (gcd-pd on GKE, hostpath on minikube, etc). # storageClass: "-" # A manually managed Persistent Volume and Claim # If defined, PVC must be created manually before volume will be bound existingClaim: # To choose a suitable persistent volume from available static persistent volumes, selectors are used. pvSelector: # matchLabels: # volume-type: ssd name: # server.persistence.name.dockerEntryPoint name of the volume mounted at /docker-entrypoint.d/ on the server dockerEntryPoint: goserver-vol # "" for the volume root subpath: # godata is where the config, db, plugins are stored godata: godata # homego can be used for storing and mounting secrets homego: homego # custom entrypoint scripts that should be run before starting the GoCD server inside the container. 
dockerEntryPoint: scripts # server.persistence.extraVolumes additional server volumes extraVolumes: [] # - name: gocd-server-init-scripts # configMap: # name: gocd-server-init-scripts # defaultMode: 0755 # - name: github-key # secret: # secretName: github-key # defaultMode: 0744 # server.persistence.extraVolumeMounts additional server volumeMounts extraVolumeMounts: [] # - name: github-key # mountPath: /etc/config/keys/ # readOnly: true # - name: gocd-server-init-scripts # mountPath: /docker-entrypoint.d/ # server.hostAliases allows the modification of the hosts file inside a container hostAliases: # - ip: "192.168.1.10" # hostnames: # - "example.com" # - "www.example.com" security: ssh: # server.security.ssh.enabled is the toggle to enable/disable mounting of ssh secret on GoCD server pods enabled: false # server.security.ssh.secretName specifies the name of the k8s secret object that contains the ssh key and known hosts secretName: gocd-server-ssh # server.security.ssh.defaultMode specifies the permission of the files in ~/.ssh directory defaultMode: agent: # specifies overrides for agent specific service account creation serviceAccount: # specifies whether the top level service account (also used by the server) should be reused as the service account for gocd agents reuseTopLevelServiceAccount: false # if reuseTopLevelServiceAccount is false, this field specifies the name of an existing service account to be associated with gocd agents # If field is empty, the service account "default" will be used. 
name: # agent.annotations is the annotations for the GoCD Agent Deployment and Pod Spec annotations: deployment: # iam.amazonaws.com/role: arn:aws:iam::xxx:role/my-custom-role pod: # iam.amazonaws.com/role: arn:aws:iam::xxx:role/my-custom-role # Specify security settings for GoCD Agent Pod securityContext: # Specify the container user for all the GoCD agent pods runAsUser: 1000 # Specify the container group for all the GoCD agent pods runAsGroup: 0 # Specify the container supplementary group for all the GoCD agent pods fsGroup: 0 # agent.replicaCount is the GoCD Agent replicas Count. Specify the number of GoCD agents to run replicaCount: 0 # agent.preStop - array of command and arguments to run in the agent pre-stop lifecycle hook # preStop: # - "/bin/bash" # - "/disable_and_stop.sh" # agent.postStart - array of command and arguments to run in agent post-start lifecycle hook # postStart: # - "/bin/bash" # - "/agent_startup.sh" # agent.deployStrategy is the strategy explained in detail at https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy # agent.terminationGracePeriodSeconds is the optional duration in seconds the gocd agent pods need to terminate gracefully. # Note: SIGTERM is issued immediately after the pod deletion request is sent. If the pod doesn't terminate, k8s waits for terminationGracePeriodSeconds before issuing SIGKILL. 
# agent.terminationGracePeriodSeconds: 60 deployStrategy: {} image: # agent.image.repository is the GoCD Agent image name repository: "gocd/gocd-agent-alpine-3.9" # agent.image.tag is the GoCD Agent image's tag tag: # agent.image.pullPolicy is the GoCD Agent image's pull policy pullPolicy: "IfNotPresent" env: # agent.env.goServerUrl is the GoCD Server Url goServerUrl: # agent.env.agentAutoRegisterKey is the GoCD Agent auto-register key agentAutoRegisterKey: # agent.env.agentAutoRegisterResources is the GoCD Agent auto-register resources agentAutoRegisterResources: # agent.env.agentAutoRegisterEnvironments is the GoCD Agent auto-register Environments # deprecated because of a typo. Use agent.env.agentAutoRegisterEnvironments instead agentAutoRegisterEnvironemnts: # agent.env.agentAutoRegisterEnvironments is the GoCD Agent auto-register Environments agentAutoRegisterEnvironments: # agent.env.agentAutoRegisterHostname is the GoCD Agent auto-register hostname agentAutoRegisterHostname: # agent.env.goAgentJvmOpts is the GoCD Agent JVM options goAgentJvmOpts: # agent.env.goAgentBootstrapperArgs is the GoCD Agent bootstrapper args goAgentBootstrapperArgs: # agent.env.goAgentBootstrapperJvmArgs is the GoCD Agent bootstrapper JVM args goAgentBootstrapperJvmArgs: # agent.env.extraEnvVars is the list of environment variables passed to GoCD Agent extraEnvVars: persistence: # agent.persistence.enabled is the toggle for agent volume persistence. Change to true if a persistent volume is available and configured manually. enabled: false accessMode: "ReadWriteOnce" size: 1Gi # If defined, storageClassName: <storageClass> # If set to "-", storageClassName: "", which disables dynamic provisioning # If undefined (the default) or set to null, no storageClassName spec is # set, choosing 'standard' storage class available with the default provisioner (gcd-pd on GKE, hostpath on minikube, etc). 
# storageClass: "-" # A manually managed Persistent Volume and Claim # If defined, PVC must be created manually before volume will be bound existingClaim: pvSelector: # matchLabels: # app: godata-gocd-agent name: # agent.persistence.name.dockerEntryPoint name of the volume mounted at /docker-entrypoint.d/ on the agent dockerEntryPoint: goagent-vol # "" for the volume root subpath: homego: homego dockerEntryPoint: scripts # agent.persistence.extraVolumes additional agent volumes extraVolumes: [] # - name: gocd-agent-init-scripts # configMap: # name: gocd-agent-init-scripts # defaultMode: 0755 # - name: github-key # secret: # secretName: github-key # defaultMode: 0744 # agent.persistence.extraVolumeMounts additional agent volumeMounts extraVolumeMounts: [] # - name: github-key # mountPath: /etc/config/keys/ # readOnly: true # - name: gocd-agent-init-scripts # mountPath: /docker-entrypoint.d/ # specify init containers, e.g. to prepopulate home directories etc initContainers: [] # - name: download-kubectl # image: "ellerbrock/alpine-bash-curl-ssl:latest" # imagePullPolicy: "IfNotPresent" # volumeMounts: # - name: kubectl # mountPath: /download # workingDir: /download # command: ["/bin/bash"] # args: # - "-c" # - 'curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && chmod +x ./kubectl' # specify restart policy for agents restartPolicy: Always # agent.privileged is needed for running Docker-in-Docker (DinD) agents privileged: false healthCheck: # agent.healthCheck.enable is the toggle for GoCD agent health checks enabled: false # agent.healthCheck.initialDelaySeconds is the initial delays in seconds to start the health checks initialDelaySeconds: 60 # agent.healthCheck.periodSeconds is the health check interval duration periodSeconds: 60 # agent.healthCheck.failureThreshold is the health check failure threshold of GoCD agent failureThreshold: 60 
security: ssh: # agent.security.ssh.enabled is the toggle to enable/disable mounting of ssh secret on GoCD agent pods enabled: false # agent.security.ssh.secretName specifies the name of the k8s secret object that contains the ssh key and known hosts secretName: gocd-agent-ssh # agent.security.ssh.defaultMode specifies the permission of the files in ~/.ssh directory defaultMode: ## Configure GoCD agent resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: {} # requests: # memory: 512Mi # cpu: 300m # limits: # cpu: 100m # memory: 1024Mi # agent.hostAliases allows the modification of the hosts file inside a container hostAliases: # - ip: "192.168.1.10" # hostnames: # - "example.com" # - "www.example.com" ## Additional GoCD agent pod labels ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ nodeSelector: {} ## Affinity for assigning pods to specific nodes ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ affinity: {} ## Tolerations for allowing pods to be scheduled on nodes with matching taints ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ tolerations: {}
katafygio
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"katafygio.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some ...
# Default values for the katafygio chart. # This is a YAML-formatted file. # Declare variables to be passed into your templates. # gitUrl (optional) is a remote git repository that Katafygio can clone, and where # it can push changes. If gitUrl is not defined, Katafygio will still maintain a # pod-local git repository, which can be on a persistent volume (see above). # gitUrl: https://user:token@github.com/myorg/myrepos.git # noGit disable git versioning when true (will only keep an unversioned local dump up-to-date). noGit: false # healthcheckPort is the TCP port Katafygio will listen for health check requests. healthcheckPort: 8080 # logLevel can be info, warning, error, or fatal. logLevel: warning # logOutput can be stdout, stderr, or syslog. logOutput: stdout # logServer (optional) provide the address of a remote syslog server. # logServer: "localhost:514" # filter is an (optional) label selector used to restrict backups to selected objects. # filter: "app in (foo, bar)" # excludeKind is an array of excluded (not backuped) Kubernetes objects kinds. excludeKind: - replicaset - endpoints - event # excludeObject is an array of specific Kubernetes objects to exclude from dumps # (the format is: objectkind:namespace/objectname). # excludeObject: # - "configmap:kube-system/leader-elector" # resyncInterval is the interval (in seconds) between full catch-up resyncs # (to catch possibly missed events). Set to 0 to disable resyncs. resyncInterval: 300 # localDir is the path where we'll dump and commit cluster objects. localDir: "/var/lib/katafygio/data" # persistence for the localDir dump directory. Note that configuring gitUrl # is an other way to achieve persistence. persistence: enabled: true ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. 
(gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## storageClass: "" accessMode: ReadWriteOnce size: 1Gi # existingClaim: "" # rbac allow to enable or disable RBAC role and binding. Katafygio needs # read-only access to all Kubernetes API groups and resources. rbac: # Specifies whether RBAC resources should be created create: true # serviceAccount is used to provide a dedicated serviceAccount when using RBAC # (or to fallback to the namespace's "default" SA if name is left empty). serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: image: repository: bpineau/katafygio tag: v0.8.1 pullPolicy: IfNotPresent # resources define the deployment's cpu and memory resources. # Katafygio only needs about 50Mi of memory as a baseline, and more depending # on the cluster's content. For instance, on a 45 nodes cluster with about 2k # pods and 1k services, Katafygio use about 250Mi. resources: {} # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi replicaCount: 1 nodeSelector: {} tolerations: [] affinity: {}
moodle
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"moodle.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kub...
## Global Docker image parameters ## Please, note that this will override the image parameters, including dependencies, configured to use the global value ## Current available global Docker image parameters: imageRegistry and imagePullSecrets ## # global: # imageRegistry: myRegistryName # imagePullSecrets: # - myRegistryKeySecretName # storageClass: myStorageClass ## Bitnami Moodle image version ## ref: https://hub.docker.com/r/bitnami/moodle/tags/ ## image: registry: docker.io repository: bitnami/moodle tag: 3.8.2-debian-10-r0 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## String to partially override moodle.fullname template (will maintain the release name) ## # nameOverride: ## String to fully override moodle.fullname template ## # fullnameOverride: ## Skip Moodle installation wizard. 
Useful for migrations and restoring from SQL dump ## ref: https://github.com/bitnami/bitnami-docker-moodle#configuration ## moodleSkipInstall: "no" ## User of the application ## ref: https://github.com/bitnami/bitnami-docker-moodle#configuration ## moodleUsername: user ## Application password ## Defaults to a random 10-character alphanumeric string if not set ## ref: https://github.com/bitnami/bitnami-docker-moodle#configuration ## # moodlePassword: ## Admin email ## ref: https://github.com/bitnami/bitnami-docker-moodle#configuration moodleEmail: user@example.com ## Set to `yes` to allow the container to be started with blank passwords ## ref: https://github.com/bitnami/bitnami-docker-moodle#environment-variables allowEmptyPassword: "yes" ## ## External database configuration ## externalDatabase: ## Database host host: ## Database host port: 3306 ## Database user user: bn_moodle ## Database password password: ## Database name database: bitnami_moodle ## SMTP mail delivery configuration ## ref: https://github.com/bitnami/bitnami-docker-moodle/#smtp-configuration # smtpHost: # smtpPort: # smtpUser: # smtpPassword: # smtpProtocol: ## ## MariaDB chart configuration ## ## https://github.com/helm/charts/blob/master/stable/mariadb/values.yaml ## mariadb: ## Whether to deploy a mariadb server to satisfy the applications database requirements. 
To use an external database set this to false and configure the externalDatabase parameters enabled: true ## Disable MariaDB replication replication: enabled: false ## Create a database and a database user ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run ## db: name: bitnami_moodle user: bn_moodle ## If the password is not specified, mariadb will generates a random password ## # password: ## MariaDB admin password ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run ## # rootUser: # password: ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## master: persistence: enabled: true ## mariadb data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. 
(gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessModes: - ReadWriteOnce size: 8Gi ## Define affinity for the pod ## Sometimes required when persistent volumes are defined externally ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector # affinity: # nodeAffinity: # requiredDuringSchedulingIgnoredDuringExecution: # nodeSelectorTerms: # - matchExpressions: # - key: node-role.kubernetes.io/master # operator: Exists # values: # - machine01 # preferredDuringSchedulingIgnoredDuringExecution: # - weight: 1 # preference: # matchExpressions: # - key: another-node-label-key # operator: In # values: # - another-node-label-value # # resources: # requests: # memory: 768Mi # cpu: 750m ## Kubernetes configuration ## For minikube, set this to NodePort, for ingress ClusterIP, elsewhere use LoadBalancer ## service: type: LoadBalancer # HTTP Port port: 80 # HTTPS Port httpsPort: 443 ## ## nodePorts: ## http: <to set explicitly, choose port between 30000-32767> ## https: <to set explicitly, choose port between 30000-32767> nodePorts: http: "" https: "" ## Enable client source IP preservation ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip ## externalTrafficPolicy: Cluster ## Configure the ingress resource that allows you to access the ## Moodle installation. Set up the URL ## ref: http://kubernetes.io/docs/user-guide/ingress/ ## ingress: ## Set to true to enable ingress record generation enabled: false ## The list of hostnames to be covered with this ingress record. 
## Most likely this will be just one host, but in the event more hosts are needed, this is an array hosts: - name: moodle.local ## Set this to true in order to enable TLS on the ingress record ## A side effect of this will be that the backend moodle service will be connected at port 443 tls: false ## Set this to true in order to add the corresponding annotations for cert-manager certManager: false ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS tlsSecret: moodle.local-tls ## Ingress annotations done as key:value pairs ## For a full list of possible ingress annotations, please see ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md ## ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set annotations: # kubernetes.io/ingress.class: nginx secrets: ## If you're providing your own certificates, please use this to add the certificates as secrets ## key and certificate should start with -----BEGIN CERTIFICATE----- or ## -----BEGIN RSA PRIVATE KEY----- ## ## name should line up with a tlsSecret set further up ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set ## ## It is also possible to create and manage the certificates outside of this helm chart ## Please see README.md for more information # - name: moodle.local-tls # key: # certificate: ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## persistence: enabled: true ## database data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. 
(gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi # existingClaim: "" ## Define affinity for the moodle pod ## Sometimes required when persistent volumes are defined externally ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector # affinity: # nodeAffinity: # requiredDuringSchedulingIgnoredDuringExecution: # nodeSelectorTerms: # - matchExpressions: # - key: node-role.kubernetes.io/master # operator: In # values: # - machine01 # preferredDuringSchedulingIgnoredDuringExecution: # - weight: 1 # preference: # matchExpressions: # - key: another-node-label-key # operator: In # values: # - another-node-label-value ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: requests: memory: 512Mi cpu: 300m ## Configure extra options for liveness and readiness probes ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) livenessProbe: enabled: true initialDelaySeconds: 600 periodSeconds: 10 timeoutSeconds: 5 failureThreshold: 6 successThreshold: 1 readinessProbe: enabled: true initialDelaySeconds: 30 periodSeconds: 5 timeoutSeconds: 3 failureThreshold: 6 successThreshold: 1 ## Pod annotations ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ ## podAnnotations: {} ## Prometheus Exporter / Metrics ## metrics: enabled: false image: registry: docker.io repository: bitnami/apache-exporter tag: 0.7.0-debian-10-r39 pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. 
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## Metrics exporter pod Annotation and Labels podAnnotations: prometheus.io/scrape: "true" prometheus.io/port: "9117" ## Metrics exporter resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## # resources: {}
collabora-code
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"collabora-code.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because ...
# Default values for collabora-code. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 image: repository: collabora/code tag: 4.0.3.1 pullPolicy: IfNotPresent strategy: Recreate nameOverride: "" fullnameOverride: "" service: type: ClusterIP port: 9980 ingress: enabled: false annotations: {} paths: [] hosts: [] tls: [] collabora: DONT_GEN_SSL_CERT: true domain: nextcloud\\.domain extra_params: --o:ssl.termination=true --o:ssl.enable=false server_name: collabora\.domain password: examplepass username: admin dictionaries: de_DE en_GB en_US es_ES fr_FR it nl pt_BR pt_PT ru securitycontext: allowPrivilegeEscalation: true capabilities: add: - MKNOD resources: {} # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi nodeSelector: {} tolerations: [] affinity: {} livenessProbe: enabled: true initialDelaySeconds: 30 timeoutSeconds: 2 periodSeconds: 10 successThreshold: 1 failureThreshold: 3 scheme: HTTP path: / readinessProbe: enabled: true initialDelaySeconds: 30 timeoutSeconds: 2 periodSeconds: 10 successThreshold: 1 failureThreshold: 3 scheme: HTTP path: /
suitecrm
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"suitecrm.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some K...
## Global Docker image parameters ## Please, note that this will override the image parameters, including dependencies, configured to use the global value ## Current available global Docker image parameters: imageRegistry and imagePullSecrets ## # global: # imageRegistry: myRegistryName # imagePullSecrets: # - myRegistryKeySecretName # storageClass: myStorageClass ## Bitnami SuiteCRM image version ## ref: https://hub.docker.com/r/bitnami/suitecrm/tags/ ## image: registry: docker.io repository: bitnami/suitecrm tag: 7.11.12-debian-10-r18 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## String to partially override suitecrm.fullname template (will maintain the release name) ## # nameOverride: ## String to fully override suitecrm.fullname template ## # fullnameOverride: ## SuiteCRM host to create application URLs ## ref: https://github.com/bitnami/bitnami-docker-suitecrm#configuration ## # suitecrmHost: ## SuiteCRM validate user IP ## ref: https://github.com/bitnami/bitnami-docker-suitecrm#configuration ## suitecrmValidateUserIP: "no" ## User of the application ## ref: https://github.com/bitnami/bitnami-docker-suitecrm#configuration ## suitecrmUsername: user ## Application password ## Defaults to a random 10-character alphanumeric string if not set ## ref: https://github.com/bitnami/bitnami-docker-suitecrm#configuration ## # suitecrmPassword: ## Admin email ## ref: https://github.com/bitnami/bitnami-docker-suitecrm#configuration ## suitecrmEmail: user@example.com ## Lastname ## ref: https://github.com/bitnami/bitnami-docker-suitecrm#configuration ## suitecrmLastName: 
Name ## Set to `yes` to allow the container to be started with blank passwords ## ref: https://github.com/bitnami/bitnami-docker-suitecrm#environment-variables allowEmptyPassword: "yes" ## ## External database configuration ## externalDatabase: ## Database host host: ## Database host port: 3306 ## Database user user: bn_suitecrm ## Database password password: ## Database name database: bitnami_suitecrm ## SMTP mail delivery configuration ## ref: https://github.com/bitnami/bitnami-docker-suitecrm/#smtp-configuration ## # suitecrmSmtpHost: # suitecrmSmtpPort: # suitecrmSmtpUser: # suitecrmSmtpPassword: # suitecrmSmtpProtocol: ## ## MariaDB chart configuration ## ## https://github.com/helm/charts/blob/master/stable/mariadb/values.yaml ## mariadb: ## Whether to deploy a mariadb server to satisfy the applications database requirements. To use an external database set this to false and configure the externalDatabase parameters enabled: true ## Disable MariaDB replication replication: enabled: false ## Create a database and a database user ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run ## db: name: bitnami_suitecrm user: bn_suitecrm ## If the password is not specified, mariadb will generates a random password ## # password: ## MariaDB admin password ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run ## # rootUser: # password: ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## master: persistence: enabled: true ## mariadb data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. 
(gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi service: type: LoadBalancer # HTTP Port port: 80 # HTTPS Port httpsPort: 443 ## loadBalancerIP for the SuiteCRM Service (optional, cloud specific) ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer ## ## loadBalancerIP: ## ## nodePorts: ## http: <to set explicitly, choose port between 30000-32767> ## https: <to set explicitly, choose port between 30000-32767> nodePorts: http: "" https: "" ## Enable client source IP preservation ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip ## externalTrafficPolicy: Cluster ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## persistence: enabled: true ## suitecrm data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" ## A manually managed Persistent Volume and Claim ## Requires persistence.enabled: true ## If defined, PVC must be created manually before volume will be bound # existingClaim: accessMode: ReadWriteOnce size: 8Gi ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: {} # requests: # memory: 512Mi # cpu: 300m ## Configure the ingress resource that allows you to access the ## SuiteCRM installation. 
Set up the URL ## ref: http://kubernetes.io/docs/user-guide/ingress/ ## ingress: ## Set to true to enable ingress record generation enabled: false ## Set this to true in order to add the corresponding annotations for cert-manager certManager: false ## Ingress annotations done as key:value pairs ## For a full list of possible ingress annotations, please see ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md ## ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set annotations: # kubernetes.io/ingress.class: nginx ## The list of hostnames to be covered with this ingress record. ## Most likely this will be just one host, but in the event more hosts are needed, this is an array hosts: - name: suitecrm.local path: / ## Set this to true in order to enable TLS on the ingress record tls: false ## Optionally specify the TLS hosts for the ingress record ## Useful when the Ingress controller supports www-redirection ## If not specified, the above host name will be used # tlsHosts: # - www.suitecrm.local # - suitecrm.local ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS tlsSecret: suitecrm.local-tls secrets: ## If you're providing your own certificates, please use this to add the certificates as secrets ## key and certificate should start with -----BEGIN CERTIFICATE----- or ## -----BEGIN RSA PRIVATE KEY----- ## ## name should line up with a tlsSecret set further up ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set ## ## It is also possible to create and manage the certificates outside of this helm chart ## Please see README.md for more information # - name: suitecrm.local-tls # key: # certificate: ## Pod annotations ## ref: 
https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ ## podAnnotations: {} ## Affinity for pod assignment ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## affinity: {} ## Prometheus Exporter / Metrics ## metrics: enabled: false image: registry: docker.io repository: bitnami/apache-exporter tag: 0.7.0-debian-10-r37 pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## Metrics exporter pod Annotation and Labels podAnnotations: prometheus.io/scrape: "true" prometheus.io/port: "9117" ## Metrics exporter resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## # resources: {}
bookstack
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"bookstack.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some ...
# Default values for bookstack. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 image: repository: solidnerd/bookstack tag: 0.27.5 pullPolicy: IfNotPresent app: # Laravel APP_KEY. Generate one with `php artisan key:generate` and put here if you want a static key. key: env: {} ## ## External database configuration ## externalDatabase: ## Database host host: ## Database host port: 3306 ## Database user user: bookstack ## Database password password: ## Database name database: bookstack ## ## MariaDB chart configuration ## mariadb: ## Whether to deploy a mariadb server to satisfy the applications database requirements. To use an external database set this to false and configure the externalDatabase parameters enabled: true ## Disable MariaDB replication replication: enabled: false ## Create a database and a database user ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run ## db: name: bookstack user: bookstack ## If the password is not specified, mariadb will generates a random password ## # password: ## MariaDB admin password ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run ## # rootUser: # password: ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## master: persistence: enabled: false ## mariadb data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. 
(gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi service: type: ClusterIP port: 80 # If true, create & use Pod Security Policy resources # https://kubernetes.io/docs/concepts/policy/pod-security-policy/ podSecurityPolicy: enabled: false ## For RBAC support: rbac: # Specifies whether RBAC resources should be created create: true serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## persistence: # Persistence for the public/uploads folder uploads: enabled: true ## database data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi # Persistence for the public/storage folder storage: enabled: true ## database data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. 
(gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi ingress: enabled: false annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" path: / hosts: - bookstack-example.local tls: [] # - secretName: chart-example-tls # hosts: # - chart-example.local resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi nodeSelector: {} tolerations: [] affinity: {} ## Enable ldap authentication. See https://www.bookstackapp.com/docs/admin/ldap-auth/ for details on how to set it up. ldap: enabled: false server: base_dn: dn: pass: userFilter: version:
signalfx-agent
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"signalfx-agent.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because ...
# Version of the signalfx-agent to deploy. This will be the default for the # docker image tag if not overridden with imageTag agentVersion: 3.6.1 # The access token for SignalFx. REQUIRED signalFxAccessToken: "" # Docker image configuration image: # Image pull policy for the agent pod pullPolicy: IfNotPresent # The docker image to use for the agent repository: quay.io/signalfx/signalfx-agent # tag defaults to the agentVersion but can be overridden tag: # pullSecret is not needed for our standard image pullSecret: # How many agent pods can be unavailable at a time when rolling out a new # version of the agent rollingUpdateMaxUnavailable: 1 # Namespace to deploy agent in (Optional: Will default to release namespace) namespace: # RBAC config for the agent rbac: create: true # You might need custom rules if you are pulling secrets to configure # monitors. customRules: [] # Service account config for the agent pods serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: # This adds some tolerations to the pods that the DaemonSet creates that # should allow the agent to run on the master nodes of newer versions of K8s # that are deployed with standard master taints (see daemonset.yaml). If you # need custom tolerations, see the 'tolerations' config option below. runOnMaster: true # You can specify additional tolerations for the pods that the DaemonSet # creates. tolerations: [] # Extra labels to put on agent pods. Values must be strings per the k8s label # schema. extraPodLabels: {} # You can specify a custom agent config file with the agentConfig value. If # you specify this, all of the options below here will no longer be applicable # since they are used to render a default config (see configmap.yaml template). agentConfig: # clusterName must be provided. 
It is an arbitrary value that identifies this # K8s cluster in SignalFx. This will be the value of the 'kubernetes_cluster' # dimension on every metric sent by the agent. clusterName: # How frequently to send metrics by default in the agent. This can be # overridden by individual monitors. metricIntervalSeconds: 10 # The log level of the agent. Valid values are 'debug', 'info', 'warn', and # 'error'. Info is a good default and won't be too spamy. Note that 'debug' # may dump sensitive values in the provided configuration so use with care. logLevel: info # Whether to ignore TLS validation issue when connecting to the main K8s API # server. This should almost never need to be set to true since the CA cert is # provided with the service account token automatically by K8s. apiServerSkipVerify: false # Additional options for connecting to the Kubelet. These options are # equivalent to what is under the 'kubeletAPI' key of the 'kubelet-stats' # monitor. By default, the agent tries to use its service account if kubelet # authentication is required. kubeletAPI: authType: serviceAccount # Any values put in this object correspond to the 'collectd' config block of # the agent config collectd: {} # How often to send cAdvisor-based container metrics. Defaults to whatever is # in metricIntervalSeconds. containerStatsIntervalSeconds: # If true, K8s cluster-level metrics will be collected (e.g. pod counts, # deployment status, etc). The agents will decide amongst themselves which # instance should send the metrics so that they are only sent once. gatherClusterMetrics: true # Enables the docker-container-stats monitor with some specific config that # causes it to send container stats from Docker with certain dimensions from # container labels that makes it easy to correlate metrics between cadvisor and # docker. Note that docker metrics are not sent for pause containers by # default. 
gatherDockerMetrics: true # A list of metric names that are collected by monitors but are not to be sent # to SignalFx. This default set include a lot of highly specific or duplicated # cAdvisor metrics that cause a large increase in DPM for little value for most # customers. metricNamesToExclude: - container_cpu_user_seconds_total - container_cpu_system_seconds_total - container_cpu_utilization_per_core - container_fs_reads_total - container_fs_sector_reads_total - container_fs_reads_merged_total - container_fs_read_seconds_total - container_fs_writes_total - container_fs_sector_writes_total - container_fs_writes_merged_total - container_fs_write_seconds_total - container_fs_io_current - container_fs_io_time_seconds_total - container_fs_io_time_weighted_seconds_total - container_last_seen - container_tasks_state - pod_network_receive_packets_total - pod_network_receive_packets_dropped_total - pod_network_transmit_packets_total - pod_network_transmit_packets_dropped_total - machine_cpu_frequency_khz # A list of monitor configurations to include in the agent config. These # values correspond exactly to what goes under 'monitors' in the agent config. # The following are a set of monitors with discovery rules that should cover # many standard deployments. Most users will want to override this with their # own monitors and discovery rules. 
monitors: - type: collectd/activemq discoveryRule: container_image =~ "activemq" && private_port == 1099 - type: collectd/apache discoveryRule: container_image =~ "apache" && private_port == 80 - type: collectd/cassandra discoveryRule: container_image =~ "cassandra" && private_port == 7199 - type: collectd/consul discoveryRule: container_image =~ "consul" && private_port == 8500 - type: collectd/elasticsearch discoveryRule: container_image =~ "elasticsearch" && port == 9200 - type: collectd/etcd discoveryRule: container_image =~ "etcd" && port == 2379 # REQUIRED clusterName: my-cluster - type: collectd/haproxy discoveryRule: container_image =~ "haproxy" && port == 9000 - type: collectd/kafka discoveryRule: container_image =~ "kafka" && private_port == 9092 - type: collectd/memcached discoveryRule: container_image =~ "memcache" && private_port == 11211 - type: collectd/mongodb discoveryRule: container_image =~ "mongo" && private_port == 27017 # REQUIRED databases: - mydatabase - type: collectd/mysql discoveryRule: container_image =~ "mysql" && private_port == 3306 # REQUIRED username: admin databases: - name: mydb - type: collectd/nginx discoveryRule: container_image =~ "nginx" && private_port == 80 - type: collectd/rabbitmq discoveryRule: container_image =~ "rabbitmq" && private_port == 15672 - type: collectd/redis discoveryRule: container_image =~ "redis" && private_port == 6379 - type: collectd/spark discoveryRule: container_image =~ "spark" && private_port == 8080 isMaster: true collectApplicationMetrics: true clusterType: Standalone - type: collectd/spark discoveryRule: container_image =~ "spark" && private_port >= 8081 isMaster: false clusterType: Standalone - type: collectd/zookeeper discoveryRule: container_image =~ "zookeeper" && private_port == 2181
jasperreports
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"jasperreports.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because s...
## Global Docker image parameters ## Please, note that this will override the image parameters, including dependencies, configured to use the global value ## Current available global Docker image parameters: imageRegistry and imagePullSecrets ## # global: # imageRegistry: myRegistryName # imagePullSecrets: # - myRegistryKeySecretName # storageClass: myStorageClass ## Bitnami JasperReports image version ## ref: https://hub.docker.com/r/bitnami/dokuwiki/tags/ ## image: registry: docker.io repository: bitnami/jasperreports tag: 7.2.0-debian-10-r24 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## String to partially override jasperreports.fullname template (will maintain the release name) ## # nameOverride: ## String to fully override jasperreports.fullname template ## # fullnameOverride: ## User of the application ## ref: https://github.com/bitnami/bitnami-docker-jasperreports#configuration ## jasperreportsUsername: user ## Application password ## Defaults to a random 10-character alphanumeric string if not set ## ref: https://github.com/bitnami/bitnami-docker-jasperreports#configuration ## # jasperreportsPassword: # ## Application mail ## ref: https://github.com/bitnami/bitnami-docker-jasperreports#configuration ## jasperreportsEmail: user@example.com ## Set to `yes` to allow the container to be started with blank passwords ## ref: https://github.com/bitnami/bitnami-docker-jasperreports#environment-variables allowEmptyPassword: "yes" ## ## External database configuration ## externalDatabase: ## Database host host: ## Database host port: 3306 ## Database user user: 
bn_jasperreports ## Database password password: ## Database name database: bitnami_jasperreports ## SMTP mail delivery configuration ## ref: https://github.com/bitnami/bitnami-docker-jasperreports#smtp-configuration ## # smtpHost: # smtpPort: # smtpEmail: # smtpUser: # smtpPassword: # smtpProtocol: ## ## MariaDB chart configuration ## ## https://github.com/helm/charts/blob/master/stable/mariadb/values.yaml ## mariadb: ## Whether to deploy a mariadb server to satisfy the applications database requirements. To use an external database set this to false and configure the externalDatabase parameters enabled: true ## Disable MariaDB replication replication: enabled: false ## Create a database and a database user ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run ## db: name: bitnami_jasperreports user: bn_jasperreports ## If the password is not specified, mariadb will generates a random password ## # password: ## MariaDB admin password ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run ## # rootUser: # password: ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## master: persistence: enabled: true ## mariadb data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. 
(gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi ## Kubernetes configuration ## For minikube, set this to NodePort, elsewhere use LoadBalancer ## service: type: LoadBalancer # HTTP Port port: 80 ## ## nodePorts: ## http: <to set explicitly, choose port between 30000-32767> ## https: <to set explicitly, choose port between 30000-32767> nodePorts: http: "" ## Enable client source IP preservation ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip ## externalTrafficPolicy: Cluster ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## persistence: enabled: true ## database data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: requests: memory: 512Mi cpu: 300m ## Configure the ingress resource that allows you to access the ## JasperReports installation. 
Set up the URL ## ref: http://kubernetes.io/docs/user-guide/ingress/ ## ingress: ## Set to true to enable ingress record generation enabled: false ## Set this to true in order to add the corresponding annotations for cert-manager certManager: false ## Ingress annotations done as key:value pairs ## For a full list of possible ingress annotations, please see ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md ## ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set annotations: # kubernetes.io/ingress.class: nginx ## The list of hostnames to be covered with this ingress record. ## Most likely this will be just one host, but in the event more hosts are needed, this is an array hosts: - name: jasperreports.local path: / ## Set this to true in order to enable TLS on the ingress record tls: false ## Optionally specify the TLS hosts for the ingress record ## Useful when the Ingress controller supports www-redirection ## If not specified, the above host name will be used # tlsHosts: # - www.jasperreports.local # - jasperreports.local ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS tlsSecret: jasperreports.local-tls secrets: ## If you're providing your own certificates, please use this to add the certificates as secrets ## key and certificate should start with -----BEGIN CERTIFICATE----- or ## -----BEGIN RSA PRIVATE KEY----- ## ## name should line up with a tlsSecret set further up ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set ## ## It is also possible to create and manage the certificates outside of this helm chart ## Please see README.md for more information # - name: jasperreports.local-tls # key: # certificate: ## Affinity for pod assignment ## Ref: 
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## affinity: {}
dex
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"dex.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubern...
# Default values for dex # This is a YAML-formatted file. # Declare name/value pairs to be passed into your templates. # name: value image: quay.io/dexidp/dex imageTag: "v2.24.0" imagePullPolicy: "IfNotPresent" imagePullSecrets: {} inMiniKube: false nodeSelector: {} podLabels: {} podAnnotations: {} priorityClassName: "" initContainers: [] tolerations: [] # - key: CriticalAddonsOnly # operator: Exists # - key: foo # operator: Equal # value: bar # effect: NoSchedule securityContext: # Rejecting containers trying to run with root privileges # runAsNonRoot: true # Preventing privilege escalation to root privileges # allowPrivilegeEscalation: false # Set the user ID used to run the container # runAsUser: 1001 # Set the primary group ID used to run all processes within any container of the pod # runAsGroup: 1001 # Set the group ID associated with the container # fsGroup: 1001 replicas: 1 # resources: # limits: # cpu: 100m # memory: 50Mi # requests: # cpu: 100m # memory: 50Mi # grpc support grpc: true # https termination by dex itself https: false # expose prometheus metrics ? telemetry: false ports: web: containerPort: 5556 # for service.type: NodePort nodePort: 32000 servicePort: 32000 # Relevant only when grpc support is enabled grpc: containerPort: 5000 # for service.type: NodePort nodePort: 35000 servicePort: 35000 telemetry: containerPort: 5558 # for service.type: NodePort nodePort: 37000 servicePort: 37000 livenessProbe: enabled: true initialDelaySeconds: 1 failureThreshold: 1 httpPath: "/healthz" periodSeconds: 10 timeoutSeconds: 1 readinessProbe: enabled: true initialDelaySeconds: 1 failureThreshold: 1 httpPath: "/healthz" periodSeconds: 10 timeoutSeconds: 1 service: type: ClusterIP # Override IP for the Service Type: LoadBalancer. # This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. 
# loadBalancerIP: 127.0.0.1 annotations: {} ingress: enabled: false annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" path: / hosts: - dex.example.com tls: [] # - secretName: dex-example-tls # hosts: # - dex.example.com extraVolumes: [] extraVolumeMounts: [] certs: securityContext: enabled: true runAsUser: 65534 fsGroup: 65534 image: gcr.io/google_containers/kubernetes-dashboard-init-amd64 imageTag: "v1.0.0" imagePullPolicy: "IfNotPresent" # Section below is relevant only when https termination is enabled web: create: true activeDeadlineSeconds: 300 caDays: 10000 certDays: 10000 altNames: - dex.example.com altIPs: {} secret: tlsName: dex-web-server-tls caName: dex-web-server-ca pod: annotations: {} nodeSelector: {} tolerations: [] # - key: CriticalAddonsOnly # operator: Exists # - key: foo # operator: Equal # value: bar # effect: NoSchedule affinity: {} # podAntiAffinity: # preferredDuringSchedulingIgnoredDuringExecution: # - weight: 5 # podAffinityTerm: # topologyKey: "kubernetes.io/hostname" # labelSelector: # matchLabels: # app: {{ template "dex.name" . }} # release: "{{ .Release.Name }}" # Section below is relevant only when grpc support is enabled grpc: create: true activeDeadlineSeconds: 300 altNames: - dex.example.com altIPs: {} secret: serverTlsName: dex-grpc-server-tls clientTlsName: dex-grpc-client-tls caName: dex-grpc-ca pod: annotations: {} nodeSelector: {} tolerations: [] # - key: CriticalAddonsOnly # operator: Exists # - key: foo # operator: Equal # value: bar # effect: NoSchedule affinity: {} # podAntiAffinity: # preferredDuringSchedulingIgnoredDuringExecution: # - weight: 5 # podAffinityTerm: # topologyKey: "kubernetes.io/hostname" # labelSelector: # matchLabels: # app: {{ template "dex.name" . 
}} # release: "{{ .Release.Name }}" env: [] rbac: # Specifies whether RBAC resources should be created create: true crd: # Specifies whether dex's CRDs are already present (if not cluster role and cluster role binding will be created # to enable dex to create them). Depends on rbac.create present: false serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: affinity: {} # podAntiAffinity: # preferredDuringSchedulingIgnoredDuringExecution: # - weight: 5 # podAffinityTerm: # topologyKey: "kubernetes.io/hostname" # labelSelector: # matchLabels: # app: {{ template "dex.name" . }} # release: "{{ .Release.Name }}" podDisruptionBudget: {} # maxUnavailable: 1 config: issuer: http://dex.example.com:8080 storage: type: kubernetes config: inCluster: true logger: level: debug web: # port is taken from ports section above address: 0.0.0.0 tlsCert: /etc/dex/tls/https/server/tls.crt tlsKey: /etc/dex/tls/https/server/tls.key allowedOrigins: [] # Section below is relevant only when grpc support is enabled grpc: # port is taken from ports section above address: 127.0.0.1 tlsCert: /etc/dex/tls/grpc/server/tls.crt tlsKey: /etc/dex/tls/grpc/server/tls.key tlsClientCA: /etc/dex/tls/grpc/ca/tls.crt connectors: [] # - type: github # id: github # name: GitHub # config: # clientID: xxxxxxxxxxxxxxx # clientSecret: yyyyyyyyyyyyyyyyyyyyy # redirectURI: https://dex.minikube.local:5556/callback # org: kubernetes oauth2: alwaysShowLoginScreen: false skipApprovalScreen: true # expiry: # signingKeys: "6h" # idTokens: "24h" # staticClients: # - id: example-app # redirectURIs: # - 'http://192.168.42.219:31850/oauth2/callback' # name: 'Example App' # secret: ZXhhbXBsZS1hcHAtc2VjcmV0 # enablePasswordDB: true # staticPasswords: # - email: "admin@example.com" # # bcrypt hash of the string "password" # hash: 
"$2a$10$2b2cU8CPhOTaGrs1HRQuAueS7JTT5ZHsHSzYiFPm1leZck7Mc8T4W" # username: "admin" # userID: "08a8684b-db88-4b73-90a9-3cd1661f5466" # frontend: # logoURL: https://example.com/yourlogo.png
hlf-ord
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"hlf-ord.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Ku...
## Default values for hlf-ord. ## This is a YAML-formatted file. ## Declare variables to be passed into your templates. image: repository: hyperledger/fabric-orderer tag: 1.4.3 pullPolicy: IfNotPresent service: # Cluster IP or LoadBalancer type: ClusterIP port: 7050 portMetrics: 9443 ingress: enabled: false annotations: {} # kubernetes.io/ingress.class: nginx # nginx.ingress.kubernetes.io/ssl-redirect: "true" # nginx.ingress.kubernetes.io/backend-protocol: "GRPC" # certmanager.k8s.io/cluster-issuer: "letsencrypt-staging" path: / hosts: - hlf-ord.local tls: [] # - secretName: hlf-ord-tls # hosts: # - hlf-ord.local persistence: enabled: true annotations: {} ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## storageClass: "" accessMode: ReadWriteOnce size: 1Gi # existingClaim: "" ################################## ## Orderer configuration options # ################################## ord: ## Type of Orderer, `solo` or `kafka` type: solo ## MSP ID of the Orderer mspID: OrdererMSP # TLS tls: server: enabled: "false" client: enabled: "false" metrics: provider: "disabled" statsd: network: "udp" address: "127.0.0.1:8125" writeInterval: "30s" prefix: "" secrets: ## These secrets should contain the Orderer crypto materials and credentials ord: {} ## Credentials, saved under keys 'CA_USERNAME' and 'CA_PASSWORD' # cred: hlf--ord1-cred ## Certificate, saved under key 'cert.pem' # cert: hlf--ord1-idcert ## Key, saved under 'key.pem' # key: hlf--ord1-idkey ## CA Cert, saved under 'cacert.pem' # caCert: hlf--ord1-cacert ## Intermediate CA Cert (optional), saved under 'intermediatecacert.pem' # intCaCert: hlf--ord1-caintcert ## TLS secret, saved under keys 'tls.crt' and 'tls.key' (to conform with K8S nomenclature) # tls: hlf--ord1-tls ## TLS 
root CA certificate saved under key 'cert.pem' # tlsRootCert: hlf--ord-tlsrootcert ## TLS client root CA certificates saved under any names (as there may be multiple) # tlsClientRootCerts: hlf--peer-tlsrootcert ## This should contain "genesis" block derived from a configtx.yaml ## configtxgen -profile OrdererGenesis -outputBlock genesis.block # genesis: hlf--genesis ## This should contain the Certificate of the Orderer Organisation admin ## This is necessary to successfully run the orderer # adminCert: hlf--ord-admincert resources: {} ## We usually recommend not to specify default resources and to leave this as a conscious ## choice for the user. This also increases chances charts run on environments with little ## resources, such as Minikube. If you do want to specify resources, uncomment the following ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi nodeSelector: {} tolerations: [] affinity: {} ## Suggested antiAffinity, as each Orderer should be on a separate Node for resilience # podAntiAffinity: # requiredDuringSchedulingIgnoredDuringExecution: # - topologyKey: "kubernetes.io/hostname" # labelSelector: # matchLabels: # app: hlf-ord
namerd
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"namerd.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kub...
# Default values for namerd. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 3 namerd: image: repository: buoyantio/namerd:0.9.1 pullPolicy: IfNotPresent resources: limits: cpu: 500m memory: 512Mi requests: cpu: 0 memory: 512Mi kubectl: image: repository: buoyantio/kubectl:v1.4.0 pullPolicy: IfNotPresent resources: # limits: # cpu: 10m # memory: 32Mi requests: cpu: 0 memory: 32Mi service: type: ClusterIP syncPort: 4100 apiPort: 4180 adminPort: 9991
couchdb
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"couchdb.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Ku...
## clusterSize is the initial size of the CouchDB cluster. clusterSize: 3 ## If allowAdminParty is enabled the cluster will start up without any database ## administrator account; i.e., all users will be granted administrative ## access. Otherwise, the system will look for a Secret called ## <ReleaseName>-couchdb containing `adminUsername`, `adminPassword` and ## `cookieAuthSecret` keys. See the `createAdminSecret` flag. ## ref: https://kubernetes.io/docs/concepts/configuration/secret/ allowAdminParty: false ## If createAdminSecret is enabled a Secret called <ReleaseName>-couchdb will ## be created containing auto-generated credentials. Users who prefer to set ## these values themselves have a couple of options: ## ## 1) The `adminUsername`, `adminPassword`, and `cookieAuthSecret` can be ## defined directly in the chart's values. Note that all of a chart's values ## are currently stored in plaintext in a ConfigMap in the tiller namespace. ## ## 2) This flag can be disabled and a Secret with the required keys can be ## created ahead of time. createAdminSecret: true adminUsername: admin # adminPassword: this_is_not_secure # cookieAuthSecret: neither_is_this ## Use an alternate scheduler, e.g. "stork". ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ ## # schedulerName: ## The storage volume used by each Pod in the StatefulSet. If a ## persistentVolume is not enabled, the Pods will use `emptyDir` ephemeral ## local storage. Setting the storageClass attribute to "-" disables dynamic ## provisioning of Persistent Volumes; leaving it unset will invoke the default ## provisioner. 
persistentVolume: enabled: false accessModes: - ReadWriteOnce size: 10Gi # storageClass: "-" ## The CouchDB image image: repository: couchdb tag: 2.3.1 pullPolicy: IfNotPresent ## Experimental integration with Lucene-powered fulltext search searchImage: repository: kocolosk/couchdb-search tag: 0.1.2 pullPolicy: IfNotPresent ## Flip this to flag to include the Search container in each Pod enableSearch: false initImage: repository: busybox tag: latest pullPolicy: Always ## CouchDB is happy to spin up cluster nodes in parallel, but if you encounter ## problems you can try setting podManagementPolicy to the StatefulSet default ## `OrderedReady` podManagementPolicy: Parallel ## To better tolerate Node failures, we can prevent Kubernetes scheduler from ## assigning more than one Pod of CouchDB StatefulSet per Node using podAntiAffinity. affinity: # podAntiAffinity: # requiredDuringSchedulingIgnoredDuringExecution: # - labelSelector: # matchExpressions: # - key: "app" # operator: In # values: # - couchdb # topologyKey: "kubernetes.io/hostname" ## A StatefulSet requires a headless Service to establish the stable network ## identities of the Pods, and that Service is created automatically by this ## chart without any additional configuration. The Service block below refers ## to a second Service that governs how clients connect to the CouchDB cluster. service: # annotations: enabled: true type: ClusterIP externalPort: 5984 ## An Ingress resource can provide name-based virtual hosting and TLS ## termination among other things for CouchDB deployments which are accessed ## from outside the Kubernetes cluster. ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ ingress: enabled: false hosts: - chart-example.local annotations: # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" tls: # Secrets must be manually created in the namespace. 
# - secretName: chart-example-tls # hosts: # - chart-example.local ## Optional resource requests and limits for the CouchDB container ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ resources: {} # requests: # cpu: 100m # memory: 128Mi # limits: # cpu: 56 # memory: 256Gi ## erlangFlags is a map that is passed to the Erlang VM as flags using the ## ERL_FLAGS env. `name` and `setcookie` flags are minimally required to ## establish connectivity between cluster nodes. ## ref: http://erlang.org/doc/man/erl.html#init_flags erlangFlags: name: couchdb setcookie: monster ## couchdbConfig will override default CouchDB configuration settings. ## The contents of this map are reformatted into a .ini file laid down ## by a ConfigMap object. ## ref: http://docs.couchdb.org/en/latest/config/index.html couchdbConfig: # cluster: # q: 8 # Create 8 shards for each database chttpd: bind_address: any # chttpd.require_valid_user disables all the anonymous requests to the port # 5984 when is set to true. require_valid_user: false # Kubernetes local cluster domain. # This is used to generate FQDNs for peers when joining the CouchDB cluster. dns: clusterDomainSuffix: cluster.local
docker-registry
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"docker-registry.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because...
# Default values for docker-registry. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 updateStrategy: # type: RollingUpdate # rollingUpdate: # maxSurge: 1 # maxUnavailable: 0 podAnnotations: {} podLabels: {} image: repository: registry tag: 2.7.1 pullPolicy: IfNotPresent # imagePullSecrets: # - name: docker service: name: registry type: ClusterIP # clusterIP: port: 5000 # nodePort: # loadBalancerIP: # loadBalancerSourceRanges: annotations: {} # foo.io/bar: "true" ingress: enabled: false path: / # Used to create an Ingress record. hosts: - chart-example.local annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" labels: {} tls: # Secrets must be manually created in the namespace. # - secretName: chart-example-tls # hosts: # - chart-example.local resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
# limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi persistence: accessMode: 'ReadWriteOnce' enabled: false size: 10Gi # storageClass: '-' # set the type of filesystem to use: filesystem, s3 storage: filesystem # Set this to name of secret for tls certs # tlsSecretName: registry.docker.example.com secrets: haSharedSecret: "" htpasswd: "" # Secrets for Azure # azure: # accountName: "" # accountKey: "" # container: "" # Secrets for S3 access and secret keys # s3: # accessKey: "" # secretKey: "" # Secrets for Swift username and password # swift: # username: "" # password: "" # Options for s3 storage type: # s3: # region: us-east-1 # regionEndpoint: s3.us-east-1.amazonaws.com # bucket: my-bucket # encrypt: false # secure: true # Options for swift storage type: # swift: # authurl: http://swift.example.com/ # container: my-container configData: version: 0.1 log: fields: service: registry storage: cache: blobdescriptor: inmemory http: addr: :5000 headers: X-Content-Type-Options: [nosniff] health: storagedriver: enabled: true interval: 10s threshold: 3 securityContext: enabled: true runAsUser: 1000 fsGroup: 1000 priorityClassName: "" podDisruptionBudget: {} # maxUnavailable: 1 # minAvailable: 2 nodeSelector: {} affinity: {} tolerations: [] extraVolumeMounts: [] ## Additional volumeMounts to the registry container. # - mountPath: /secret-data # name: cloudfront-pem-secret # readOnly: true extraVolumes: [] ## Additional volumes to the pod. # - name: cloudfront-pem-secret # secret: # secretName: cloudfront-credentials # items: # - key: cloudfront.pem # path: cloudfront.pem # mode: 511
distributed-tensorflow
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"distributed-tensorflow.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars ...
# Default values for distributed-tensorflow. # This is a YAML-formatted file. # Declare variables to be passed into your templates. worker: number: 2 podManagementPolicy: Parallel image: repository: dysproz/distributed-tf tag: 1.7.0 pullPolicy: IfNotPresent port: 9000 ps: number: 2 podManagementPolicy: Parallel image: repository: dysproz/distributed-tf tag: 1.7.0 pullPolicy: IfNotPresent port: 8000 # optimize for training hyperparams: batchsize: 20 learningrate: 0.001 trainsteps: 0
grafana
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"grafana.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Ku...
rbac: create: true pspEnabled: true pspUseAppArmor: true namespaced: false extraRoleRules: [] # - apiGroups: [] # resources: [] # verbs: [] extraClusterRoleRules: [] # - apiGroups: [] # resources: [] # verbs: [] serviceAccount: create: true name: nameTest: # annotations: replicas: 1 ## See `kubectl explain poddisruptionbudget.spec` for more ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ podDisruptionBudget: {} # minAvailable: 1 # maxUnavailable: 1 ## See `kubectl explain deployment.spec.strategy` for more ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy deploymentStrategy: type: RollingUpdate readinessProbe: httpGet: path: /api/health port: 3000 livenessProbe: httpGet: path: /api/health port: 3000 initialDelaySeconds: 60 timeoutSeconds: 30 failureThreshold: 10 ## Use an alternate scheduler, e.g. "stork". ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ ## # schedulerName: "default-scheduler" image: repository: grafana/grafana tag: 7.1.1 sha: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. 
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistrKeySecretName testFramework: enabled: true image: "bats/bats" tag: "v1.1.0" imagePullPolicy: IfNotPresent securityContext: {} securityContext: runAsUser: 472 runAsGroup: 472 fsGroup: 472 extraConfigmapMounts: [] # - name: certs-configmap # mountPath: /etc/grafana/ssl/ # subPath: certificates.crt # (optional) # configMap: certs-configmap # readOnly: true extraEmptyDirMounts: [] # - name: provisioning-notifiers # mountPath: /etc/grafana/provisioning/notifiers ## Assign a PriorityClassName to pods if set # priorityClassName: downloadDashboardsImage: repository: curlimages/curl tag: 7.70.0 sha: "" pullPolicy: IfNotPresent downloadDashboards: env: {} resources: {} ## Pod Annotations # podAnnotations: {} ## Pod Labels # podLabels: {} podPortName: grafana ## Deployment annotations # annotations: {} ## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). ## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. ## ref: http://kubernetes.io/docs/user-guide/services/ ## service: type: ClusterIP port: 80 targetPort: 3000 # targetPort: 4181 To be used with a proxy extraContainer annotations: {} labels: {} portName: service extraExposePorts: [] # - name: keycloak # port: 8080 # targetPort: 8080 # type: ClusterIP # overrides pod.spec.hostAliases in the grafana deployment's pods hostAliases: [] # - ip: "1.2.3.4" # hostnames: # - "my.host.com" ingress: enabled: false # Values can be templated annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" labels: {} path: / hosts: - chart-example.local ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
extraPaths: [] # - path: /* # backend: # serviceName: ssl-redirect # servicePort: use-annotation tls: [] # - secretName: chart-example-tls # hosts: # - chart-example.local resources: {} # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi ## Node labels for pod assignment ## ref: https://kubernetes.io/docs/user-guide/node-selection/ # nodeSelector: {} ## Tolerations for pod assignment ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ ## tolerations: [] ## Affinity for pod assignment ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## affinity: {} extraInitContainers: [] ## Enable an Specify container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod extraContainers: | # - name: proxy # image: quay.io/gambol99/keycloak-proxy:latest # args: # - -provider=github # - -client-id= # - -client-secret= # - -github-org=<ORG_NAME> # - -email-domain=* # - -cookie-secret= # - -http-address=http://0.0.0.0:4181 # - -upstream-url=http://127.0.0.1:3000 # ports: # - name: proxy-web # containerPort: 4181 ## Volumes that can be used in init containers that will not be mounted to deployment pods extraContainerVolumes: [] # - name: volume-from-secret # secret: # secretName: secret-to-mount # - name: empty-dir-volume # emptyDir: {} ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## persistence: type: pvc enabled: false # storageClassName: default accessModes: - ReadWriteOnce size: 10Gi # annotations: {} finalizers: - kubernetes.io/pvc-protection # subPath: "" # existingClaim: initChownData: ## If false, data ownership will not be reset at startup ## This allows the prometheus-server to be run with an arbitrary user ## enabled: true ## initChownData container image ## image: repository: busybox tag: "1.31.1" sha: "" pullPolicy: IfNotPresent ## initChownData resource 
requests and limits ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: {} # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi # Administrator credentials when not using an existing secret (see below) adminUser: admin # adminPassword: strongpassword # Use an existing secret for the admin user. admin: existingSecret: "" userKey: admin-user passwordKey: admin-password ## Define command to be executed at startup by grafana container ## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/) ## Default is "run.sh" as defined in grafana's Dockerfile # command: # - "sh" # - "/run.sh" ## Use an alternate scheduler, e.g. "stork". ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ ## # schedulerName: ## Extra environment variables that will be pass onto deployment pods env: {} ## "valueFrom" environment variable references that will be added to deployment pods ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core ## Renders in container spec as: ## env: ## ... ## - name: <key> ## valueFrom: ## <value rendered as YAML> envValueFrom: {} ## The name of a secret in the same kubernetes namespace which contain values to be added to the environment ## This can be useful for auth tokens, etc. Value is templated. envFromSecret: "" ## Sensible environment variables that will be rendered as new secret object ## This can be useful for auth tokens, etc envRenderSecret: {} ## Additional grafana server secret mounts # Defines additional mounts with secrets. Secrets must be manually created in the namespace. extraSecretMounts: [] # - name: secret-files # mountPath: /etc/secrets # secretName: grafana-secret-files # readOnly: true # subPath: "" ## Additional grafana server volume mounts # Defines additional volume mounts. 
extraVolumeMounts: [] # - name: extra-volume # mountPath: /mnt/volume # readOnly: true # existingClaim: volume-claim ## Pass the plugins you want installed as a list. ## plugins: [] # - digrich-bubblechart-panel # - grafana-clock-panel ## Configure grafana datasources ## ref: http://docs.grafana.org/administration/provisioning/#datasources ## datasources: {} # datasources.yaml: # apiVersion: 1 # datasources: # - name: Prometheus # type: prometheus # url: http://prometheus-prometheus-server # access: proxy # isDefault: true ## Configure notifiers ## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels ## notifiers: {} # notifiers.yaml: # notifiers: # - name: email-notifier # type: email # uid: email1 # # either: # org_id: 1 # # or # org_name: Main Org. # is_default: true # settings: # addresses: an_email_address@example.com # delete_notifiers: ## Configure grafana dashboard providers ## ref: http://docs.grafana.org/administration/provisioning/#dashboards ## ## `path` must be /var/lib/grafana/dashboards/<provider_name> ## dashboardProviders: {} # dashboardproviders.yaml: # apiVersion: 1 # providers: # - name: 'default' # orgId: 1 # folder: '' # type: file # disableDeletion: false # editable: true # options: # path: /var/lib/grafana/dashboards/default ## Configure grafana dashboard to import ## NOTE: To use dashboards you must also enable/configure dashboardProviders ## ref: https://grafana.com/dashboards ## ## dashboards per provider, use provider name as key. ## dashboards: {} # default: # some-dashboard: # json: | # $RAW_JSON # custom-dashboard: # file: dashboards/custom-dashboard.json # prometheus-stats: # gnetId: 2 # revision: 2 # datasource: Prometheus # local-dashboard: # url: https://example.com/repository/test.json # local-dashboard-base64: # url: https://example.com/repository/test-b64.json # b64content: true ## Reference to external ConfigMap per provider. Use provider name as key and ConfiMap name as value. 
## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. ## ConfigMap data example: ## ## data: ## example-dashboard.json: | ## RAW_JSON ## dashboardsConfigMaps: {} # default: "" ## Grafana's primary configuration ## NOTE: values in map will be converted to ini format ## ref: http://docs.grafana.org/installation/configuration/ ## grafana.ini: paths: data: /var/lib/grafana/data logs: /var/log/grafana plugins: /var/lib/grafana/plugins provisioning: /etc/grafana/provisioning analytics: check_for_updates: true log: mode: console grafana_net: url: https://grafana.net ## grafana Authentication can be enabled with the following values on grafana.ini # server: # The full public facing url you use in browser, used for redirects and emails # root_url: # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana # auth.github: # enabled: false # allow_sign_up: false # scopes: user:email,read:org # auth_url: https://github.com/login/oauth/authorize # token_url: https://github.com/login/oauth/access_token # api_url: https://github.com/user # team_ids: # allowed_organizations: # client_id: # client_secret: ## LDAP Authentication can be enabled with the following values on grafana.ini ## NOTE: Grafana will fail to start if the value for ldap.toml is invalid # auth.ldap: # enabled: true # allow_sign_up: true # config_file: /etc/grafana/ldap.toml ## Grafana's LDAP configuration ## Templated by the template in _helpers.tpl ## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled ## ref: http://docs.grafana.org/installation/configuration/#auth-ldap ## ref: http://docs.grafana.org/installation/ldap/#configuration ldap: enabled: false # `existingSecret` is a reference to an existing secret containing the ldap configuration # for Grafana in a key `ldap-toml`. 
existingSecret: "" # `config` is the content of `ldap.toml` that will be stored in the created secret config: "" # config: |- # verbose_logging = true # [[servers]] # host = "my-ldap-server" # port = 636 # use_ssl = true # start_tls = false # ssl_skip_verify = false # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" ## Grafana's SMTP configuration ## NOTE: To enable, grafana.ini must be configured with smtp.enabled ## ref: http://docs.grafana.org/installation/configuration/#smtp smtp: # `existingSecret` is a reference to an existing secret containing the smtp configuration # for Grafana. existingSecret: "" userKey: "user" passwordKey: "password" ## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders ## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards sidecar: image: repository: kiwigrid/k8s-sidecar tag: 0.1.151 sha: "" imagePullPolicy: IfNotPresent resources: {} # limits: # cpu: 100m # memory: 100Mi # requests: # cpu: 50m # memory: 50Mi # skipTlsVerify Set to true to skip tls verification for kube api calls # skipTlsVerify: true enableUniqueFilenames: false dashboards: enabled: false SCProvider: true # label that the configmaps with dashboards are marked with label: grafana_dashboard # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set) folder: /tmp/dashboards # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead defaultFolderName: null # If specified, the sidecar will search for dashboard config-maps inside this namespace. # Otherwise the namespace in which the sidecar is running will be used. 
# It's also possible to specify ALL to search in all namespaces searchNamespace: null # provider configuration that lets grafana manage the dashboards provider: # name of the provider, should be unique name: sidecarProvider # orgid as configured in grafana orgid: 1 # folder in which the dashboards should be imported in grafana folder: '' # type of the provider type: file # disableDelete to activate a import-only behaviour disableDelete: false # allow updating provisioned dashboards from the UI allowUiUpdates: false datasources: enabled: false # label that the configmaps with datasources are marked with label: grafana_datasource # If specified, the sidecar will search for datasource config-maps inside this namespace. # Otherwise the namespace in which the sidecar is running will be used. # It's also possible to specify ALL to search in all namespaces searchNamespace: null notifiers: enabled: false # label that the configmaps with notifiers are marked with label: grafana_notifier # If specified, the sidecar will search for notifier config-maps inside this namespace. # Otherwise the namespace in which the sidecar is running will be used. # It's also possible to specify ALL to search in all namespaces searchNamespace: null ## Override the deployment namespace ## namespaceOverride: ""
etcd-operator
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"etcd-operator.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because s...
# Default values for etcd-operator. # This is a YAML-formatted file. # Declare variables to be passed into your templates. global: ## Reference to one or more secrets to be used when pulling images ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## imagePullSecrets: [] # - name: "image-pull-secret" ## Install Default RBAC roles and bindings rbac: create: true apiVersion: v1 ## Service account name and whether to create it serviceAccount: create: true name: # Select what to deploy deployments: etcdOperator: true # one time deployment, delete once completed, # Ref: https://github.com/coreos/etcd-operator/blob/master/doc/user/walkthrough/backup-operator.md backupOperator: true # one time deployment, delete once completed # Ref: https://github.com/coreos/etcd-operator/blob/master/doc/user/walkthrough/restore-operator.md restoreOperator: true # creates custom resources, not all required, # you could use `helm template --values <values.yaml> --name release_name ... 
` # and create the resources yourself to deploy on your cluster later customResources: createEtcdClusterCRD: false createBackupCRD: false createRestoreCRD: false # etcdOperator etcdOperator: priorityClassName: "" name: etcd-operator replicaCount: 1 image: repository: quay.io/coreos/etcd-operator tag: v0.9.4 pullPolicy: Always resources: cpu: 100m memory: 128Mi ## Node labels for etcd-operator pod assignment ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ nodeSelector: {} ## additional command arguments go here; will be translated to `--key=value` form ## e.g., analytics: true commandArgs: {} tolerations: [] ## Configurable health checks against the /readyz endpoint that etcd-operator exposes readinessProbe: enabled: false initialDelaySeconds: 0 periodSeconds: 10 timeoutSeconds: 1 successThreshold: 1 failureThreshold: 3 livenessProbe: enabled: false initialDelaySeconds: 0 periodSeconds: 10 timeoutSeconds: 1 successThreshold: 1 failureThreshold: 3 # backup spec backupOperator: priorityClassName: "" name: etcd-backup-operator replicaCount: 1 image: repository: quay.io/coreos/etcd-operator tag: v0.9.4 pullPolicy: Always resources: cpu: 100m memory: 128Mi spec: storageType: S3 s3: s3Bucket: awsSecret: ## Node labels for etcd pod assignment ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ nodeSelector: {} ## additional command arguments go here; will be translated to `--key=value` form ## e.g., analytics: true commandArgs: {} securityContext: {} tolerations: [] # restore spec restoreOperator: priorityClassName: "" name: etcd-restore-operator replicaCount: 1 image: repository: quay.io/coreos/etcd-operator tag: v0.9.4 pullPolicy: Always port: 19999 resources: cpu: 100m memory: 128Mi spec: s3: # The format of "path" must be: "<s3-bucket-name>/<path-to-backup-file>" # e.g: "etcd-snapshot-bucket/v1/default/example-etcd-cluster/3.2.10_0000000000000001_etcd.backup" path: awsSecret: ## Node labels for etcd pod assignment ## Ref: 
https://kubernetes.io/docs/user-guide/node-selection/ nodeSelector: {} ## additional command arguments go here; will be translated to `--key=value` form ## e.g., analytics: true commandArgs: {} securityContext: {} tolerations: [] ## etcd-cluster specific values etcdCluster: name: etcd-cluster size: 3 version: 3.2.25 image: repository: quay.io/coreos/etcd tag: v3.2.25 pullPolicy: Always enableTLS: false # TLS configs tls: static: member: peerSecret: etcd-peer-tls serverSecret: etcd-server-tls operatorSecret: etcd-client-tls ## etcd cluster pod specific values ## Ref: https://github.com/coreos/etcd-operator/blob/master/doc/user/spec_examples.md#three-members-cluster-with-resource-requirement pod: busyboxImage: busybox:1.28.0-glibc ## Antiaffinity for etcd pod assignment ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity antiAffinity: false resources: limits: cpu: 100m memory: 128Mi requests: cpu: 100m memory: 128Mi ## Node labels for etcd pod assignment ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ nodeSelector: {} securityContext: {} tolerations: []
signalsciences
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"signalsciences.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because ...
# Default values for signalsciences. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 image: repository: signalsciences/sigsci-agent tag: 4.6.0 pullPolicy: IfNotPresent daemonset: {} ## Annotations to add to the DaemonSet's Pods # podAnnotations: # scheduler.alpha.kubernetes.io/tolerations: '[{"key": "example", "value": "foo"}]' ## Allow the DaemonSet to schedule on tainted nodes (requires Kubernetes >= 1.6) # tolerations: [] ## Allow the DaemonSet to schedule on selected nodes # Ref: https://kubernetes.io/docs/user-guide/node-selection/ # nodeSelector: {} ## Allow the DaemonSet to schedule ussing affinity rules # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity # affinity: {} ## Allow the DaemonSet to perform a rolling update on helm update ## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/ # updateStrategy: RollingUpdate signalsciences: ## You'll need to set this to your agent accessKeyId before the agent will run ## ref: https://docs.signalsciences.net/install-guides/#step-1-agent-installation ## # accessKeyId: ## Use existing Secret which stores accessKeyId instead of creating a new one # accessKeyIdExistingSecret: ## You'll need to set this to your agent secretAccessKey before the agent will run ## ref: https://docs.signalsciences.net/install-guides/#step-1-agent-installation # secretAccessKey: ## Use existing Secret which stores the secretAccessKey instead of creating a new one # secretAccessKeyExistingSecret: ## For added security, it is recommended that the sigsci-agent container be executed ## with the root filesystem mounted read only. 
The agent, however, still needs to write ## some temporary files such as the socket file for RPC communication and some periodically ## updated files such as GeoIP data agentTempVolume: /sigsci/tmp # If required (default is /sigsci/tmp/sigsci.sock for the container) # socketAddress: /sigsci/tmp/sigsci.sock resources: requests: cpu: 200m memory: 256Mi limits: cpu: 200m memory: 256Mi
metrics-server
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"metrics-server.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because ...
rbac: # Specifies whether RBAC resources should be created create: true pspEnabled: false serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: apiService: # Specifies if the v1beta1.metrics.k8s.io API service should be created. # # You typically want this enabled! If you disable API service creation you have to # manage it outside of this chart for e.g horizontal pod autoscaling to # work with this release. create: true hostNetwork: # Specifies if metrics-server should be started in hostNetwork mode. # # You would require this enabled if you use alternate overlay networking for pods and # API server unable to communicate with metrics-server. As an example, this is required # if you use Weave network on EKS enabled: false image: repository: k8s.gcr.io/metrics-server-amd64 tag: v0.3.6 pullPolicy: IfNotPresent imagePullSecrets: [] # - registrySecretName args: [] # enable this if you have self-signed certificates, see: https://github.com/kubernetes-incubator/metrics-server # - --kubelet-insecure-tls resources: {} nodeSelector: {} tolerations: [] affinity: {} replicas: 1 extraContainers: [] podLabels: {} podAnnotations: {} # The following annotations guarantee scheduling for critical add-on pods. 
# See more at: https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ # scheduler.alpha.kubernetes.io/critical-pod: '' ## Set a pod priorityClassName # priorityClassName: system-node-critical extraVolumeMounts: [] # - name: secrets # mountPath: /etc/kubernetes/secrets # readOnly: true extraVolumes: [] # - name: secrets # secret: # secretName: kube-apiserver livenessProbe: httpGet: path: /healthz port: https scheme: HTTPS initialDelaySeconds: 20 readinessProbe: httpGet: path: /healthz port: https scheme: HTTPS initialDelaySeconds: 20 securityContext: allowPrivilegeEscalation: false capabilities: drop: ["all"] readOnlyRootFilesystem: true runAsGroup: 10001 runAsNonRoot: true runAsUser: 10001 service: annotations: {} labels: {} # Add these labels to have metrics-server show up in `kubectl cluster-info` # kubernetes.io/cluster-service: "true" # kubernetes.io/name: "Metrics-server" port: 443 type: ClusterIP podDisruptionBudget: # https://kubernetes.io/docs/tasks/run-application/configure-pdb/ enabled: false minAvailable: maxUnavailable: testImage: repository: busybox tag: latest pullPolicy: IfNotPresent
hackmd
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"hackmd.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kub...
# Default values for hackmd. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 deploymentStrategy: RollingUpdate image: repository: hackmdio/hackmd tag: 1.3.0-alpine pullPolicy: IfNotPresent service: name: hackmd type: ClusterIP port: 3000 ingress: enabled: false annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" path: / hosts: - chart-example.local tls: [] # - secretName: chart-example-tls # hosts: # - chart-example.local resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi persistence: enabled: true ## hackmd data Persistent Volume access modes ## Must match those of existing PV or dynamic provisioner ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## accessModes: - ReadWriteOnce annotations: {} existingClaim: "" size: 2Gi ## A manually managed Persistent Volume and Claim ## Requires persistence.enabled: true ## If defined, PVC must be created manually before volume will be bound # existingClaim: ## database data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" podAnnotations: {} extraVars: [] nodeSelector: {} tolerations: [] affinity: {} ## Configuration values for the postgresql dependency. 
## ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md ## postgresql: install: true image: tag: "9.6" postgresqlUsername: "hackmd" postgresqlDatabase: "hackmd" ## Default: random 10 character string # postgresqlPassword:
spartakus
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes...
extraArgs: {} ## Container image ## image: repository: k8s.gcr.io/spartakus-amd64 tag: v1.0.0 pullPolicy: IfNotPresent ## Node labels for pod assignment ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ ## nodeSelector: {} ## Annotations to be added to pods ## podAnnotations: {} replicaCount: 1 ## Resource requests and limits ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: {} # limits: # cpu: 2m # memory: 8Mi # requests: # cpu: 2m # memory: 8Mi ## A version 4 UUID to uniquely identify the cluster ## If not provided, Helm will generate automatically at install-time. ## uuid: ""
orangehrm
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"orangehrm.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some ...
## Global Docker image parameters ## Please, note that this will override the image parameters, including dependencies, configured to use the global value ## Current available global Docker image parameters: imageRegistry and imagePullSecrets ## # global: # imageRegistry: myRegistryName # imagePullSecrets: # - myRegistryKeySecretName # storageClass: myStorageClass ## Bitnami OrangeHRM image version ## ref: https://hub.docker.com/r/bitnami/orangehrm/tags/ ## image: registry: docker.io repository: bitnami/orangehrm tag: 4.3.4-0-debian-10-r26 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## String to partially override orangehrm.fullname template (will maintain the release name) ## # nameOverride: ## String to fully override orangehrm.fullname template ## # fullnameOverride: ## User of the application ## ref: https://github.com/bitnami/bitnami-docker-orangehrm#configuration ## orangehrmUsername: admin ## Application password ## Defaults to a random 10-character alphanumeric string if not set ## ref: https://github.com/bitnami/bitnami-docker-orangehrm#configuration ## # orangehrmPassword: ## Set to `yes` to allow the container to be started with blank passwords ## ref: https://github.com/bitnami/bitnami-docker-orangehrm#environment-variables allowEmptyPassword: "yes" ## ## External database configuration ## externalDatabase: ## Database host host: ## Database host port: 3306 ## Database user user: bn_orangehrm ## Database password password: ## Database name database: bitnami_orangehrm ## SMTP mail delivery configuration ## ref: 
https://github.com/bitnami/bitnami-docker-orangehrm/#smtp-configuration # smtpHost: # smtpPort: # smtpUser: # smtpPassword: # smtpProtocol: ## ## MariaDB chart configuration ## ## https://github.com/helm/charts/blob/master/stable/mariadb/values.yaml ## mariadb: ## Whether to deploy a mariadb server to satisfy the applications database requirements. To use an external database set this to false and configure the externalDatabase parameters enabled: true ## Disable MariaDB replication replication: enabled: false ## Create a database and a database user ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run ## db: name: bitnami_orangehrm user: bn_orangehrm ## If the password is not specified, mariadb will generates a random password ## # password: ## MariaDB admin password ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run ## # rootUser: # password: ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## master: persistence: enabled: true ## mariadb data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. 
(gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi ## Kubernetes configuration ## For minikube, set this to NodePort, elsewhere use LoadBalancer or ClusterIP ## service: type: LoadBalancer # HTTP Port port: 80 # HTTPS Port httpsPort: 443 ## ## nodePorts: ## http: <to set explicitly, choose port between 30000-32767> ## https: <to set explicitly, choose port between 30000-32767> nodePorts: http: "" https: "" ## Enable client source IP preservation ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip ## externalTrafficPolicy: Cluster ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## persistence: enabled: true orangehrm: ## orangehrm data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: requests: memory: 512Mi cpu: 300m ## Pod annotations ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ ## podAnnotations: {} ## Configure the ingress resource that allows you to access the ## OrangeHRM installation. 
Set up the URL ## ref: http://kubernetes.io/docs/user-guide/ingress/ ## ingress: ## Set to true to enable ingress record generation enabled: false ## Set this to true in order to add the corresponding annotations for cert-manager certManager: false ## Ingress annotations done as key:value pairs ## For a full list of possible ingress annotations, please see ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md ## ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set annotations: # kubernetes.io/ingress.class: nginx ## The list of hostnames to be covered with this ingress record. ## Most likely this will be just one host, but in the event more hosts are needed, this is an array hosts: - name: orangehrm.local path: / ## Set this to true in order to enable TLS on the ingress record tls: false ## Optionally specify the TLS hosts for the ingress record ## Useful when the Ingress controller supports www-redirection ## If not specified, the above host name will be used # tlsHosts: # - www.orangehrm.local # - orangehrm.local ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS tlsSecret: orangehrm.local-tls secrets: ## If you're providing your own certificates, please use this to add the certificates as secrets ## key and certificate should start with -----BEGIN CERTIFICATE----- or ## -----BEGIN RSA PRIVATE KEY----- ## ## name should line up with a tlsSecret set further up ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set ## ## It is also possible to create and manage the certificates outside of this helm chart ## Please see README.md for more information # - name: orangehrm.local-tls # key: # certificate: ## Affinity for pod assignment ## Ref: 
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## affinity: {} ## Prometheus Exporter / Metrics ## metrics: enabled: false image: registry: docker.io repository: bitnami/apache-exporter tag: 0.7.0-debian-10-r30 pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## Metrics exporter pod Annotation and Labels podAnnotations: prometheus.io/scrape: "true" prometheus.io/port: "9117" ## Metrics exporter resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## # resources: {}
postgresql
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"postgresql.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some...
## Global Docker image parameters ## Please, note that this will override the image parameters, including dependencies, configured to use the global value ## Current available global Docker image parameters: imageRegistry and imagePullSecrets ## global: postgresql: {} # imageRegistry: myRegistryName # imagePullSecrets: # - myRegistryKeySecretName # storageClass: myStorageClass ## Bitnami PostgreSQL image version ## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ ## image: registry: docker.io repository: bitnami/postgresql tag: 11.7.0-debian-10-r9 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## Set to true if you would like to see extra information on logs ## It turns BASH and NAMI debugging in minideb ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging debug: false ## String to partially override postgresql.fullname template (will maintain the release name) ## # nameOverride: ## String to fully override postgresql.fullname template ## # fullnameOverride: ## ## Init containers parameters: ## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup ## volumePermissions: enabled: false image: registry: docker.io repository: bitnami/minideb tag: buster ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: Always ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. 
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## Init container Security Context ## Note: the chown of the data folder is done to securityContext.runAsUser ## and not the below volumePermissions.securityContext.runAsUser ## When runAsUser is set to special value "auto", init container will try to chwon the ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false ## securityContext: runAsUser: 0 ## Use an alternate scheduler, e.g. "stork". ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ ## # schedulerName: ## Pod Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ ## securityContext: enabled: true fsGroup: 1001 runAsUser: 1001 ## Pod Service Account ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ serviceAccount: enabled: false ## Name of an already existing service account. Setting this value disables the automatic service account creation. # name: replication: enabled: false user: repl_user password: repl_password slaveReplicas: 1 ## Set synchronous commit mode: on, off, remote_apply, remote_write and local ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL synchronousCommit: "off" ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication ## NOTE: It cannot be > slaveReplicas numSynchronousReplicas: 0 ## Replication Cluster application name. 
Useful for defining multiple replication policies applicationName: my_application ## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) ## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) # postgresqlPostgresPassword: ## PostgreSQL user (has superuser privileges if username is `postgres`) ## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run postgresqlUsername: postgres ## PostgreSQL password ## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run ## # postgresqlPassword: ## PostgreSQL password using existing secret ## existingSecret: secret ## Mount PostgreSQL secret as a file instead of passing environment variable # usePasswordFile: false ## Create a database ## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run ## # postgresqlDatabase: ## PostgreSQL data dir ## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md ## postgresqlDataDir: /bitnami/postgresql/data ## An array to add extra environment variables ## For example: ## extraEnv: ## - name: FOO ## value: "bar" ## # extraEnv: extraEnv: [] ## Name of a ConfigMap containing extra env vars ## # extraEnvVarsCM: ## Specify extra initdb args ## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md ## # postgresqlInitdbArgs: ## Specify a custom location for the PostgreSQL transaction log ## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md ## # postgresqlInitdbWalDir: ## PostgreSQL configuration ## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
## {"sharedBuffers": "500MB"} ## Alternatively, you can put your postgresql.conf under the files/ directory ## ref: https://www.postgresql.org/docs/current/static/runtime-config.html ## # postgresqlConfiguration: ## PostgreSQL extended configuration ## As above, but _appended_ to the main configuration ## Alternatively, you can put your *.conf under the files/conf.d/ directory ## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf ## # postgresqlExtendedConf: ## PostgreSQL client authentication configuration ## Specify content for pg_hba.conf ## Default: do not create pg_hba.conf ## Alternatively, you can put your pg_hba.conf under the files/ directory # pgHbaConfiguration: |- # local all all trust # host all all localhost trust # host mydatabase mysuser 192.168.0.0/24 md5 ## ConfigMap with PostgreSQL configuration ## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration # configurationConfigMap: ## ConfigMap with PostgreSQL extended configuration # extendedConfConfigMap: ## initdb scripts ## Specify dictionary of scripts to be run at first boot ## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory ## # initdbScripts: # my_init_script.sh: | # #!/bin/sh # echo "Do something." ## ConfigMap with scripts to be run at first boot ## NOTE: This will override initdbScripts # initdbScriptsConfigMap: ## Secret with scripts to be run at first boot (in case it contains sensitive information) ## NOTE: This can work along initdbScripts or initdbScriptsConfigMap # initdbScriptsSecret: ## Specify the PostgreSQL username and password to execute the initdb scripts # initdbUser: # initdbPassword: ## Optional duration in seconds the pod needs to terminate gracefully. 
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods ## # terminationGracePeriodSeconds: 30 ## LDAP configuration ## ldap: enabled: false url: "" server: "" port: "" prefix: "" suffix: "" baseDN: "" bindDN: "" bind_password: search_attr: "" search_filter: "" scheme: "" tls: false ## PostgreSQL service configuration service: ## PosgresSQL service type type: ClusterIP # clusterIP: None port: 5432 ## Specify the nodePort value for the LoadBalancer and NodePort service types. ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport ## # nodePort: ## Provide any additional annotations which may be required. ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart annotations: {} ## Set the LoadBalancer service type to internal only. ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer ## # loadBalancerIP: ## Load Balancer sources ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service ## # loadBalancerSourceRanges: # - 10.10.10.0/24 ## Start master and slave(s) pod(s) without limitations on shm memory. ## By default docker and containerd (and possibly other container runtimes) ## limit `/dev/shm` to `64M` (see e.g. the ## [docker issue](https://github.com/docker-library/postgres/issues/416) and the ## [containerd issue](https://github.com/containerd/containerd/issues/3654), ## which could be not enough if PostgreSQL uses parallel workers heavily. ## shmVolume: ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove ## this limitation. ## enabled: true ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. 
## This option is ingored if `volumePermissions.enabled` is `false` ## chmod: enabled: true ## PostgreSQL data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## persistence: enabled: true ## A manually managed Persistent Volume and Claim ## If defined, PVC must be created manually before volume will be bound ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart ## # existingClaim: ## The path the volume will be mounted at, useful when using different ## PostgreSQL images. ## mountPath: /bitnami/postgresql ## The subdirectory of the volume to mount to, useful in dev environments ## and one PV for multiple services. ## subPath: "" # storageClass: "-" accessModes: - ReadWriteOnce size: 8Gi annotations: {} ## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies updateStrategy: type: RollingUpdate ## ## PostgreSQL Master parameters ## master: ## Node, affinity, tolerations, and priorityclass settings for pod assignment ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption nodeSelector: {} affinity: {} tolerations: [] labels: {} annotations: {} podLabels: {} podAnnotations: {} priorityClassName: "" extraInitContainers: | # - name: do-something # image: busybox # command: ['do', 'something'] ## Additional PostgreSQL Master 
Volume mounts ## extraVolumeMounts: [] ## Additional PostgreSQL Master Volumes ## extraVolumes: [] ## Add sidecars to the pod ## ## For example: ## sidecars: ## - name: your-image-name ## image: your-image ## imagePullPolicy: Always ## ports: ## - name: portname ## containerPort: 1234 sidecars: [] ## ## PostgreSQL Slave parameters ## slave: ## Node, affinity, tolerations, and priorityclass settings for pod assignment ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption nodeSelector: {} affinity: {} tolerations: [] labels: {} annotations: {} podLabels: {} podAnnotations: {} priorityClassName: "" extraInitContainers: | # - name: do-something # image: busybox # command: ['do', 'something'] ## Additional PostgreSQL Slave Volume mounts ## extraVolumeMounts: [] ## Additional PostgreSQL Slave Volumes ## extraVolumes: [] ## Add sidecars to the pod ## ## For example: ## sidecars: ## - name: your-image-name ## image: your-image ## imagePullPolicy: Always ## ports: ## - name: portname ## containerPort: 1234 sidecars: [] ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: requests: memory: 256Mi cpu: 250m networkPolicy: ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. ## enabled: false ## The Policy model to apply. When set to false, only pods with the correct ## client label will have network access to the port PostgreSQL is listening ## on. When true, PostgreSQL will accept connections from any source ## (with the correct destination port). 
## allowExternal: true ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace ## and that match other criteria, the ones that have the good label, can reach the DB. ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. ## # explicitNamespacesSelector: # matchLabels: # role: frontend # matchExpressions: # - {key: role, operator: In, values: [frontend]} ## Configure extra options for liveness and readiness probes ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) livenessProbe: enabled: true initialDelaySeconds: 30 periodSeconds: 10 timeoutSeconds: 5 failureThreshold: 6 successThreshold: 1 readinessProbe: enabled: true initialDelaySeconds: 5 periodSeconds: 10 timeoutSeconds: 5 failureThreshold: 6 successThreshold: 1 ## Configure metrics exporter ## metrics: enabled: false # resources: {} service: type: ClusterIP annotations: prometheus.io/scrape: "true" prometheus.io/port: "9187" loadBalancerIP: serviceMonitor: enabled: false additionalLabels: {} # namespace: monitoring # interval: 30s # scrapeTimeout: 10s ## Custom PrometheusRule to be defined ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions prometheusRule: enabled: false additionalLabels: {} namespace: "" rules: [] ## These are just examples rules, please adapt them to your needs. ## Make sure to constraint the rules to the current postgresql service. # - alert: HugeReplicationLag # expr: pg_replication_lag{service="{{ template "postgresql.fullname" . 
}}-metrics"} / 3600 > 1 # for: 1m # labels: # severity: critical # annotations: # description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). # summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). image: registry: docker.io repository: bitnami/postgres-exporter tag: 0.8.0-debian-10-r28 pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## Define additional custom metrics ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file # customMetrics: # pg_database: # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" # metrics: # - name: # usage: "LABEL" # description: "Name of the database" # - size_bytes: # usage: "GAUGE" # description: "Size of the database in bytes" ## Pod Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ ## securityContext: enabled: false runAsUser: 1001 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) ## Configure extra options for liveness and readiness probes livenessProbe: enabled: true initialDelaySeconds: 5 periodSeconds: 10 timeoutSeconds: 5 failureThreshold: 6 successThreshold: 1 readinessProbe: enabled: true initialDelaySeconds: 5 periodSeconds: 10 timeoutSeconds: 5 failureThreshold: 6 successThreshold: 1
inbucket
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"inbucket.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some K...
image: repository: jhillyerd/inbucket tag: release-2.0.0 pullPolicy: IfNotPresent service: annotations: {} clusterIP: "" externalIPs: [] loadBalancerIP: "" loadBalancerSourceRanges: [] type: ClusterIP port: http: 9000 smtp: 2500 pop3: 1100 nodePort: http: "" smtp: "" pop3: "" extraEnv: INBUCKET_LOGLEVEL: "info" INBUCKET_MAILBOXNAMING: "local" INBUCKET_SMTP_ADDR: "0.0.0.0:2500" INBUCKET_SMTP_DOMAIN: "inbucket" INBUCKET_SMTP_MAXRECIPIENTS: "200" INBUCKET_SMTP_MAXMESSAGEBYTES: "10240000" INBUCKET_SMTP_DEFAULTACCEPT: "true" INBUCKET_SMTP_REJECTDOMAINS: "" INBUCKET_SMTP_DEFAULTSTORE: "true" INBUCKET_SMTP_DISCARDDOMAINS: "" INBUCKET_SMTP_TIMEOUT: "300s" INBUCKET_POP3_ADDR: "0.0.0.0:1100" INBUCKET_POP3_DOMAIN: "inbucket" INBUCKET_POP3_TIMEOUT: "600s" INBUCKET_WEB_ADDR: "0.0.0.0:9000" INBUCKET_WEB_UIDIR: "ui" INBUCKET_WEB_GREETINGFILE: "ui/greeting.html" INBUCKET_WEB_TEMPLATECACHE: "true" INBUCKET_WEB_MAILBOXPROMPT: "@inbucket" INBUCKET_WEB_COOKIEAUTHKEY: "" INBUCKET_WEB_MONITORVISIBLE: "true" INBUCKET_WEB_MONITORHISTORY: "30" INBUCKET_STORAGE_TYPE: "memory" INBUCKET_STORAGE_PARAMS: "" INBUCKET_STORAGE_RETENTIONPERIOD: "24h" INBUCKET_STORAGE_RETENTIONSLEEP: "50ms" INBUCKET_STORAGE_MAILBOXMSGCAP: "500" ingress: enabled: false annotations: {} path: / hosts: - inbucket.example.com tls: [] # - hosts: # - inbucket.example.com # secretName: tls-inbucket podAnnotations: {} resources: {}
vsphere-cpi
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"cpi.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kube...
# Default values for vSphere CPI. # This is a YAML-formatted file. # vSohere CPI values are grouped by component global: config: enabled: false config: enabled: false vcenter: "vcenter.local" username: "user" password: "pass" datacenter: "dc" ## Specify if a Pod Security Policy for kube-state-metrics must be created ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ podSecurityPolicy: enabled: false annotations: {} # Specify pod annotations # Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor # Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp # Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl # # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' # Run containers to have security context. Default is 'nobody' (65534/65534) in distroless securityContext: enabled: true runAsUser: 1001 fsGroup: 1001 rbac: # Specifies whether RBAC resources should be created create: true serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. 
name: cloud-controller-manager daemonset: annotations: {} image: gcr.io/cloud-provider-vsphere/cpi/release/manager tag: v1.2.1 pullPolicy: IfNotPresent dnsPolicy: ClusterFirst cmdline: logging: 2 # Location of the cloud configmap to be mounted on the filesystem cloudConfig: dir: "/etc/cloud" file: "vsphere.conf" additionalParams: {} replicaCount: 1 resources: {} # limits: # cpu: 500m # memory: 512Mi # requests: # cpu: 256m # memory: 128Mi podAnnotations: {} ## Additional pod labels ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ podLabels: {} ## Allows for the default selector to be replaced with user-defined ones nodeSelector: {} ## Allows for the default tolerations to be replaced with user-defined ones tolerations: [] service: enabled: false annotations: {} type: ClusterIP # List of IP ranges that are allowed to access the load balancer (if supported) loadBalancerSourceRanges: [] # endpointPort: externally accessible port for UI and API endpointPort: 43001 # targetPort: the internal port the UI and API are exposed on targetPort: 43001 ingress: enabled: false annotations: {} # Used to create an Ingress record. # hosts: # - chart-example.local # annotations: # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" # tls: # Secrets must be manually created in the namespace. # - secretName: chart-example-tls # hosts: # - chart-example.local
minecraft
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"minecraft.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some ...
# ref: https://hub.docker.com/r/itzg/minecraft-server/ image: itzg/minecraft-server imageTag: latest ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: requests: memory: 512Mi cpu: 500m # upgrade strategy type (e.g. Recreate or RollingUpdate) strategyType: Recreate nodeSelector: {} tolerations: [] affinity: {} securityContext: # Security context settings runAsUser: 1000 fsGroup: 2000 # Most of these map to environment variables. See Minecraft for details: # https://hub.docker.com/r/itzg/minecraft-server/ livenessProbe: command: - mc-monitor - status - localhost:25565 initialDelaySeconds: 30 periodSeconds: 5 failureThreshold: 10 successThreshold: 1 timeoutSeconds: 1 readinessProbe: command: - mc-monitor - status - localhost:25565 initialDelaySeconds: 30 periodSeconds: 5 failureThreshold: 10 successThreshold: 1 timeoutSeconds: 1 minecraftServer: # This must be overridden, since we can't accept this for the user. eula: "FALSE" # One of: LATEST, SNAPSHOT, or a specific version (ie: "1.7.9"). version: "1.14.4" # This can be one of "VANILLA", "FORGE", "SPIGOT", "BUKKIT", "PAPER", "FTB", "SPONGEVANILLA" type: "VANILLA" # If type is set to FORGE, this sets the version; this is ignored if forgeInstallerUrl is set forgeVersion: # If type is set to SPONGEVANILLA, this sets the version spongeVersion: # If type is set to FORGE, this sets the URL to download the Forge installer forgeInstallerUrl: # If type is set to BUKKIT, this sets the URL to download the Bukkit package bukkitDownloadUrl: # If type is set to SPIGOT, this sets the URL to download the Spigot package spigotDownloadUrl: # If type is set to PAPER, this sets the URL to download the PaperSpigot package paperDownloadUrl: # If type is set to FTB, this sets the server mod to run. 
You can also provide the URL to download the FTB package ftbServerMod: # Set to true if running Feed The Beast and get an error like "unable to launch forgemodloader" ftbLegacyJavaFixer: false # One of: peaceful, easy, normal, and hard difficulty: easy # A comma-separated list of player names to whitelist. whitelist: # A comma-separated list of player names who should be admins. ops: # A server icon URL for server listings. Auto-scaled and transcoded. icon: # Max connected players. maxPlayers: 20 # This sets the maximum possible size in blocks, expressed as a radius, that the world border can obtain. maxWorldSize: 10000 # Allows players to travel to the Nether. allowNether: true # Allows server to announce when a player gets an achievement. announcePlayerAchievements: true # Enables command blocks. enableCommandBlock: true # If true, players will always join in the default gameMode even if they were previously set to something else. forcegameMode: false # Defines whether structures (such as villages) will be generated. generateStructures: true # If set to true, players will be set to spectator mode if they die. hardcore: false # The maximum height in which building is allowed. maxBuildHeight: 256 # The maximum number of milliseconds a single tick may take before the server watchdog stops the server with the message. -1 disables this entirely. maxTickTime: 60000 # Determines if animals will be able to spawn. spawnAnimals: true # Determines if monsters will be spawned. spawnMonsters: true # Determines if villagers will be spawned. spawnNPCs: true # Max view distance (in chunks). viewDistance: 10 # Define this if you want a specific map generation seed. levelSeed: # One of: creative, survival, adventure, spectator gameMode: survival # Message of the Day motd: "Welcome to Minecraft on Kubernetes!" # If true, enable player-vs-player damage. 
pvp: false # One of: DEFAULT, FLAT, LARGEBIOMES, AMPLIFIED, CUSTOMIZED levelType: DEFAULT # When levelType == FLAT or CUSTOMIZED, this can be used to further customize map generation. # ref: https://hub.docker.com/r/itzg/minecraft-server/ generatorSettings: worldSaveName: world # If set, this URL will be downloaded at startup and used as a starting point downloadWorldUrl: # force re-download of server file forceReDownload: false # If set, the modpack at this URL will be downloaded at startup downloadModpackUrl: # If true, old versions of downloaded mods will be replaced with new ones from downloadModpackUrl removeOldMods: false # Check accounts against Minecraft account service. onlineMode: true # If you adjust this, you may need to adjust resources.requests above to match. memory: 512M # General JVM options to be passed to the Minecraft server invocation jvmOpts: "" # Options like -X that need to proceed general JVM options jvmXXOpts: "" serviceType: LoadBalancer loadBalancerIP: # loadBalancerSourceRanges: [] ## Set the externalTrafficPolicy in the Service to either Cluster or Local # externalTrafficPolicy: Cluster rcon: # If you enable this, make SURE to change your password below. enabled: false port: 25575 password: "CHANGEME!" serviceType: LoadBalancer loadBalancerIP: # loadBalancerSourceRanges: [] ## Set the externalTrafficPolicy in the Service to either Cluster or Local # externalTrafficPolicy: Cluster query: # If you enable this, your server will be "published" to Gamespy enabled: false port: 25565 ## Additional minecraft container environment variables ## extraEnv: {} persistence: ## minecraft data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. 
(gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" dataDir: # Set this to false if you don't care to persist state between restarts. enabled: true # existingClaim: nil Size: 1Gi podAnnotations: {} deploymentAnnotations: {}
percona
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"percona.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}...
## percona image ## ref: https://hub.docker.com/_/percona/ image: "percona" ## percona image version ## ref: https://hub.docker.com/r/library/percona/tags/ ## imageTag: "5.7.26" ## Specify password for root user ## ## Default: random 10 character string # mysqlRootPassword: testing ## Create a database user ## # mysqlUser: # mysqlPassword: ## Allow unauthenticated access, uncomment to enable ## # mysqlAllowEmptyPassword: true ## Create a database ## # mysqlDatabase: ## Specify an imagePullPolicy (Required) ## It's recommended to change this to 'Always' if the image tag is 'latest' ## ref: http://kubernetes.io/docs/user-guide/images/#updating-images ## imagePullPolicy: IfNotPresent ## Use an alternate scheduler, e.g. "stork". ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ ## # schedulerName: "default-scheduler" ## Persist data to a persistent volume persistence: enabled: false ## percona data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. 
(gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: requests: memory: 256Mi cpu: 100m ## Node labels for pod assignment ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ ## nodeSelector: {} ## Tolerations labels for pod assignment ## Allow the scheduling on tainted nodes (requires Kubernetes >= 1.6) ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ ## tolerations: [] ## Affinity labels for pod assignment ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## affinity: {} ## Configure resources for the init container ## initResources: {} # limits: # cpu: 25m # memory: 128Mi # requests: # cpu: 25m # memory 128Mi ## Override image used by init container ## initImage: repository: "busybox" tag: "1.25.0" pullPolicy: "IfNotPresent"
zetcd
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"zetcd.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kube...
# Default values for zetcd. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 image: repository: quay.io/coreos/zetcd tag: v0.0.3 pullPolicy: IfNotPresent service: name: zetcd type: ClusterIP externalPort: 2181 internalPort: 2181 resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi ## Node labels for pod assignment ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ ## nodeSelector: {} etcd: operatorEnabled: true endpoints: localhost:2379 # Communication with etcd can be encrypted and authenticated with a certificate. # In order to enable it, add 'tls' section providing existing secret # containing CA certificate, client certificate and client key. # tls: # existingSecret: etcd-tls-secret # cert: client.crt # key: client.key # ca: ca.crt etcd-operator: cluster: enabled: true
kapacitor
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"kapacitor.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some ...
## kapacitor image version ## ref: https://hub.docker.com/r/library/kapacitor/tags/ ## image: repository: "kapacitor" tag: "1.5.2-alpine" pullPolicy: "IfNotPresent" ## Specify a service type, defaults to NodePort ## ref: http://kubernetes.io/docs/user-guide/services/ ## service: type: ClusterIP ## Persist data to a persistent volume ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## persistence: enabled: true ## kapacitor data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi # existingClaim: "" ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: requests: memory: 256Mi cpu: 0.1 limits: memory: 2Gi cpu: 2 ## Set the environment variables for kapacitor (or anything else you want to use) ## ref: https://hub.docker.com/_/kapacitor/ ## ref: https://docs.influxdata.com/kapacitor/latest/administration/configuration/ ## # Examples below # # envVars: # KAPACITOR_SLACK_ENABLED: true # KAPACITOR_SLACK_URL: "http://slack.com/xxxxx/xxxxx/xxxx/xxxxxxx" # KAPACITOR_HTTP_LOG_ENABLED: true # KAPACITOR_LOGGING_LEVEL: "INFO" # # or, at your terminal, with # # helm install --name kapacitor-rls --set influxURL=http://influxurl.com,envVars.KAPACITOR_SLACK_ENABLED=true,envVars.KAPACITOR_SLACK_URL="http://slack.com/xxxxx/xxxxx/xxxx/xxxxxxx" stable/kapacitor ## Set the URL of InfluxDB instance to create subscription on ## ref: https://docs.influxdata.com/kapacitor/v1.1/introduction/getting_started/ ## # influxURL: http://influxdb-influxdb.tick:8086 ## Name of an existing Secrect used to set the environment variables for the ## InfluxDB user and password. 
The expected keys in the secret are ## `influxdb-user` and `influxdb-password`. ## # existingSecret: influxdb-auth
unbound
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\n\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"unbound.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a fully qualified app name\n*/}}\n{{- define \"unbound.fullname\" -}...
replicaCount: 1 # values that pertain to the unbound container, for more information # on unbound configuration see http://unbound.net/documentation/unbound.conf.html unbound: image: repository: markbnj/unbound-docker tag: "0.1.0" pullPolicy: IfNotPresent verbosity: 1 numThreads: 1 statsInterval: 0 statsCumulative: "no" serverPort: 53 # values that pertain to the exechealthz container, for more information see # https://github.com/kubernetes/contrib/tree/master/exec-healthz healthz: image: repository: gcr.io/google-containers/exechealthz tag: "1.2" pullPolicy: IfNotPresent resources: {} nodeSelector: {} tolerations: [] affinity: {} # clusterIP: # Controls which IP address ranges unbound will allow queries from. # If you want to use unbound as an upstream for kube-dns, or allow other pods # to query the resolver directly, you'll at least need to allow the # clusterIpV4Cidr range. # allowedIpRanges: # - "10.10.10.10/20" # You can set as many forward zones as needed by specifying the zone name # and forward hosts. Forward hosts can be set by hostname or ip. # forwardZones: # - name: "fake.net" # forwardHosts: # - "fake1.host.net" # forwardIps: # - "10.10.10.10" # Unbound can store DNS records in a "local zone." This facility can be used to # assign context-specific names to a given IP address, and could also be used for # private DNS if you don't want or have an external resolver. # localRecords: # - name: "fake3.host.net" # ip: "10.12.10.10"
spring-cloud-data-flow
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"scdf.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default short app name to use for resource naming.\nWe truncate at 63 chars b...
# Default values for spring-cloud-data-flow. rbac: # Specifies whether RBAC resources should be created create: true serviceAccount: # Specifies whether a service account should be created create: true # The name of the service account to use. # If not set and create is true, a name is generated using the serviceAccountName template name: initContainers: dbWait: image: "busybox" tag: "1.30.1" imagePullPolicy: IfNotPresent server: image: springcloud/spring-cloud-dataflow-server version: 2.6.0 imagePullPolicy: IfNotPresent platformName: default trustCerts: false service: type: LoadBalancer externalPort: 80 annotations: {} labels: {} loadBalancerSourceRanges: [] configMap: resources: {} # limits: # cpu: 1.0 # memory: 2048Mi # requests: # cpu: 0.5 # memory: 640Mi extraEnv: {} containerConfiguration: {} # container: # registry-configurations: # default: # registry-host: registry-1.docker.io # authorization-type: dockeroauth2 skipper: enabled: true image: springcloud/spring-cloud-skipper-server version: 2.5.0 imagePullPolicy: IfNotPresent platformName: default trustCerts: false service: type: ClusterIP annotations: {} labels: {} configMap: resources: {} # limits: # cpu: 1.0 # memory: 1024Mi # requests: # cpu: 0.5 # memory: 640Mi extraEnv: {} deployer: resourceLimits: cpu: 500m memory: 1024Mi readinessProbe: initialDelaySeconds: 120 livenessProbe: initialDelaySeconds: 90 rabbitmq: enabled: true rabbitmq: username: user # this value will be encoded into a secret password: changeme rabbitmq-ha: enabled: false rabbitmqUsername: user kafka: enabled: false replicas: 1 configurationOverrides: "offsets.topic.replication.factor": 1 "confluent.support.metrics.enable": false zookeeper: replicaCount: 1 mysql: enabled: true mysqlDatabase: dataflow ## If you are using an external database, ## you must specify the following database details database: driver: scheme: host: port: user: scdf password: dataflow: dataflow skipper: skipper features: streaming: enabled: true batch: enabled: 
true monitoring: enabled: false ## If you are using an ingress server then you can override the following ## default values to create an Ingress resource ingress: enabled: false server: host: data-flow.local grafana: host: grafana.local protocol: https grafana: service: type: LoadBalancer admin: existingSecret: scdf-grafana-secret userKey: admin-user passwordKey: admin-password defaultUsername: YWRtaW4= defaultPassword: cGFzc3dvcmQ= extraConfigmapMounts: - name: scdf-grafana-ds-cm mountPath: /etc/grafana/provisioning/datasources/datasources.yaml subPath: datasources.yaml configMap: scdf-grafana-ds-cm readOnly: true dashboardProviders: dashboardproviders.yaml: apiVersion: 1 providers: - name: default orgId: 1 folder: type: file disableDeletion: true editable: false options: path: /var/lib/grafana/dashboards/default dashboards: default: scdf-applications: url: https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/src/grafana/prometheus/docker/grafana/dashboards/scdf-applications.json scdf-streams: url: https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/src/grafana/prometheus/docker/grafana/dashboards/scdf-streams.json scdf-task-batch: url: https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/src/grafana/prometheus/docker/grafana/dashboards/scdf-task-batch.json prometheus: podSecurityPolicy: enabled: true alertmanager: enabled: false kubeStateMetrics: enabled: false nodeExporter: enabled: false pushgateway: enabled: false server: global: scrape_interval: 10s scrape_timeout: 9s evaluation_interval: 10s extraScrapeConfigs: | - job_name: 'proxied-applications' metrics_path: '/metrics/connected' kubernetes_sd_configs: - role: pod namespaces: names: - {{ .Release.Namespace }} relabel_configs: - source_labels: [__meta_kubernetes_pod_label_app] action: keep regex: prometheus-proxy - source_labels: [__meta_kubernetes_pod_container_port_number] action: keep regex: 8080 - job_name: 'proxies' metrics_path: 
'/metrics/proxy' kubernetes_sd_configs: - role: pod namespaces: names: - {{ .Release.Namespace }} relabel_configs: - source_labels: [__meta_kubernetes_pod_label_app] action: keep regex: prometheus-proxy - source_labels: [__meta_kubernetes_pod_container_port_number] action: keep regex: 8080 - action: labelmap regex: __meta_kubernetes_pod_label_(.+) - source_labels: [__meta_kubernetes_pod_name] action: replace target_label: kubernetes_pod_name proxy: service: type: LoadBalancer
kibana
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"kibana.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kub...
image: repository: "docker.elastic.co/kibana/kibana-oss" tag: "6.7.0" pullPolicy: "IfNotPresent" testFramework: enabled: "true" image: "dduportal/bats" tag: "0.4.0" commandline: args: [] env: {} ## All Kibana configuration options are adjustable via env vars. ## To adjust a config option to an env var uppercase + replace `.` with `_` ## Ref: https://www.elastic.co/guide/en/kibana/current/settings.html ## For kibana < 6.6, use ELASTICSEARCH_URL instead # ELASTICSEARCH_HOSTS: http://elasticsearch-client:9200 # SERVER_PORT: 5601 # LOGGING_VERBOSE: "true" # SERVER_DEFAULTROUTE: "/app/kibana" envFromSecrets: {} ## Create a secret manually. Reference it here to inject environment variables # ELASTICSEARCH_USERNAME: # from: # secret: secret-name-here # key: ELASTICSEARCH_USERNAME # ELASTICSEARCH_PASSWORD: # from: # secret: secret-name-here # key: ELASTICSEARCH_PASSWORD files: kibana.yml: ## Default Kibana configuration from kibana-docker. server.name: kibana server.host: "0" ## For kibana < 6.6, use elasticsearch.url instead elasticsearch.hosts: http://elasticsearch:9200 ## Custom config properties below ## Ref: https://www.elastic.co/guide/en/kibana/current/settings.html # server.port: 5601 # logging.verbose: "true" # server.defaultRoute: "/app/kibana" deployment: annotations: {} service: type: ClusterIP # clusterIP: None # portName: kibana-svc externalPort: 443 internalPort: 5601 # authProxyPort: 5602 To be used with authProxyEnabled and a proxy extraContainer ## External IP addresses of service ## Default: nil ## # externalIPs: # - 192.168.0.1 # ## LoadBalancer IP if service.type is LoadBalancer ## Default: nil ## # loadBalancerIP: 10.2.2.2 annotations: {} # Annotation example: setup ssl with aws cert when service.type is LoadBalancer # service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:EXAMPLE_CERT labels: {} ## Label example: show service URL in `kubectl cluster-info` # kubernetes.io/cluster-service: "true" ## Limit load balancer source ips 
to list of CIDRs (where available) # loadBalancerSourceRanges: [] selector: {} ingress: enabled: false # hosts: # - kibana.localhost.localdomain # - localhost.localdomain/kibana # annotations: # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" # tls: # - secretName: chart-example-tls # hosts: # - chart-example.local serviceAccount: # Specifies whether a service account should be created create: false # The name of the service account to use. # If not set and create is true, a name is generated using the fullname template # If set and create is false, the service account must be existing name: livenessProbe: enabled: false path: /status initialDelaySeconds: 30 timeoutSeconds: 10 readinessProbe: enabled: false path: /status initialDelaySeconds: 30 timeoutSeconds: 10 periodSeconds: 10 successThreshold: 5 # Enable an authproxy. Specify container in extraContainers authProxyEnabled: false extraContainers: | # - name: proxy # image: quay.io/gambol99/keycloak-proxy:latest # args: # - --resource=uri=/* # - --discovery-url=https://discovery-url # - --client-id=client # - --client-secret=secret # - --listen=0.0.0.0:5602 # - --upstream-url=http://127.0.0.1:5601 # ports: # - name: web # containerPort: 9090 extraVolumeMounts: [] extraVolumes: [] resources: {} # limits: # cpu: 100m # memory: 300Mi # requests: # cpu: 100m # memory: 300Mi priorityClassName: "" # Affinity for pod assignment # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity # affinity: {} # Tolerations for pod assignment # Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ tolerations: [] # Node labels for pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ nodeSelector: {} podAnnotations: {} replicaCount: 1 revisionHistoryLimit: 3 # Custom labels for pod assignment podLabels: {} # To export a dashboard from a running Kibana 6.3.x use: # curl --user <username>:<password> -XGET 
https://kibana.yourdomain.com:5601/api/kibana/dashboards/export?dashboard=<some-dashboard-uuid> > my-dashboard.json # A dashboard is defined by a name and a string with the json payload or the download url dashboardImport: enabled: false timeout: 60 basePath: / xpackauth: enabled: false username: myuser password: mypass dashboards: {} # k8s: https://raw.githubusercontent.com/monotek/kibana-dashboards/master/k8s-fluentd-elasticsearch.json # List of plugins to install using initContainer # NOTE : We notice that lower resource constraints given to the chart + plugins are likely not going to work well. plugins: # set to true to enable plugins installation enabled: false # set to true to remove all kibana plugins before installation reset: false # Use <plugin_name,version,url> to add/upgrade plugin values: # - elastalert-kibana-plugin,1.0.1,https://github.com/bitsensor/elastalert-kibana-plugin/releases/download/1.0.1/elastalert-kibana-plugin-1.0.1-6.4.2.zip # - logtrail,0.1.31,https://github.com/sivasamyk/logtrail/releases/download/v0.1.31/logtrail-6.6.0-0.1.31.zip # - other_plugin persistentVolumeClaim: # set to true to use pvc enabled: false # set to true to use you own pvc existingClaim: false annotations: {} accessModes: - ReadWriteOnce size: "5Gi" ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" # default security context securityContext: enabled: false allowPrivilegeEscalation: false runAsUser: 1000 fsGroup: 2000 extraConfigMapMounts: [] # - name: logtrail-configs # configMap: kibana-logtrail # mountPath: /usr/share/kibana/plugins/logtrail/logtrail.json # subPath: logtrail.json # Add your own init container or uncomment and modify the given example. 
initContainers: {} ## Don't start kibana till Elasticsearch is reachable. ## Ensure that it is available at http://elasticsearch:9200 ## # es-check: # <- will be used as container name # image: "appropriate/curl:latest" # imagePullPolicy: "IfNotPresent" # command: # - "/bin/sh" # - "-c" # - | # is_down=true # while "$is_down"; do # if curl -sSf --fail-early --connect-timeout 5 http://elasticsearch:9200; then # is_down=false # else # sleep 5 # fi # done
bitcoind
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"bitcoind.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some K...
# Default values for bitcoind. # This is a YAML-formatted file. # Declare variables to be passed into your templates. terminationGracePeriodSeconds: 30 image: repository: arilot/docker-bitcoind tag: 0.17.1 pullPolicy: IfNotPresent service: rpcPort: 8332 p2pPort: 8333 testnetPort: 18332 testnetP2pPort: 18333 persistence: enabled: true ## database data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 300Gi ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: {} # requests: # memory: 512Mi # cpu: 300m # Custom bitcoind configuration file used to override default bitcoind settings configurationFile: bitcoin.conf: |- server=1 printtoconsole=1 rpcuser=rpcuser rpcpassword=rpcpassword
spark-history-server
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"spark-history-server.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars be...
gceStorage: 5Gi pvcStorage: 1Mi pvStorage: 1Mi pvName: nfs-pv pvcName: nfs-pvc enableExampleNFS: true
phabricator
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"phabricator.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because som...
## Global Docker image parameters ## Please, note that this will override the image parameters, including dependencies, configured to use the global value ## Current available global Docker image parameters: imageRegistry and imagePullSecrets ## # global: # imageRegistry: myRegistryName # imagePullSecrets: # - myRegistryKeySecretName # storageClass: myStorageClass ## Bitnami Phabricator image version ## ref: https://hub.docker.com/r/bitnami/phabricator/tags/ ## image: registry: docker.io repository: bitnami/phabricator tag: 2020.7.0-debian-10-r10 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## String to partially override phabricator.fullname template (will maintain the release name) ## # nameOverride: ## String to fully override phabricator.fullname template ## # fullnameOverride: ## Phabricator host to create application URLs ## ref: https://github.com/bitnami/bitnami-docker-phabricator#configuration ## # phabricatorHost: ## Phabricator alternate domain to upload files ## ref: https://github.com/bitnami/bitnami-docker-phabricator#configuration ## # phabricatorAlternateFileDomain: ## User of the application ## ref: https://github.com/bitnami/bitnami-docker-phabricator#configuration ## phabricatorUsername: user ## Application password ## Defaults to a random 10-character alphanumeric string if not set ## ref: https://github.com/bitnami/bitnami-docker-phabricator#configuration ## # phabricatorPassword: ## Admin email ## ref: https://github.com/bitnami/bitnami-docker-phabricator#configuration ## phabricatorEmail: user@example.com ## First name ## ref: 
https://github.com/bitnami/bitnami-docker-phabricator#environment-variables ## phabricatorFirstName: First Name ## Last name ## ref: https://github.com/bitnami/bitnami-docker-phabricator#environment-variables ## phabricatorLastName: Last Name ## SMTP mail delivery configuration ## ref: https://github.com/bitnami/bitnami-docker-phabricator/#smtp-configuration ## # smtpHost: # smtpPort: # smtpUser: # smtpPassword: # smtpProtocol: ## ## MariaDB chart configuration ## ## https://github.com/helm/charts/blob/master/stable/mariadb/values.yaml ## mariadb: ## Whether to deploy a mariadb server to satisfy the applications database requirements. To use an external database set this to false and configure the externalDatabase parameters enabled: true ## Disable MariaDB replication replication: enabled: false ## MariaDB admin password ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run ## # rootUser: # password: ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## master: ## Disable local_infile for MariaDB: https://secure.phabricator.com/T13238 extraFlags: "--local-infile=0" persistence: enabled: true ## mariadb data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. 
(gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi ## Kubernetes configuration ## For minikube, set this to NodePort, elsewhere use LoadBalancer ## service: type: LoadBalancer # HTTP Port port: 80 # HTTPS Port httpsPort: 443 ## loadBalancerIP: ## ## nodePorts: ## http: <to set explicitly, choose port between 30000-32767> ## https: <to set explicitly, choose port between 30000-32767> nodePorts: http: "" https: "" ## Enable client source IP preservation ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip ## externalTrafficPolicy: Cluster ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## persistence: enabled: true ## Phabricator data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi ## Configure the ingress resource that allows you to access the ## Phabricator installation. Set up the URL ## ref: http://kubernetes.io/docs/user-guide/ingress/ ## ingress: ## Set to true to enable ingress record generation ## enabled: false ## Set this to true in order to add the corresponding annotations for cert-manager certManager: false ## Ingress annotations done as key:value pairs ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md ## annotations: # kubernetes.io/ingress.class: nginx ## The list of hostnames to be covered with this ingress record. 
## Most likely this will be just one host, but in the event more hosts are needed, this is an array ## hosts: - name: phabricator.local path: / ## Set this to true in order to enable TLS on the ingress record tls: false ## Optionally specify the TLS hosts for the ingress record ## Useful when the Ingress controller supports www-redirection ## If not specified, the above host name will be used # tlsHosts: # - www.phabricator.local # - phabricator.local ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS tlsSecret: phabricator.local-tls secrets: ## If you're providing your own certificates, please use this to add the certificates as secrets ## key and certificate should start with -----BEGIN CERTIFICATE----- or ## -----BEGIN RSA PRIVATE KEY----- ## ## name should line up with a tlsSecret set further up ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set ## ## It is also possible to create and manage the certificates outside of this helm chart ## Please see README.md for more information # - name: phabricator.local-tls # key: # certificate: ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: requests: memory: 512Mi cpu: 300m ## Pod annotations ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ ## podAnnotations: {} ## Prometheus Exporter / Metrics ## metrics: enabled: false image: registry: docker.io repository: bitnami/apache-exporter tag: 0.7.0-debian-10-r37 pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. 
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## Metrics exporter pod Annotation and Labels podAnnotations: prometheus.io/scrape: "true" prometheus.io/port: "9117" ## Metrics exporter resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## # resources: {} ## Node labels for pod assignment ## ref: https://kubernetes.io/docs/user-guide/node-selection/ ## # nodeSelector: {} ## Affinity for pod assignment ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## # affinity: {} ## Tolerations for pod assignment ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ ## # tolerations: []
gcloud-endpoints
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 24 -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 24 chars because some Kubernetes name fields are li...
## Google Cloud Endpoints Runtime image ## ref: https://cloud.google.com/endpoints/docs/quickstart-container-engine#deploying_the_sample_api_to_the_cluster image: b.gcr.io/endpoints/endpoints-runtime:1 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## # imagePullPolicy: ## Set the application server address to which ESP proxies the requests. For ## GRPC backends, please use grpc:// prefix, e.g. grpc://localhost:8081. ## (default: localhost:8081) ## # backend: ## Set the name of the Endpoints service. If omitted and serviceConfigURL not ## specified, ESP contacts the metadata service to fetch the service name. ## (default: None) ## # service: ## Specify the URL to fetch the service configuration. (default: None) ## # serviceConfigURL: ## Expose a port to accept HTTP/1.x connections. Note that if you do not ## specify httpPort, http2Port, and sslPort, then the default httpPort 8080 is ## set. (default: None) ## # httpPort: 8080 ## Expose a port to accept HTTP/2 connections. Note that this cannot be the ## same port as HTTP/1.x port. (default: None) ## # http2Port: ## Expose a port for HTTPS requests. Accepts both HTTP/1.x and HTTP/2 ## connections. (default: None) ## # sslPort: ## Set the ESP status port. Status information is available at ## /endpoints_status location over HTTP/1.x. (default: 8090) ## statusPort: 8090 ## Set the config version of the Endpoints service. If omitted and ## serviceConfigURL not specified, ESP contacts the metadata service to fetch ## the service version. (default: None) ## # version: ## Set the service account key JSON file. Used to access the service control ## and the service management. If the option is omitted, ESP contacts the ## metadata service to fetch an access token. (default: None) ## # serviceAccountKey: ## Set a custom nginx config file. 
(default: None) ## # nginxConfig: ## Kubernetes configuration ## For minikube, set this to NodePort, elsewhere use LoadBalancer ## serviceType: ClusterIP ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: requests: memory: 128Mi cpu: 100m
mssql-linux
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"mssql.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kube...
acceptEula: value: "n" edition: value: Express collation: SQL_Latin1_General_CP1_CI_AS lcid: 1033 hadr: 0 agent: enabled: false # Override sapassword in templates/secret.yaml # sapassword: "MyStrongPassword1234" existingSecret: "" existingSecretKey: sapassword image: repository: microsoft/mssql-server-linux tag: 2017-CU5 pullPolicy: IfNotPresent ## It is possible to specify docker registry credentials ## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod # pullSecrets: # - name: regsecret ## Use an alternate scheduler, e.g. "stork". ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ ## # schedulerName: "default-scheduler" service: # If headless is set to TRUE then the service type is ignored headless: false type: ClusterIP port: 1433 nodePort: annotations: {} labels: {} loadBalancerIP: deployment: annotations: {} labels: {} pod: annotations: {} labels: {} persistence: enabled: true # existingDataClaim: # existingTransactionLogClaim: # existingBackupClaim: # existingMasterClaim: storageClass: "" dataAccessMode: ReadWriteOnce dataSize: 1Gi transactionLogAccessMode: ReadWriteOnce transactionLogSize: 1Gi backupAccessMode: ReadWriteOnce backupSize: 1Gi masterAccessMode: ReadWriteOnce masterSize: 1Gi livenessprobe: initialDelaySeconds: 15 periodSeconds: 20 readinessprobe: initialDelaySeconds: 5 periodSeconds: 10 resources: limits: # cpu: 100m memory: 2Gi # requests: # cpu: 100m # memory: 2Gi nodeSelector: {} # kubernetes.io/hostname: minikube tolerations: [] affinity: {} securityContext: {} # runAsUser: 1000
zeppelin
[ "# _helpers.yaml\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"zeppelin.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 24 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 24 chars because some ...
zeppelin: image: dylanmei/zeppelin:0.7.2 resources: limits: memory: "4096Mi" cpu: "2000m" hadoop: useConfigMap: false configMapName: hadoop-hadoop configPath: /usr/hadoop-2.7.3/etc/hadoop spark: driverMemory: 1g executorMemory: 1g numExecutors: 2 ingress: enabled: false annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" # nginx.ingress.kubernetes.io/auth-secret: zeppelin-tls-secret path: / hosts: - zeppelin.local tls: [] # - secretName: zeppelin-tls-secret # hosts: zeppelin.local nodeSelector: {}
prometheus-snmp-exporter
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"prometheus-snmp-exporter.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 char...
restartPolicy: Always image: repository: prom/snmp-exporter tag: v0.14.0 pullPolicy: IfNotPresent nodeSelector: {} tolerations: [] affinity: {} # config: ## For RBAC support: rbac: # Specifies whether RBAC resources should be created create: true serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: resources: {} # limits: # memory: 300Mi # requests: # memory: 50Mi service: annotations: {} type: ClusterIP port: 9116 ## An Ingress resource can provide name-based virtual hosting and TLS ## termination among other things for CouchDB deployments which are accessed ## from outside the Kubernetes cluster. ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ ingress: enabled: false hosts: [] # - chart-example.local annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" tls: [] # Secrets must be manually created in the namespace. 
# - secretName: chart-example-tls # hosts: # - chart-example.local podAnnotations: {} extraArgs: [] # --history.limit=1000 replicas: 1 ## Monitors ConfigMap changes and POSTs to a URL ## Ref: https://github.com/jimmidyson/configmap-reload ## configmapReload: ## configmap-reload container name ## name: configmap-reload ## configmap-reload container image ## image: repository: jimmidyson/configmap-reload tag: v0.2.2 pullPolicy: IfNotPresent ## configmap-reload resource requests and limits ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: {} # Enable this if you're using https://github.com/coreos/prometheus-operator serviceMonitor: enabled: false namespace: monitoring # fallback to the prometheus default unless specified # interval: 10s ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) selector: prometheus: kube-prometheus # Retain the job and instance labels of the metrics pushed to the snmp-exporter # [Scraping SNMP-exporter](https://github.com/prometheus/snmp_exporter#configure-the-snmp_exporter-as-a-target-to-scrape) honorLabels: true params: enabled: false conf: module: - if_mib target: - 127.0.0.1 path: /snmp scrapeTimeout: 10s
express-gateway
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n\n{{- define \"eg.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffi...
# Default values for Express Gateway. # Declare variables to be passed into your templates. image: repository: expressgateway/express-gateway tag: v1.16.9 pullPolicy: IfNotPresent # Specify Express Gateway Admin API admin: # HTTPS traffic on the admin port https: true hostname: 0.0.0.0 servicePort: 9876 containerPort: 9876 # Admin Service type type: NodePort # Specify Express Gateway main listening service proxy: # HTTPS traffic on the proxy port https: true tls: {} servicePort: 8080 containerPort: 8080 type: NodePort # readinessProbe for EG pods readinessProbe: httpGet: path: "/users" port: admin scheme: HTTP initialDelaySeconds: 5 timeoutSeconds: 1 periodSeconds: 10 successThreshold: 1 failureThreshold: 2 # livenessProbe for EG pods livenessProbe: httpGet: path: "/users" port: admin scheme: HTTP initialDelaySeconds: 10 timeoutSeconds: 5 periodSeconds: 30 successThreshold: 1 failureThreshold: 2 # Tolerations for pod assignment # Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ tolerations: [] # Node labels for pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ nodeSelector: {} # Annotation to be added to pods podAnnotations: {} # pod count replicaCount: 1 # Express Gateway has a choice of either run with with transactional data in memory or use Redis as # backend database. Redis is used by default. redis: password: express-gateway storage: emulate: true namespace: EG crypto: cipherKey: sensitiveKey algorithm: aes256 saltRounds: 10 session: secret: keyboard cat resave: false saveUninitialized: false accessTokens: timeToExpiry: 7200000 refreshTokens: timeToExpiry: 7200000 authorizationCodes: timeToExpiry: 300000
sonarqube
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"sonarqube.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some ...
# Default values for sonarqube. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 # This will use the default deployment strategy unless it is overriden deploymentStrategy: {} image: repository: sonarqube tag: 7.9.2-community # If using a private repository, the name of the imagePullSecret to use # pullSecret: my-repo-secret # Set security context for sonarqube pod securityContext: fsGroup: 999 # Settings to configure elasticsearch host requirements elasticsearch: configureNode: true bootstrapChecks: true service: type: ClusterIP externalPort: 9000 internalPort: 9000 labels: annotations: {} # May be used in example for internal load balancing in GCP: # cloud.google.com/load-balancer-type: Internal # loadBalancerSourceRanges: # - 0.0.0.0/0 # loadBalancerIP: 1.2.3.4 ingress: enabled: false # Used to create an Ingress record. hosts: - name: sonar.organization.com # default paths for "/" and "/*" will be added path: / # If a different path is defined, that path and {path}/* will be added to the ingress resource # path: /sonarqube annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" # This property allows for reports up to a certain size to be uploaded to SonarQube # nginx.ingress.kubernetes.io/proxy-body-size: "8m" # Additional labels for Ingress manifest file # labels: # traffic-type: external # traffic-type: internal tls: [] # Secrets must be manually created in the namespace. 
# - secretName: chart-example-tls # hosts: # - chart-example.local # Affinity for pod assignment # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity affinity: {} # Tolerations for pod assignment # Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ tolerations: [] # Node labels for pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ nodeSelector: {} # hostAliases allows the modification of the hosts file inside a container hostAliases: [] # - ip: "192.168.1.10" # hostnames: # - "example.com" # - "www.example.com" readinessProbe: initialDelaySeconds: 60 periodSeconds: 30 failureThreshold: 6 # If an ingress *path* other than the root (/) is defined, it should be reflected here # A trailing "/" must be included sonarWebContext: / # sonarWebContext: /sonarqube/ livenessProbe: initialDelaySeconds: 60 periodSeconds: 30 # If an ingress *path* other than the root (/) is defined, it should be reflected here # A trailing "/" must be included sonarWebContext: / # sonarWebContext: /sonarqube/ # If an ingress *path* is defined, it should be reflected here # sonar.web.context: /sonarqube ## Provide a secret containing one or more certificate files in the keys that will be added to cacerts ## The cacerts file will be set via SONARQUBE_WEB_JVM_OPTS ## # caCerts: # secret: my-secret ## Values to add to SONARQUBE_WEB_JVM_OPTS ## # jvmOpts: "-Djava.net.preferIPv4Stack=true" jvmOpts: "" ## Environment variables to attach to the pods ## # env: # - name: VARIABLE # value: my-value # Set annotations for pods annotations: {} resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
# limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi persistence: enabled: false ## Set annotations on pvc annotations: {} ## Specify an existing volume claim instead of creating a new one. ## When using this option all following options like storageClass, accessMode and size are ignored. # existingClaim: ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## storageClass: accessMode: ReadWriteOnce size: 10Gi ## Specify extra volumes. Refer to ".spec.volumes" specification : https://kubernetes.io/fr/docs/concepts/storage/volumes/ volumes: [] ## Specify extra mounts. Refer to ".spec.containers.volumeMounts" specification : https://kubernetes.io/fr/docs/concepts/storage/volumes/ mounts: [] # List of plugins to install. # For example: # plugins: # install: # - "https://github.com/AmadeusITGroup/sonar-stash/releases/download/1.3.0/sonar-stash-plugin-1.3.0.jar" # - "https://github.com/SonarSource/sonar-ldap/releases/download/2.2-RC3/sonar-ldap-plugin-2.2.0.601.jar" plugins: install: [] lib: [] # initContainerImage: alpine:3.10.3 # deleteDefaultPlugins: true resources: {} # We allow the plugins init container to have a separate resources declaration because # the initContainer does not take as much resources. # A custom sonar.properties file can be provided via dictionary. # For example: # sonarProperties: # sonar.forceAuthentication: true # sonar.security.realm: LDAP # ldap.url: ldaps://organization.com # Additional sonar properties to load from a secret with a key "secret.properties" (must be a string) # sonarSecretProperties: # Kubernetes secret that contains the encryption key for the sonarqube instance. # The secret must contain the key 'sonar-secret.txt'. 
# The 'sonar.secretKeyPath' property will be set automatically. # sonarSecretKey: "settings-encryption-secret" ## Configuration value to select database type ## Option to use "postgresql" or "mysql" database type, by default "postgresql" is chosen ## Set the "enable" field to true of the database type you select (if you want to use internal database) and false of the one you don't select database: type: "postgresql" ## Configuration values for postgresql dependency ## ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md postgresql: # Enable to deploy the PostgreSQL chart enabled: true # To use an external PostgreSQL instance, set enabled to false and uncomment # the line below: # postgresqlServer: "" # To use an external secret for the password for an external PostgreSQL # instance, set enabled to false and provide the name of the secret on the # line below: # postgresqlPasswordSecret: "" postgresqlUsername: "sonarUser" postgresqlPassword: "sonarPass" postgresqlDatabase: "sonarDB" # Specify the TCP port that PostgreSQL should use service: port: 5432 ## Configuration values for the mysql dependency ## ref: https://github.com/kubernetes/charts/blob/master/stable/mysql/README.md ## mysql: # Enable to deploy the mySQL chart enabled: false # To use an external mySQL instance, set enabled to false and uncomment # the line below: # mysqlServer: "" # To use an external secret for the password for an external mySQL instance, # set enabled to false and provide the name of the secret on the line below: # mysqlPasswordSecret: "" mysqlUser: "sonarUser" mysqlPassword: "sonarPass" mysqlDatabase: "sonarDB" # mysqlParams: # useSSL: "true" # Specify the TCP port that mySQL should use service: port: 3306 # # Additional labels to add to the pods: # podLabels: # key: value podLabels: {} # For compatibility with 8.0 replace by "/opt/sq" sonarqubeFolder: /opt/sonarqube
luigi
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"luigi.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kube...
# Default values for luigi. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 image: repository: axiom/docker-luigi tag: 2.7.2-alpine pullPolicy: IfNotPresent service: name: luigi type: LoadBalancer externalPort: 80 # Luigi config: these values should mattch the luigi documentation # https://luigi.readthedocs.io/en/stable/configuration.html config: | [core] logging_conf_file=/etc/luigi/logging.cfg [scheduler] record_task_history=true state-path=/luigi/state/luigi-state.pickle [task_history] db_connection=mysql://luigi-mysql/luigidb # creates a persistent volume claim for # luigi state pickel persistence: enabled: false size: 1G accessMode: ReadWriteOnce # Ingress for ui access for use with authentication like oauth-proxy # depending on the authentication you use. You may only need one ingress. ingressUI: enabled: false path: / # Used to create an Ingress record. # hosts: # - chart-example.local # annotations: # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" # tls: # Secrets must be manually created in the namespace. # - secretName: chart-example-tls # hosts: # - chart-example.local # Ingress for api access viahttps and whatever authentication you use ingressAPI: enabled: false path: / # Used to create an Ingress record. # hosts: # - chart-example.local # annotations: # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" # tls: # Secrets must be manually created in the namespace. # - secretName: chart-example-tls # hosts: # - chart-example.local resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
# limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi mysql: mysqlDatabase: luigidb mysqlAllowEmptyPassword: true persistence: enabled: false
metricbeat
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"metricbeat.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some...
image: repository: docker.elastic.co/beats/metricbeat tag: 6.7.0 pullPolicy: IfNotPresent # The instances created by daemonset retrieve most metrics from the host daemonset: enabled: true podAnnotations: [] priorityClassName: "" tolerations: - key: node-role.kubernetes.io/master operator: Exists effect: NoSchedule nodeSelector: {} resources: {} hostNetwork: true dnsPolicy: ClusterFirstWithHostNet config: metricbeat.config: modules: path: ${path.config}/modules.d/*.yml reload.enabled: false processors: - add_cloud_metadata: output.file: path: "/usr/share/metricbeat/data" filename: metricbeat rotate_every_kb: 10000 number_of_files: 5 # If overrideConfig is not empty, metricbeat chart's default config won't be used at all. overrideConfig: {} modules: system: enabled: true config: - module: system period: 10s metricsets: - cpu - load - memory - network - process - process_summary # - core # - diskio # - socket processes: ['.*'] process.include_top_n: by_cpu: 5 # include top 5 processes by CPU by_memory: 5 # include top 5 processes by memory - module: system period: 1m metricsets: - filesystem - fsstat processors: - drop_event.when.regexp: system.filesystem.mount_point: '^/(sys|cgroup|proc|dev|etc|host|lib)($|/)' kubernetes: enabled: true config: - module: kubernetes metricsets: - node - system - pod - container - volume period: 10s host: ${NODE_NAME} hosts: ["localhost:10255"] # If using Red Hat OpenShift remove the previous hosts entry and # uncomment these settings: # hosts: ["https://${HOSTNAME}:10250"] # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token # ssl.certificate_authorities: # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt # If overrideModules is not empty, metricbeat chart's default modules won't be used at all. 
overrideModules: {} # The instance created by deployment retrieves metrics that are unique for the whole cluster, like Kubernetes events or kube-state-metrics deployment: enabled: true podAnnotations: [] priorityClassName: "" tolerations: [] nodeSelector: {} resources: {} config: metricbeat.config: modules: path: ${path.config}/modules.d/*.yml reload.enabled: false processors: - add_cloud_metadata: output.file: path: "/usr/share/metricbeat/data" filename: metricbeat rotate_every_kb: 10000 number_of_files: 5 # If overrideConfig is not empty, metricbeat chart's default config won't be used at all. overrideConfig: {} modules: kubernetes: enabled: true config: - module: kubernetes metricsets: - state_node - state_deployment - state_replicaset - state_pod - state_container # Uncomment this to get k8s events: # - event period: 10s hosts: ["kube-state-metrics:8080"] # If overrideModules is not empty, metricbeat chart's default modules won't be used at all. overrideModules: {} # List of beat plugins plugins: [] # - kinesis.so # additional environment # extraEnv: # - name: test1 # value: "test1" # - name: test2 # value: "test2" # Add additional volumes and mounts, for example to read other log files on the host extraVolumes: [] # - hostPath: # path: /var/log # name: varlog extraVolumeMounts: [] # - name: varlog # mountPath: /host/var/log # readOnly: true extraSecrets: [] # - name: ca-cert # data: # ca.pem: |- # -----BEGIN CERTIFICATE----- # ... # -----END CERTIFICATE----- # - name: userdata # data: # id: userid # pw: userpassword resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
# limits: # cpu: 100m # memory: 200Mi # requests: # cpu: 100m # memory: 100Mi rbac: # Specifies whether RBAC resources should be created create: true pspEnabled: false serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name:
traefik
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"traefik.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because som...
## Default values for Traefik image: traefik imageTag: 1.7.26 imagePullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ # imagePullSecrets: # - "regsecret" testFramework: enabled: false image: "dduportal/bats" tag: "0.4.0" ## can switch the service type to NodePort if required serviceType: LoadBalancer # loadBalancerIP: "" # loadBalancerSourceRanges: [] whiteListSourceRange: [] externalTrafficPolicy: Cluster replicas: 1 # startupArguments: # - "--ping" # - "--ping.entrypoint=http" # /ping health-check entry point. # pingEntryPoint: http podDisruptionBudget: {} # maxUnavailable: 1 # minAvailable: 2 # priorityClassName: "" # rootCAs: [] resources: {} debug: enabled: false # logLevel: error # maxIdleConnsPerHost: 200 deploymentStrategy: {} # rollingUpdate: # maxSurge: 1 # maxUnavailable: 0 # type: RollingUpdate securityContext: {} useNonPriviledgedPorts: false env: {} nodeSelector: {} # key: value affinity: {} # key: value tolerations: [] # - key: "key" # operator: "Equal|Exists" # value: "value" # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" ## Kubernetes ingress filters # kubernetes: # endpoint: # namespaces: # - default # labelSelector: # ingressClass: # ingressEndpoint: # hostname: "localhost" # ip: "127.0.0.1" # publishedService: "namespace/servicename" # useDefaultPublishedService: false fileBackend: "" # as in same traefik.toml # # [backends] # [backends.backend1] # # ... # [backends.backend2] # # ... # [frontends] # [frontends.frontend1] # # ... 
# [frontends.frontend2] # # or separated file from configFiles # filename = "/configs/rules.toml" proxyProtocol: enabled: false # trustedIPs is required when enabled trustedIPs: [] # - 10.0.0.0/8 forwardedHeaders: enabled: false # trustedIPs is required when enabled trustedIPs: [] # - 10.0.0.0/8 ## Add arbitrary ConfigMaps to deployment ## Will be mounted to /configs/, i.e. myconfig.json would ## be mounted to /configs/myconfig.json. configFiles: {} # myconfig.json: | # filecontents... ## Add arbitrary Secrets to deployment ## Will be mounted to /secrets/, i.e. file.name would ## be mounted to /secrets/mysecret.txt. ## The contents will be base64 encoded when added secretFiles: {} # mysecret.txt: | # filecontents... ssl: enabled: false enforced: false permanentRedirect: false upstream: false insecureSkipVerify: false generateTLS: false # defaultCN: "example.com" # or *.example.com defaultSANList: [] # - example.com # - test1.example.com defaultIPList: [] # - 1.2.3.4 # cipherSuites: [] # https://docs.traefik.io/configuration/entrypoints/#specify-minimum-tls-version # tlsMinVersion: VersionTLS12 # https://docs.traefik.io/configuration/entrypoints/#strict-sni-checking # sniStrict: false defaultCert: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVtekNDQTRPZ0F3SUJBZ0lKQUpBR1FsTW1DMGt5TUEwR0NTcUdTSWIzRFFFQkJRVUFNSUdQTVFzd0NRWUQKVlFRR0V3SlZVekVSTUE4R0ExVUVDQk1JUTI5c2IzSmhaRzh4RURBT0JnTlZCQWNUQjBKdmRXeGtaWEl4RkRBUwpCZ05WQkFvVEMwVjRZVzF3YkdWRGIzSndNUXN3Q1FZRFZRUUxFd0pKVkRFV01CUUdBMVVFQXhRTktpNWxlR0Z0CmNHeGxMbU52YlRFZ01CNEdDU3FHU0liM0RRRUpBUllSWVdSdGFXNUFaWGhoYlhCc1pTNWpiMjB3SGhjTk1UWXgKTURJME1qRXdPVFV5V2hjTk1UY3hNREkwTWpFd09UVXlXakNCanpFTE1Ba0dBMVVFQmhNQ1ZWTXhFVEFQQmdOVgpCQWdUQ0VOdmJHOXlZV1J2TVJBd0RnWURWUVFIRXdkQ2IzVnNaR1Z5TVJRd0VnWURWUVFLRXd0RmVHRnRjR3hsClEyOXljREVMTUFrR0ExVUVDeE1DU1ZReEZqQVVCZ05WQkFNVURTb3VaWGhoYlhCc1pTNWpiMjB4SURBZUJna3EKaGtpRzl3MEJDUUVXRVdGa2JXbHVRR1Y0WVcxd2JHVXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQwpBUThBTUlJQkNnS0NBUUVBdHVKOW13dzlCYXA2SDROdUhYTFB6d1NVZFppNGJyYTFkN1ZiRUJaWWZDSStZNjRDCjJ1dThwdTNhVTVzYXVNYkQ5N2pRYW95VzZHOThPUHJlV284b3lmbmRJY3RFcmxueGpxelUyVVRWN3FEVHk0bkEKNU9aZW9SZUxmZXFSeGxsSjE0VmlhNVFkZ3l3R0xoRTlqZy9jN2U0WUp6bmg5S1dZMnFjVnhEdUdEM2llaHNEbgphTnpWNFdGOWNJZm1zOHp3UHZPTk5MZnNBbXc3dUhUKzNiSzEzSUloeDI3ZmV2cXVWcENzNDFQNnBzdStWTG4yCjVIRHk0MXRoQkN3T0wrTithbGJ0ZktTcXM3TEFzM25RTjFsdHpITHZ5MGE1RGhkakpUd2tQclQrVXhwb0tCOUgKNFpZazErRUR0N09QbGh5bzM3NDFRaE4vSkNZK2RKbkFMQnNValFJREFRQUJvNEgzTUlIME1CMEdBMVVkRGdRVwpCQlJwZVc1dFhMdHh3TXJvQXM5d2RNbTUzVVVJTERDQnhBWURWUjBqQklHOE1JRzVnQlJwZVc1dFhMdHh3TXJvCkFzOXdkTW01M1VVSUxLR0JsYVNCa2pDQmp6RUxNQWtHQTFVRUJoTUNWVk14RVRBUEJnTlZCQWdUQ0VOdmJHOXkKWVdSdk1SQXdEZ1lEVlFRSEV3ZENiM1ZzWkdWeU1SUXdFZ1lEVlFRS0V3dEZlR0Z0Y0d4bFEyOXljREVMTUFrRwpBMVVFQ3hNQ1NWUXhGakFVQmdOVkJBTVVEU291WlhoaGJYQnNaUzVqYjIweElEQWVCZ2txaGtpRzl3MEJDUUVXCkVXRmtiV2x1UUdWNFlXMXdiR1V1WTI5dGdna0FrQVpDVXlZTFNUSXdEQVlEVlIwVEJBVXdBd0VCL3pBTkJna3EKaGtpRzl3MEJBUVVGQUFPQ0FRRUFjR1hNZms4TlpzQit0OUtCemwxRmw2eUlqRWtqSE8wUFZVbEVjU0QyQjRiNwpQeG5NT2pkbWdQcmF1SGI5dW5YRWFMN3p5QXFhRDZ0YlhXVTZSeENBbWdMYWpWSk5aSE93NDVOMGhyRGtXZ0I4CkV2WnRRNTZhbW13QzFxSWhBaUE2MzkwRDNDc2V4N2dMNm5KbzdrYnIxWVdVRzN6SXZveGR6OFlEclpOZVdLTEQKcFJ2V2VuMGxNYnBqSVJQNFhac25DNDVDOWdWWGRoM0xSZTErd3lRcTZo
OVFQaWxveG1ENk5wRTlpbVRPbjJBNQovYkozVktJekFNdWRlVTZrcHlZbEpCemRHMXVhSFRqUU9Xb3NHaXdlQ0tWVVhGNlV0aXNWZGRyeFF0aDZFTnlXCnZJRnFhWng4NCtEbFNDYzkzeWZrL0dsQnQrU0tHNDZ6RUhNQjlocVBiQT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K defaultKey: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdHVKOW13dzlCYXA2SDROdUhYTFB6d1NVZFppNGJyYTFkN1ZiRUJaWWZDSStZNjRDCjJ1dThwdTNhVTVzYXVNYkQ5N2pRYW95VzZHOThPUHJlV284b3lmbmRJY3RFcmxueGpxelUyVVRWN3FEVHk0bkEKNU9aZW9SZUxmZXFSeGxsSjE0VmlhNVFkZ3l3R0xoRTlqZy9jN2U0WUp6bmg5S1dZMnFjVnhEdUdEM2llaHNEbgphTnpWNFdGOWNJZm1zOHp3UHZPTk5MZnNBbXc3dUhUKzNiSzEzSUloeDI3ZmV2cXVWcENzNDFQNnBzdStWTG4yCjVIRHk0MXRoQkN3T0wrTithbGJ0ZktTcXM3TEFzM25RTjFsdHpITHZ5MGE1RGhkakpUd2tQclQrVXhwb0tCOUgKNFpZazErRUR0N09QbGh5bzM3NDFRaE4vSkNZK2RKbkFMQnNValFJREFRQUJBb0lCQUhrTHhka0dxNmtCWWQxVAp6MkU4YWFENnhneGpyY2JSdGFCcTc3L2hHbVhuQUdaWGVWcE81MG1SYW8wbHZ2VUgwaE0zUnZNTzVKOHBrdzNmCnRhWTQxT1dDTk1PMlYxb1MvQmZUK3Zsblh6V1hTemVQa0pXd2lIZVZMdVdEaVVMQVBHaWl4emF2RFMyUnlQRmEKeGVRdVNhdE5pTDBGeWJGMG5Zd3pST3ZoL2VSa2NKVnJRZlZudU1melFkOGgyMzZlb1UxU3B6UnhSNklubCs5UApNc1R2Wm5OQmY5d0FWcFo5c1NMMnB1V1g3SGNSMlVnem5oMDNZWUZJdGtDZndtbitEbEdva09YWHBVM282aWY5ClRIenBleHdubVJWSmFnRG85bTlQd2t4QXowOW80cXExdHJoU1g1U2p1K0xyNFJvOHg5bytXdUF1VnVwb0lHd0wKMWVseERFRUNnWUVBNzVaWGp1enNJR09PMkY5TStyYVFQcXMrRHZ2REpzQ3gyZnRudk1WWVJKcVliaGt6YnpsVQowSHBCVnk3NmE3WmF6Umxhd3RGZ3ljMlpyQThpM0F3K3J6d1pQclNJeWNieC9nUVduRzZlbFF1Y0FFVWdXODRNCkdSbXhKUGlmOGRQNUxsZXdRalFjUFJwZVoxMzlYODJreGRSSEdma1pscHlXQnFLajBTWExRSEVDZ1lFQXcybkEKbUVXdWQzZFJvam5zbnFOYjBlYXdFUFQrbzBjZ2RyaENQOTZQK1pEekNhcURUblZKV21PeWVxRlk1eVdSSEZOLwpzbEhXU2lTRUFjRXRYZys5aGlMc0RXdHVPdzhUZzYyN2VrOEh1UUtMb2tWWEFUWG1NZG9xOWRyQW9INU5hV2lECmRSY3dEU2EvamhIN3RZV1hKZDA4VkpUNlJJdU8vMVZpbDBtbEk5MENnWUVBb2lsNkhnMFNUV0hWWDNJeG9raEwKSFgrK1ExbjRYcFJ5VEg0eldydWY0TjlhYUxxNTY0QThmZGNodnFiWGJHeEN6U3RxR1E2cW1peUU1TVpoNjlxRgoyd21zZEpxeE14RnEzV2xhL0lxSzM0cTZEaHk3cUNld1hKVGRKNDc0Z3kvY0twZkRmeXZTS1RGZDBFejNvQTZLCmhqUUY0L2lNYnpxUStQREFQR0YrVHFFQ2dZQmQ1YnZncjJMMURzV1FJU3M4MHh3MDBSZDdIbTRaQVAxdGJuNk8
KK0IvUWVNRC92UXBaTWV4c1hZbU9lV2Noc3FCMnJ2eW1MOEs3WDY1NnRWdGFYay9nVzNsM3ZVNTdYSFF4Q3RNUwpJMVYvcGVSNHRiN24yd0ZncFFlTm1XNkQ4QXk4Z0xiaUZhRkdRSDg5QWhFa0dTd1d5cWJKc2NoTUZZOUJ5OEtUCkZaVWZsUUtCZ0V3VzJkVUpOZEJMeXNycDhOTE1VbGt1ZnJxbllpUTNTQUhoNFZzWkg1TXU0MW55Yi95NUUyMW4KMk55d3ltWGRlb3VJcFZjcUlVTXl0L3FKRmhIcFJNeVEyWktPR0QyWG5YaENNVlRlL0FQNDJod294Nm02QkZpQgpvemZFa2wwak5uZmREcjZrL1p2MlQ1TnFzaWxaRXJBQlZGOTBKazdtUFBIa0Q2R1ZMUUJ4Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== # Basic auth to protect all the routes. Can use htpasswd to generate passwords # > htpasswd -n -b testuser testpass # > testuser:$apr1$JXRA7j2s$LpVns9vsme8FHN0r.aSt11 auth: {} # basic: # testuser: $apr1$JXRA7j2s$LpVns9vsme8FHN0r.aSt11 # a list of any extra certificate/key filenames you want included in the traefik instance. This must be used in conjunction with # the "secretFiles" parameter to include the certs on each traefik pod. the expected format is: # extraCerts: # - certFile: /secrets/cert1.crt # keyFile: /secrets/key1.key # - certFile: /secrets/cert2.crt # keyFile: /secrets/key2.key # mtls: # enabled: true # optional: false # clientCaCerts: [] # # When mTLS is enabled, the set of CA certificates used to validate client TLS certificates. # # https://docs.traefik.io/configuration/entrypoints/#tls-mutual-authentication # # CA certificates should be in PEM format. kvprovider: ## If you want to run Traefik in HA mode, you will need to setup a KV Provider. Therefore you can choose one of ## * etcd ## * consul ## * boltdb ## * zookeeper ## ## ref: https://docs.traefik.io/user-guide/cluster/ ## storeAcme has to be enabled to support HA Support using acme, but at least one kvprovider is needed storeAcme: false acmeStorageLocation: traefik/acme/account importAcme: false # etcd: # endpoint: etcd-service:2379 # useAPIV3: false # watch: true # prefix: traefik ## Override default configuration template. 
## For advanced users :) ## ## Optional # filename: consul.tmpl # username: foo # password: bar # tls: # ca: "/etc/ssl/ca.crt" # cert: "/etc/ssl/consul.crt" # key: "/etc/ssl/consul.key" # insecureSkipVerify: true # # consul: # endpoint: consul-service:8500 # watch: true # prefix: traefik ## Override default configuration template. ## For advanced users :) ## ## Optional # filename: consul.tmpl # username: foo # password: bar # tls: # ca: "/etc/ssl/ca.crt" # cert: "/etc/ssl/consul.crt" # key: "/etc/ssl/consul.key" # insecureSkipVerify: true ## only relevant for etcd acme: keyType: RSA4096 enabled: false email: admin@example.com onHostRule: true staging: true ## Specify a custom ACME server endpoint ## Optional # caServer: https://acme-staging-v02.api.letsencrypt.org/directory logging: false # Configure a Let's Encrypt certificate to be managed by default. # This is the only way to request wildcard certificates (works only with dns challenge). domains: enabled: false # List of sets of main and (optional) SANs to generate for # for wildcard certificates see https://docs.traefik.io/configuration/acme/#wildcard-domains domainsList: # - main: "*.example.com" # - sans: # - "example.com" # - main: "*.example2.com" # - sans: # - "test1.example2.com" # - "test2.example2.com" ## ACME challenge type: "tls-sni-01", "tls-alpn-01", "http-01" or "dns-01" ## Note that "tls-sni-01" has been DEPRECATED. 
challengeType: tls-alpn-01 ## Configure dnsProvider to perform domain verification using dns challenge ## Applicable only if using the dns-01 challenge type delayBeforeCheck: 0 resolvers: [] # - 1.1.1.1:53 # - 8.8.8.8:53 ## Configure the endpoint used for the HTTP challenge ## Applicable only if using the http-01 challenge type httpChallenge: entrypoint: http dnsProvider: name: nil existingSecretName: "" auroradns: AURORA_USER_ID: "" AURORA_KEY: "" AURORA_ENDPOINT: "" azure: AZURE_CLIENT_ID: "" AZURE_CLIENT_SECRET: "" AZURE_SUBSCRIPTION_ID: "" AZURE_TENANT_ID: "" AZURE_RESOURCE_GROUP: "" cloudflare: CF_API_EMAIL: "" CF_API_KEY: "" digitalocean: DO_AUTH_TOKEN: "" dnsimple: DNSIMPLE_OAUTH_TOKEN: "" DNSIMPLE_BASE_URL: "" dnsmadeeasy: DNSMADEEASY_API_KEY: "" DNSMADEEASY_API_SECRET: "" DNSMADEEASY_SANDBOX: "" dnspod: DNSPOD_API_KEY: "" dreamhost: DREAMHOST_API_KEY: "" dyn: DYN_CUSTOMER_NAME: "" DYN_USER_NAME: "" DYN_PASSWORD: "" exoscale: EXOSCALE_API_KEY: "" EXOSCALE_API_SECRET: "" EXOSCALE_ENDPOINT: "" gandi: GANDI_API_KEY: "" godaddy: GODADDY_API_KEY: "" GODADDY_API_SECRET: "" gcloud: GCE_PROJECT: "" GCE_SERVICE_ACCOUNT_FILE: "" linode: LINODE_API_KEY: "" namecheap: NAMECHEAP_API_USER: "" NAMECHEAP_API_KEY: "" ns1: NS1_API_KEY: "" otc: OTC_DOMAIN_NAME: "" OTC_USER_NAME: "" OTC_PASSWORD: "" OTC_PROJECT_NAME: "" OTC_IDENTITY_ENDPOINT: "" ovh: OVH_ENDPOINT: "" OVH_APPLICATION_KEY: "" OVH_APPLICATION_SECRET: "" OVH_CONSUMER_KEY: "" pdns: PDNS_API_URL: "" rackspace: RACKSPACE_USER: "" RACKSPACE_API_KEY: "" rfc2136: RFC2136_NAMESERVER: "" RFC2136_TSIG_ALGORITHM: "" RFC2136_TSIG_KEY: "" RFC2136_TSIG_SECRET: "" RFC2136_TIMEOUT: "" route53: AWS_REGION: "" AWS_ACCESS_KEY_ID: "" AWS_SECRET_ACCESS_KEY: "" vultr: VULTR_API_KEY: "" ## Save ACME certs to a persistent volume. ## WARNING: If you do not do this and you did not have configured ## a kvprovider, you will re-request certs every time a pod (re-)starts ## and you WILL be rate limited! 
persistence: enabled: true annotations: {} ## acme data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 1Gi ## A manually managed Persistent Volume Claim ## Requires persistence.enabled: true ## If defined, PVC must be created manually before volume will be bound ## # existingClaim: dashboard: enabled: false domain: traefik.example.com # serviceType: ClusterIP service: {} # annotations: # key: value ingress: {} # annotations: # key: value # labels: # key: value # tls: # - hosts: # - traefik.example.com # secretName: traefik-default-cert auth: {} # basic: # username: password statistics: {} ## Number of recent errors to show in the ‘Health’ tab # recentErrors: service: # annotations: # key: value # labels: # key: value ## Further config for service of type NodePort ## Default config with empty string "" will assign a dynamic ## nodePort to http and https ports nodePorts: http: "" https: "" ## If static nodePort configuration is required it can be enabled as below ## Configure ports in allowable range (eg. 30000 - 32767 on minikube) # nodePorts: # http: 30080 # https: 30443 gzip: enabled: true traefikLogFormat: json accessLogs: enabled: false filters: {} # statusCodes: # - "200" # - "300-302" # retryAttempts: true # minDuration: "10ms" ## Path to the access logs file. If not provided, Traefik defaults it to stdout. # filePath: "" format: common # choices are: common, json ## for JSON logging, finer-grained control over what is logged. 
Fields can be ## retained or dropped, and request headers can be retained, dropped or redacted fields: # choices are keep, drop defaultMode: keep names: {} # ClientUsername: drop headers: # choices are keep, drop, redact defaultMode: keep names: {} # Authorization: redact rbac: enabled: false ## Enable the /metrics endpoint, for now only supports prometheus ## set to true to enable metric collection by prometheus metrics: prometheus: enabled: false # buckets: [0.1,0.3,1.2,5] service: # Set a custom service name name: annotations: prometheus.io/scrape: "true" port: 9100 type: ClusterIP # loadBalancerIP: "" # loadBalancerSourceRanges: [] # externalIP: "" # externalTrafficPolicy: Cluster # nodePort: 9100 # serviceMonitor: # When set true and if Prometheus Operator is installed then use a ServiceMonitor to configure scraping # enabled: false # Set the namespace the ServiceMonitor should be deployed # namespace: monitoring # Set how frequently Prometheus should scrape # interval: 30s # Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator # labels: datadog: enabled: false # address: localhost:8125 # pushinterval: 10s statsd: enabled: false # address: localhost:8125 # pushinterval: 10s deployment: # labels to add to the deployment # labels: # key: value # annotations: # key: value # labels to add to the pod container metadata # podLabels: # key: value # podAnnotations: # key: value hostPort: httpEnabled: false httpsEnabled: false dashboardEnabled: false # httpPort: 80 # httpsPort: 443 # dashboardPort: 8080 sendAnonymousUsage: false tracing: enabled: false serviceName: traefik # backend: choices are jaeger, zipkin, datadog # jaeger: # localAgentHostPort: "127.0.0.1:6831" # samplingServerURL: http://localhost:5778/sampling # samplingType: const # samplingParam: 1.0 # zipkin: # httpEndpoint: http://localhost:9411/api/v1/spans # debug: false # sameSpan: false # id128bit: true # datadog: # localAgentHostPort: "127.0.0.1:8126" # debug: 
false # globalTag: "" ## Create HorizontalPodAutoscaler object. ## # autoscaling: # minReplicas: 1 # maxReplicas: 10 # metrics: # - type: Resource # resource: # name: cpu # targetAverageUtilization: 60 # - type: Resource # resource: # name: memory # targetAverageUtilization: 60 ## Timeouts ## # timeouts: # ## responding are timeouts for incoming requests to the Traefik instance # responding: # readTimeout: 0s # writeTimeout: 0s # idleTimeout: 180s # ## forwarding are timeouts for requests forwarded to the backend servers # forwarding: # dialTimeout: 30s # responseHeaderTimeout: 0s # forwardAuth: # entryPoints: ["http", "https"] # address: https://authserver.com/auth # trustForwardHeader: true # Any extra volumes to define for the pod extraVolumes: [] # - name: example-name # hostPath: # path: /path/on/host # type: DirectoryOrCreate # Any extra volume mounts to define for the Traefik container extraVolumeMounts: [] # - name: example-name # mountPath: /path/in/container
ignite
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"ignite.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kub...
# Default values for ignite. replicaCount: 2 image: repository: apacheignite/ignite tag: 2.7.6 pullPolicy: IfNotPresent nameOverride: "" fullnameOverride: "" rbac: create: true serviceAccount: create: true name: dataStorage: config: |- env: OPTION_LIBS: "ignite-kubernetes,ignite-rest-http" IGNITE_QUIET: "false" JVM_OPTS: "-Djava.net.preferIPv4Stack=true" peerClassLoadingEnabled: false ## envFrom can be used to pass configmaps or secrets as environment # envFrom: # - configMapRef: # name: env-configmap # - secretRef: # name: env-secrets ## Additional init containers to run before the pods. ## for example, be used to run a sidecar that chown Logs storage . extraInitContainers: [] # - name: volume-mount-hack # image: busybox # command: ["sh", "-c", "chown -R 1000:1000 logs"] ## Additional containers to run alongside the pods ## This could, for example, be used to run jmx-exporter extraContainers: [] # - name: jmxexporter # image: sscaling/jmx-prometheus-exporter # command: ["sh", "-c", "chown -R 1000:1000 logs"] persistence: enabled: false persistenceVolume: size: 8Gi provisioner: kubernetes.io/aws-ebs provisionerParameters: type: gp2 fsType: ext4 walVolume: size: 8Gi provisioner: kubernetes.io/aws-ebs provisionerParameters: type: gp2 fsType: ext4 ## extraVolumes and extraVolumeMounts allows you to mount other volumes ## Example Use Cases: ## mount certificates to enable tls extraVolumes: # - name: ignite-keystore # secret: # defaultMode: 288 # secretName: ignite-keystore # - name: ignite-trustsore # secret: # defaultMode: 288 # secretName: ignite-truststore # extraVolumeMounts: # - name: ignite-keystore # mountPath: /certs/keystore # readOnly: true # - name: ignite-truststore # mountPath: /certs/truststore # readOnly: true resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. 
If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi nodeSelector: {} tolerations: [] affinity: {} priorityClassName: ""
node-problem-detector
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"node-problem-detector.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars...
settings: # Custom monitor definitions to add to Node Problem Detector - to be # mounted at /custom-config. These are in addition to pre-packaged monitor # definitions provided within the default docker image available at /config: # https://github.com/kubernetes/node-problem-detector/tree/master/config custom_monitor_definitions: {} # docker-monitor-filelog.json: | # { # "plugin": "filelog", # "pluginConfig": { # "timestamp": "^time=\"(\\S*)\"", # "message": "msg=\"([^\n]*)\"", # "timestampFormat": "2006-01-02T15:04:05.999999999-07:00" # }, # "logPath": "/var/log/docker.log", # "lookback": "5m", # "bufferSize": 10, # "source": "docker-monitor", # "conditions": [], # "rules": [ # { # "type": "temporary", # "reason": "CorruptDockerImage", # "pattern": "Error trying v2 registry: failed to register layer: rename /var/lib/docker/image/(.+) /var/lib/docker/image/(.+): directory not empty.*" # } # ] # } log_monitors: - /config/kernel-monitor.json - /config/docker-monitor.json # An example of activating a custom log monitor definition in # Node Problem Detector # - /custom-config/docker-monitor-filelog.json custom_plugin_monitors: [] prometheus_address: 0.0.0.0 prometheus_port: 20257 # The period at which k8s-exporter does forcibly sync with apiserver heartBeatPeriod: 5m0s hostpath: logdir: /var/log/ image: repository: k8s.gcr.io/node-problem-detector tag: v0.8.1 pullPolicy: IfNotPresent nameOverride: "" fullnameOverride: "" rbac: create: true pspEnabled: false # Flag to run Node Problem Detector on the host's network. This is typically # not recommended, but may be useful for certain use cases. hostNetwork: false priorityClassName: "" securityContext: privileged: true resources: {} annotations: {} labels: {} tolerations: - effect: NoSchedule operator: Exists serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. 
# If not set and create is true, a name is generated using the fullname template name: affinity: {} nodeSelector: {} metrics: serviceMonitor: enabled: false additionalLabels: {} env: # - name: FOO # value: BAR # - name: POD_NAME # valueFrom: # fieldRef: # fieldPath: metadata.name extraVolumes: [] extraVolumeMounts: [] updateStrategy: RollingUpdate maxUnavailable: 1
efs-provisioner
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"efs-provisioner.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because...
# # Default values for EFS provisioner service # https://github.com/kubernetes-incubator/external-storage/tree/master/aws/efs # ## Deploy environment label, e.g. dev, test, prod ## global: deployEnv: dev ## Containers ## replicaCount: 1 revisionHistoryLimit: 10 image: repository: quay.io/external_storage/efs-provisioner tag: v2.4.0 pullPolicy: IfNotPresent # If specified, use these secrets to access the images # pullSecrets: # - registry-secret busyboxImage: repository: gcr.io/google_containers/busybox tag: 1.27 pullPolicy: IfNotPresent ## Extra env variables and envFrom extraEnv: [] envFrom: [] ## Deployment annotations ## annotations: {} ## Configure provisioner ## https://github.com/kubernetes-incubator/external-storage/tree/master/aws/efs#deployment ## efsProvisioner: # If specified, use this DNS or IP to connect the EFS # dnsName: "my-custom-efs-dns.com" efsFileSystemId: fs-12345678 awsRegion: us-east-2 path: /example-pv provisionerName: example.com/aws-efs storageClass: name: aws-efs isDefault: false gidAllocate: enabled: true gidMin: 40000 gidMax: 50000 reclaimPolicy: Delete mountOptions: [] ## Enable RBAC ## rbac: # Specifies whether RBAC resources should be created create: true ## Create or use ServiceAccount ## serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. 
# If not set and create is true, a name is generated using the fullname template name: "" annotations: {} # eks.amazonaws.com/role-arn: arn:aws:iam::AWS_ACCOUNT_ID:role/IAM_ROLE_NAME ## Annotations to be added to deployment ## podAnnotations: {} # iam.amazonaws.com/role: efs-provisioner-role ## Labels to be added to deployment ## podLabels: {} # environment: production ## Node labels for pod assignment ## nodeSelector: {} # Affinity for pod assignment # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity affinity: {} # Tolerations for pod assignment # Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ tolerations: {} ## Configure resources ## resources: {} # To specify resources, uncomment the following lines, adjust them as necessary, # and remove the curly braces after 'resources:'. # limits: # cpu: 200m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi priorityClassName: "" # Configure podsecuritypolicy # Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ podSecurityPolicy: enabled: true annotations: {}
redis
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"redis.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nExpand the chart plus release name (used by the chart label)\n*/}}\n{{- define \"redi...
## Global Docker image parameters ## Please, note that this will override the image parameters, including dependencies, configured to use the global value ## Current available global Docker image parameters: imageRegistry and imagePullSecrets ## global: # imageRegistry: myRegistryName # imagePullSecrets: # - myRegistryKeySecretName # storageClass: myStorageClass redis: {} ## Bitnami Redis image version ## ref: https://hub.docker.com/r/bitnami/redis/tags/ ## image: registry: docker.io repository: bitnami/redis ## Bitnami Redis image tag ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links ## tag: 5.0.7-debian-10-r32 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## String to partially override redis.fullname template (will maintain the release name) ## # nameOverride: ## String to fully override redis.fullname template ## # fullnameOverride: ## Cluster settings cluster: enabled: true slaveCount: 2 ## Use redis sentinel in the redis pod. 
This will disable the master and slave services and ## create one redis service with ports to the sentinel and the redis instances sentinel: enabled: false ## Require password authentication on the sentinel itself ## ref: https://redis.io/topics/sentinel usePassword: true ## Bitnami Redis Sentintel image version ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ ## image: registry: docker.io repository: bitnami/redis-sentinel ## Bitnami Redis image tag ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links ## tag: 5.0.7-debian-10-r27 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName masterSet: mymaster initialCheckTimeout: 5 quorum: 2 downAfterMilliseconds: 60000 failoverTimeout: 18000 parallelSyncs: 1 port: 26379 ## Additional Redis configuration for the sentinel nodes ## ref: https://redis.io/topics/config ## configmap: ## Enable or disable static sentinel IDs for each replicas ## If disabled each sentinel will generate a random id at startup ## If enabled, each replicas will have a constant ID on each start-up ## staticID: false ## Configure extra options for Redis Sentinel liveness and readiness probes ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) ## livenessProbe: enabled: true initialDelaySeconds: 5 periodSeconds: 5 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 readinessProbe: enabled: true initialDelaySeconds: 5 periodSeconds: 5 timeoutSeconds: 1 successThreshold: 1 failureThreshold: 5 ## Redis Sentinel resource 
requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ # resources: # requests: # memory: 256Mi # cpu: 100m ## Redis Sentinel Service properties service: ## Redis Sentinel Service type type: ClusterIP sentinelPort: 26379 redisPort: 6379 ## Specify the nodePort value for the LoadBalancer and NodePort service types. ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport ## # sentinelNodePort: # redisNodePort: ## Provide any additional annotations which may be required. This can be used to ## set the LoadBalancer service type to internal only. ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer ## annotations: {} labels: {} loadBalancerIP: ## Specifies the Kubernetes Cluster's Domain Name. ## clusterDomain: cluster.local networkPolicy: ## Specifies whether a NetworkPolicy should be created ## enabled: false ## The Policy model to apply. When set to false, only pods with the correct ## client label will have network access to the port Redis is listening ## on. When true, Redis will accept connections from any source ## (with the correct destination port). ## # allowExternal: true ## Allow connections from other namespacess. Just set label for namespace and set label for pods (optional). ## ingressNSMatchLabels: {} ingressNSPodMatchLabels: {} serviceAccount: ## Specifies whether a ServiceAccount should be created ## create: false ## The name of the ServiceAccount to use. ## If not set and create is true, a name is generated using the fullname template name: rbac: ## Specifies whether RBAC resources should be created ## create: false role: ## Rules to create. 
It follows the role specification # rules: # - apiGroups: # - extensions # resources: # - podsecuritypolicies # verbs: # - use # resourceNames: # - gce.unprivileged rules: [] ## Redis pod Security Context securityContext: enabled: true fsGroup: 1001 runAsUser: 1001 ## sysctl settings for master and slave pods ## ## Uncomment the setting below to increase the net.core.somaxconn value ## # sysctls: # - name: net.core.somaxconn # value: "10000" ## Use password authentication usePassword: true ## Redis password (both master and slave) ## Defaults to a random 10-character alphanumeric string if not set and usePassword is true ## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run ## password: "" ## Use existing secret (ignores previous password) # existingSecret: ## Password key to be retrieved from Redis secret ## # existingSecretPasswordKey: ## Mount secrets as files instead of environment variables usePasswordFile: false ## Persist data to a persistent volume (Redis Master) persistence: {} ## A manually managed Persistent Volume and Claim ## Requires persistence.enabled: true ## If defined, PVC must be created manually before volume will be bound # existingClaim: # Redis port redisPort: 6379 ## ## Redis Master parameters ## master: ## Redis command arguments ## ## Can be used to specify command line arguments, for example: ## command: "/run.sh" ## Additional Redis configuration for the master nodes ## ref: https://redis.io/topics/config ## configmap: ## Redis additional command line flags ## ## Can be used to specify command line flags, for example: ## ## extraFlags: ## - "--maxmemory-policy volatile-ttl" ## - "--repl-backlog-size 1024mb" extraFlags: [] ## Comma-separated list of Redis commands to disable ## ## Can be used to disable Redis commands for security reasons. ## Commands will be completely disabled by renaming each to an empty string. 
## ref: https://redis.io/topics/security#disabling-of-specific-commands ## disableCommands: - FLUSHDB - FLUSHALL ## Redis Master additional pod labels and annotations ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ podLabels: {} podAnnotations: {} ## Redis Master resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ # resources: # requests: # memory: 256Mi # cpu: 100m ## Use an alternate scheduler, e.g. "stork". ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ ## # schedulerName: ## Configure extra options for Redis Master liveness and readiness probes ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) ## livenessProbe: enabled: true initialDelaySeconds: 5 periodSeconds: 5 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 readinessProbe: enabled: true initialDelaySeconds: 5 periodSeconds: 5 timeoutSeconds: 1 successThreshold: 1 failureThreshold: 5 ## Redis Master Node selectors and tolerations for pod assignment ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature ## # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} # tolerations: [] ## Redis Master pod/node affinity/anti-affinity ## affinity: {} ## Redis Master Service properties service: ## Redis Master Service type type: ClusterIP port: 6379 ## Specify the nodePort value for the LoadBalancer and NodePort service types. ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport ## # nodePort: ## Provide any additional annotations which may be required. This can be used to ## set the LoadBalancer service type to internal only. 
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer ## annotations: {} labels: {} loadBalancerIP: # loadBalancerSourceRanges: ["10.0.0.0/8"] ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## persistence: enabled: true ## The path the volume will be mounted at, useful when using different ## Redis images. path: /data ## The subdirectory of the volume to mount to, useful in dev environments ## and one PV for multiple services. subPath: "" ## redis data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessModes: - ReadWriteOnce size: 8Gi ## Persistent Volume selectors ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector matchLabels: {} matchExpressions: {} ## Update strategy, can be set to RollingUpdate or onDelete by default. ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets statefulset: updateStrategy: RollingUpdate ## Partition update strategy ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions # rollingUpdatePartition: ## Redis Master pod priorityClassName # priorityClassName: {} ## ## Redis Slave properties ## Note: service.type is a mandatory parameter ## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master ## slave: ## Slave Service properties service: ## Redis Slave Service type type: ClusterIP ## Redis port port: 6379 ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport ## # nodePort: ## Provide any additional annotations which may be required. This can be used to ## set the LoadBalancer service type to internal only. ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer ## annotations: {} labels: {} loadBalancerIP: # loadBalancerSourceRanges: ["10.0.0.0/8"] ## Redis slave port port: 6379 ## Can be used to specify command line arguments, for example: ## command: "/run.sh" ## Additional Redis configuration for the slave nodes ## ref: https://redis.io/topics/config ## configmap: ## Redis extra flags extraFlags: [] ## List of Redis commands to disable disableCommands: - FLUSHDB - FLUSHALL ## Redis Slave pod/node affinity/anti-affinity ## affinity: {} ## Configure extra options for Redis Slave liveness and readiness probes ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) ## livenessProbe: enabled: true initialDelaySeconds: 30 periodSeconds: 10 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 readinessProbe: enabled: true initialDelaySeconds: 5 periodSeconds: 10 timeoutSeconds: 10 successThreshold: 1 failureThreshold: 5 ## Redis slave Resource # resources: # requests: # memory: 256Mi # cpu: 100m ## Redis slave selectors and tolerations for pod assignment # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} # tolerations: [] ## Use an alternate scheduler, e.g. "stork". 
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ ## # schedulerName: ## Redis slave pod Annotation and Labels podLabels: {} podAnnotations: {} ## Redis slave pod priorityClassName # priorityClassName: {} ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## persistence: enabled: true ## The path the volume will be mounted at, useful when using different ## Redis images. path: /data ## The subdirectory of the volume to mount to, useful in dev environments ## and one PV for multiple services. subPath: "" ## redis data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessModes: - ReadWriteOnce size: 8Gi ## Persistent Volume selectors ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector matchLabels: {} matchExpressions: {} ## Update strategy, can be set to RollingUpdate or onDelete by default. ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets statefulset: updateStrategy: RollingUpdate ## Partition update strategy ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions # rollingUpdatePartition: ## Prometheus Exporter / Metrics ## metrics: enabled: false image: registry: docker.io repository: bitnami/redis-exporter tag: 1.4.0-debian-10-r3 pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. 
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## Metrics exporter resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## # resources: {} ## Extra arguments for Metrics exporter, for example: ## extraArgs: ## check-keys: myKey,myOtherKey # extraArgs: {} ## Metrics exporter pod Annotation and Labels podAnnotations: prometheus.io/scrape: "true" prometheus.io/port: "9121" # podLabels: {} # Enable this if you're using https://github.com/coreos/prometheus-operator serviceMonitor: enabled: false ## Specify a namespace if needed # namespace: monitoring # fallback to the prometheus default unless specified # interval: 10s ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) selector: prometheus: kube-prometheus ## Custom PrometheusRule to be defined ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions prometheusRule: enabled: false additionalLabels: {} namespace: "" rules: [] ## These are just examples rules, please adapt them to your needs. ## Make sure to constraint the rules to the current postgresql service. # - alert: RedisDown # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0 # for: 2m # labels: # severity: error # annotations: # summary: Redis instance {{ "{{ $instance }}" }} down # description: Redis instance {{ "{{ $instance }}" }} is down. # - alert: RedisMemoryHigh # expr: > # redis_memory_used_bytes{service="{{ template "redis.fullname" . 
}}-metrics"} * 100 # / # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} # > 90 =< 100 # for: 2m # labels: # severity: error # annotations: # summary: Redis instance {{ "{{ $instance }}" }} is using too much memory # description: Redis instance {{ "{{ $instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. # - alert: RedisKeyEviction # expr: increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 # for: 1s # labels: # severity: error # annotations: # summary: Redis instance {{ "{{ $instance }}" }} has evicted keys # description: Redis instance {{ "{{ $instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. ## Metrics exporter pod priorityClassName # priorityClassName: {} service: type: ClusterIP ## Use serviceLoadBalancerIP to request a specific static IP, ## otherwise leave blank # loadBalancerIP: annotations: {} labels: {} ## ## Init containers parameters: ## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup ## volumePermissions: enabled: false image: registry: docker.io repository: bitnami/minideb tag: buster pullPolicy: Always ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName resources: {} # resources: # requests: # memory: 128Mi # cpu: 100m ## Redis config file ## ref: https://redis.io/topics/config ## configmap: |- # Enable AOF https://redis.io/topics/persistence#append-only-file appendonly yes # Disable RDB persistence, AOF persistence already enabled. 
save "" ## Sysctl InitContainer ## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) sysctlImage: enabled: false command: [] registry: docker.io repository: bitnami/minideb tag: buster pullPolicy: Always ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName mountHostSys: false resources: {} # resources: # requests: # memory: 128Mi # cpu: 100m ## PodSecurityPolicy configuration ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ ## podSecurityPolicy: ## Specifies whether a PodSecurityPolicy should be created ## create: false
prometheus-rabbitmq-exporter
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"prometheus-rabbitmq-exporter.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 ...
# Default values for prometheus-rabbitmq-exporter. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 image: repository: kbudde/rabbitmq-exporter tag: v0.29.0 pullPolicy: IfNotPresent service: type: ClusterIP externalPort: 9419 internalPort: 9419 resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi nodeSelector: {} tolerations: [] affinity: {} loglevel: info rabbitmq: url: http://myrabbit:15672 user: guest password: guest # If existingPasswordSecret is set then password is ignored existingPasswordSecret: ~ capabilities: bert,no_sort include_queues: ".*" include_vhost: ".*" skip_queues: "^$" skip_verify: "false" skip_vhost: "^$" exporters: "exchange,node,overview,queue" output_format: "TTY" timeout: 30 max_queues: 0 annotations: {} # prometheus.io/scrape: "true" # prometheus.io/path: "/metrics" # prometheus.io/port: 9419 prometheus: monitor: enabled: false additionalLabels: {} interval: 15s namespace: [] rules: enabled: false additionalLabels: {}
influxdb
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"influxdb.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some K...
## influxdb image version ## ref: https://hub.docker.com/r/library/influxdb/tags/ image: repository: "influxdb" tag: "1.7.9-alpine" pullPolicy: IfNotPresent ## If specified, use these secrets to access the images # pullSecrets: # - registry-secret serviceAccount: create: true name: annotations: {} ## Customize liveness, readiness and startup probes ## ref: https://docs.influxdata.com/influxdb/v1.7/tools/api/#ping-http-endpoint ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ ## livenessProbe: {} # initialDelaySeconds: 30 # timeoutSeconds: 5 readinessProbe: {} # initialDelaySeconds: 5 # timeoutSeconds: 1 startupProbe: enabled: false # failureThreshold: 6 # periodSeconds: 5 ## Specify a service type ## NodePort is default ## ref: http://kubernetes.io/docs/user-guide/services/ ## service: ## Add annotations to service # annotations: {} type: ClusterIP ## Persist data to a persistent volume ## persistence: enabled: true ## influxdb data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" annotations: accessMode: ReadWriteOnce size: 8Gi ## Deploy InfluxDB Enterprise - License required ## ref: https://www.influxdata.com/products/influxdb-enterprise/ enterprise: enabled: false licensekey: {} clusterSize: 4 meta: image: ## This image contains the enterprise meta node package for clustering. ## It is meant to be used in conjunction with the influxdb:data package of the same version. ## ref: https://hub.docker.com/_/influxdb tag: meta clusterSize: 3 ## seed is hashed and used as `internal-shared-secret` for Meta service. 
seed: dead-beef-cafe-bae ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ resources: {} # resources: # requests: # memory: 512Mi # cpu: 2 # limits: # memory: 1Gi # cpu: 4 ## Create default user through Kubernetes job ## Defaults indicated below ## setDefaultUser: enabled: false ## Image of the container used for job ## Default: appropriate/curl:latest ## image: appropriate/curl:latest ## Deadline for job so it does not retry forever. ## Default: activeDeadline: 300 ## activeDeadline: 300 ## Specify the number of retries before considering job as failed. ## https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#pod-backoff-failure-policy ## backoffLimit: 6 ## Hook delete policy for helm. ## Default: hookDeletePolicy: hook-succeeded ## hookDeletePolicy: hook-succeeded ## Restart policy for job ## Default: OnFailure restartPolicy: OnFailure user: ## The user name ## Default: "admin" username: "admin" ## User password ## single quotes must be escaped (\') ## Default: (Randomly generated 10 characters of AlphaNum) # password: ## The user name and password are obtained from an existing secret. The expected ## keys are `influxdb-user` and `influxdb-password`. ## If set, the username and password values above are ignored. # existingSecret: influxdb-auth ## User privileges ## Default: "WITH ALL PRIVILEGES" privileges: "WITH ALL PRIVILEGES" ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ resources: {} # requests: # memory: 256Mi # cpu: 0.1 # limits: # memory: 16Gi # cpu: 8 # Annotations to be added to InfluxDB pods podAnnotations: {} ingress: enabled: false tls: false # secretName: my-tls-cert # only needed if tls above is true hostname: influxdb.foobar.com annotations: # kubernetes.io/ingress.class: "nginx" # kubernetes.io/tls-acme: "true" ## Use an alternate scheduler, e.g. "stork". 
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ ## # schedulerName: ## Node labels for pod assignment ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ ## nodeSelector: {} ## Affinity for pod assignment ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## affinity: {} ## Tolerations for pod assignment ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ ## tolerations: [] # - key: "key" # operator: "Equal|Exists" # value: "value" # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" ## The InfluxDB image uses several environment variables to automatically ## configure certain parts of the server. ## Ref: https://hub.docker.com/_/influxdb/ env: {} # - name: INFLUXDB_DB # value: "demo" ## InfluxDB configuration ## ref: https://docs.influxdata.com/influxdb/v1.7/administration/config config: reporting_disabled: false rpc: {} meta: {} data: {} coordinator: {} retention: {} shard_precreation: {} monitor: {} http: {} logging: {} subscriber: {} graphite: {} collectd: {} opentsdb: {} udp: {} continuous_queries: {} tls: {} # Allow executing custom init scripts # # If the container finds any files with the extensions .sh or .iql inside of the # /docker-entrypoint-initdb.d folder, it will execute them. The order they are # executed in is determined by the shell. This is usually alphabetical order. initScripts: enabled: false scripts: init.iql: |+ CREATE DATABASE "telegraf" WITH DURATION 30d REPLICATION 1 NAME "rp_30d" backup: enabled: false schedule: "0 0 * * *" annotations: {} ## Google Cloud Storage # gcs: # serviceAccountSecret: influxdb-backup-key # serviceAccountSecretKey: key.json # destination: gs://bucket/influxdb ## Azure ## Secret is expected to have connection string stored in `connection-string` field ## Existing container will be used or private one withing storage account will be created. 
# azure: # storageAccountSecret: influxdb-backup-azure-key # destination_container: influxdb-container # destination_path: ""
newrelic-infrastructure
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"newrelic.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some K...
# - Specify either the New Relic license key or the secret which # contains it. # # - Specify the Kubernetes cluster name. # https://docs.newrelic.com/docs/kubernetes-monitoring-integration # # licenseKey: # customSecretName: # customSecretLicenseKey: # cluster: # # IMPORTANT: the previous values can also be set as global so that they # can be shared by other newrelic product's charts # # global: # licenseKey: # customSecretName: # customSecretLicenseKey: # cluster: # # kubeStateMetricsUrl - if provided, the discovery process for kube-state-metrics endpoint won't be triggered # Only HTTP is accepted. This is an example value: http://172.17.0.3:8080 # disableKubeStateMetrics - disables KSM parsing by the DaemonSet. Defaults to "false" if not provided. verboseLog: false # This can be set, the default is shown below # logFile: /var/log/nr-infra.log image: repository: newrelic/infrastructure-k8s tag: 1.21.0 pullPolicy: IfNotPresent resources: limits: memory: 300M requests: cpu: 100m memory: 150M privileged: true rbac: # Specifies whether RBAC resources should be created create: true pspEnabled: false serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: # If you wish to provide additional labels to apply to the pod(s), specify # them here # podLabels: # If you wish to provide your own newrelic.yml file include it under config: # the sample config file is included here as an example. Some options have # been omitted because they are handled either by variables, or a secret. They # are display_name, license_key, log_file and verbose. # config: # # New Relic Infrastructure configuration file # # Lines that begin with # are comment lines and are ignored by the # Infrastructure agent. If options have command line equivalents, New Relic # will use the command line option to override any value set in this file. 
# # # Option : proxy # Value : Useful if your firewall rules require the agent to use a # proxy URL (HTTP or HTTPS) to communicate with New Relic. # Default: none # # proxy: https://user:password@hostname:port # # Option : Optional custom attributes # Use optional key-value pairs to build filter sets, group your results,ª # annotate your Insights data, etc. # # custom_attributes: # environment: production # service: login service # team: alpha-team # # Pod scheduling proirity # Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ # priorityClassName: high-priority # Node labels for pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ nodeSelector: {} # These are default tolerations to be able to run the New Relic Kubernetes # integration. tolerations: - operator: "Exists" effect: "NoSchedule" - operator: "Exists" effect: "NoExecute" updateStrategy: RollingUpdate # Custom attributes to be passed to the New Relic agent customAttribues: "'{\"clusterName\":\"$(CLUSTER_NAME)\"}'" # etcdTlsSecretName: newrelic-infra-etcd-tls-secret etcdTlsSecretNamespace: default # If you wish to monitor services running on Kubernetes you can provide integrations # configuration under integrations_config. You just need to create a new entry where # the "name" is the filename of the configuration file and the data is the content of # the integration configuration. The name must end in ".yaml" as this will be the # filename generated and the Infrastructure agent only looks for YAML files. 
The data # part is the actual integration configuration as described in the spec here: # https://docs.newrelic.com/docs/integrations/integrations-sdk/file-specifications/integration-configuration-file-specifications-agent-v180 # For example, if you wanted do to monitor a Redis instance that has a label "app=redis" # you could do so by adding following entry: # integrations_config: # - name: nri-rabbit.yaml # data: # discovery: # command: # # Run NRI Discovery for Kubernetes # # https://github.com/newrelic/nri-discovery-kubernetes # exec: /var/db/newrelic-infra/nri-discovery-kubernetes # match: # label.app: redis # integrations: # - name: nri-redis # env: # # using the discovered IP as the hostname address # HOSTNAME: ${discovery.ip} # PORT: 6379 # labels: # env: test # For more details on monitoring services on Kubernetes see # https://docs.newrelic.com/docs/integrations/kubernetes-integration/link-apps-services/monitor-services-running-kubernetes integrations_config: {}
home-assistant
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"home-assistant.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because ...
# Default values for home-assistant. # This is a YAML-formatted file. # Declare variables to be passed into your templates. image: repository: homeassistant/home-assistant tag: 0.108.7 pullPolicy: IfNotPresent pullSecrets: [] # upgrade strategy type (e.g. Recreate or RollingUpdate) strategyType: Recreate # Probes configuration probes: liveness: enabled: true scheme: HTTP initialDelaySeconds: 60 failureThreshold: 5 timeoutSeconds: 10 readiness: enabled: true scheme: HTTP initialDelaySeconds: 60 failureThreshold: 5 timeoutSeconds: 10 startup: enabled: false scheme: HTTP failureThreshold: 30 periodSeconds: 10 service: type: ClusterIP port: 8123 portName: api additionalPorts: [] # - name: homematicproxy # port: 2001 # targetPort: 2001 annotations: {} labels: {} clusterIP: "" ## List of IP addresses at which the hass-configurator service is available ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips ## externalIPs: [] loadBalancerIP: "" loadBalancerSourceRanges: [] # nodePort: 30000 publishNotReadyAddresses: false ingress: enabled: false annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" path: / hosts: - home-assistant.local tls: [] # - secretName: home-assistant-tls # hosts: # - home-assistant.local hostNetwork: false persistence: enabled: true ## home-assistant data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. 
(gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" ## ## If you want to reuse an existing claim, you can pass the name of the PVC using ## the existingClaim variable # existingClaim: your-claim ## ## If you want to use a volume on the host machine instead of a PVC: # hostPath: /path/to/the/config/folder accessMode: ReadWriteOnce size: 5Gi ## Additional hass container environment variable ## For instance to add a http_proxy ## extraEnv: {} ## Additional hass container environment variable from k8s secrets ## For instance to add a password ## can use `!env_var` in the home assistant configuration to reference these variables extraEnvSecrets: # Example # This will set ${MQTT_PASSWORD} to the 'password' key from the 'mqtt' secret # MQTT_PASSWORD: # secret: mqtt # key: password ## If you'd like to provide your own Kubernetes Secret object instead of passing your values ## individually, pass in the name of a created + populated Secret. ## All secrets will be mounted as environment variables, with each key/value mapping to a ## corresponding environment variable. ## extraSecretForEnvFrom: [] # - home-assistant-secrets # Enable pod security context (must be `true` if runAsUser or fsGroup are set) usePodSecurityContext: true # Set runAsUser to 1000 to let home-assistant run as non-root user 'hass' which exists in 'runningman84/alpine-homeassistant' docker image. # When setting runAsUser to a different value than 0 also set fsGroup to the same value: # runAsUser: <defaults to 0> # fsGroup: <will be omitted in deployment if runAsUser is 0> git: enabled: false ## we just use the hass-configurator container image ## you can use any image which has git and openssh installed ## image: repository: causticlab/hass-configurator-docker tag: 0.3.5-x86_64 pullPolicy: IfNotPresent ## Specify the command that runs in the git-sync container to pull in configuration. 
# command: [] # Committer settings user: name: "" email: "" # repo: secret: git-creds syncPath: /config keyPath: /root/.ssh zwave: enabled: false device: ttyACM0 # Mount devices or folders from the host machine. Can be used for USB device mounting. hostMounts: [] # Example # - name: zha # hostPath: /dev/serial/by-id/usb-Silicon_Labs_HubZ_Smart_Home_Controller_6120245D-if01-port0 configurator: enabled: false ## hass-configurator container image ## image: repository: causticlab/hass-configurator-docker tag: 0.3.5-x86_64 pullPolicy: IfNotPresent ## URL for the home assistant API endpoint # hassApiUrl: http://home-assistant:8123/api/ ## home assistant API password # hassApiPassword: ## path where the home assistant configuration is stored basepath: /config ## don't allow switching out of the base path enforceBasepath: true ## username for basic auth for accessing the configurator # username: ## password (sha256-hash) for basic auth for accessing the configurator ## For example "test" would be "{sha256}9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08" # password: ## Additional hass-configurator container environment variable ## For instance to add a http_proxy ## extraEnv: {} ingress: enabled: false annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" path: / hosts: - home-assistant.local tls: [] # - secretName: home-assistant-tls # hosts: # - home-assistant.local service: type: ClusterIP port: 3218 annotations: {} labels: {} clusterIP: "" ## List of IP addresses at which the hass-configurator service is available ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips ## externalIPs: [] loadBalancerIP: "" loadBalancerSourceRanges: [] # externalTrafficPolicy: Local # nodePort: 30000 ## Add support for Prometheus # settings has to be enabled in configuration.yaml # https://www.home-assistant.io/components/prometheus/ monitoring: enabled: false serviceMonitor: # When set true and if Prometheus Operator is 
installed then use a ServiceMonitor to configure scraping enabled: true # Set the namespace the ServiceMonitor should be deployed # namespace: monitoring # Set how frequently Prometheus should scrape # interval: 30s # Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator # labels: # Set bearerTokenFile for home assistant auth (use long lived access tokens) # bearerTokenFile: # Set bearerTokenSecret for home assistant auth (use long lived access tokens) # bearerTokenSecret: vscode: enabled: false ## code-server container image ## image: repository: codercom/code-server tag: 3.1.1 pullPolicy: IfNotPresent ## VSCode password # password: ## path where the home assistant configuration is stored hassConfig: /config ## path where the VS Code data should reside vscodePath: /config/.vscode ## Additional hass-vscode container environment variable ## For instance to add a http_proxy ## extraEnv: {} ingress: enabled: false annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" path: / hosts: - home-assistant.local tls: [] # - secretName: home-assistant-tls # hosts: # - home-assistant.local service: type: ClusterIP port: 80 annotations: {} labels: {} clusterIP: "" ## List of IP addresses at which the hass-vscode service is available ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips ## externalIPs: [] loadBalancerIP: "" loadBalancerSourceRanges: [] # nodePort: 30000 appdaemon: enabled: false ## code-server container image ## image: repository: acockburn/appdaemon tag: 3.0.5 pullPolicy: IfNotPresent ## Home Assistant API token # haToken: ## Additional hass-vscode container environment variable ## For instance to add a http_proxy ## extraEnv: {} ingress: enabled: false annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" path: / hosts: - appdaemon.local tls: [] # - secretName: appdaemon-tls # hosts: # - appdaemon.local service: type: ClusterIP port: 5050 
annotations: {} labels: {} clusterIP: "" ## List of IP addresses at which the hass-appdaemon service is available ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips ## externalIPs: [] loadBalancerIP: "" loadBalancerSourceRanges: [] # nodePort: 30000 resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi nodeSelector: {} tolerations: [] affinity: {} podAnnotations: {} # Any extra volumes to define for the pod extraVolumes: [] # - name: example-name # hostPath: # path: /path/on/host # type: DirectoryOrCreate # Any extra volume mounts to define for the containers extraVolumeMounts: [] # - name: example-name # mountPath: /path/in/container
openldap
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"openldap.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some K...
# Default values for openldap. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 # Define deployment strategy - IMPORTANT: use rollingUpdate: null when use Recreate strategy. # It prevents from merging with existing map keys which are forbidden. strategy: {} # type: RollingUpdate # rollingUpdate: # maxSurge: 1 # maxUnavailable: 0 # # or # # type: Recreate # rollingUpdate: null image: # From repository https://github.com/osixia/docker-openldap repository: osixia/openldap tag: 1.2.4 pullPolicy: IfNotPresent # Spcifies an existing secret to be used for admin and config user passwords existingSecret: "" # settings for enabling TLS tls: enabled: false secret: "" # The name of a kubernetes.io/tls type secret to use for TLS CA: enabled: false secret: "" # The name of a generic secret to use for custom CA certificate (ca.crt) ## Add additional labels to all resources extraLabels: {} ## Add additional annotations to pods podAnnotations: {} service: annotations: {} ldapPort: 389 sslLdapPort: 636 # Only used if tls.enabled is true ## List of IP addresses at which the service is available ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips ## externalIPs: [] loadBalancerIP: "" loadBalancerSourceRanges: [] type: ClusterIP # Default configuration for openldap as environment variables. These get injected directly in the container. # Use the env variables from https://github.com/osixia/docker-openldap#beginner-guide env: LDAP_ORGANISATION: "Example Inc." LDAP_DOMAIN: "example.org" LDAP_BACKEND: "hdb" LDAP_TLS: "true" LDAP_TLS_ENFORCE: "false" LDAP_REMOVE_CONFIG_AFTER_SETUP: "true" # Default Passwords to use, stored as a secret. If unset, passwords are auto-generated. 
# You can override these at install time with # helm install openldap --set openldap.adminPassword=<passwd>,openldap.configPassword=<passwd> # adminPassword: admin # configPassword: config # Custom openldap configuration files used to override default settings # customLdifFiles: # 01-default-users.ldif: |- # Predefine users here ## Persist data to a persistent volume persistence: enabled: false ## database data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi # existingClaim: "" resources: {} # requests: # cpu: "100m" # memory: "256Mi" # limits: # cpu: "500m" # memory: "512Mi" initResources: {} # requests: # cpu: "100m" # memory: "128Mi" # limits: # cpu: "100m" # memory: "128Mi" nodeSelector: {} tolerations: [] affinity: {} ## test container details test: enabled: false image: repository: dduportal/bats tag: 0.4.0 # Set the container log level # Valid log levels: none, error, warning, info (default), debug, trace logLevel: info
aerospike
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"aerospike.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some ...
# Default values for aerospike. terminationGracePeriodSeconds: 30 replicaCount: 1 nodeSelector: {} image: repository: aerospike/aerospike-server tag: 4.5.0.5 pullPolicy: IfNotPresent # pass custom command. This is equivalent of Entrypoint in docker command: [] # pass custom args. This is equivalent of Cmd in docker args: [] # Set as empty object {} if no volumes need to be created # See confFile below persistentVolume: {} # - mountPath: /opt/aerospike/data # name: aerospike-data # template: # accessModes: [ "ReadWriteOnce" ] # # storageClassName: "standard" # resources: # requests: # storage: "36G" # selector: # matchLabels: # diskname: "aerospike-data" service: type: ClusterIP # Provide any additional annotations which may be required. # The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart annotations: {} loadBalancerIP: clusterIP: None # This field takes a list of IP CIDR ranges, which Kubernetes will use to configure firewall exceptions # loadBalancerSourceRanges: # - 10.0.0.0/8 nodePort: {} # turns on a sidecar that scrapes 'localhost:3000' and exposes to port 9134 # a docker image built from this repo works well: https://github.com/alicebob/asprom # but you will need to build/host it yourself metrics: serviceMonitor: {} labels: {} annotations: {} tolerations: [] resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
# limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi confFile: |- #default config file service { user root group root paxos-protocol v5 paxos-single-replica-limit 1 pidfile /var/run/aerospike/asd.pid service-threads 4 transaction-queues 4 transaction-threads-per-queue 4 proto-fd-max 15000 } logging { file /var/log/aerospike/aerospike.log { context any info } console { context any info } } network { service { address any port 3000 } heartbeat { address any interval 150 #REPLACE_THIS_LINE_WITH_MESH_CONFIG mode mesh port 3002 timeout 20 protocol v3 } fabric { port 3001 } info { port 3003 } } namespace test { replication-factor 2 memory-size 1G default-ttl 5d storage-engine device { file /opt/aerospike/data/test.dat filesize 4G } }
voyager
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"voyager.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Ku...
## ## Voyager chart configuration ## # Docker registry containing Voyager & HAProxy images dockerRegistry: appscode ## Tags for Docker images imageTags: ## Docker image tag containing Voyager voyager: 6.0.0 ## Docker image tag containing HAProxy binary haproxy: 1.7.10-6.0.0 ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod ## # imagePullSecrets: # - name: myRegistryKeySecretName ## Specify a imagePullPolicy ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## imagePullPolicy: IfNotPresent ## Use cloud provider here. cloudProvider: ## The path to the cloud provider configuration file. Empty string for no configuration file. ## ie. for azure use /etc/kubernetes/azure.json cloudConfig: '' ## Installs voyager operator as critical addon ## https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ criticalAddon: false ## Log level for voyager logLevel: 3 persistence: enabled: false hostPath: /etc/kubernetes ## Node labels for pod assignment ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ ## nodeSelector: {} ## Install Default RBAC roles and bindings rbac: # Specifies whether RBAC resources should be created create: true serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: # this flag can be set to 'voyager' to handle only ingress # with annotation kubernetes.io/ingress.class=voyager. ingressClass: apiserver: # groupPriorityMinimum is the minimum priority the group should have. Please see # https://github.com/kubernetes/kube-aggregator/blob/release-1.9/pkg/apis/apiregistration/v1beta1/types.go#L58-L64 # for more information on proper values of this field. 
groupPriorityMinimum: 10000 # versionPriority is the ordering of this API inside of the group. Please see # https://github.com/kubernetes/kube-aggregator/blob/release-1.9/pkg/apis/apiregistration/v1beta1/types.go#L66-L70 # for more information on proper values of this field versionPriority: 15 # enableAdmissionWebhook is used to configure apiserver as ValidationWebhook for Voyager CRDs enableAdmissionWebhook: false # CA certificate used by main Kubernetes api server ca:
pgadmin
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"pgadmin.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Ku...
# Default values for pgadmin. replicaCount: 1 ## pgAdmin container image ## image: repository: dpage/pgadmin4 tag: 4.18 pullPolicy: IfNotPresent service: type: ClusterIP port: 80 ## Strategy used to replace old Pods by new ones ## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy ## strategy: {} # type: RollingUpdate # rollingUpdate: # maxSurge: 0 # maxUnavailable: 1 ## Server definitions will be loaded at launch time. This allows connection ## information to be pre-loaded into the instance of pgAdmin in the container. ## Ref: https://www.pgadmin.org/docs/pgadmin4/4.13/import_export_servers.html ## serverDefinitions: ## If true, server definitions will be created ## enabled: false servers: |- # "1": { # "Name": "Minimally Defined Server", # "Group": "Servers", # "Port": 5432, # "Username": "postgres", # "Host": "localhost", # "SSLMode": "prefer", # "MaintenanceDB": "postgres" # } ingress: ## If true, pgAdmin Ingress will be created ## enabled: false ## pgAdmin Ingress annotations ## annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" ## pgAdmin Ingress hostnames with optional path ## Must be provided if Ingress is enabled hosts: - host: chart-example.local paths: [] ## pgAdmin Ingress TLS configuration ## Secrets must be manually created in the namespace tls: [] # - secretName: chart-example-tls # hosts: # - chart-example.local ## pgAdmin startup configuration ## Values in here get injected as environment variables ## env: email: chart@example.local password: SuperSecret ## If True, allows pgAdmin to create session cookies based on IP address ## Ref: https://www.pgadmin.org/docs/pgadmin4/4.18/config_py.html # enhanced_cookie_protection: "False" persistentVolume: ## If true, pgAdmin will create/use a Persistent Volume Claim ## If false, use emptyDir ## enabled: true ## pgAdmin Persistent Volume Claim annotations ## annotations: {} ## pgAdmin Persistent Volume access modes ## Must match those of 
existing PV or dynamic provisioner ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ accessModes: - ReadWriteOnce ## pgAdmin Persistent Volume Size ## size: 10Gi ## pgAdmin Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" # existingClaim: "" ## Security context to be added to pgAdmin pods ## securityContext: runAsUser: 5050 runAsGroup: 5050 fsGroup: 5050 resources: {} # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi ## pgAdmin readiness and liveness probe initial delay and timeout ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ ## livenessProbe: initialDelaySeconds: 30 periodSeconds: 60 timeoutSeconds: 15 successThreshold: 1 failureThreshold: 3 readinessProbe: initialDelaySeconds: 30 periodSeconds: 60 timeoutSeconds: 15 successThreshold: 1 failureThreshold: 3 ## Node labels for pgAdmin pod assignment ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ ## nodeSelector: {} ## Node tolerations for server scheduling to nodes with taints ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ ## tolerations: [] ## Pod affinity ## affinity: {}
kubedb
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"kubedb.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kub...
## ## KubeDB chart configuration ## # Docker registry containing KubeDB images dockerRegistry: kubedb ## Tags for Docker images imageTags: ## Docker image tag containing KubeDB operator operator: 0.8.0-beta.2 ## Docker image tag containing KubeDB exporter exporter: 0.8.0-beta.2 ## Docker image tag containing KubeDB apiserver apiserver: 0.1.0-beta.2 # Declare variables to be passed into your templates. replicaCount: 1 ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod ## # imagePullSecrets: # - name: myRegistryKeySecretName ## Specify a imagePullPolicy ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## imagePullPolicy: IfNotPresent ## Installs KubeDB operator as critical addon ## https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ criticalAddon: false rbac: # Specifies whether RBAC resources should be created create: true serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: apiserver: # groupPriorityMinimum is the minimum priority the group should have. Please see # https://github.com/kubernetes/kube-aggregator/blob/release-1.9/pkg/apis/apiregistration/v1beta1/types.go#L58-L64 # for more information on proper values of this field. groupPriorityMinimum: 10000 # versionPriority is the ordering of this API inside of the group. 
Please see # https://github.com/kubernetes/kube-aggregator/blob/release-1.9/pkg/apis/apiregistration/v1beta1/types.go#L66-L70 # for more information on proper values of this field versionPriority: 15 # enableAdmissionWebhook is used to configure apiserver as admission webhook for KubeDB CRDs enableAdmissionWebhook: false # CA certificate used by main Kubernetes api server ca:
stash
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"stash.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kube...
# Default values for stash. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 operator: image: appscode/stash tag: 0.7.0-rc.1 pushgateway: image: prom/pushgateway tag: v0.4.0 ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod ## # imagePullSecrets: # - name: myRegistryKeySecretName ## Specify a imagePullPolicy ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## imagePullPolicy: IfNotPresent ## Installs Stash operator as critical addon ## https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ criticalAddon: false ## Install Default RBAC roles and bindings rbac: # Specifies whether RBAC resources should be created create: true serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: apiserver: # groupPriorityMinimum is the minimum priority the group should have. Please see # https://github.com/kubernetes/kube-aggregator/blob/release-1.9/pkg/apis/apiregistration/v1beta1/types.go#L58-L64 # for more information on proper values of this field. groupPriorityMinimum: 10000 # versionPriority is the ordering of this API inside of the group. Please see # https://github.com/kubernetes/kube-aggregator/blob/release-1.9/pkg/apis/apiregistration/v1beta1/types.go#L66-L70 # for more information on proper values of this field versionPriority: 15 # enableAdmissionWebhook is used to configure apiserver as ValidationWebhook for Voyager CRDs enableAdmissionWebhook: false # CA certificate used by main Kubernetes api server ca:
rabbitmq
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"rabbitmq.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some K...
## Global Docker image parameters ## Please, note that this will override the image parameters, including dependencies, configured to use the global value ## Current available global Docker image parameters: imageRegistry and imagePullSecrets ## # global: # imageRegistry: myRegistryName # imagePullSecrets: # - myRegistryKeySecretName # storageClass: myStorageClass ## Bitnami RabbitMQ image version ## ref: https://hub.docker.com/r/bitnami/rabbitmq/tags/ ## image: registry: docker.io repository: bitnami/rabbitmq tag: 3.8.2-debian-10-r30 ## set to true if you would like to see extra information on logs ## it turns BASH and NAMI debugging in minideb ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging debug: false ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## String to partially override rabbitmq.fullname template (will maintain the release name) ## # nameOverride: ## String to fully override rabbitmq.fullname template ## # fullnameOverride: ## Use an alternate scheduler, e.g. "stork". ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ ## # schedulerName: ## does your cluster have rbac enabled? assume yes by default rbacEnabled: true ## RabbitMQ should be initialized one by one when building cluster for the first time. ## Therefore, the default value of podManagementPolicy is 'OrderedReady' ## Once the RabbitMQ participates in the cluster, it waits for a response from another ## RabbitMQ in the same cluster at reboot, except the last RabbitMQ of the same cluster. 
## If the cluster exits gracefully, you do not need to change the podManagementPolicy ## because the first RabbitMQ of the statefulset always will be last of the cluster. ## However if the last RabbitMQ of the cluster is not the first RabbitMQ due to a failure, ## you must change podManagementPolicy to 'Parallel'. ## ref : https://www.rabbitmq.com/clustering.html#restarting ## podManagementPolicy: OrderedReady ## section of specific values for rabbitmq rabbitmq: ## RabbitMQ application username ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables ## username: user ## RabbitMQ application password ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables ## # password: # existingPasswordSecret: name-of-existing-secret ## Erlang cookie to determine whether different nodes are allowed to communicate with each other ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables ## # erlangCookie: # existingErlangSecret: name-of-existing-secret ## Node name to cluster with. 
e.g.: `clusternode@hostname` ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables ## # rabbitmqClusterNodeName: ## Value for the RABBITMQ_LOGS environment variable ## ref: https://www.rabbitmq.com/logging.html#log-file-location ## logs: '-' ## RabbitMQ Max File Descriptors ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables ## ref: https://www.rabbitmq.com/install-debian.html#kernel-resource-limits ## setUlimitNofiles: true ulimitNofiles: '65536' ## RabbitMQ maximum available scheduler threads and online scheduler threads ## ref: https://hamidreza-s.github.io/erlang/scheduling/real-time/preemptive/migration/2016/02/09/erlang-scheduler-details.html#scheduler-threads ## maxAvailableSchedulers: 2 onlineSchedulers: 1 ## Plugins to enable plugins: "rabbitmq_management rabbitmq_peer_discovery_k8s" ## Extra plugins to enable ## Use this instead of `plugins` to add new plugins extraPlugins: "rabbitmq_auth_backend_ldap" ## Clustering settings clustering: address_type: hostname k8s_domain: cluster.local ## Rebalance master for queues in cluster when new replica is created ## ref: https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance rebalance: false loadDefinition: enabled: false secretName: load-definition ## environment variables to configure rabbitmq ## ref: https://www.rabbitmq.com/configure.html#customise-environment env: {} ## Configuration file content: required cluster configuration ## Do not override unless you know what you are doing. 
To add more configuration, use `extraConfiguration` of `advancedConfiguration` instead configuration: |- ## Clustering cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s cluster_formation.k8s.host = kubernetes.default.svc.cluster.local cluster_formation.node_cleanup.interval = 10 cluster_formation.node_cleanup.only_log_warning = true cluster_partition_handling = autoheal # queue master locator queue_master_locator=min-masters # enable guest user loopback_users.guest = false ## Configuration file content: extra configuration ## Use this instead of `configuration` to add more configuration extraConfiguration: |- #disk_free_limit.absolute = 50MB #management.load_definitions = /app/load_definition.json ## Configuration file content: advanced configuration ## Use this as additional configuraton in classic config format (Erlang term configuration format) ## ## If you set LDAP with TLS/SSL enabled and you are using self-signed certificates, uncomment these lines. ## advancedConfiguration: |- ## [{ ## rabbitmq_auth_backend_ldap, ## [{ ## ssl_options, ## [{ ## verify, verify_none ## }, { ## fail_if_no_peer_cert, ## false ## }] ## ]} ## }]. ## advancedConfiguration: |- ## Enable encryption to rabbitmq ## ref: https://www.rabbitmq.com/ssl.html ## tls: enabled: false failIfNoPeerCert: true sslOptionsVerify: verify_peer caCertificate: |- serverCertificate: |- serverKey: |- # existingSecret: name-of-existing-secret-to-rabbitmq ## LDAP configuration ## ldap: enabled: false server: "" port: "389" user_dn_pattern: cn=${username},dc=example,dc=org tls: # If you enabled TLS/SSL you can set advaced options using the advancedConfiguration parameter. 
enabled: false ## Kubernetes service type service: type: ClusterIP ## Node port ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables ## # nodePort: 30672 ## Set the LoadBalancerIP ## # loadBalancerIP: ## Node port Tls ## # nodeTlsPort: 30671 ## Amqp port ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables ## port: 5672 ## Amqp Tls port ## tlsPort: 5671 ## Dist port ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables ## distPort: 25672 ## RabbitMQ Manager port ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables ## managerPort: 15672 ## Service annotations annotations: {} # service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 ## Load Balancer sources ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service ## # loadBalancerSourceRanges: # - 10.10.10.0/24 ## Extra ports to expose # extraPorts: ## Extra ports to be included in container spec, primarily informational # extraContainerPorts: # Additional pod labels to apply podLabels: {} ## Pod Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ ## securityContext: enabled: true fsGroup: 1001 runAsUser: 1001 extra: {} persistence: ## this enables PVC templates that will create one per pod enabled: true ## rabbitmq data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. 
(gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce ## Existing PersistentVolumeClaims ## The value is evaluated as a template ## So, for example, the name can depend on .Release or .Chart # existingClaim: "" # If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well. size: 8Gi # persistence directory, maps to the rabbitmq data directory path: /opt/bitnami/rabbitmq/var/lib/rabbitmq ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: {} networkPolicy: ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ ## enabled: false ## The Policy model to apply. When set to false, only pods with the correct ## client label will have network access to the ports RabbitMQ is listening ## on. When true, RabbitMQ will accept connections from any source ## (with the correct destination port). ## allowExternal: true ## Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. 
## # additionalRules: # - matchLabels: # - role: frontend # - matchExpressions: # - key: role # operator: In # values: # - frontend ## Replica count, set to 1 to provide a default available cluster replicas: 1 ## Pod priority ## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ # priorityClassName: "" ## updateStrategy for RabbitMQ statefulset ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies updateStrategy: type: RollingUpdate ## Node labels and tolerations for pod assignment ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature nodeSelector: {} tolerations: [] affinity: {} podDisruptionBudget: {} # maxUnavailable: 1 # minAvailable: 1 ## annotations for rabbitmq pods podAnnotations: {} ## Configure the ingress resource that allows you to access the ## Wordpress installation. Set up the URL ## ref: http://kubernetes.io/docs/user-guide/ingress/ ## ingress: ## Set to true to enable ingress record generation enabled: false ## The list of hostnames to be covered with this ingress record. 
## Most likely this will be just one host, but in the event more hosts are needed, this is an array ## hostName: foo.bar.com path: / ## Set this to true in order to enable TLS on the ingress record ## A side effect of this will be that the backend wordpress service will be connected at port 443 tls: false ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS tlsSecret: myTlsSecret ## Ingress annotations done as key:value pairs ## If you're using kube-lego, you will want to add: ## kubernetes.io/tls-acme: true ## ## For a full list of possible ingress annotations, please see ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md ## ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: true ## The following settings are to configure the frequency of the lifeness and readiness probes livenessProbe: enabled: true initialDelaySeconds: 120 timeoutSeconds: 20 periodSeconds: 30 failureThreshold: 6 successThreshold: 1 readinessProbe: enabled: true initialDelaySeconds: 10 timeoutSeconds: 20 periodSeconds: 30 failureThreshold: 3 successThreshold: 1 metrics: enabled: false image: registry: docker.io repository: bitnami/rabbitmq-exporter tag: 0.29.0-debian-10-r28 pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## environment variables to configure rabbitmq_exporter ## ref: https://github.com/kbudde/rabbitmq_exporter#configuration env: {} ## Metrics exporter port port: 9419 ## RabbitMQ address to connect to (from the same Pod, usually the local loopback address). 
## If your Kubernetes cluster does not support IPv6, you can change to `127.0.0.1` in order to force IPv4. ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/#networking rabbitmqAddress: localhost ## Comma-separated list of extended scraping capabilities supported by the target RabbitMQ server ## ref: https://github.com/kbudde/rabbitmq_exporter#extended-rabbitmq-capabilities capabilities: "bert,no_sort" resources: {} annotations: prometheus.io/scrape: "true" prometheus.io/port: "9419" livenessProbe: enabled: true initialDelaySeconds: 15 timeoutSeconds: 5 periodSeconds: 30 failureThreshold: 6 successThreshold: 1 readinessProbe: enabled: true initialDelaySeconds: 5 timeoutSeconds: 5 periodSeconds: 30 failureThreshold: 3 successThreshold: 1 ## Prometheus Service Monitor ## ref: https://github.com/coreos/prometheus-operator ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint serviceMonitor: ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry enabled: false ## Specify the namespace in which the serviceMonitor resource will be created # namespace: "" ## Specify the interval at which metrics should be scraped interval: 30s ## Specify the timeout after which the scrape is ended # scrapeTimeout: 30s ## Specify Metric Relabellings to add to the scrape endpoint # relabellings: ## Specify honorLabels parameter to add the scrape endpoint honorLabels: false ## Specify the release for ServiceMonitor. 
Sometimes it should be custom for prometheus operator to work # release: "" ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec additionalLabels: {} ## Custom PrometheusRule to be defined ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions prometheusRule: enabled: false additionalLabels: {} namespace: "" rules: [] ## List of reules, used as template by Helm. ## These are just examples rules inspired from https://awesome-prometheus-alerts.grep.to/rules.html ## Please adapt them to your needs. ## Make sure to constraint the rules to the current rabbitmq service. ## Also make sure to escape what looks like helm template. # - alert: RabbitmqDown # expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0 # for: 5m # labels: # severity: error # annotations: # summary: Rabbitmq down (instance {{ "{{ $labels.instance }}" }}) # description: RabbitMQ node down # - alert: ClusterDown # expr: | # sum(rabbitmq_running{service="{{ template "rabbitmq.fullname" . }}"}) # < {{ .Values.replicas }} # for: 5m # labels: # severity: error # annotations: # summary: Cluster down (instance {{ "{{ $labels.instance }}" }}) # description: | # Less than {{ .Values.replicas }} nodes running in RabbitMQ cluster # VALUE = {{ "{{ $value }}" }} # - alert: ClusterPartition # expr: rabbitmq_partitions{service="{{ template "rabbitmq.fullname" . }}"} > 0 # for: 5m # labels: # severity: error # annotations: # summary: Cluster partition (instance {{ "{{ $labels.instance }}" }}) # description: | # Cluster partition # VALUE = {{ "{{ $value }}" }} # - alert: OutOfMemory # expr: | # rabbitmq_node_mem_used{service="{{ template "rabbitmq.fullname" . 
}}"} # / rabbitmq_node_mem_limit{service="{{ template "rabbitmq.fullname" . }}"} # * 100 > 90 # for: 5m # labels: # severity: warning # annotations: # summary: Out of memory (instance {{ "{{ $labels.instance }}" }}) # description: | # Memory available for RabbmitMQ is low (< 10%)\n VALUE = {{ "{{ $value }}" }} # LABELS: {{ "{{ $labels }}" }} # - alert: TooManyConnections # expr: rabbitmq_connectionsTotal{service="{{ template "rabbitmq.fullname" . }}"} > 1000 # for: 5m # labels: # severity: warning # annotations: # summary: Too many connections (instance {{ "{{ $labels.instance }}" }}) # description: | # RabbitMQ instance has too many connections (> 1000) # VALUE = {{ "{{ $value }}" }}\n LABELS: {{ "{{ $labels }}" }} ## ## Init containers parameters: ## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup ## volumePermissions: enabled: false image: registry: docker.io repository: bitnami/minideb tag: buster pullPolicy: Always ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName resources: {} ## forceBoot: executes 'rabbitmqctl force_boot' to force boot cluster shut down unexpectedly in an ## unknown order. ## ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot ## forceBoot: enabled: false ## Optionally specify extra secrets to be created by the chart. ## This can be useful when combined with load_definitions to automatically create the secret containing the definitions to be loaded. ## extraSecrets: {} # load-definition: # load_definition.json: | # { # ... # }
openvpn
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"openvpn.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 24 -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name field...
# Default values for openvpn. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 updateStrategy: {} # type: RollingUpdate # rollingUpdate: # maxSurge: 1 # maxUnavailable: 0 # For supporting pulling from private registries imagePullSecretName: image: repository: jfelten/openvpn-docker tag: 1.1.0 pullPolicy: IfNotPresent service: type: LoadBalancer externalPort: 443 internalPort: 443 # hostPort: 443 externalIPs: [] nodePort: 32085 # clusterIP: None # LoadBalancerSourceRanges: 0.0.0.0/0 # loadBalancerIP: 10.0.0.1 ## Here annotations can be added to the openvpn service # annotations: # external-dns.alpha.kubernetes.io/hostname: vpn.example.com annotations: {} ## Here annotations can be added to the openvpn pod # podAnnotations: # backup.ark.heptio.com/backup-volumes: certs podAnnotations: {} # Add privileged init container to enable IPv4 forwarding ipForwardInitContainer: false resources: limits: cpu: 300m memory: 128Mi requests: cpu: 300m memory: 128Mi readinessProbe: initialDelaySeconds: 5 periodSeconds: 5 successThreshold: 2 persistence: enabled: true # subPath: openvpn ## A manually managed Persistent Volume and Claim ## Requires persistence.enabled: true ## If defined, PVC must be created manually before volume will be bound # existingClaim: ## openvpn data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 2M openvpn: # Network allocated for openvpn clients (default: 10.240.0.0). OVPN_NETWORK: 10.240.0.0 # Network subnet allocated for openvpn client (default: 255.255.0.0). OVPN_SUBNET: 255.255.0.0 # Protocol used by openvpn tcp or udp (default: udp). 
OVPN_PROTO: tcp # Kubernetes pod network (optional). OVPN_K8S_POD_NETWORK: "10.0.0.0" # Kubernetes pod network subnet (optional). OVPN_K8S_POD_SUBNET: "255.0.0.0" # Kubernetes service network (optional). # Define openvpn.OVPN_K8S_SVC_NETWORK and openvpn.OVPN_K8S_SVC_SUBNET if it's needed to create a separate route to Kubernates service subnet # OVPN_K8S_SVC_NETWORK: # Kubernetes service network subnet (optional). # OVPN_K8S_SVC_SUBNET: # Set default route which openvpn figures basing on network routes inside openvpn pod DEFAULT_ROUTE_ENABLED: true # Server certificate data # keystoreSecret: # secret with openvpn certificates. If specified, certificates are taken from the secret # create secret with such command: # kubectl create secret generic openvpn-keystore-secret --from-file=./server.key --from-file=./ca.crt --from-file=./server.crt --from-file=./dh.pem [--from-file=./crl.pem] [--from-file=./ta.key] # Push a `dhcp-option DOMAIN` config dhcpOptionDomain: true # Redirect all client traffic through VPN redirectGateway: true # Use/generate certificate revocation list useCrl: false # Use/generate a ta.key (https://openvpn.net/community-resources/hardening-openvpn-security/) taKey: false # Override default cipher # cipher: AES-256-CBC # Lines appended to the end of the server configuration file # serverConf: | # max-clients 100 # client-to-client # Lines appended to the end of the client configuration file # Example: if all of your clients are Ubuntu (18.04+) you may need to install # the update-systemd-resolved package (apt install update-systemd-resolved) then # set the following to make sure systemd-resolved routes DNS requests correctly: # clientConf: | # script-security 2 # up /etc/openvpn/update-systemd-resolved # up-restart # down /etc/openvpn/update-systemd-resolved # down-pre # Enable istio support for openvpn connections istio: enabled: false proxy: port: 15001 iptablesExtra: [] # - -A FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT # - -A 
FORWARD -m conntrack --ctstate NEW -d 10.240.0.0/255.255.0.0 -j ACCEPT # - -A FORWARD -j REJECT # Enable CCD support ccd: enabled: false config: {} # johndoe: "ifconfig-push 10.240.100.10 10.240.100.11" # janedoe: "ifconfig-push 10.240.100.20 10.240.100.21" nodeSelector: {} tolerations: []
k8s-spot-termination-handler
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"k8s-spot-termination-handler.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 ...
# Default values for k8s-spot-termination-handler. # This is a YAML-formatted file. # Declare variables to be passed into your templates. rbac: # Specifies whether RBAC resources should be created create: true serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: image: repository: kubeaws/kube-spot-termination-notice-handler tag: 1.13.7-1 pullPolicy: IfNotPresent ## Optional array of imagePullSecrets containing private registry credentials ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ imagePullSecrets: [] # - name: secretName # URL of EC2 spot instance termination notice endpoint noticeUrl: http://169.254.169.254/latest/meta-data/spot/termination-time # Poll the metadata every pollInterval seconds for termination events: pollInterval: 5 # Set VERBOSE=1 to get more output # verbose: 1 # Send notifications to a Slack webhook URL - replace with your own value and uncomment: # slackUrl: https://hooks.slack.com/services/EXAMPLE123/EXAMPLE123/example1234567 # Set the cluster name to be reported in a Slack message # clusterName: test # Silence logspout by default - set to true to enable logs arriving in logspout enableLogspout: false # Trigger instance removal from AutoScaling Group on termination notice detachAsg: false # Grace period for node draining gracePeriod: 120 ## The name of a secret in the same kubernetes namespace which contain values to be added to the environment ## This can be useful for auth tokens, etc envFromSecret: "" resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. 
If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 10m # memory: 32Mi # Add a priority class to the deamonset priorityClassName: "" nodeSelector: {} # "node-role.kubernetes.io/spot-worker": "true" tolerations: [] # - key: "dedicated" # operator: "Equal" # value: "gpu" # effect: "NoSchedule" affinity: {} hostNetwork: true # annotations to be added to pods podAnnotations: {} # If the spot handler was installed before Kubernetes version 1.6 # then you need to explicitly set the value below otherwise # you will have to manually cycle your pods every time you perform the update. # Default value for Kubernetes v1.5 and before was "OnDelete". updateStrategy: RollingUpdate maxUnavailable: 1 podSecurityContext: {}
spotify-docker-gc
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"spotify-docker-gc.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars becau...
image: registry: docker.io org: spotify repository: docker-gc tag: latest pullPolicy: "IfNotPresent" cron: schedule: "0 0 * * *" log: /var/log/crond.log env: # grace period in seconds before garbage collecting gracePeriodSeconds: "0" # docker (client) api version to use in container, to match node host (server) api version # dockerAPIVersion: "1.23" # List any image or container exclusions here # exclude: # images: |- # spotify/cassandra:latest # redis:.* # 9681260c3ad5 # containers: |- # mariadb-data # inimitable_quokka # Optionally specify an array of imagePullSecrets. # Secrets must be manually created in the namespace. # ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod # # imagePullSecrets: # - name: myRegistryKeySecretName ## Resource requirements for spotify-docker-gc container ## Ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ ## resources: {} ## Node tolerations for spotify-docker-gc scheduling to nodes with taints ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ ## tolerations: [] # - key: "key" # operator: "Equal|Exists" # value: "value" # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" ## Node labels for spotify-docker-gc pod assignment ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ ## nodeSelector: {}
terracotta
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"terracotta.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some...
# Terracotta Image definitions are available at : https://github.com/Terracotta-OSS/docker image: repository: terracotta/terracotta-server-oss tag: 5.6.0 pullPolicy: Always website: "https://github.com/Terracotta-OSS/docker" sampleEhcacheClientImage: repository: terracotta/sample-ehcache-client tag: 5.6.0 replicaCount: 2 # specify as many offheap resources as you want; provided you have enough resources on the node # and you have caching clients using those resources for their clustered tiers ! offheaps: - name: offheap-1 unit: MB size: 512 - name: offheap-2 unit: MB size: 256 service: type: ClusterIP terracottaPort: 9410 syncPort: 9430 # None is headless service; still useful for the StatefulSet that relies on it clusterIP: "None" tolerateUnreadyEndpoints: true resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi nodeSelector: {} tolerations: [] affinity: {} rbac: # Specifies whether RBAC resources should be created create: true serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name:
burrow
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"burrow.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kub...
image: repository: hyperledger/burrow tag: 0.29.0 pullPolicy: IfNotPresent chain: logLevel: info extraSeeds: [] testing: false restore: enabled: false dumpURL: "" config: BurrowDir: ".burrow" Tendermint: Seeds: "" SeedMode: false ListenHost: "0.0.0.0" ListenPort: "26656" ExternalAddress: "" Moniker: "" Keys: GRPCServiceEnabled: true AllowBadFilePermissions: true RemoteAddress: "" KeysDirectory: "/keys" RPC: Info: Enabled: true ListenHost: "0.0.0.0" ListenPort: "26658" Profiler: Enabled: false ListenHost: "0.0.0.0" ListenPort: "6060" GRPC: Enabled: true ListenHost: "0.0.0.0" ListenPort: "10997" Metrics: Enabled: true ListenHost: "0.0.0.0" ListenPort: "9102" MetricsPath: "/metrics" BlockSampleSize: 100 Logging: ExcludeTrace: true NonBlocking: true RootSink: Output: OutputType: "stderr" Format: "json" validators: - name: Validator_0 address: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA nodeAddress: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA contracts: # wait required to ensure chain readiness enabled: false image: "" tag: "" deploy: "" extraArgs: {} environment: inline: {} secrets: [] organization: "user" persistence: enabled: true size: 80Gi storageClass: standard accessMode: ReadWriteOnce persistentVolumeReclaimPolicy: "Retain" peer: service: type: ClusterIP ingress: enabled: false hosts: [] grpc: service: type: ClusterIP loadBalance: true ingress: enabled: false hosts: [] annotations: {} tls: {} info: service: type: ClusterIP loadBalance: true ingress: enabled: false # exposing partial ingress only exposes # the /accounts and /blocks paths outside the cluster partial: false pathLeader: "/" annotations: {} hosts: [] tls: {} # resources: # limits: # cpu: 500m # memory: 1Gi # requests: # cpu: 100m # memory: 256Mi livenessProbe: enabled: true path: /status?block_seen_time_within=10m initialDelaySeconds: 240 timeoutSeconds: 1 periodSeconds: 30 readinessProbe: enabled: true path: /status initialDelaySeconds: 5 podAnnotations: {} podLabels: {} # Affinity for pod assignment # 
Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity affinity: {} # Tolerations for pod assignment # Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ tolerations: [] # Node labels for pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ nodeSelector: {}
openebs
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"openebs.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Ku...
# Default values for openebs. # This is a YAML-formatted file. # Declare variables to be passed into your templates. rbac: # Specifies whether RBAC resources should be created create: true pspEnabled: false serviceAccount: create: true name: release: # "openebs.io/version" label for control plane components version: "1.11.0" image: pullPolicy: IfNotPresent repository: "" apiserver: enabled: true image: "openebs/m-apiserver" imageTag: "1.11.0" replicas: 1 ports: externalPort: 5656 internalPort: 5656 sparse: enabled: "false" nodeSelector: {} tolerations: [] affinity: {} healthCheck: initialDelaySeconds: 30 periodSeconds: 60 defaultStorageConfig: enabled: "true" # Directory used by the OpenEBS to store debug information and so forth # that are generated in the course of running OpenEBS containers. varDirectoryPath: baseDir: "/var/openebs" provisioner: enabled: true image: "openebs/openebs-k8s-provisioner" imageTag: "1.11.0" replicas: 1 nodeSelector: {} tolerations: [] affinity: {} healthCheck: initialDelaySeconds: 30 periodSeconds: 60 localprovisioner: enabled: true image: "openebs/provisioner-localpv" imageTag: "1.11.0" replicas: 1 basePath: "/var/openebs/local" nodeSelector: {} tolerations: [] affinity: {} healthCheck: initialDelaySeconds: 30 periodSeconds: 60 snapshotOperator: enabled: true controller: image: "openebs/snapshot-controller" imageTag: "1.11.0" provisioner: image: "openebs/snapshot-provisioner" imageTag: "1.11.0" replicas: 1 upgradeStrategy: "Recreate" nodeSelector: {} tolerations: [] affinity: {} healthCheck: initialDelaySeconds: 30 periodSeconds: 60 ndm: enabled: true image: "openebs/node-disk-manager-amd64" imageTag: "0.6.0" sparse: path: "/var/openebs/sparse" size: "10737418240" count: "0" filters: enableOsDiskExcludeFilter: true enableVendorFilter: true excludeVendors: "CLOUDBYT,OpenEBS" enablePathFilter: true includePaths: "" excludePaths: "loop,fd0,sr0,/dev/ram,/dev/dm-,/dev/md,/dev/rbd" probes: enableSeachest: false nodeSelector: {} 
tolerations: [] healthCheck: initialDelaySeconds: 30 periodSeconds: 60 ndmOperator: enabled: true image: "openebs/node-disk-operator-amd64" imageTag: "0.6.0" replicas: 1 upgradeStrategy: Recreate nodeSelector: {} tolerations: [] healthCheck: initialDelaySeconds: 30 periodSeconds: 60 readinessCheck: initialDelaySeconds: 4 periodSeconds: 10 failureThreshold: 1 webhook: enabled: true image: "openebs/admission-server" imageTag: "1.11.0" failurePolicy: Ignore replicas: 1 healthCheck: initialDelaySeconds: 30 periodSeconds: 60 nodeSelector: {} tolerations: [] affinity: {} jiva: image: "openebs/jiva" imageTag: "1.11.0" replicas: 3 defaultStoragePath: "/var/openebs" cstor: pool: image: "openebs/cstor-pool" imageTag: "1.11.0" poolMgmt: image: "openebs/cstor-pool-mgmt" imageTag: "1.11.0" target: image: "openebs/cstor-istgt" imageTag: "1.11.0" volumeMgmt: image: "openebs/cstor-volume-mgmt" imageTag: "1.11.0" helper: image: "openebs/linux-utils" imageTag: "1.11.0" featureGates: enabled: false GPTBasedUUID: enabled: false featureGateFlag: "GPTBasedUUID" crd: enableInstall: true policies: monitoring: enabled: true image: "openebs/m-exporter" imageTag: "1.11.0" analytics: enabled: true # Specify in hours the duration after which a ping event needs to be sent. pingInterval: "24h"
sensu
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"sensu.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kube...
# Docker image name image: "sstarcher/sensu" # Docker image tag imageTag: "0.28" # Image pull policy for the container pullPolicy: "IfNotPresent" # How many sensu containers to spawn replicaCount: 1 # How to publish the service http://kubernetes.io/docs/user-guide/services/#publishing-services---service-types serviceType: ClusterIP # Service port to expose Sensu on httpPort: 4567 # If set to true, the service will be exposed via the Deis Router if setup https://github.com/deis/router deis: routable: false domains: sensu # CPU and Memory limit and request for Sensu Server server: resources: requests: cpu: 100m memory: 100Mi # CPU and Memory limit and request for Sensu Api api: resources: requests: cpu: 50m memory: 100Mi # Redis configuration REDIS_PORT: 6379 REDIS_DB: 0 REDIS_AUTO_RECONNECT: true REDIS_RECONNECT_ON_ERROR: true # Redis chart configuration redis: persistence: enabled: false
osclass
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"osclass.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"oscla...
## Global Docker image parameters ## Please, note that this will override the image parameters, including dependencies, configured to use the global value ## Current available global Docker image parameters: imageRegistry and imagePullSecrets ## # global: # imageRegistry: myRegistryName # imagePullSecrets: # - myRegistryKeySecretName # storageClass: myStorageClass ## Bitnami Osclass image version ## ref: https://hub.docker.com/r/bitnami/osclass/tags/ ## image: registry: docker.io repository: bitnami/osclass tag: 3.7.4-debian-10-r24 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## String to partially override osclass.fullname template (will maintain the release name) ## # nameOverride: ## String to fully override osclass.fullname template ## # fullnameOverride: ## Osclass host to create application URLs ## ref: https://github.com/bitnami/bitnami-docker-osclass#configuration ## # osclassHost: ## User of the application ## ref: https://github.com/bitnami/bitnami-docker-osclass#configuration ## osclassUsername: user ## Application password ## Defaults to a random 10-character alphanumeric string if not set ## ref: https://github.com/bitnami/bitnami-docker-osclass#configuration ## # osclassPassword: ## Admin email ## ref: https://github.com/bitnami/bitnami-docker-osclass#configuration ## osclassEmail: user@example.com ## Application title ## ref: https://github.com/bitnami/bitnami-docker-osclass#configuration osclassWebTitle: 'Sample Web Page' ## Allow site to appear in search engines ## ref: https://github.com/bitnami/bitnami-docker-osclass#configuration 
osclassPingEngines: 1 ## Automatically send usage statistics and crash reports to Osclass ## ref: https://github.com/bitnami/bitnami-docker-osclass#configuration ## osclassSaveStats: 1 ## Set to `yes` to allow the container to be started with blank passwords ## ref: https://github.com/bitnami/bitnami-docker-osclass#environment-variables allowEmptyPassword: "yes" ## ## External database configuration ## externalDatabase: ## Database host host: ## Database host port: 3306 ## Database user user: bn_osclass ## Database password password: ## Database name database: bitnami_osclass ## SMTP mail delivery configuration ## ref: https://github.com/bitnami/bitnami-docker-osclass/#smtp-configuration ## # smtpHost: # smtpPort: # smtpUser: # smtpPassword: # smtpProtocol: ## ## MariaDB chart configuration ## ## https://github.com/helm/charts/blob/master/stable/mariadb/values.yaml ## mariadb: ## Whether to deploy a mariadb server to satisfy the applications database requirements. To use an external database set this to false and configure the externalDatabase parameters enabled: true ## Disable MariaDB replication replication: enabled: false ## Create a database and a database user ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run ## db: name: bitnami_osclass user: bn_osclass ## If the password is not specified, mariadb will generates a random password ## # password: ## MariaDB admin password ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run ## # rootUser: # password: ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## master: persistence: enabled: true ## mariadb data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no 
storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi ## Kubernetes configuration ## For minikube, set this to NodePort, elsewhere use LoadBalancer ## service: type: LoadBalancer # HTTP Port port: 80 # HTTPS Port httpsPort: 443 ## loadBalancerIP: ## ## nodePorts: ## http: <to set explicitly, choose port between 30000-32767> ## https: <to set explicitly, choose port between 30000-32767> nodePorts: http: "" https: "" ## Enable client source IP preservation ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip ## externalTrafficPolicy: Cluster ## Configure the ingress resource that allows you to access the ## osclass installation. Set up the URL ## ref: http://kubernetes.io/docs/user-guide/ingress/ ## ingress: ## Set to true to enable ingress record generation enabled: false ## Set this to true in order to add the corresponding annotations for cert-manager certManager: false ## Ingress annotations done as key:value pairs ## For a full list of possible ingress annotations, please see ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md ## ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set annotations: # kubernetes.io/ingress.class: nginx ## The list of hostnames to be covered with this ingress record. 
## Most likely this will be just one host, but in the event more hosts are needed, this is an array hosts: - name: osclass.local path: / ## Set this to true in order to enable TLS on the ingress record tls: false ## Optionally specify the TLS hosts for the ingress record ## Useful when the Ingress controller supports www-redirection ## If not specified, the above host name will be used # tlsHosts: # - www.osclass.local # - osclass.local ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS tlsSecret: osclass.local-tls secrets: ## If you're providing your own certificates, please use this to add the certificates as secrets ## key and certificate should start with -----BEGIN CERTIFICATE----- or ## -----BEGIN RSA PRIVATE KEY----- ## ## name should line up with a tlsSecret set further up ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set ## ## It is also possible to create and manage the certificates outside of this helm chart ## Please see README.md for more information # - name: osclass.local-tls # key: # certificate: ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## persistence: enabled: true osclass: ## osclass data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. 
(gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: requests: memory: 512Mi cpu: 300m ## Pod annotations ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ ## podAnnotations: {} ## Affinity for pod assignment ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## affinity: {} ## Prometheus Exporter / Metrics ## metrics: enabled: false image: registry: docker.io repository: bitnami/apache-exporter tag: 0.7.0-debian-10-r33 pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## Metrics exporter pod Annotation and Labels podAnnotations: prometheus.io/scrape: "true" prometheus.io/port: "9117" ## Metrics exporter resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## # resources: {}
sealed-secrets
[ "# _helpers.tpl\n{{/*\nExpand to the namespace sealed-secrets installs into.\n*/}}\n{{- define \"sealed-secrets.namespace\" -}}\n{{- default .Release.Namespace .Values.namespace -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"sealed-secrets.chart\" -}}\n{{- pr...
image: repository: quay.io/bitnami/sealed-secrets-controller tag: v0.13.1 pullPolicy: IfNotPresent resources: {} nodeSelector: {} tolerations: [] affinity: {} controller: # controller.create: `true` if Sealed Secrets controller should be created create: true # namespace: Namespace to deploy the controller. namespace: "" serviceAccount: # serviceAccount.create: Whether to create a service account or not create: true # serviceAccount.name: The name of the service account to create or use name: "" rbac: # rbac.create: `true` if rbac resources should be created create: true pspEnabled: false # secretName: The name of the TLS secret containing the key used to encrypt secrets secretName: "sealed-secrets-key" ingress: enabled: false annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" path: /v1/cert.pem hosts: - chart-example.local tls: [] # - secretName: chart-example-tls # hosts: # - chart-example.local crd: # crd.create: `true` if the crd resources should be created create: true # crd.keep: `true` if the sealed secret CRD should be kept when the chart is deleted keep: true networkPolicy: false securityContext: # securityContext.runAsUser defines under which user the operator Pod and its containers/processes run. 
runAsUser: 1001 # securityContext.fsGroup defines the filesystem group fsGroup: 65534 podAnnotations: {} podLabels: {} priorityClassName: "" serviceMonitor: # Enables ServiceMonitor creation for the Prometheus Operator create: false # How frequently Prometheus should scrape the ServiceMonitor interval: # Extra labels to apply to the sealed-secrets ServiceMonitor labels: # The namespace where the ServiceMonitor is deployed, defaults to the installation namespace namespace: # The timeout after which the scrape is ended scrapeTimeout: dashboards: # If enabled, sealed-secrets will create a configmap with a dashboard in json that's going to be picked up by grafana # See https://github.com/helm/charts/tree/master/stable/grafana#configuration - `sidecar.dashboards.enabled` create: false # Extra labels to apply to the dashboard configmaps labels: # The namespace where the dashboards are deployed, defaults to the installation namespace namespace:
gitlab-ee
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"gitlab-ee.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some ...
## GitLab EE image ## ref: https://hub.docker.com/r/gitlab/gitlab-ee/tags/ ## image: gitlab/gitlab-ee:9.4.1-ee.0 ## Specify a imagePullPolicy ## 'Always' if imageTag is 'latest', else set to 'IfNotPresent' ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## # imagePullPolicy: ## The URL (with protocol) that your users will use to reach the install. ## ref: https://docs.gitlab.com/omnibus/settings/configuration.html#configuring-the-external-url-for-gitlab ## # externalUrl: http://your-domain.com/ ## Change the initial default admin password if set. If not set, you'll be ## able to set it when you first visit your install. ## # gitlabRootPassword: "" ## For minikube, set this to NodePort, elsewhere use LoadBalancer ## ref: http://kubernetes.io/docs/user-guide/services/#publishing-services---service-types ## serviceType: LoadBalancer ## Configure external service ports ## ref: http://kubernetes.io/docs/user-guide/services/ sshPort: 22 httpPort: 80 httpsPort: 443 ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: ## GitLab requires a good deal of resources. We have split out Postgres and ## redis, which helps some. Refer to the guidelines for larger installs. ## ref: https://docs.gitlab.com/ee/install/requirements.html#hardware-requirements requests: memory: 1Gi cpu: 500m limits: memory: 2Gi cpu: 1 ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## ref: https://docs.gitlab.com/ee/install/requirements.html#storage ## persistence: ## This volume persists generated configuration files, keys, and certs. ## gitlabEtc: enabled: true size: 1Gi ## If defined, volume.beta.kubernetes.io/storage-class: <storageClass> ## Default: volume.alpha.kubernetes.io/storage-class: default ## # storageClass: accessMode: ReadWriteOnce ## This volume is used to store git data and other project files. 
## ref: https://docs.gitlab.com/omnibus/settings/configuration.html#storing-git-data-in-an-alternative-directory ## gitlabData: enabled: true size: 10Gi ## If defined, volume.beta.kubernetes.io/storage-class: <storageClass> ## Default: volume.alpha.kubernetes.io/storage-class: default ## # storageClass: accessMode: ReadWriteOnce ## Configuration values for the postgresql dependency. ## ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md ## postgresql: # 9.6 is the newest supported version for the GitLab container imageTag: "9.6" cpu: 1000m memory: 1Gi postgresUser: gitlab postgresPassword: gitlab postgresDatabase: gitlab persistence: size: 10Gi ## Configuration values for the redis dependency. ## ref: https://github.com/kubernetes/charts/blob/master/stable/redis/README.md ## redis: redisPassword: "gitlab" resources: requests: memory: 1Gi persistence: size: 10Gi
weave-scope
[ "# _helpers.tpl\n{{/* Helm standard labels */}}\n{{- define \"weave-scope.helm_std_labels\" }}\nchart: {{ .Chart.Name }}-{{ .Chart.Version }}\nheritage: {{ .Release.Service }}\nrelease: {{ .Release.Name }}\napp: {{ template \"toplevel.name\" . }}\n{{- end }}\n\n{{/* Weave Scope default annotations */}}\n{{- define ...
# Where defaults exist, the values are set to them here. # Values with no preferred or common defaults are set to empty strings. global: # global.image: the image that will be used for this release image: repository: weaveworks/scope tag: 1.12.0 # global.image.pullPolicy: must be Always, IfNotPresent, or Never pullPolicy: "IfNotPresent" # global.service.*: the configuration of the service used to access the frontend service: # global.service.name: the short name desired for the frontend service # global.service.name may be specified if you need to use a specific service name, but will be generated if not specified # global.service.name is a global so we can access its value easily from the agent subchart # name: "weave-scope-app" # global.service.port: (required if frontend.enabled == true) the port exposed by the Scope frontend service # global.service.port is a global so we can access its value easily from the agent subchart port: 80 # global.service.type: (required if frontend.enabled == true) the type of the frontend service -- must be ClusterIP, NodePort or LoadBalancer # global.service.type is a global to keep it with the other values for configuring the frontend service type: "ClusterIP" # weave-scope-frontend.* controls how the Scope frontend is installed weave-scope-frontend: enabled: true # weave-scope-frontend.resources.*: controls requests/limits for the frontend # weave-scope-frontend.resources.* values are all optional but should not be set to empty values # resources: # requests: # weave-scope-frontend.resources.requests.cpu: CPU req. in MHz (m) # cpu: "" # weave-scope-frontend.resources.requests.memory: memory req. 
in MiB (Mi) # memory: "" # limits: # weave-scope-frontend.resources.limits.cpu: CPU limit in MHz (m) # cpu: "" # weave-scope-frontend.resources.limits.memory: memory limit in MiB (Mi) # memory: "" flags: [] # weave-scope-frontend Ingress ingress: # If true, weave-scope-frontend ingress will be created enabled: false annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" # weave-scope-frontend path(s) must be provided if Ingress is enabled paths: [] # weave-scope-frontend hostname(s) must be provided if Ingress is enabled hosts: - weave-scope.example.test # Ingress TLS secret # Must be created manually in the namespace tls: [] # - secretName: weave-scope-example-tls # hosts: # - weave-scope.example.test # weave-scope-agent.* controls how the Weave Scope node agent pods are installed weave-scope-agent: enabled: true flags: [] # priorityClassName: # weave-scope-agent.dockerBridge: (required if agent.enabled == true) the name of the Docker bridge interface dockerBridge: "docker0" # weave-scope-agent.scopeFrontendAddr: the host:port of a Scope frontend to send data to # weave-scope-agent.scopeFrontendAddr is only needed for some cases where the frontend is deployed separately from the agent scopeFrontendAddr: "" # weave-scope-agent.probeToken: the token used to connect to Weave Cloud # weave-scope-agent.probeToken is not needed for connecting to non-cloud Scope frontends probeToken: "" # weave-scope-agent.rbac.*: controls RBAC resource creation/use # Enabling readOnly adds --probe.no-controls to args list. readOnly: false # weave-scope-agent.resources.*: controls requests/limits for the agent # weave-scope-agent.resources.* values are all optional but should not be set to empty values # resources: # requests: # weave-scope-agent.resources.requests.cpu: CPU req. in MHz (m) # cpu: "" # weave-scope-agent.resources.requests.memory: memory req. 
in MiB (Mi) # memory: "" # limits: # weave-scope-agent.resources.limits.cpu: CPU limit in MHz (m) # cpu: "" # weave-scope-agent.resources.limits.memory: memory limit in MiB (Mi) # memory: "" # weave-scope-agent.* controls how the Weave Scope node agent pods are installed weave-scope-cluster-agent: enabled: true flags: [] # weave-scope-cluster-agent.scopeFrontendAddr: the host:port of a Scope frontend to send data to # weave-scope-cluster-agent.scopeFrontendAddr is only needed for some cases where the frontend is deployed separately from the agent scopeFrontendAddr: "" # weave-scope-cluster-agent.probeToken: the token used to connect to Weave Cloud # weave-scope-cluster-agent.probeToken is not needed for connecting to non-cloud Scope frontends probeToken: "" # weave-scope-cluster-agent.rbac.*: controls RBAC resource creation/use rbac: # weave-scope-cluster-agent.rbac.create: whether RBAC resources should be created # weave-scope-cluster-agent.rbac.create *must* be set to false if RBAC is not enabled in the cluster # weave-scope-cluster-agent.rbac.create *may* be set to false in an RBAC-enabled cluster to allow for external management of RBAC create: true # Enabling readOnly adds --probe.no-controls to args list. readOnly: false serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template # name: "weave-scope" # weave-scope-cluster-agent.resources.*: controls requests/limits for the agent # weave-scope-cluster-agent.resources.* values are all optional but should not be set to empty values # resources: # requests: # weave-scope-cluster-agent.resources.requests.cpu: CPU req. in MHz (m) # cpu: "" # weave-scope-cluster-agent.resources.requests.memory: memory req. 
in MiB (Mi) # memory: "" # limits: # weave-scope-cluster-agent.resources.limits.cpu: CPU limit in MHz (m) # cpu: "" # weave-scope-cluster-agent.resources.limits.memory: memory limit in MiB (Mi) # memory: ""
eventrouter
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"eventrouter.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because som...
# Default values for eventrouter. image: repository: gcr.io/heptio-images/eventrouter tag: v0.3 pullPolicy: IfNotPresent resources: {} # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: rbac: # Specifies whether RBAC resources should be created create: true tolerations: [] nodeSelector: {} sink: glog podAnnotations: {} containerPorts: [] securityContext: {} # runAsUser: 1000 enablePrometheus: true
neo4j
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"neo4j.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kube...
# Default values for Neo4j. # This is a YAML-formatted file. # Declare name/value pairs to be passed into your templates. # name: value name: "neo4j" # Specs for the Neo4j docker image image: "neo4j" imageTag: "4.0.3-enterprise" imagePullPolicy: "IfNotPresent" # imagePullSecret: registry-secret acceptLicenseAgreement: "no" podDisruptionBudget: {} # minAvailable: 2 # maxUnavailable: 1 ## Node labels for pod assignment ## ref: https://kubernetes.io/docs/user-guide/node-selection/ nodeSelector: {} ## Tolerations for pod assignment ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ tolerations: [] ## Affinity for pod assignment ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity affinity: {} # Use password authentication authEnabled: true ## Specify password for neo4j user ## Defaults to a random 10-character alphanumeric string if not set and authEnabled is true # neo4jPassword: # Specify cluster domain (used eg. as suffix in definition of NEO4J_causal__clustering_initial__discovery__members environment variable) clusterDomain: "cluster.local" # Specs for the images used for running tests against the Helm package # https://github.com/mneedham/k8s-kubectl this is a general kubectl docker image testImage: "markhneedham/k8s-kubectl" testImageTag: "master" # Whether or not to use APOC: https://neo4j.com/labs/apoc/ # Comment out if you do not want to use it. useAPOC: "true" # The default name of the Neo4j database to use. 
# See https://neo4j.com/docs/operations-manual/current/manage-databases/introduction/#manage-databases-default defaultDatabase: "neo4j" # Cores core: # configMap: "my-custom-configmap" numberOfServers: 3 persistentVolume: ## whether or not persistence is enabled ## enabled: true ## core server data Persistent Volume mount root path ## mountPath: /data ## core server data Persistent Volume size ## size: 10Gi ## core server data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## storageClass: "-" ## Subdirectory of core server data Persistent Volume to mount ## Useful if the volume's root directory is not empty ## ## subPath: "" sidecarContainers: [] ## Additional containers to be added to the Neo4j core pod. # - name: my-sidecar # image: nginx:latest initContainers: [] ## init containers to run before the Neo4j core pod e.g. to install plugins ## They can also be used to restore from last available backup, to ensure that newly joining ## core members have less TX history to catch up on before joining the cluster. ## Note that this is specifically *not* needed for APOC, which is included by default. 
# - name: init-plugins # image: "appropriate/curl:latest" # imagePullPolicy: "IfNotPresent" # volumeMounts: # - name: plugins # mountPath: /plugins # command: # - "/bin/sh" # - "-c" # - | # curl -L https://somesite.com/path/to/plugin.jar -O # cp plugin.jar /plugins/ # Read Replicas readReplica: # configMap: "my-custom-configmap" resources: {} # limits: # cpu: 100m # memory: 512Mi # requests: # cpu: 100m # memory: 512Mi autoscaling: enabled: false targetAverageUtilization: 70 minReplicas: 1 maxReplicas: 3 numberOfServers: 0 sidecarContainers: [] ## Additional containers to be added to the Neo4j replica pod. # - name: my-sidecar # image: nginx:latest initContainers: [] ## init containers to run before the Neo4j replica pod e.g. to install custom plugins ## They can also be used to restore from last available backup, to ensure that newly joining ## core members have less TX history to catch up on before joining the cluster. ## Note that this is specifically *not* needed for APOC, which is included by default. # - name: init-plugins # image: "appropriate/curl:latest" # imagePullPolicy: "IfNotPresent" # volumeMounts: # - name: plugins # mountPath: /plugins # command: # - "/bin/sh" # - "-c" # - | # curl -L https://somesite.com/path/to/plugin.jar -O # cp plugin.jar /plugins/ resources: {} # limits: # cpu: 100m # memory: 512Mi # requests: # cpu: 100m # memory: 512Mi
metabase
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"metabase.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some K...
# Currently Metabase is not horizontly scalable. See # https://github.com/metabase/metabase/issues/1446 and # https://github.com/metabase/metabase/issues/2754 # NOTE: Should remain 1 replicaCount: 1 podAnnotations: {} podLabels: {} image: repository: metabase/metabase tag: v0.36.3 pullPolicy: IfNotPresent ## String to fully override metabase.fullname template ## # fullnameOverride: # Config Jetty web server listen: host: "0.0.0.0" port: 3000 ssl: # If you have an ssl certificate and would prefer to have Metabase run over HTTPS enabled: false # port: 8443 # keyStore: |- # << JKS KEY STORE >> # keyStorePassword: storepass jetty: # maxThreads: 254 # minThreads: 8 # maxQueued: -1 # maxIdleTime: 60000 # Backend database database: # Database type (h2 / mysql / postgres), default: h2 type: h2 # encryptionKey: << YOUR ENCRYPTION KEY >> ## Only need when you use mysql / postgres # host: # port: # dbname: # username: # password: ## Alternatively, use a connection URI for full configurability. Example for SSL enabled Postgres. 
# connectionURI: postgres://user:password@host:port/database?ssl=true&sslmode=require&sslfactory=org.postgresql.ssl.NonValidatingFactory" ## If a secret with the database credentials already exists, use the following values: # existingSecret: # existingSecretUsernameKey: # existingSecretPasswordKey: # existingSecretConnectionURIKey: password: # Changing Metabase password complexity: # weak: no character constraints # normal: at least 1 digit (default) # strong: minimum 8 characters w/ 2 lowercase, 2 uppercase, 1 digit, and 1 special character complexity: normal length: 6 timeZone: UTC emojiLogging: true # javaOpts: # pluginsDirectory: # siteUrl: session: {} # maxSessionAge: # sessionCookies: livenessProbe: initialDelaySeconds: 120 timeoutSeconds: 30 failureThreshold: 6 readinessProbe: initialDelaySeconds: 30 timeoutSeconds: 3 periodSeconds: 5 service: name: metabase type: ClusterIP externalPort: 80 internalPort: 3000 # Used to fix NodePort when service.type: NodePort. nodePort: annotations: {} # Used to add custom annotations to the Service. # service.beta.kubernetes.io/aws-load-balancer-internal: "0.0.0.0/0" ingress: enabled: false # Used to create Ingress record (should used with service.type: ClusterIP). hosts: # - metabase.domain.com # The ingress path. Useful to host metabase on a subpath, such as `/metabase`. path: / labels: # Used to add custom labels to the Ingress # Useful if for example you have multiple Ingress controllers and want your Ingress controllers to bind to specific Ingresses # traffic: internal annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" tls: # Secrets must be manually created in the namespace. # - secretName: metabase-tls # hosts: # - metabase.domain.com # A custom log4j.properties file can be provided using a multiline YAML string. 
# See https://github.com/metabase/metabase/blob/master/resources/log4j.properties # # log4jProperties: resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi ## Node labels for pod assignment ## ref: https://kubernetes.io/docs/user-guide/node-selection/ # nodeSelector: {} ## Tolerations for pod assignment ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ ## tolerations: [] ## Affinity for pod assignment ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## affinity: {}
janusgraph
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"janusgraph.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some...
# Default values for JanusGraph chart. # This is a YAML-formatted file. # Declare variables to be passed into your templates. image: repository: gcr.io/cloud-solutions-images/janusgraph tag: v2 pullPolicy: IfNotPresent ## The default configuration provided here uses attached storage for db and indexing ## For a distributed deployment, increase the number of replicas and choose ## a distributed backend for storage and indexing below (i.e. hbase and elasticsearch) replicaCount: 1 ## set any pod specific resource requests here resources: {} extraEnvs: {} service: type: ClusterIP # Change to LoadBalancer if you plan to access JanusGraph outside k8s cluster port: 8182 serviceAnnotations: # the following line is ignored unless unless using a LoadBalancer with GCP # cloud.google.com/load-balancer-type: "Internal" ## This chart can deploy the Elasticsearch as a dependency. ## Use this section to provide elasticsearch chart specific values elasticsearch: deploy: false # change to true if you want to deploy Elasticsearch as a requirement along with this chart rbac: create: true # required for kubernetes >1.7 properties: ## use this section to add or adjust JanusGraph properties as needed ## all uncommented values in this section will be placed in the janusgraph.properties file ## see http://docs.janusgraph.org/0.2.0/storage-backends.html, choose the desired storage backend ## (i.e. 
berkeleyje, cassandra, cassandrathrift, cql, embeddedcassandra, hbase, inmemory ) ## for Cloud Bigtable choose hbase storage.backend: berkeleyje storage.directory: /db/berkeley ## Google Cloud Bigtable specific configuration ## To use Cloud Bigtable, uncomment the following three lines and replace values # storage.hbase.ext.google.bigtable.instance.id: <your-cbt-instance> # replace with your Cloud Bigtable Instance ID # storage.hbase.ext.google.bigtable.project.id: <your-cbt-project> # replace with your Cloud Bigtable Project ID # storage.hbase.ext.hbase.client.connection.impl: com.google.cloud.bigtable.hbase1_x.BigtableConnection # required for using Cloud Bigtable ## Indexing/Search backend configuration (see http://docs.janusgraph.org/latest/index-backends.html) index.search.backend: lucene index.search.directory: /db/searchindex ## choose the index backend you want to use: elasticsearch, es, solr or lucene (default "lucene") ## if you plan to use elasticsearch, change to "index.search.backend=elasticsearch" ## Elasticsearch configuration (see http://docs.janusgraph.org/latest/elasticsearch.html) ## This property is only relevant if you are using Elasticsearch as your index backend. 
# index.search.hostname: <your-es-hostname> ## Only set this if you plan to use an elasticsearch deployment created outside of this chart, ## If you plan to deploy Elasticsearch as a requirement with this helm chart, ## then leave this commented out or empty, it will be filled in automatically ## other common properties # cache.db-cache: true # cache.db-cache-clean-wait: 20 # cache.db-cache-time: 180000 # cache.db-cache-size: 0.5 ## when using local storage and indexing, choose whether to persist day persistence: enabled: true # set to false if you are testing and do not want to persist data path: /db accessMode: ReadWriteOnce size: 4Gi # adjust size as needed depending on the size of local storage and indexing required existingClaim: # to reattach to previously used storage, provide an existing claim (or use --set) ## To make adjustments to janusgraph.properties and gremlin-server.yaml, provide a ## custom ConfigMap in your k8s cluster (using the helm created ConfigMap as a base). configMapOverrideName: "" # nodeSelector: # beta.kubernetes.io/os: linux # beta.kubernetes.io/arch: amd64
drupal
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"drupal.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kub...
## Global Docker image parameters ## Please, note that this will override the image parameters, including dependencies, configured to use the global value ## Current available global Docker image parameters: imageRegistry and imagePullSecrets ## # global: # imageRegistry: myRegistryName # imagePullSecrets: # - myRegistryKeySecretName # storageClass: myStorageClass ## Bitnami Drupal image version ## ref: https://hub.docker.com/r/bitnami/drupal/tags/ ## image: registry: docker.io repository: bitnami/drupal tag: 8.8.3-debian-10-r1 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## String to partially override drupal.fullname template (will maintain the release name) ## # nameOverride: ## String to fully override drupal.fullname template ## # fullnameOverride: ## Installation Profile ## ref: https://github.com/bitnami/bitnami-docker-drupal#configuration ## drupalProfile: standard ## User of the application ## ref: https://github.com/bitnami/bitnami-docker-drupal#configuration ## drupalUsername: user ## Application password ## Defaults to a random 10-character alphanumeric string if not set ## ref: https://github.com/bitnami/bitnami-docker-drupal#configuration ## # drupalPassword: ## Admin email ## ref: https://github.com/bitnami/bitnami-docker-drupal#configuration ## drupalEmail: user@example.com ## Set to `yes` to allow the container to be started with blank passwords ## ref: https://github.com/bitnami/bitnami-docker-drupal#environment-variables allowEmptyPassword: "yes" ## External database configuration ## externalDatabase: ## Database host host: localhost ## Database 
host port: 3306 ## Database user user: bn_drupal ## Database password password: "" ## Database name database: bitnami_drupal ## ## MariaDB chart configuration ## ## https://github.com/helm/charts/blob/master/stable/mariadb/values.yaml ## mariadb: ## Whether to deploy a mariadb server to satisfy the applications database requirements. To use an external database set this to false and configure the externalDatabase parameters enabled: true ## Disable MariaDB replication replication: enabled: false ## Create a database and a database user ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run ## db: name: bitnami_drupal user: bn_drupal ## If the password is not specified, mariadb will generates a random password ## # password: ## MariaDB admin password ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run ## # rootUser: # password: ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## master: persistence: enabled: true ## mariadb data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. 
(gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi ## Kubernetes configuration ## For minikube, set this to NodePort, elsewhere use LoadBalancer ## Use ClusterIP if your setup includes ingress controller ## service: type: LoadBalancer # HTTP Port port: 80 # HTTPS Port httpsPort: 443 ## ## nodePorts: ## http: <to set explicitly, choose port between 30000-32767> ## https: <to set explicitly, choose port between 30000-32767> nodePorts: http: "" https: "" ## Enable client source IP preservation ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip ## externalTrafficPolicy: Cluster ## Configure the ingress resource that allows you to access the ## Drupal installation. Set up the URL ## ref: http://kubernetes.io/docs/user-guide/ingress/ ## ingress: ## Set to true to enable ingress record generation ## enabled: false ## Set this to true in order to add the corresponding annotations for cert-manager ## certManager: false ## When the ingress is enabled, a host pointing to this will be created ## hostname: drupal.local ## Ingress annotations done as key:value pairs ## For a full list of possible ingress annotations, please see ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md ## ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set ## annotations: {} # kubernetes.io/ingress.class: nginx ## The list of additional hostnames to be covered with this ingress record. 
## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array ## hosts: ## - name: drupal.local ## path: / ## The tls configuration for the ingress ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls ## tls: ## - hosts: ## - drupal.local ## secretName: drupal.local-tls secrets: ## If you're providing your own certificates, please use this to add the certificates as secrets ## key and certificate should start with -----BEGIN CERTIFICATE----- or ## -----BEGIN RSA PRIVATE KEY----- ## ## name should line up with a tlsSecret set further up ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set ## ## It is also possible to create and manage the certificates outside of this helm chart ## Please see README.md for more information # - name: drupal.local-tls # key: # certificate: ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## persistence: enabled: true drupal: ## drupal data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi ## A manually managed Persistent Volume Claim ## Requires persistence.enabled: true ## If defined, PVC must be created manually before volume will be bound ## # existingClaim: ## If defined, the drupal-data volume will mount to the specified hostPath. ## Requires persistence.enabled: true ## Requires persistence.existingClaim: nil|false ## Default: nil. 
## # hostPath: ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: requests: memory: 512Mi cpu: 300m ## Configure volume mounts. This is useful for images whose data mount paths are ## different than the default. ## volumeMounts: drupal: mountPath: /bitnami/drupal ## Pass extra environment variables to the Drupal container. ## # extraVars: # - name: EXTRA_VAR_1 # value: extra-var-value-1 # - name: EXTRA_VAR_2 # value: extra-var-value-2 ## Configure liveness and readiness probes. ## Drupal core exposes /user/login to unauthenticated requests, making it a good ## default liveness and readiness path. However, that may not always be the ## case. For example, if the image value is overridden to an image containing a ## module that alters that route, or an image that does not auto-install Drupal. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ # livenessProbe: httpGet: path: /user/login port: http initialDelaySeconds: 120 readinessProbe: httpGet: path: /user/login port: http initialDelaySeconds: 30 ## Pod annotations ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ ## podAnnotations: {} ## Affinity for pod assignment ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## affinity: {} ## Prometheus Exporter / Metrics ## metrics: enabled: false image: registry: docker.io repository: bitnami/apache-exporter tag: 0.7.0-debian-10-r39 pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. 
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## Metrics exporter pod Annotation and Labels podAnnotations: prometheus.io/scrape: "true" prometheus.io/port: "9117" ## Metrics exporter resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## # resources: {}
risk-advisor
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"risk-advisor.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because so...
# Default values for risk-advisor. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 image: repository: pposkrobko/risk-advisor tag: v1.0.0 pullPolicy: IfNotPresent service: type: NodePort port: 9997 targetPort: 9997 nodePort: 31111 # resources: # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi
nextcloud
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"nextcloud.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some ...
## Official nextcloud image version ## ref: https://hub.docker.com/r/library/nextcloud/tags/ ## image: repository: nextcloud tag: 17.0.0-apache pullPolicy: IfNotPresent # pullSecrets: # - myRegistrKeySecretName nameOverride: "" fullnameOverride: "" # Number of replicas to be deployed replicaCount: 1 ## Allowing use of ingress controllers ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ ## ingress: enabled: false annotations: {} # nginx.ingress.kubernetes.io/proxy-body-size: 4G # kubernetes.io/tls-acme: "true" # certmanager.k8s.io/cluster-issuer: letsencrypt-prod # nginx.ingress.kubernetes.io/server-snippet: |- # server_tokens off; # proxy_hide_header X-Powered-By; # rewrite ^/.well-known/webfinger /public.php?service=webfinger last; # rewrite ^/.well-known/host-meta /public.php?service=host-meta last; # rewrite ^/.well-known/host-meta.json /public.php?service=host-meta-json; # location = /.well-known/carddav { # return 301 $scheme://$host/remote.php/dav; # } # location = /.well-known/caldav { # return 301 $scheme://$host/remote.php/dav; # } # location = /robots.txt { # allow all; # log_not_found off; # access_log off; # } # location ~ ^/(?:build|tests|config|lib|3rdparty|templates|data)/ { # deny all; # } # location ~ ^/(?:autotest|occ|issue|indie|db_|console) { # deny all; # } # tls: # - secretName: nextcloud-tls # hosts: # - nextcloud.kube.home labels: {} # Allow configuration of lifecycle hooks # ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/ lifecycle: {} # postStartCommand: [] # preStopCommand: [] nextcloud: host: nextcloud.kube.home username: admin password: changeme update: 0 datadir: /var/www/html/data tableprefix: persistence: subPath: mail: enabled: false fromAddress: user domain: domain.com smtp: host: domain.com secure: ssl port: 465 authtype: LOGIN name: user password: pass # PHP Configuration files # Will be injected in /usr/local/etc/php/conf.d phpConfigs: {} # Default config 
files # IMPORTANT: Will be used only if you put extra configs, otherwise default will come from nextcloud itself # Default confgurations can be found here: https://github.com/nextcloud/docker/tree/master/16.0/apache/config defaultConfigs: # To protect /var/www/html/config .htaccess: true # Redis default configuration redis.config.php: true # Apache configuration for rewrite urls apache-pretty-urls.config.php: true # Define APCu as local cache apcu.config.php: true # Apps directory configs apps.config.php: true # Used for auto configure database autoconfig.php: true # SMTP default configuration smtp.config.php: true # Extra config files created in /var/www/html/config/ # ref: https://docs.nextcloud.com/server/15/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file configs: {} # For example, to use S3 as primary storage # ref: https://docs.nextcloud.com/server/13/admin_manual/configuration_files/primary_storage.html#simple-storage-service-s3 # # configs: # s3.config.php: |- # <?php # $CONFIG = array ( # 'objectstore' => array( # 'class' => '\\OC\\Files\\ObjectStore\\S3', # 'arguments' => array( # 'bucket' => 'my-bucket', # 'autocreate' => true, # 'key' => 'xxx', # 'secret' => 'xxx', # 'region' => 'us-east-1', # 'use_ssl' => true # ) # ) # ); ## Strategy used to replace old pods ## IMPORTANT: use with care, it is suggested to leave as that for upgrade purposes ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy strategy: type: Recreate # type: RollingUpdate # rollingUpdate: # maxSurge: 1 # maxUnavailable: 0 ## ## Extra environment variables extraEnv: # - name: SOME_SECRET_ENV # valueFrom: # secretKeyRef: # name: nextcloud # key: secret_key # Extra mounts for the pods. Example shown is for connecting a legacy NFS volume # to NextCloud pods in Kubernetes. 
This can then be configured in External Storage extraVolumes: # - name: nfs # nfs: # server: "10.0.0.1" # path: "/nextcloud_data" # readOnly: false extraVolumeMounts: # - name: nfs # mountPath: "/legacy_data" nginx: ## You need to set an fpm version of the image for nextcloud if you want to use nginx! enabled: false image: repository: nginx tag: alpine pullPolicy: IfNotPresent config: # This generates the default nginx config as per the nextcloud documentation default: true # custom: |- # worker_processes 1;.. resources: {} internalDatabase: enabled: true name: nextcloud ## ## External database configuration ## externalDatabase: enabled: false ## Supported database engines: mysql or postgresql type: mysql ## Database host host: ## Database user user: nextcloud ## Database password password: ## Database name database: nextcloud ## Use a existing secret existingSecret: enabled: false # secretName: nameofsecret # usernameKey: username # passwordKey: password ## ## MariaDB chart configuration ## mariadb: ## Whether to deploy a mariadb server to satisfy the applications database requirements. 
To use an external database set this to false and configure the externalDatabase parameters enabled: false db: name: nextcloud user: nextcloud password: changeme ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## persistence: enabled: false accessMode: ReadWriteOnce size: 8Gi redis: enabled: false usePassword: false ## Cronjob to execute Nextcloud background tasks ## ref: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/background_jobs_configuration.html#cron-jobs ## cronjob: enabled: false # Nexcloud image is used as default but only curl is needed image: {} # repository: nextcloud # tag: 16.0.3-apache # pullPolicy: IfNotPresent # pullSecrets: # - myRegistrKeySecretName # Every 15 minutes # Note: Setting this to any any other value than 15 minutes might # cause issues with how nextcloud background jobs are executed schedule: "*/15 * * * *" annotations: {} # Set curl's insecure option if you use e.g. self-signed certificates curlInsecure: false failedJobsHistoryLimit: 5 successfulJobsHistoryLimit: 2 # If not set, nextcloud deployment one will be set # resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
# limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi # If not set, nextcloud deployment one will be set # nodeSelector: {} # If not set, nextcloud deployment one will be set # tolerations: [] # If not set, nextcloud deployment one will be set # affinity: {} service: type: ClusterIP port: 8080 loadBalancerIP: nil nodePort: nil ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## persistence: # Nextcloud Data (/var/www/html) enabled: false annotations: {} ## nextcloud data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" ## A manually managed Persistent Volume and Claim ## Requires persistence.enabled: true ## If defined, PVC must be created manually before volume will be bound # existingClaim: accessMode: ReadWriteOnce size: 8Gi resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
# limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi ## Liveness and readiness probe values ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes ## livenessProbe: enabled: true initialDelaySeconds: 30 periodSeconds: 15 timeoutSeconds: 5 failureThreshold: 3 successThreshold: 1 readinessProbe: enabled: true initialDelaySeconds: 30 periodSeconds: 15 timeoutSeconds: 5 failureThreshold: 3 successThreshold: 1 ## Enable pod autoscaling using HorizontalPodAutoscaler ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ ## hpa: enabled: false cputhreshold: 60 minPods: 1 maxPods: 10 nodeSelector: {} tolerations: [] affinity: {} ## Prometheus Exporter / Metrics ## metrics: enabled: false replicaCount: 1 # The metrics exporter needs to know how you serve Nextcloud either http or https https: false timeout: 5s image: repository: xperimental/nextcloud-exporter tag: v0.3.0 pullPolicy: IfNotPresent ## Metrics exporter resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## # resources: {} ## Metrics exporter pod Annotation and Labels # podAnnotations: {} # podLabels: {} service: type: ClusterIP ## Use serviceLoadBalancerIP to request a specific static IP, ## otherwise leave blank # loadBalancerIP: annotations: prometheus.io/scrape: "true" prometheus.io/port: "9205" labels: {}
tomcat
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"tomcat.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kub...
# Default values for the chart. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 image: webarchive: repository: ananwaresystems/webarchive tag: "1.0" tomcat: repository: tomcat tag: "7.0" pullPolicy: IfNotPresent pullSecrets: [] deploy: directory: /usr/local/tomcat/webapps service: name: http type: LoadBalancer externalPort: 80 internalPort: 8080 hostPort: 8009 ingress: enabled: false annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" path: / hosts: - chart-example.local tls: [] # - secretName: chart-example-tls # hosts: # - chart-example.local env: [] # - name: env # value: test extraVolumes: [] # - name: extra # emptyDir: {} extraVolumeMounts: [] # - name: extra # mountPath: /usr/local/tomcat/webapps/app # readOnly: true extraInitContainers: [] # - name: do-something # image: busybox # command: ['do', 'something'] readinessProbe: path: "/sample" initialDelaySeconds: 60 periodSeconds: 30 failureThreshold: 6 timeoutSeconds: 5 livenessProbe: path: "/sample" initialDelaySeconds: 60 periodSeconds: 30 failureThreshold: 6 timeoutSeconds: 5 resources: {} # limits: # cpu: 100m # memory: 256Mi # requests: # cpu: 100m # memory: 256Mi nodeSelector: {} tolerations: [] affinity: {}
selenium
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"selenium.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some K...
global: ## NodeSelector to be used in every deployment ## hub, chrome, firefox, chromeDebug and firefoxDebug ## can also be specified at chart level see below nodeSelector: # label: value ## Configure HostAliases hostAliases: [] affinity: # label: value tolerations: # label: value ## imagePullSecrets is the secret to use to pull the image from in case of a private flavour image ## to be used in every deployment hub, chrome, firefox, chromeDebug and firefoxDebug ## can also be specified at chart level see below. ## Specifying secret at a chart level will override the global option imagePullSecrets: hub: ## The repository and image ## ref: https://hub.docker.com/r/selenium/hub/ image: "selenium/hub" ## The tag for the image ## ref: https://hub.docker.com/r/selenium/hub/tags/ tag: "3.141.59" ## imagePullSecrets is the secret to use to pull the image from in case of a private flavour image # imagePullSecrets: "regcred" ## Specify an imagePullPolicy ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images pullPolicy: "IfNotPresent" ## The port which the hub listens on port: 4444 ## The port the service listens on servicePort: 4444 ## nodePort - The node port the service exposed on # nodePort: 30044 ## Timeout for probe Hub readiness via HTTP request on hub.probePath readinessTimeout: 1 ## Initial delay before performing the first readinessProbe readinessDelay: 15 ## Timeout for probe Hub liveness via HTTP request on hub.probePath livenessTimeout: 1 ## Path for checking readiness and liveness via HTTP Request probePath: "/wd/hub/status" # Configure security context on the hub pod # securityContext: # fsGroup: 1000 # runAsUser: 1000 ## Additional environment variables to set extraEnvs: [] # extraEnvs: # - name: FOO # valueFrom: # secretKeyRef: # key: FOO # name: secret-resource ## Set the JAVA_TOOL_OPTIONS environment variable ## If you find your selenium hub is OOMKilled, try adding -XX:+UseSerialGC javaOpts: "-Xmx400m" ## Set the SE_OPTS environment 
variable ## ref: http://www.seleniumhq.org/docs/07_selenium_grid.jsp#node-configuration # seOpts: ## Defining a JMX port will open the port on the container, however, it ## requires additional javaOpts, ie ## javaOpts: > ## -Dcom.sun.management.jmxremote.port=4000 ## -Dcom.sun.management.jmxremote.authenticate=false ## -Dcom.sun.management.jmxremote.ssl=false ## ref: http://openjdk.java.net/groups/jmx/ # jmxPort: 4000 ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ resources: limits: cpu: ".5" memory: "512Mi" ## Configure annotations on the hub pod podAnnotations: {} ## Configure labels on the hub pod podLabels: {} ## The type of service to create ## Values: ClusterIP, NodePort, LoadBalancer, or ExternalName ## ref: https://kubernetes.io/docs/user-guide/services/ serviceType: "LoadBalancer" ## The LoadBalancer IP Address ## ref: https://kubernetes.io/docs/user-guide/services/ ## serviceLoadBalancerIP: "40.121.183.52" loadBalancerSourceRanges: [] ## Control where client requests go, to the same pod or round-robin ## Values: ClientIP or None ## ref: https://kubernetes.io/docs/user-guide/services/ serviceSessionAffinity: "None" ## Define various attributes of the service # serviceAnnotations: # # internal AWS ELB # service.beta.kubernetes.io/aws-load-balancer-internal: "0.0.0.0/0" ## ref: https://github.com/SeleniumHQ/selenium/wiki/Grid2#configuring-the-nodes ## In milliseconds # gridNewSessionWaitTimeout: -1 # gridJettyMaxThreads: -1 ## In milliseconds # gridNodePolling: 5000 ## In milliseconds # gridCleanUpCycle: 5000 ## In seconds # gridTimeout: 30 ## In seconds # gridBrowserTimeout: 0 # gridMaxSession: 5 ## In milliseconds # gridUnregisterIfStillDownAfter: 30000 # timeZone: UTC ## NodeSelector to be used for the hub nodeSelector: # label: value ingress: enabled: false annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" path: / hosts: - selenium-hub.local tls: [] # - 
secretName: selenium-hub-tls # hosts: # - selenium-hub.local chrome: ## Enable the creation of a node-chrome pod enabled: false ## DaemonSet instead of Deployment runAsDaemonSet: false ## The repository and image ## ref: https://hub.docker.com/r/selenium/node-chrome/ image: "selenium/node-chrome" ## The tag for the image ## ref: https://hub.docker.com/r/selenium/node-chrome/tags/ tag: "3.141.59" ## imagePullSecrets is the secret to use to pull the image from in case of a private flavour image # imagePullSecrets: "regcred" ## Specify an imagePullPolicy ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images pullPolicy: "IfNotPresent" ## The number of pods in the deployment. This is ignored if runAsDaemonSet is enabled. replicas: 1 ## When true will add a liveness check to the pod enableLivenessProbe: false ## When true will wait for current running sessions to finish before terminating the pod waitForRunningSessions: false ## Configure annotations on the chrome pods podAnnotations: {} ## Configure Labels on the chrome pods podLabels: {} # Configure security context on the chrome pods # securityContext: # fsGroup: 1000 # runAsUser: 1000 ## Additional environment variables to set extraEnvs: [] # extraEnvs: # - name: FOO # valueFrom: # secretKeyRef: # key: FOO # name: secret-resource ## Set the JAVA_TOOL_OPTIONS environment variable ## If you find your selenium node is OOMKilled, try adding -XX:+UseSerialGC javaOpts: "-Xmx900m" ## Set the SE_OPTS environment variable ## ref: http://www.seleniumhq.org/docs/07_selenium_grid.jsp#node-configuration # seOpts: ## Defining a JMX port will open the port on the container, however, it ## requires additional javaOpts, ie ## javaOpts: > ## -Dcom.sun.management.jmxremote.port=4000 ## -Dcom.sun.management.jmxremote.authenticate=false ## -Dcom.sun.management.jmxremote.ssl=false ## ref: http://openjdk.java.net/groups/jmx/ # jmxPort: 4000 ## User defined volumes ## ref: https://kubernetes.io/docs/user-guide/volumes/ 
volumes: ## https://github.com/kubernetes/kubernetes/pull/34928#issuecomment-277952723 ## http://stackoverflow.com/questions/39852716/chrome-driver-throwing-org-openqa-selenium-remote-sessionnotfoundexception-whe ## Chrome wants more than 64mb of shared memory. Docker/k8s default to 64mb. - name: dshm emptyDir: medium: Memory volumeMounts: - mountPath: /dev/shm name: dshm ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ resources: limits: cpu: ".5" memory: "1000Mi" ## Characteristics of the browser window # screenWidth: 1280 # screenHeight: 1024 # screenDepth: 24 # display: :10 ## Selenium node options # chromeVersion: # nodeMaxInstances: 1 # nodeMaxSession: 1 ## In milliseconds # nodeRegisterCycle: 5000 # nodePort: 5555 # timeZone: UTC ## NodeSelector to be used for chrome nodeSelector: # label: value chromeDebug: ## Enable the creation of a node-chrome-debug pod enabled: false ## DaemonSet instead of Deployment runAsDaemonSet: false ## The repository and image ## ref: https://hub.docker.com/r/selenium/node-chrome-debug/ image: "selenium/node-chrome-debug" ## The tag for the image ## ref: https://hub.docker.com/r/selenium/node-chrome-debug/tags/ tag: "3.141.59" ## imagePullSecrets is the secret to use to pull the image from in case of a private flavour image # imagePullSecrets: "regcred" ## Specify an imagePullPolicy ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images pullPolicy: "IfNotPresent" ## The number of pods in the deployment. This is ignored if runAsDaemonSet is enabled. 
replicas: 1 ## When true will add a liveness check to the pod enableLivenessProbe: false ## When true will wait for current running sessions to finish before terminating the pod waitForRunningSessions: false ## Configure annotations on the chrome debug pods podAnnotations: {} ## Configure labels on the chrome debug pods podLabels: {} # Configure security context on the chrome debug pods # securityContext: # fsGroup: 1000 # runAsUser: 1000 ## Additional environment variables to set extraEnvs: [] # extraEnvs: # - name: FOO # valueFrom: # secretKeyRef: # key: FOO # name: secret-resource ## Set the JAVA_TOOL_OPTIONS environment variable ## If you find your selenium hub is OOMKilled, try adding -XX:+UseSerialGC javaOpts: "-Xmx900m" ## Set the SE_OPTS environment variable ## ref: http://www.seleniumhq.org/docs/07_selenium_grid.jsp#node-configuration # seOpts: ## Defining a JMX port will open the port on the container, however, it ## requires additional javaOpts, ie ## javaOpts: > ## -Dcom.sun.management.jmxremote.port=4000 ## -Dcom.sun.management.jmxremote.authenticate=false ## -Dcom.sun.management.jmxremote.ssl=false ## ref: http://openjdk.java.net/groups/jmx/ # jmxPort: 4000 ## User defined volumes ## ref: https://kubernetes.io/docs/user-guide/volumes/ volumes: ## https://github.com/kubernetes/kubernetes/pull/34928#issuecomment-277952723 ## http://stackoverflow.com/questions/39852716/chrome-driver-throwing-org-openqa-selenium-remote-sessionnotfoundexception-whe ## Chrome wants more than 64mb of shared memory. Docker/k8s default to 64mb. 
- name: dshm emptyDir: medium: Memory volumeMounts: - mountPath: /dev/shm name: dshm ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ resources: limits: cpu: ".5" memory: "1500Mi" ## Characteristics of the browser window # screenWidth: 1280 # screenHeight: 1024 # screenDepth: 24 # display: :10 ## Selenium node options # chromeVersion: # nodeMaxInstances: 1 # nodeMaxSession: 1 ## In milliseconds # nodeRegisterCycle: 5000 # nodePort: 5555 # timeZone: UTC ## NodeSelector to be used for chromeDebug nodeSelector: # label: value firefox: ## Enable the creation of a node-firefox pod enabled: false ## DaemonSet instead of Deployment runAsDaemonSet: false ## The repository and image ## ref: https://hub.docker.com/r/selenium/node-firefox/ image: "selenium/node-firefox" ## The tag for the image ## ref: https://hub.docker.com/r/selenium/node-firefox/tags/ tag: "3.141.59" ## imagePullSecrets is the secret to use to pull the image from in case of a private flavour image # imagePullSecrets: "regcred" ## Specify an imagePullPolicy ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images pullPolicy: "IfNotPresent" ## The number of pods in the deployment. This is ignored if runAsDaemonSet is enabled. 
replicas: 1 ## When true will add a liveness check to the pod enableLivenessProbe: false ## When true will wait for current running sessions to finish before terminating the pod waitForRunningSessions: false ## Configure annotations on the firefox pods podAnnotations: {} ## Configure labels on the firefox pods podLabels: {} # Configure security context on the firefox pods # securityContext: # fsGroup: 1000 # runAsUser: 1000 ## Additional environment variables to set extraEnvs: [] # extraEnvs: # - name: FOO # valueFrom: # secretKeyRef: # key: FOO # name: secret-resource ## Set the JAVA_TOOL_OPTIONS environment variable ## If you find your selenium hub is OOMKilled, try adding -XX:+UseSerialGC javaOpts: "-Xmx900m" ## Set the SE_OPTS environment variable ## ref: http://www.seleniumhq.org/docs/07_selenium_grid.jsp#node-configuration # seOpts: ## Defining a JMX port will open the port on the container, however, it ## requires additional javaOpts, ie ## javaOpts: > ## -Dcom.sun.management.jmxremote.port=4000 ## -Dcom.sun.management.jmxremote.authenticate=false ## -Dcom.sun.management.jmxremote.ssl=false ## ref: http://openjdk.java.net/groups/jmx/ # jmxPort: 4000 volumes: ## https://docs.openshift.com/container-platform/3.6/dev_guide/shared_memory.html - name: dshm emptyDir: medium: Memory volumeMounts: - mountPath: /dev/shm name: dshm ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ resources: limits: cpu: ".5" memory: "1000Mi" ## Characteristics of the browser window # screenWidth: 1280 # screenHeight: 1024 # screenDepth: 24 # display: :10 ## Selenium node options # firefoxVersion: # nodeMaxInstances: 1 # nodeMaxSession: 1 ## In milliseconds # nodeRegisterCycle: 5000 # nodePort: 5555 # timeZone: UTC ## NodeSelector to be used for firefox nodeSelector: # label: value firefoxDebug: ## Enable the creation of a node-firefox-debug pod enabled: false ## DaemonSet instead of Deployment runAsDaemonSet: false ## The 
repository and image ## ref: https://hub.docker.com/r/selenium/node-firefox-debug/ image: "selenium/node-firefox-debug" ## The tag for the image ## ref: https://hub.docker.com/r/selenium/node-firefox-debug/tags/ tag: "3.141.59" ## imagePullSecrets is the secret to use to pull the image from in case of a private flavour image # imagePullSecrets: "regcred" ## Specify an imagePullPolicy ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images pullPolicy: "IfNotPresent" ## The number of pods in the deployment. This is ignored if runAsDaemonSet is enabled. replicas: 1 ## When true will add a liveness check to the pod enableLivenessProbe: false ## When true will wait for current running sessions to finish before terminating the pod waitForRunningSessions: false ## Configure annotations on the firefox debug pods podAnnotations: {} ## Configure labels on the firefox debug pods podLabels: {} # Configure security context on the firefox debug pods # securityContext: # fsGroup: 1000 # runAsUser: 1000 ## Additional environment variables to set extraEnvs: [] # extraEnvs: # - name: FOO # valueFrom: # secretKeyRef: # key: FOO # name: secret-resource ## Set the JAVA_TOOL_OPTIONS environment variable ## If you find your selenium hub is OOMKilled, try adding -XX:+UseSerialGC javaOpts: "-Xmx900m" ## Set the SE_OPTS environment variable ## ref: http://www.seleniumhq.org/docs/07_selenium_grid.jsp#node-configuration # seOpts: ## Defining a JMX port will open the port on the container, however, it ## requires additional javaOpts, ie ## javaOpts: > ## -Dcom.sun.management.jmxremote.port=4000 ## -Dcom.sun.management.jmxremote.authenticate=false ## -Dcom.sun.management.jmxremote.ssl=false ## ref: http://openjdk.java.net/groups/jmx/ # jmxPort: 4000 volumes: ## https://docs.openshift.com/container-platform/3.6/dev_guide/shared_memory.html - name: dshm emptyDir: medium: Memory volumeMounts: - mountPath: /dev/shm name: dshm ## Configure resource requests and limits ## ref: 
http://kubernetes.io/docs/user-guide/compute-resources/ resources: limits: cpu: ".5" memory: "1500Mi" ## Characteristics of the browser window # screenWidth: 1280 # screenHeight: 1024 # screenDepth: 24 # display: :10 ## Selenium node options # firefoxVersion: # nodeMaxInstances: 1 # nodeMaxSession: 1 ## In milliseconds # nodeRegisterCycle: 5000 # nodePort: 5555 # timeZone: UTC ## NodeSelector to be used for firefoxDebug nodeSelector: # label: value
hazelcast-jet
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"hazelcast-jet.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because s...
## Hazelcast Jet image version ## ref: https://hub.docker.com/r/hazelcast/hazelcast-jet/tags/ ## image: # repository is the Hazelcast Jet image name repository: "hazelcast/hazelcast-jet" # tag is the Hazelcast Jet image tag tag: "4.1" # pullPolicy is the Docker image pull policy # It's recommended to change this to 'Always' if the image tag is 'latest' # ref: http://kubernetes.io/docs/user-guide/images/#updating-images # pullPolicy: IfNotPresent # pullSecrets is an array of docker-registry secret names # Secrets must be manually created in the namespace. # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ # pullSecrets: # - myRegistryKeySecretName # Cluster settings cluster: # memberCount is the number Hazelcast Jet members memberCount: 2 # Hazelcast Jet properties jet: # javaOpts are additional JAVA_OPTS properties for Hazelcast Jet member javaOpts: # loggingLevel is the level of Hazelcast logs (SEVERE, WARNING, INFO, # CONFIG, FINE, FINER, and FINEST) Note that changing this value # requires setting securityContext.runAsUser to 0. # loggingLevel: # Jet and Hazelcast IMDG YAML configuration files yaml: hazelcast: cluster-name: jet network: join: multicast: enabled: false kubernetes: enabled: true service-name: ${serviceName} namespace: ${namespace} resolve-not-ready-addresses: true rest-api: enabled: true endpoint-groups: HEALTH_CHECK: enabled: true management-center: enabled: ${hazelcast.mancenter.enabled} url: ${hazelcast.mancenter.url} hazelcast-jet: instance: # period between flow control packets in milliseconds flow-control-period: 100 # number of backup copies to configure for Hazelcast IMaps used internally in a Jet job backup-count: 1 # the delay after which auto-scaled jobs will restart if a new member is added to the # cluster. The default is 10 seconds. Has no effect on jobs with auto scaling disabled scale-up-delay-millis: 10000 # Sets whether lossless job restart is enabled for the node. 
With # lossless restart you can restart the whole cluster without losing the # jobs and their state. The feature is implemented on top of the Hot # Restart feature of Hazelcast IMDG which persists the data to disk. lossless-restart-enabled: false edge-defaults: # capacity of the concurrent SPSC queue between each two processors queue-size: 1024 # network packet size limit in bytes, only applies to distributed edges packet-size-limit: 16384 # receive window size multiplier, only applies to distributed edges receive-window-multiplier: 3 metrics: # whether metrics collection is enabled enabled: true # whether jmx mbean metrics collection is enabled jmx-enabled: true # the number of seconds the metrics will be retained on the instance retention-seconds: 120 # the metrics collection interval in seconds collection-interval-seconds: 5 # whether metrics should be collected for data structures. Metrics # collection can have some overhead if there is a large number of data # structures metrics-for-data-structures: false # configurationFiles are any additional Hazelcast Jet configuration files # configurationFiles: # affinity specifies the affinity/anti-affinity of different pods. 
The commented out # example below shows how you could ensure your hazelcast jet pods are scheduled on # different Kubernetes nodes # affinity: # podAntiAffinity: # requiredDuringSchedulingIgnoredDuringExecution: # - labelSelector: # matchExpressions: # - key: app.kubernetes.io/name # operator: In # values: # - hazelcast-jet # - key: role # operator: In # values: # - hazelcast-jet # topologyKey: kubernetes.io/hostname # tolerations enable Hazelcast Jet PODs to be able to run on nodes with taints # tolerations: # nodeSelector is an array of Hazelcast Node labels for POD assignments # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector nodeSelector: {} # hostPort is a port under which Hazelcast Jet PODs are exposed on the host machines # hostPort: gracefulShutdown: enabled: true maxWaitSeconds: 600 # Hazelcast Liveness probe livenessProbe: # enabled is a flag to used to enable liveness probe enabled: true # initialDelaySeconds is a delay before liveness probe is initiated initialDelaySeconds: 30 # periodSeconds decides how often to perform the probe periodSeconds: 10 # timeoutSeconds decides when the probe times out timeoutSeconds: 5 # successThreshold is the minimum consecutive successes for the probe to be considered successful after having failed successThreshold: 1 # failureThreshold is the minimum consecutive failures for the probe to be considered failed after having succeeded failureThreshold: 3 # url path that will be called to check liveness path: /hazelcast/health/node-state # port that will be used in liveness probe calls # port: # HTTPS or HTTP scheme scheme: HTTP # Hazelcast Readiness probe readinessProbe: # enabled is a flag to used to enable readiness probe enabled: true # initialDelaySeconds is a delay before readiness probe is initiated initialDelaySeconds: 30 # periodSeconds decides how often to perform the probe periodSeconds: 10 # timeoutSeconds decides when the probe times out timeoutSeconds: 1 # successThreshold 
is the minimum consecutive successes for the probe to be considered successful after having failed successThreshold: 1 # failureThreshold is the minimum consecutive failures for the probe to be considered failed after having succeeded failureThreshold: 3 # url path that will be called to check readiness path: /hazelcast/health/ready # port that will be used in readiness probe calls # port: # HTTPS or HTTP scheme scheme: HTTP # Configure resource requests and limits # ref: http://kubernetes.io/docs/user-guide/compute-resources/ # # resources: # requests: # memory: 256Mi # cpu: 100m # Hazelcast Service properties service: # type defines the Kubernetes service type ('ClusterIP', 'LoadBalancer', or 'NodePort') type: ClusterIP # port is the Kubernetes service port port: 5701 # clusterIP set to None makes the service headless # It is required if DNS Lookup is used (https://github.com/hazelcast/hazelcast-kubernetes#dns-lookup) clusterIP: "None" # Role-based Access Control rbac: # Specifies whether RBAC resources should be created # It is not required if DNS Lookup is used (https://github.com/hazelcast/hazelcast-kubernetes#dns-lookup) create: true serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. 
# If not set and create is true, a name is generated using the fullname template name: # Security Context properties securityContext: # enabled is a flag to enable Security Context enabled: true # runAsUser is the user ID used to run the container runAsUser: 65534 # runAsGroup is the primary group ID used to run all processes within any container of the pod runAsGroup: 65534 # fsGroup is the group ID associated with the container fsGroup: 65534 # readOnlyRootFilesystem is a flag to enable readOnlyRootFilesystem for the Hazelcast security context readOnlyRootFilesystem: true # Allows to enable a Prometheus to scrape pods metrics: enabled: false service: type: ClusterIP port: 8080 annotations: prometheus.io/scrape: "true" prometheus.io/path: "/metrics" prometheus.io/port: "8080" # customVolume is the configuration for a volume will be mounted as '/data/custom/' (e.g. to mount a volume with custom JARs) # customVolume: # Hazelcast Jet Management Center application properties managementcenter: # enabled is a flag to enable Hazelcast Jet Management Center application enabled: true ## Hazelcast Jet Management Center image version ## ref: https://hub.docker.com/r/hazelcast/hazelcast-jet-management-center/tags/ ## image: # repository is the Hazelcast Jet Management Center image name repository: "hazelcast/hazelcast-jet-management-center" # tag is the Hazelcast Jet Management Center image tag tag: "4.1" # pullPolicy is the Docker image pull policy # It's recommended to change this to 'Always' if the image tag is 'latest' # ref: http://kubernetes.io/docs/user-guide/images/#updating-images # pullPolicy: IfNotPresent # pullSecrets is an array of docker-registry secret names # Secrets must be manually created in the namespace. 
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ # pullSecrets: # - myRegistryKeySecretName # javaOpts are additional JAVA_OPTS properties for Hazelcast Jet Management Center javaOpts: # licenseKey is the license key for Hazelcast Jet Management Center # if not provided, it can be filled in the Management Center web interface licenseKey: # licenseKeySecretName is the name of the secret where the Hazelcast Jet Management Center License Key is stored (can be used instead of licenseKey) # licenseKeySecretName: # affinity specifies the Hazelcast Jet Management Center affinity/anti-affinity of different pods # affinity: # tolerations enable Hazelcast Jet Management Center POD to be able to run on nodes with taints # tolerations: # nodeSelector is an array of Hazelcast Jet Management Center Node labels for POD assignments # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector nodeSelector: {} # Jet Client configuration YAML file which will be used by Hazelcast Jet Management Center yaml: hazelcast-client: cluster-name: jet network: kubernetes: enabled: true namespace: ${namespace} service-name: ${serviceName} resolve-not-ready-addresses: true # configurationFiles are any additional Hazelcast Jet Client configuration files # configurationFiles: # Configure resource requests and limits # ref: http://kubernetes.io/docs/user-guide/compute-resources/ # # resources: # requests: # memory: 256Mi # cpu: 100m # Hazelcast Jet Management Center Service properties service: # type defines the Kubernetes service type ('ClusterIP', 'LoadBalancer', or 'NodePort') type: LoadBalancer # port is the Kubernetes service port port: 8081 # service https port httpsPort: 443 # service annotations for management center annotations: {} # Hazelcast Jet Management Center Liveness probe livenessProbe: # enabled is a flag to used to enable liveness probe enabled: true # initialDelaySeconds is a delay before liveness probe is 
initiated initialDelaySeconds: 30 # periodSeconds decides how often to perform the probe periodSeconds: 10 # timeoutSeconds decides when the probe times out timeoutSeconds: 5 # successThreshold is the minimum consecutive successes for the probe to be considered successful after having failed successThreshold: 1 # failureThreshold is the minimum consecutive failures for the probe to be considered failed after having succeeded failureThreshold: 3 # Hazelcast Jet Management Center Readiness probe readinessProbe: # enabled is a flag to used to enable readiness probe enabled: true # initialDelaySeconds is a delay before readiness probe is initiated initialDelaySeconds: 30 # periodSeconds decides how often to perform the probe periodSeconds: 10 # timeoutSeconds decides when the probe times out timeoutSeconds: 1 # successThreshold is the minimum consecutive successes for the probe to be considered successful after having failed successThreshold: 1 # failureThreshold is the minimum consecutive failures for the probe to be considered failed after having succeeded failureThreshold: 3
prestashop
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"prestashop.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some...
## Global Docker image parameters ## Please, note that this will override the image parameters, including dependencies, configured to use the global value ## Current available global Docker image parameters: imageRegistry and imagePullSecrets ## # global: # imageRegistry: myRegistryName # imagePullSecrets: # - myRegistryKeySecretName # storageClass: myStorageClass ## Bitnami PrestaShop image version ## ref: https://hub.docker.com/r/bitnami/prestashop/tags/ ## image: registry: docker.io repository: bitnami/prestashop tag: 1.7.6-4-debian-10-r0 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## String to partially override prestashop.fullname template (will maintain the release name) ## # nameOverride: ## String to fully override prestashop.fullname template ## # fullnameOverride: ## PrestaShop host to create application URLs ## ref: https://github.com/bitnami/bitnami-docker-prestashop#configuration ## # prestashopHost: ## User of the application ## ref: https://github.com/bitnami/bitnami-docker-prestashop#configuration ## prestashopUsername: user@example.com ## Application password ## Defaults to a random 10-character alphanumeric string if not set ## ref: https://github.com/bitnami/bitnami-docker-prestashop#configuration ## # prestashopPassword: ## Admin email ## ref: https://github.com/bitnami/bitnami-docker-prestashop#configuration ## prestashopEmail: user@example.com ## First Name ## ref: https://github.com/bitnami/bitnami-docker-prestashop#configuration ## prestashopFirstName: Bitnami ## Last Name ## ref: 
https://github.com/bitnami/bitnami-docker-prestashop#configuration ## prestashopLastName: User ## Cookie Check IP ## ref: https://github.com/bitnami/bitnami-docker-prestashop#configuration ## prestashopCookieCheckIP: "no" ## Country ## ref: https://github.com/bitnami/bitnami-docker-prestashop#configuration ## prestashopCountry: "us" ## Language ## ref: https://github.com/bitnami/bitnami-docker-prestashop#configuration ## prestashopLanguage: "en" ## Set to `yes` to allow the container to be started with blank passwords ## ref: https://github.com/bitnami/bitnami-docker-prestashop#environment-variables allowEmptyPassword: "yes" ## ## External database configuration ## externalDatabase: ## Database host host: ## Database host port: 3306 ## Database user user: bn_jasperreports ## Database password password: ## Database name database: bitnami_prestashop ## SMTP mail delivery configuration ## ref: https://github.com/bitnami/bitnami-docker-prestashop/#smtp-configuration ## # smtpHost: # smtpPort: # smtpUser: # smtpPassword: # smtpProtocol: ## ## MariaDB chart configuration ## ## https://github.com/helm/charts/blob/master/stable/mariadb/values.yaml ## mariadb: ## Whether to deploy a mariadb server to satisfy the applications database requirements. 
To use an external database set this to false and configure the externalDatabase parameters enabled: true ## Tag for the Bitnami MariaDB image to use ## ref: https://github.com/bitnami/bitnami-docker-mariadb image: registry: docker.io repository: bitnami/mariadb tag: 10.1.44-debian-10-r32 ## Disable MariaDB replication replication: enabled: false ## Create a database and a database user ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run ## db: name: bitnami_prestashop user: bn_prestashop ## If the password is not specified, mariadb will generates a random password ## # password: ## MariaDB admin password ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run ## # rootUser: # password: ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## master: persistence: enabled: true ## mariadb data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. 
(gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi ## Kubernetes configuration ## For minikube, set this to NodePort, elsewhere use LoadBalancer ## service: type: LoadBalancer # HTTP Port port: 80 # HTTPS Port httpsPort: 443 ## loadBalancerIP for the PrestaShop Service (optional, cloud specific) ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer ## loadBalancerIP ## ## nodePorts: ## http: <to set explicitly, choose port between 30000-32767> ## https: <to set explicitly, choose port between 30000-32767> nodePorts: http: "" https: "" ## Enable client source IP preservation ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip ## externalTrafficPolicy: Local ## Configure the ingress resource that allows you to access the ## PrestaShop installation. Set up the URL ## ref: http://kubernetes.io/docs/user-guide/ingress/ ## ingress: ## Set to true to enable ingress record generation enabled: false ## Set this to true in order to add the corresponding annotations for cert-manager certManager: false ## Ingress annotations done as key:value pairs ## For a full list of possible ingress annotations, please see ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md ## ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set annotations: # kubernetes.io/ingress.class: nginx ## The list of hostnames to be covered with this ingress record. 
## Most likely this will be just one host, but in the event more hosts are needed, this is an array hosts: - name: prestashop.local path: / # Set this to true in order to enable TLS on the ingress record tls: false ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS tlsSecret: prestashop.local-tls secrets: ## If you're providing your own certificates, please use this to add the certificates as secrets ## key and certificate should start with -----BEGIN CERTIFICATE----- or ## -----BEGIN RSA PRIVATE KEY----- ## ## name should line up with a tlsSecret set further up ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set ## ## It is also possible to create and manage the certificates outside of this helm chart ## Please see README.md for more information # - name: prestashop.local-tls # key: # certificate: ## Control where client requests go, to the same pod or round-robin ## Values: ClientIP or None ## ref: https://kubernetes.io/docs/user-guide/services/ sessionAffinity: "None" ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## persistence: enabled: true ## Prestashop Data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. 
(gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" ## A manually managed Persistent Volume and Claim ## Requires persistence.enabled: true ## If defined, PVC must be created manually before volume will be bound # existingClaim: accessMode: ReadWriteOnce size: 8Gi ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: requests: memory: 512Mi cpu: 300m ## Configure extra options for liveness and readiness probes ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) livenessProbe: enabled: true initialDelaySeconds: 600 periodSeconds: 10 timeoutSeconds: 5 failureThreshold: 6 successThreshold: 1 readinessProbe: enabled: true initialDelaySeconds: 30 periodSeconds: 5 timeoutSeconds: 3 failureThreshold: 6 successThreshold: 1 ## Pod annotations ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ ## podAnnotations: {} ## Affinity for pod assignment ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## affinity: {} ## Prometheus Exporter / Metrics ## metrics: enabled: false image: registry: docker.io repository: bitnami/apache-exporter tag: 0.7.0-debian-10-r38 pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## Metrics exporter pod Annotation and Labels podAnnotations: prometheus.io/scrape: "true" prometheus.io/port: "9117" ## Metrics exporter resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## # resources: {}
kiam
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"kiam.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kuber...
extraArgs: {} agent: ## If false, agent will not be installed ## enabled: true ## agent container name ## name: agent image: repository: quay.io/uswitch/kiam tag: v3.3 pullPolicy: IfNotPresent ## agent whitelist of proxy routes matching this reg-ex ## # whiteListRouteRegexp: ## Logging settings ## log: jsonOutput: true level: info ## Host networking settings ## host: iptables: false port: 8181 interface: cali+ ## Prometheus metrics ## prometheus: scrape: true port: 9620 syncInterval: 5s ## Annotations to be added to pods ## podAnnotations: {} ## Labels to be added to pods ## podLabels: {} ## Annotations to be added to service ## serviceAnnotations: {} ## Labels to be added to service ## serviceLabels: {} ## Used to assign priority to agent pods ## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ ## priorityClassName: "" ## Strategy for DaemonSet updates (requires Kubernetes 1.6+) ## Ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/ ## updateStrategy: OnDelete ## Pod DNS policy ## Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pods-dns-policy ## dnsPolicy: ClusterFirstWithHostNet ## Node labels for pod assignment ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ ## nodeSelector: {} # kubernetes.io/role: node ## Pod tolerations ## Ref https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ ## tolerations: [] ## Agent container resources ## Ref https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ ## # Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core # Expects input structure as per specification for example: # affinity: # nodeAffinity: # requiredDuringSchedulingIgnoredDuringExecution: # nodeSelectorTerms: # - matchExpressions: # - key: foo.bar.com/role # operator: In # values: # - master affinity: {} resources: {} ## Additional container arguments ## extraArgs: {} ## Additional 
container environment variables ## extraEnv: {} ## Additional container hostPath mounts ## extraHostPathMounts: [] # - name: ssl-certs # mountPath: /etc/ssl/certs ## Running on Amazon Linux or RHEL distros: # hostPath: /etc/pki/ca-trust/extracted/pem ## else: # hostPath: /usr/share/ca-certificates # readOnly: true ## Timeout when creating the kiam gateway ## gatewayTimeoutCreation: 50ms ## Base64-encoded PEM values for agent's CA certificate(s), certificate and private key ## tlsFiles: ca: cert: key: ## Secret name of agent's TLS certificates ## tlsSecret: ## Agent TLS Certificate filenames tlsCerts: certFileName: cert keyFileName: key caFileName: ca server: ## If false, server will not be installed ## enabled: true ## server container name ## name: server image: repository: quay.io/uswitch/kiam tag: v3.3 pullPolicy: IfNotPresent ## Logging settings ## log: jsonOutput: true level: info ## Prometheus metrics ## prometheus: scrape: true port: 9620 syncInterval: 5s ## Annotations to be added to pods ## podAnnotations: {} ## Labels to be added to pods ## podLabels: {} ## Annotations to be added to service ## serviceAnnotations: {} ## Labels to be added to service ## serviceLabels: {} ## Used to assign priority to server pods ## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ ## priorityClassName: "" ## Strategy for DaemonSet updates (requires Kubernetes 1.6+) ## Ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/ ## updateStrategy: OnDelete # Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core # Expects input structure as per specification for example: # affinity: # nodeAffinity: # requiredDuringSchedulingIgnoredDuringExecution: # nodeSelectorTerms: # - matchExpressions: # - key: foo.bar.com/role # operator: In # values: # - master affinity: {} ## Node labels for pod assignment ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ ## nodeSelector: {} # kubernetes.io/role: 
master ## Pod tolerations ## Ref https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ ## tolerations: [] ## Server container resources ## Ref https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ ## resources: {} ## Additional container arguments ## extraArgs: {} ## Additional container environment variables ## extraEnv: {} ## Additional container hostPath mounts ## extraHostPathMounts: [] # - name: ssl-certs # mountPath: /etc/ssl/certs ## Running on Amazon Linux or RHEL distros: # hostPath: /etc/pki/ca-trust/extracted/pem ## else: # hostPath: /usr/share/ca-certificates # readOnly: true service: port: 443 targetPort: 443 ## Timeout when creating the kiam gateway ## gatewayTimeoutCreation: 50ms ## Server probe configuration probes: serverAddress: 127.0.0.1 ## Base64-encoded PEM values for server's CA certificate(s), certificate and private key ## tlsFiles: ca: cert: key: ## Secret name of server's TLS certificates ## tlsSecret: ## Base ARN for IAM roles ## If not specified use EC2 metadata service to detect ARN prefix ## roleBaseArn: null ## Pod cache settings ## cache: syncInterval: 1m ## IAM role for the server to assume ## assumeRoleArn: null ## Session duration for STS tokens ## sessionDuration: 15m ## Use hostNetwork for server ## Set this to true when running the servers on the same nodes as the agents useHostNetwork: false ## Agent TLS Certificate filenames tlsCerts: certFileName: cert keyFileName: key caFileName: ca rbac: # Specifies whether RBAC resources should be created create: true psp: # Specifies whether PodSecurityPolicies should be created create: false serviceAccounts: agent: create: true name: server: create: true name:
parse
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"parse.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"parse.c...
## Global Docker image parameters ## Please, note that this will override the image parameters, including dependencies, configured to use the global value ## Current available global Docker image parameters: imageRegistry and imagePullSecrets ## # global: # imageRegistry: myRegistryName # imagePullSecrets: # - myRegistryKeySecretName # storageClass: myStorageClass ## String to partially override parse.fullname template (will maintain the release name) ## # nameOverride: ## String to fully override parse.fullname template ## # fullnameOverride: ## Init containers parameters: ## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. ## volumePermissions: enabled: false image: registry: docker.io repository: bitnami/minideb tag: buster pullPolicy: Always ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName resources: {} ## Kubernetes serviceType for Parse Deployment ## ref: http://kubernetes.io/docs/user-guide/services/#publishing-services---service-types ## service: type: LoadBalancer # Parse dashboard HTTP Port port: 80 ## loadBalancerIP: ## ## nodePorts: ## http: <to set explicitly, choose port between 30000-32767> nodePorts: http: "" ## Enable client source IP preservation ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip ## externalTrafficPolicy: Cluster ## loadBalancerIP for the Parse Service (optional, cloud specific) ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer ## # server: ## Bitnami Parse image version ## ref: https://hub.docker.com/r/bitnami/parse/tags/ ## image: registry: docker.io repository: bitnami/parse tag: 3.10.0-debian-10-r30 ## Specify a imagePullPolicy ## Defaults 
to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## Parse Server Pod Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ ## securityContext: enabled: true fsGroup: 1001 runAsUser: 1001 ## Parse Server Port ## ref: https://github.com/bitnami/bitnami-docker-parse#configuration ## port: 1337 ## Parse API mount path ## ref: https://github.com/bitnami/bitnami-docker-parse#configuration ## mountPath: /parse ## Parse Server App ID ## ref: https://github.com/bitnami/bitnami-docker-parse#configuration ## appId: myappID ## Parse Server Master Key ## ref: https://github.com/bitnami/bitnami-docker-parse#configuration ## # masterKey: ## An array to add extra env vars ## For example: ## extraEnvVars: ## - name: PARSE_SERVER_ALLOW_CLIENT_CLASS_CREATION ## value: "true" ## extraEnvVars: [] ## Name of a ConfigMap containing extra env vars ## extraEnvVarsCM: ## Name of a Secret containing extra env vars ## extraEnvVarsSecret: ## Enable Cloud Clode ## ref: https://github.com/bitnami/bitnami-docker-parse#how-to-deploy-your-cloud-functions-with-parse-cloud-code ## enableCloudCode: false ## Cloud Code scripts ## Specify dictionary of Cloud Code scripts and content ## Alternatively, you can put your scripts under the files/cloud directory ## # cloudCodeScripts: # main.js: | # Parse.Cloud.define("sayHelloWorld", function(request, response) { # return "Hello world!"; # }); ## ConfigMap with Cloud Code scripts ## NOTE: This will override cloudCodeScripts ## # existingCloudCodeScriptsCM ## Parse Server pods' resource requests and limits ## ref: 
http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: {} ## Parse Server pods' liveness and readiness probes ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) ## livenessProbe: enabled: true initialDelaySeconds: 120 periodSeconds: 10 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 readinessProbe: enabled: true initialDelaySeconds: 30 periodSeconds: 5 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 ## Affinity for pod assignment. Evaluated as a template ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## affinity: {} ## Node labels for pod assignment. Evaluated as a template ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ ## nodeSelector: {} ## Tolerations for pod assignment. Evaluated as a template ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ ## tolerations: {} dashboard: ## Enable deployment of Parse Dashboard ## enabled: true ## Bitnami Parse Dashboard image version ## ref: https://hub.docker.com/r/bitnami/parse-dashboard/tags/ ## image: registry: docker.io repository: bitnami/parse-dashboard tag: 2.0.5-debian-10-r27 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. 
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## Parse Dashboard Pod Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ ## securityContext: enabled: true fsGroup: 1001 runAsUser: 1001 ## Parse Dashboard application username ## ref: https://github.com/bitnami/bitnami-docker-parse-dashboard#configuration ## username: user ## Parse Dashboard application password ## Defaults to a random 10-character alphanumeric string if not set ## ref: https://github.com/bitnami/bitnami-docker-parse-dashboard#configuration ## # password: ## Parse Dashboard application name ## ref: https://github.com/bitnami/bitnami-docker-parse-dashboard#configuration ## appName: MyDashboard ## Parse Dashboard pods' resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: {} ## Parse Dashboard pods' liveness and readiness probes ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) ## livenessProbe: enabled: true initialDelaySeconds: 240 periodSeconds: 10 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 readinessProbe: enabled: true initialDelaySeconds: 30 periodSeconds: 5 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 ## Affinity for pod assignment. Evaluated as a template ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## affinity: {} ## Node labels for pod assignment. Evaluated as a template ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ ## nodeSelector: {} ## Tolerations for pod assignment. 
Evaluated as a template ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ ## tolerations: {} ## Protocol to form URLs to Parse ## parseServerUrlProtocol: "http" ## An array to add extra env vars ## For example: ## extraEnvVars: ## - name: KIBANA_ELASTICSEARCH_URL ## value: test ## extraEnvVars: [] ## Name of a ConfigMap containing extra env vars ## extraEnvVarsCM: ## Name of a Secret containing extra env vars ## extraEnvVarsSecret: ## Configure the ingress resource that allows you to access the ## Parse installation. ## ref: http://kubernetes.io/docs/user-guide/ingress/ ## ingress: ## Set to true to enable ingress record generation ## enabled: false ## Set this to true in order to add the corresponding annotations for cert-manager ## certManager: false ## Ingress annotations done as key:value pairs. If certManager is set to true, ## the annotation 'kubernetes.io/tls-acme: "true"' will automatically be set ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md ## annotations: # kubernetes.io/ingress.class: nginx dashboard: ## The list of hostnames to be covered with this ingress record. ## Most likely this will be just one host, but in the event more hosts are needed, this is an array ## hosts: - name: parse.local path: / ## Set this to true in order to enable TLS on the ingress record ## tls: false ## Optionally specify the TLS hosts for the ingress record ## Useful when the Ingress controller supports www-redirection ## If not specified, the above host name will be used # tlsHosts: # - www.parse.local # - parse.local ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS ## tlsSecret: parse.local-tls server: ## The list of hostnames to be covered with this ingress record. 
## Most likely this will be just one host, but in the event more hosts are needed, this is an array ## hosts: - name: parse-server.local path: / ## Set this to true in order to enable TLS on the ingress record ## tls: false ## Optionally specify the TLS hosts for the ingress record ## Useful when the Ingress controller supports www-redirection ## If not specified, the above host name will be used # tlsHosts: # - www.parse-server.local # - parse-server.local ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS ## tlsSecret: parse.local-tls secrets: ## If you're providing your own certificates, please use this to add the certificates as secrets ## key and certificate should start with -----BEGIN CERTIFICATE----- or ## -----BEGIN RSA PRIVATE KEY----- ## ## name should line up with a tlsSecret set further up ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set ## ## It is also possible to create and manage the certificates outside of this helm chart ## Please see README.md for more information # - name: parse.local-tls # key: # certificate: ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## persistence: enabled: true ## parse data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. 
(gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi ## ## MongoDB chart configuration ## ## https://github.com/helm/charts/blob/master/stable/mongodb/values.yaml ## mongodb: ## MongoDB Password authentication usePassword: true ## If the password is not specified, MongoDB will generate a random password ## # mongodbRootPassword: ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## persistence: enabled: true ## mongodb data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi
prometheus-mysql-exporter
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"prometheus-mysql-exporter.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 cha...
# Default values for prometheus-mysql-exporter. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 image: repository: "prom/mysqld-exporter" tag: "v0.11.0" pullPolicy: "IfNotPresent" service: labels: {} annotations: {} name: mysql-exporter type: ClusterIP externalPort: 9104 internalPort: 9104 serviceMonitor: # enabled should be set to true to enable prometheus-operator discovery of this service enabled: false # interval is the interval at which metrics should be scraped # interval: 30s # scrapeTimeout is the timeout after which the scrape is ended # scrapeTimeout: 10s # additionalLabels is the set of additional labels to add to the ServiceMonitor additionalLabels: {} jobLabel: "" targetLabels: [] podTargetLabels: [] metricRelabelings: [] resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
# limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi nodeSelector: {} tolerations: [] affinity: {} podLabels: {} annotations: prometheus.io/scrape: "true" prometheus.io/path: "/metrics" prometheus.io/port: "9104" collectors: {} # auto_increment.columns: false # binlog_size: false # engine_innodb_status: false # engine_tokudb_status: false # global_status: true # global_variables: true # info_schema.clientstats: false # info_schema.innodb_metrics: false # info_schema.innodb_tablespaces: false # info_schema.innodb_cmp: false # info_schema.innodb_cmpmem: false # info_schema.processlist: false # info_schema.processlist.min_time: 0 # info_schema.query_response_time: false # info_schema.tables: true # info_schema.tables.databases: '*' # info_schema.tablestats: false # info_schema.schemastats: false # info_schema.userstats: false # perf_schema.eventsstatements: false # perf_schema.eventsstatements.digest_text_limit: 120 # perf_schema.eventsstatements.limit: false # perf_schema.eventsstatements.timelimit: 86400 # perf_schema.eventswaits: false # perf_schema.file_events: false # perf_schema.file_instances: false # perf_schema.indexiowaits: false # perf_schema.tableiowaits: false # perf_schema.tablelocks: false # perf_schema.replication_group_member_stats: false # slave_status: true # slave_hosts: false # heartbeat: false # heartbeat.database: heartbeat # heartbeat.table: heartbeat # mysql connection params which build the DATA_SOURCE_NAME env var of the docker container mysql: db: "" host: "localhost" param: "" pass: "password" port: 3306 protocol: "" user: "exporter" existingSecret: false # cloudsqlproxy https://cloud.google.com/sql/docs/mysql/sql-proxy cloudsqlproxy: enabled: false image: repo: "gcr.io/cloudsql-docker/gce-proxy" tag: "1.14" pullPolicy: "IfNotPresent" instanceConnectionName: "project:us-central1:dbname" port: "3306" credentials: '{ "type": "service_account", "project_id": "project", "private_key_id": "KEYID1", "private_key": 
"-----BEGIN PRIVATE KEY-----\nsdajsdnasd\n-----END PRIVATE KEY-----\n", "client_email": "user@project.iam.gserviceaccount.com", "client_id": "111111111", "auth_uri": "https://accounts.google.com/o/oauth2/auth", "token_uri": "https://accounts.google.com/o/oauth2/token", "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/user%40project.iam.gserviceaccount.com" }'
nginx-ldapauth-proxy
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"nginx-ldapauth-proxy.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars be...
# Default values for nginx-ldapauth-proxy. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 image: repository: dweomer/nginx-auth-ldap tag: 1.13.5-on-alpine-3.5 pullPolicy: IfNotPresent # pullSecrets: # - docker-secret service: name: nginx-ldapauth type: ClusterIP externalPort: 443 internalPort: 80 proxy: protocol: "ldap" port: 443 host: "kubernetes.default.svc.cluster.local" authName: "Auth Required" ldapHost: "" ldapPort: 389 ldapGroup: "memberUid" ldapDN: "dc=example,dc=com" ldapFilter: "objectClass=organizationalPerson" ldapBindDN: "cn=auth,dc=example,dc=com" requires: - name: "authGroup" filter: "cn=secret,ou=groups,dc=example,dc=com" secrets: ldapBindPassword: "" ingress: enabled: false # Used to create an Ingress record. hosts: - ldapauth-service.local annotations: # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" tls: # Secrets must be manually created in the namespace. # - secretName: chart-example-tls # hosts: # - chart-example.local resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi nodeSelector: {} tolerations: [] affinity: {}