Dataset Viewer
Auto-converted to Parquet Duplicate
chart_name
stringlengths
3
30
templates
sequence
values
stringlengths
104
39.6k
gce-ingress
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"gce-ingress.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because som...
# Default values for gce-ingress. # This is a YAML-formatted file. # Declare variables to be passed into your templates. nameOverride: "" fullnameOverride: "" rbac: # Specifies whether RBAC resources should be created create: true serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: # gce-ingress needs credentials to log into GCE. Create a secret with the key # of key.json with the contents of a GCE service account that has permissions to create # and modify load balancers. The key should be in the JSON format. # Example: # Your secret should look like: # apiVersion: v1 # kind: Secret # metadata: # name: gce-key # type: Opaque # data: # key.json: < base64 encoded JSON service account key> secret: ~ ## If the google auth file is saved in a different secret key you can specify it here # secretKey: key.json # gce config, replace values to match your environment config: projectID: network: subnetwork: nodeInstancePrefix: nodeTags: # tokenUrl should probably be left as nil tokenUrl: "nil" controller: replicaCount: 1 image: repository: k8s.gcr.io/ingress-gce-glbc-amd64 tag: v1.4.0 pullPolicy: IfNotPresent resources: {} # requests: # cpu: 10m # memory: 50Mi nodeSelector: {} tolerations: [] affinity: {} defaultBackend: replicaCount: 1 image: repository: k8s.gcr.io/defaultbackend tag: "1.4" pullPolicy: IfNotPresent resources: {} # limits: # cpu: 10m # memory: 20Mi # requests: # cpu: 10m # memory: 20Mi nodeSelector: {} tolerations: [] affinity: {} service: type: NodePort port: 80
prometheus-blackbox-exporter
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"prometheus-blackbox-exporter.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 ...
restartPolicy: Always kind: Deployment podDisruptionBudget: {} # maxUnavailable: 0 ## Enable pod security policy pspEnabled: true strategy: rollingUpdate: maxSurge: 1 maxUnavailable: 0 type: RollingUpdate image: repository: prom/blackbox-exporter tag: v0.16.0 pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## User to run blackbox-exporter container as runAsUser: 1000 readOnlyRootFilesystem: true runAsNonRoot: true livenessProbe: httpGet: path: /health port: http readinessProbe: httpGet: path: /health port: http nodeSelector: {} tolerations: [] affinity: {} secretConfig: false config: modules: http_2xx: prober: http timeout: 5s http: valid_http_versions: ["HTTP/1.1", "HTTP/2"] no_follow_redirects: false preferred_ip_protocol: "ip4" extraConfigmapMounts: [] # - name: certs-configmap # mountPath: /etc/secrets/ssl/ # subPath: certificates.crt # (optional) # configMap: certs-configmap # readOnly: true # defaultMode: 420 ## Additional secret mounts # Defines additional mounts with secrets. Secrets must be manually created in the namespace. extraSecretMounts: [] # - name: secret-files # mountPath: /etc/secrets # secretName: blackbox-secret-files # readOnly: true # defaultMode: 420 allowIcmp: false resources: {} # limits: # memory: 300Mi # requests: # memory: 50Mi priorityClassName: "" service: annotations: {} type: ClusterIP port: 9115 serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: annotations: {} ## An Ingress resource can provide name-based virtual hosting and TLS ## termination among other things for CouchDB deployments which are accessed ## from outside the Kubernetes cluster. 
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ ingress: enabled: false hosts: [] # - chart-example.local path: '/' annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" tls: [] # Secrets must be manually created in the namespace. # - secretName: chart-example-tls # hosts: # - chart-example.local podAnnotations: {} extraArgs: [] # --history.limit=1000 replicas: 1 serviceMonitor: ## If true, a ServiceMonitor CRD is created for a prometheus operator ## https://github.com/coreos/prometheus-operator ## enabled: false # Default values that will be used for all ServiceMonitors created by `targets` defaults: labels: {} interval: 30s scrapeTimeout: 30s module: http_2xx targets: # - name: example # Human readable URL that will appear in Prometheus / AlertManager # url: http://example.com/healthz # The URL that blackbox will scrape # labels: {} # List of labels for ServiceMonitor. Overrides value set in `defaults` # interval: 60s # Scraping interval. Overrides value set in `defaults` # scrapeTimeout: 60s # Scrape timeout. Overrides value set in `defaults` # module: http_2xx # Module used for scraping. Overrides value set in `defaults` ## Custom PrometheusRules to be defined ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions prometheusRule: enabled: false additionalLabels: {} namespace: "" rules: [] ## Network policy for chart networkPolicy: # Enable network policy and allow access from anywhere enabled: false # Limit access only from monitoring namespace allowMonitoringNamespace: false
gangway
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"gangway.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Ku...
replicaCount: 1 image: repository: gcr.io/heptio-images/gangway tag: v3.2.0 pullPolicy: IfNotPresent ## Optional array of imagePullSecrets containing private registry credentials ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ pullSecrets: [] # - name: secretName nameOverride: "" fullnameOverride: "" # Specify a CA cert to trust for self signed certificates at the Oauth2 URLs. Be careful to indent one level beyond the # trustedCACert key: # trustedCACert: |- # -----BEGIN CERTIFICATE----- # ... # -----END CERTIFICATE----- # Add Env Variables to pod env: {} # Add annotations to the pod podAnnotations: {} gangway: # The address to listen on. Defaults to 0.0.0.0 to listen on all interfaces. # Env var: GANGWAY_HOST # host: 0.0.0.0 serviceAccountName: "" # The port to listen on. Defaults to 8080. # Env var: GANGWAY_PORT port: 8080 # Should Gangway serve TLS vs. plain HTTP? Default: false # Env var: GANGWAY_SERVE_TLS # serveTLS: false # The public cert file (including root and intermediates) to use when serving TLS. # Env var: GANGWAY_CERT_FILE # certFile: /etc/gangway/tls/tls.crt # The private key file when serving TLS. # Env var: GANGWAY_KEY_FILE # keyFile: /etc/gangway/tls/tls.key # The cluster name. Used in UI and kubectl config instructions. # Env var: GANGWAY_CLUSTER_NAME clusterName: "${GANGWAY_CLUSTER_NAME}" # OAuth2 URL to start authorization flow. # Env var: GANGWAY_AUTHORIZE_URL authorizeURL: "https://${DNS_NAME}/authorize" # OAuth2 URL to obtain access tokens. # Env var: GANGWAY_TOKEN_URL tokenURL: "https://${DNS_NAME}/oauth/token" # Endpoint that provides user profile information [optional]. Not all providers # will require this. # Env var: GANGWAY_AUDIENCE audience: "https://${DNS_NAME}/userinfo" # Used to specify the scope of the requested Oauth authorization. scopes: ["openid", "profile", "email", "offline_access"] # Where to redirect back to. This should be a URL where gangway is reachable. 
# Typically this also needs to be registered as part of the oauth application # with the oAuth provider. # Env var: GANGWAY_REDIRECT_URL redirectURL: "https://${GANGWAY_REDIRECT_URL}/callback" # API client ID as indicated by the identity provider # Env var: GANGWAY_CLIENT_ID clientID: "${GANGWAY_CLIENT_ID}" # API client secret as indicated by the identity provider # Env var: GANGWAY_CLIENT_SECRET clientSecret: "${GANGWAY_CLIENT_SECRET}" # Some identity providers accept an empty client secret, this # is not generally considered a good idea. If you have to use an # empty secret and accept the risks that come with that then you can # set this to true. # allowEmptyClientSecret: false # The JWT claim to use as the username. This is used in UI. # Default is "nickname". This is combined with the clusterName # for the "user" portion of the kubeconfig. # Env var: GANGWAY_USERNAME_CLAIM usernameClaim: "sub" # The API server endpoint used to configure kubectl # Env var: GANGWAY_APISERVER_URL apiServerURL: "https://${GANGWAY_APISERVER_URL}" # The path to find the CA bundle for the API server. Used to configure kubectl. # This is typically mounted into the default location for workloads running on # a Kubernetes cluster and doesn't need to be set. # Env var: GANGWAY_CLUSTER_CA_PATH # cluster_ca_path: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" # The path gangway uses to create urls (defaults to "") # Env var: GANGWAY_HTTP_PATH # httpPath: "https://${GANGWAY_HTTP_PATH}" # The key to use when encrypting the contents of cookies. # You can leave this blank and the chart will generate a random key, however # you must use that with caution. Subsequent upgrades to the deployment will # regenerate this key which will cause Gangway to error when attempting to # decrypt cookies stored in users' browsers which were encrypted with the old # key. # TL;DR: Safe to use the auto generation in test environments, provide your # own in production. 
# sessionKey: tls: {} # certData: | # -----BEGIN CERTIFICATE----- # ... # -----END CERTIFICATE----- # keyData: | # -----BEGIN ENCRYPTED PRIVATE KEY----- # ... # -----END ENCRYPTED PRIVATE KEY----- # Name of an existing secret containing `tls.cert` and `tls.key`. # Mounted on the default tls path `/etc/gangway/tls` # existingSecret: "" extraVolumes: [] extraVolumeMounts: [] livenessProbe: # HTTP or HTTPS scheme: HTTP readinessProbe: # HTTP or HTTPS scheme: HTTP service: type: ClusterIP port: 80 # Specifies a loadBalancerIP when using LoadBalancer service type # loadBalancerIP: 192.168.0.51 annotations: {} ingress: enabled: false annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" path: / hosts: - chart-example.local tls: [] # - secretName: chart-example-tls # hosts: # - chart-example.local resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi nodeSelector: {} tolerations: [] affinity: {}
kubernetes-dashboard
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"kubernetes-dashboard.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars be...
# Default values for kubernetes-dashboard # This is a YAML-formatted file. # Declare name/value pairs to be passed into your templates. # name: value image: repository: k8s.gcr.io/kubernetes-dashboard-amd64 tag: v1.10.1 pullPolicy: IfNotPresent pullSecrets: [] replicaCount: 1 ## Here annotations can be added to the kubernetes dashboard deployment annotations: {} ## Here labels can be added to the kubernetes dashboard deployment ## labels: {} # kubernetes.io/name: "Kubernetes Dashboard" ## Enable possibility to skip login enableSkipLogin: false ## Serve application over HTTP without TLS enableInsecureLogin: false ## Additional container arguments ## # extraArgs: # - --enable-skip-login # - --enable-insecure-login # - --system-banner="Welcome to Kubernetes" ## Additional container environment variables ## extraEnv: [] # - name: SOME_VAR # value: 'some value' # Annotations to be added to kubernetes dashboard pods ## Recommended value # podAnnotations: # seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' podAnnotations: {} # Add custom labels to pods podLabels: {} ## SecurityContext for the kubernetes dashboard container ## Recommended values # dashboardContainerSecurityContext: # allowPrivilegeEscalation: false # readOnlyRootFilesystem: true ## The two values below can be set here or at podLevel (using variable .securityContext) # runAsUser: 1001 # runAsGroup: 2001 dashboardContainerSecurityContext: {} ## Node labels for pod assignment ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ ## nodeSelector: {} ## List of node taints to tolerate (requires Kubernetes >= 1.6) tolerations: [] # - key: "key" # operator: "Equal|Exists" # value: "value" # effect: "NoSchedule|PreferNoSchedule|NoExecute" ## Affinity ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity affinity: {} # priorityClassName: "" service: type: ClusterIP externalPort: 443 ## This allows an override of the heapster service name ## Default: 
{{ .Chart.Name }} ## # nameOverride: # LoadBalancerSourcesRange is a list of allowed CIDR values, which are combined with ServicePort to # set allowed inbound rules on the security group assigned to the master load balancer # loadBalancerSourceRanges: [] ## Kubernetes Dashboard Service annotations ## ## For GCE ingress, the following annotation is required: ## service.alpha.kubernetes.io/app-protocols: '{"https":"HTTPS"}' if enableInsecureLogin=false ## or ## service.alpha.kubernetes.io/app-protocols: '{"http":"HTTP"}' if enableInsecureLogin=true annotations: {} ## Here labels can be added to the Kubernetes Dashboard service ## labels: {} # kubernetes.io/name: "Kubernetes Dashboard" resources: limits: cpu: 100m memory: 100Mi requests: cpu: 100m memory: 100Mi ingress: ## If true, Kubernetes Dashboard Ingress will be created. ## enabled: false ## Kubernetes Dashboard Ingress annotations ## ## Add custom labels # labels: # key: value # annotations: # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: 'true' ## If you plan to use TLS backend with enableInsecureLogin set to false ## (default), you need to uncomment the below. ## If you use ingress-nginx < 0.21.0 # nginx.ingress.kubernetes.io/secure-backends: "true" ## if you use ingress-nginx >= 0.21.0 # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" ## Kubernetes Dashboard Ingress paths ## paths: - / # - /* ## Kubernetes Dashboard Ingress hostnames ## Must be provided if Ingress is enabled ## # hosts: # - kubernetes-dashboard.domain.com ## Kubernetes Dashboard Ingress TLS configuration ## Secrets must be manually created in the namespace ## # tls: # - secretName: kubernetes-dashboard-tls # hosts: # - kubernetes-dashboard.domain.com rbac: # Specifies whether RBAC resources should be created create: true # Specifies whether cluster-admin ClusterRole will be used for dashboard # ServiceAccount (NOT RECOMMENDED). clusterAdminRole: false # Start in ReadOnly mode. 
# Only dashboard-related Secrets and ConfigMaps will still be available for writing. # # Turn OFF clusterAdminRole to use clusterReadOnlyRole. # # The basic idea of the clusterReadOnlyRole comparing to the clusterAdminRole # is not to hide all the secrets and sensitive data but more # to avoid accidental changes in the cluster outside the standard CI/CD. # # Same as for clusterAdminRole, it is NOT RECOMMENDED to use this version in production. # Instead you should review the role and remove all potentially sensitive parts such as # access to persistentvolumes, pods/log etc. clusterReadOnlyRole: false serviceAccount: # Specifies whether a service account should be created create: true # The name of the service account to use. # If not set and create is true, a name is generated using the fullname template name: livenessProbe: # Number of seconds to wait before sending first probe initialDelaySeconds: 30 # Number of seconds to wait for probe response timeoutSeconds: 30 podDisruptionBudget: # https://kubernetes.io/docs/tasks/run-application/configure-pdb/ enabled: false minAvailable: maxUnavailable: ## PodSecurityContext for pod level securityContext ## # securityContext: # runAsUser: 1001 # runAsGroup: 2001 securityContext: {} networkPolicy: false
nginx-lego
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"nginx-lego.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 24 -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 24 chars because some Kubernetes name fi...
## nginx-lego spins up a scalable ingress provider that can also provision SSL certs ## See https://github.com/jetstack/kube-lego/tree/master/examples/nginx for more information on implementation ## Nginx configuration ## ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx#automated-certificate-management-with-kube-lego ## nginx: replicaCount: 1 image: repository: k8s.gcr.io/nginx-ingress-controller tag: "0.8.3" pullPolicy: IfNotPresent service: type: LoadBalancer monitoring: false resources: limits: cpu: 1 memory: 2Gi requests: cpu: 1 memory: 128Mi configmap: proxy_connect_timeout: "30" proxy_read_timeout: "600" proxy_send_timeout: "600" hsts_include_subdomains: "false" body_size: "64m" server_name_hash_bucket_size: "256" # TODO: figure out how to expose `{nginx_addr}:8080/nginx_status`, on existing service or create new one? enable_vts_status: "false" ## Default Backend configuration ## To run a different 404 page for the managed domains please see the documentation below ## ref: https://github.com/kubernetes/contrib/tree/master/404-server ## default: replicaCount: 1 image: repository: k8s.gcr.io/defaultbackend tag: "1.0" pullPolicy: IfNotPresent resources: limits: cpu: 1 memory: 2Gi requests: cpu: 1 memory: 128Mi ## kube-lego configuration ## ref: https://github.com/jetstack/kube-lego ## lego: enabled: false replicaCount: 1 image: repository: jetstack/kube-lego tag: "0.1.3" pullPolicy: IfNotPresent configmap: email: "my@email.tld" # Production Let's Encrypt server # url: "https://acme-v01.api.letsencrypt.org/directory" # Test Let's Encrypt server url: "https://acme-staging.api.letsencrypt.org/directory" resources: limits: cpu: 1 memory: 2Gi requests: cpu: 1 memory: 128Mi
wavefront
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"wavefront.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"wav...
## Default values for Wavefront ## This is a unique name for the cluster ## All metrics will receive a `cluster` tag with this value ## Required clusterName: KUBERNETES_CLUSTER_NAME ## Wavefront URL (cluster) and API Token ## Required wavefront: url: https://YOUR_CLUSTER.wavefront.com token: YOUR_API_TOKEN ## Wavefront Collector is responsible to get all Kubernetes metrics from your cluster. ## It will capture Kubernetes resources metrics available from the kubelets, ## as well as auto-discovery capabilities. collector: enabled: true image: repository: wavefronthq/wavefront-kubernetes-collector tag: 1.0.3 pullPolicy: IfNotPresent ## If set to true, DaemonSet will be used for the collector. ## If set to false, Deployment will be used for the collector. ## Setting this to true is strongly recommended useDaemonset: true ## max number of CPUs that can be used simultaneously. Less than 1 for default (number of cores) # maxProcs: 0 ## log level one of: info, debug, or trace. (default info) # logLevel: info ## The resolution at which the collector will retain metrics. (default 60s) # interval: 60s ## How often collected data is flushed (default 10s) # flushInterval: 10s ## Timeout for exporting data (default 20s) # sinkDelay: 20s ## If set to true, will use the unauthenticated read only port for the kubelet ## If set to false, will use the encrypted full access port for the kubelet (default false) # useReadOnlyPort: false ## If set to true, metrics will be sent to Wavefront via a Wavefront Proxy. 
## When true you must either specify a value for `collector.proxyAddress` or set `proxy.enabled` to true ## If set to false, metrics will be sent to Wavefront via the Direct Ingestion API useProxy: true ## Can be used to specify a specific address for the Wavefront Proxy ## The proxy can be anywhere network reachable including outside of the cluster ## Required if `collector.useProxy` is true and `proxy.enabled` is false # proxyAddress: wavefront-proxy:2878 ## If set to true Kubernetes API Server will also be scraped for metrics (default false) # apiServerMetrics: false ## Map of tags to apply to all metrics collected by the collector (default empty) # tags: ## sample tags to include (env, region) # env: production # region: us-west-2 ## Rules based discovery configuration ## Ref: https://github.com/wavefrontHQ/wavefront-kubernetes-collector/blob/master/docs/discovery.md discovery: enabled: true ## When specified, this replaces `prometheus.io` as the prefix for annotations used to ## auto-discover Prometheus endpoints # annotationPrefix: "wavefront.com" ## Can be used to add additional discovery rules # config: ## auto-discover a sample prometheus application # - name: prom-example # type: prometheus # selectors: # labels: # k8s-app: # - prom-example # port: 8080 # path: /metrics # prefix: kube.prom-example. 
# tags: # alt_name: sample-app ## auto-discover mongodb pods (replace USER:PASSWORD) # - name: mongodb # type: telegraf/mongodb # selectors: # images: # - '*mongodb:*' # port: 27017 # conf: | # servers = ["mongodb://USER:PASSWORD${host}:${port}"] # gather_perdb_stats = true # filters: # metricBlacklist: # - 'mongodb.member.status' # - 'mongodb.state' # - 'mongodb.db.stats.type' ## auto-discover rabbitmq pods (replace USER and PASSWORD) # - name: rabbitmq # type: telegraf/rabbitmq # selectors: # images: # - '*rabbitmq:*' # port: 15672 # conf: | # url = "http://${host}:${port}" # username = "USER" # password = "PASSWORD" ## Wavefront Collector resource requests and limits ## Make sure to keep requests and limits equal to keep the pods in the Guaranteed QoS class ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ resources: requests: cpu: 200m memory: 256Mi limits: cpu: 200m memory: 256Mi ## Wavefront Proxy is a metrics forwarder that is used to relay metrics to the Wavefront SaaS service. ## It can receive metrics from the Wavefront Collector as well as other metrics collection services ## within your cluster. The proxy also supports preprocessor rules to allow you to further filter ## and enhance your metric names, and tags. Should network connectivity fail between the proxy and ## Wavefront SaaS service, the proxy will buffer metrics, which will be flushed when connectivity resumes. ## Ref: https://docs.wavefront.com/proxies.html proxy: enabled: true image: repository: wavefronthq/proxy tag: 5.7 pullPolicy: IfNotPresent ## The port number the proxy will listen on for metrics in Wavefront data format. ## This is usually 2878 port: 2878 ## The port number the proxy will listen on for tracing spans in Wavefront trace data format. ## This is usually 30000 # tracePort: 30000 ## The port number the proxy will listen on for tracing spans in Jaeger data format. 
## This is usually 30001 # jaegerPort: 30001 ## The port number the proxy will listen on for tracing spans in Zipkin data format. ## This is usually 9411 # zipkinPort: 9411 ## Sampling rate to apply to tracing spans sent to the proxy. ## This rate is applied to all data formats the proxy is listening on. ## Value should be between 0.0 and 1.0. Default is 1.0 # traceSamplingRate: 0.25 ## When this is set to a value greater than 0, ## spans that are greater than or equal to this value will be sampled. # traceSamplingDuration: 500 ## Any configuration property can be passed to the proxy via command line args ## in the format: `--<property_name> <value>`. Multiple properties can be specified ## separated by whitespace. ## Ref: https://docs.wavefront.com/proxies_configuring.html # args: ## Proxy is a Java application. By default Java will consume up to 4G of heap memory. ## This can be used to override the default. Uses the `-Xmx` command line option for java # heap: 1024m ## Preprocessor rules is a powerful way to apply filtering or to enhance metrics as they flow ## through the proxy. You can configure the rules here. By default a rule to drop Kubernetes ## generated labels is applied to remove unnecessary and often noisy tags. ## Ref: https://docs.wavefront.com/proxies_preprocessor_rules.html # preprocessor: # rules.yaml: | # '2878': # # fix %2F to be a / instead. May be required on EKS. # - rule : fix-forward-slash # action : replaceRegex # scope : pointLine # search : "%2F" # replace : "/" # # replace bad characters ("&", "$", "!", "@") with underscores in the entire point line string # - rule : replace-badchars # action : replaceRegex # scope : pointLine # search : "[&\\$!@]" # replace : "_" ## Specifies whether RBAC resources should be created rbac: create: true ## Specifies whether a ServiceAccount should be created serviceAccount: create: true ## The name of the ServiceAccount to use. 
## If not set and create is true, a name is generated using the fullname template name: ## kube-state-metrics are used to get metrics about the state of the Kubernetes scheduler ## If enabled the kube-state-metrics chart will be installed as a subchart and the collector ## will be configured to capture metrics. kubeStateMetrics: enabled: true
ghost
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"ghost.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kube...
## Global Docker image parameters ## Please, note that this will override the image parameters, including dependencies, configured to use the global value ## Current available global Docker image parameters: imageRegistry and imagePullSecrets ## # global: # imageRegistry: myRegistryName # imagePullSecrets: # - myRegistryKeySecretName # storageClass: myStorageClass ## Bitnami Ghost image version ## ref: https://hub.docker.com/r/bitnami/ghost/tags/ ## image: registry: docker.io repository: bitnami/ghost tag: 3.9.0-debian-10-r0 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## String to partially override ghost.fullname template (will maintain the release name) ## # nameOverride: ## String to fully override ghost.fullname template ## # fullnameOverride: ## Init containers parameters: ## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup ## volumePermissions: image: registry: docker.io repository: bitnami/minideb tag: buster pullPolicy: Always ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. 
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## # pullSecrets: # - myRegistryKeySecretName ## Ghost protocol, host, port and path to create application URLs ## ref: https://github.com/bitnami/bitnami-docker-ghost#configuration ## ghostProtocol: http # ghostHost: # ghostPort: ghostPath: / ## User of the application ## ref: https://github.com/bitnami/bitnami-docker-ghost#configuration ## ghostUsername: user@example.com ## Application password ## Defaults to a random 10-character alphanumeric string if not set ## ref: https://github.com/bitnami/bitnami-docker-ghost#configuration ## # ghostPassword: ## Admin email ## ref: https://github.com/bitnami/bitnami-docker-ghost#configuration ## ghostEmail: user@example.com ## Ghost Blog name ## ref: https://github.com/bitnami/bitnami-docker-ghost#environment-variables ## ghostBlogTitle: User's Blog ## Set to `yes` to allow the container to be started with blank passwords ## ref: https://github.com/bitnami/bitnami-docker-wordpress#environment-variables allowEmptyPassword: "yes" ## SMTP mail delivery configuration ## ref: https://github.com/bitnami/bitnami-docker-redmine/#smtp-configuration ## # smtpHost: # smtpPort: # smtpUser: # smtpPassword: # smtpFromAddress # smtpService: ## Configure extra options for liveness and readiness probes ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) ## livenessProbe: enabled: true initialDelaySeconds: 120 periodSeconds: 10 timeoutSeconds: 5 failureThreshold: 6 successThreshold: 1 readinessProbe: enabled: true initialDelaySeconds: 30 periodSeconds: 5 timeoutSeconds: 3 failureThreshold: 6 successThreshold: 1 ## ## External database configuration ## externalDatabase: ## All of these values are only used when mariadb.enabled is set to false ## Database host host: localhost ## non-root Username for Wordpress Database user: bn_ghost ## Database password password: "" ## Database name 
database: bitnami_ghost ## Database port number port: 3306 ## ## MariaDB chart configuration ## ## https://github.com/helm/charts/blob/master/stable/mariadb/values.yaml ## mariadb: ## Whether to deploy a mariadb server to satisfy the applications database requirements. To use an external database set this to false and configure the externalDatabase parameters enabled: true ## Disable MariaDB replication replication: enabled: false ## Create a database and a database user ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run ## db: name: bitnami_ghost user: bn_ghost ## If the password is not specified, mariadb will generates a random password ## # password: ## MariaDB admin password ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run ## # rootUser: # password: ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## master: persistence: enabled: true ## mariadb data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. 
(gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi ## Kubernetes configuration ## For minikube, set this to NodePort, elsewhere use LoadBalancer ## service: type: LoadBalancer # HTTP Port port: 80 ## loadBalancerIP: ## ## nodePorts: ## http: <to set explicitly, choose port between 30000-32767> nodePorts: http: "" ## Enable client source IP preservation ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip ## externalTrafficPolicy: Cluster ## Service annotations done as key:value pairs annotations: ## Pod Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ ## securityContext: enabled: true fsGroup: 1001 runAsUser: 1001 ## Enable persistence using Persistent Volume Claims ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## persistence: enabled: true ## ghost data Persistent Volume Storage Class ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" accessMode: ReadWriteOnce size: 8Gi path: /bitnami ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: requests: memory: 512Mi cpu: 300m ## Configure the ingress resource that allows you to access the ## Ghost installation. 
Set up the URL ## ref: http://kubernetes.io/docs/user-guide/ingress/ ## ingress: ## Set to true to enable ingress record generation enabled: false ## Set this to true in order to add the corresponding annotations for cert-manager certManager: false ## Ingress annotations done as key:value pairs ## For a full list of possible ingress annotations, please see ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md ## ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set annotations: # kubernetes.io/ingress.class: nginx ## The list of hostnames to be covered with this ingress record. ## Most likely this will be just one host, but in the event more hosts are needed, this is an array hosts: - name: ghost.local path: / ## Set this to true in order to enable TLS on the ingress record tls: false ## Optionally specify the TLS hosts for the ingress record ## Useful when the Ingress controller supports www-redirection ## If not specified, the above host name will be used # tlsHosts: # - www.ghost.local # - ghost.local ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS tlsSecret: ghost.local-tls secrets: ## If you're providing your own certificates, please use this to add the certificates as secrets ## key and certificate should start with -----BEGIN CERTIFICATE----- or ## -----BEGIN RSA PRIVATE KEY----- ## ## name should line up with a tlsSecret set further up ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set ## ## It is also possible to create and manage the certificates outside of this helm chart ## Please see README.md for more information # - name: ghost.local-tls # key: # certificate: ## Node selector for pod assignment ## Ref: 
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector ## nodeSelector: {} ## Affinity for pod assignment ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## affinity: {}
fluentd
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"fluentd.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Ku...
# Default values for fluentd. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 image: repository: gcr.io/google-containers/fluentd-elasticsearch tag: v2.4.0 pullPolicy: IfNotPresent # pullSecrets: # - secret1 # - secret2 output: host: elasticsearch-client.default.svc.cluster.local port: 9200 scheme: http sslVersion: TLSv1 buffer_chunk_limit: 2M buffer_queue_limit: 8 env: {} # Extra Environment Values - allows yaml definitions extraEnvVars: # - name: VALUE_FROM_SECRET # valueFrom: # secretKeyRef: # name: secret_name # key: secret_key # extraVolumes: # - name: es-certs # secret: # defaultMode: 420 # secretName: es-certs # extraVolumeMounts: # - name: es-certs # mountPath: /certs # readOnly: true plugins: enabled: false pluginsList: [] service: annotations: {} type: ClusterIP # loadBalancerIP: # type: NodePort # nodePort: # Used to create Service records ports: - name: "monitor-agent" protocol: TCP containerPort: 24220 metrics: enabled: false service: port: 24231 serviceMonitor: enabled: false additionalLabels: {} # namespace: monitoring # interval: 30s # scrapeTimeout: 10s annotations: {} # prometheus.io/scrape: "true" # prometheus.io/port: "24231" # Pod Labels deployment: labels: {} ingress: enabled: false annotations: kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" # # Depending on which version of ingress controller you may need to configure properly - https://kubernetes.github.io/ingress-nginx/examples/rewrite/#rewrite-target # nginx.ingress.kubernetes.io/rewrite-target: / labels: [] # If doing TCP or UDP ingress rule don't forget to update your Ingress Controller to accept TCP connections - https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/ hosts: # - name: "http-input.local" # protocol: TCP # servicePort: 9880 # path: / tls: {} # Secrets must be manually created in the namespace. 
# - secretName: http-input-tls # hosts: # - http-input.local configMaps: general.conf: | # Prevent fluentd from handling records containing its own logs. Otherwise # it can lead to an infinite loop, when error in sending one message generates # another message which also fails to be sent and so on. <match fluentd.**> @type null </match> # Used for health checking <source> @type http port 9880 bind 0.0.0.0 </source> # Emits internal metrics to every minute, and also exposes them on port # 24220. Useful for determining if an output plugin is retryring/erroring, # or determining the buffer queue length. <source> @type monitor_agent bind 0.0.0.0 port 24220 tag fluentd.monitor.metrics </source> system.conf: |- <system> root_dir /tmp/fluentd-buffers/ </system> forward-input.conf: | <source> @type forward port 24224 bind 0.0.0.0 </source> output.conf: | <match **> @id elasticsearch @type elasticsearch @log_level info include_tag_key true # Replace with the host/port to your Elasticsearch cluster. host "#{ENV['OUTPUT_HOST']}" port "#{ENV['OUTPUT_PORT']}" scheme "#{ENV['OUTPUT_SCHEME']}" ssl_version "#{ENV['OUTPUT_SSL_VERSION']}" logstash_format true <buffer> @type file path /var/log/fluentd-buffers/kubernetes.system.buffer flush_mode interval retry_type exponential_backoff flush_thread_count 2 flush_interval 5s retry_forever retry_max_interval 30 chunk_limit_size "#{ENV['OUTPUT_BUFFER_CHUNK_LIMIT']}" queue_limit_length "#{ENV['OUTPUT_BUFFER_QUEUE_LIMIT']}" overflow_action block </buffer> </match> resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
# limits: # cpu: 500m # memory: 200Mi # requests: # cpu: 500m # memory: 200Mi rbac: # Specifies whether RBAC resources should be created create: true serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: ## Persist data to a persistent volume persistence: enabled: false ## If defined, storageClassName: <storageClass> ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## # storageClass: "-" # annotations: {} accessMode: ReadWriteOnce size: 10Gi nodeSelector: {} tolerations: [] affinity: {} # Enable autoscaling using HorizontalPodAutoscaler autoscaling: enabled: false minReplicas: 2 maxReplicas: 5 metrics: - type: Resource resource: name: cpu target: type: Utilization averageUtilization: 60 - type: Resource resource: name: memory target: type: Utilization averageUtilization: 60 # Consider to set higher value when using in conjuction with autoscaling # Full description about this field: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.15/#pod-v1-core terminationGracePeriodSeconds: 30
opa
[ "# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"opa.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubern...
# Default values for opa. # ----------------------- # # The 'opa' key embeds an OPA configuration file. See https://www.openpolicyagent.org/docs/configuration.html for more details. # Use 'opa: false' to disable the OPA configuration and rely on configmaps for policy loading. # See https://www.openpolicyagent.org/docs/latest/kubernetes-admission-control/#3-deploy-opa-on-top-of-kubernetes and the `mgmt.configmapPolicies` section below for more details. opa: services: controller: url: 'https://www.openpolicyagent.org' bundles: quickstart: service: controller resource: /bundles/helm-kubernetes-quickstart default_decision: /helm_kubernetes_quickstart/main # Setup the webhook using cert-manager certManager: enabled: false # Expose the prometheus scraping endpoint prometheus: enabled: false ## ServiceMonitor consumed by prometheus-operator serviceMonitor: ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry enabled: false interval: "15s" ## Namespace in which the service monitor is created # namespace: monitoring # Added to the ServiceMonitor object so that prometheus-operator is able to discover it ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec additionalLabels: {} # Annotations in the deployment template annotations: {} # Bootstrap policies to load upon startup # Define policies in the form of: # <policyName> : |- # <regoBody> # For example, to mask the entire input body in the decision logs: # bootstrapPolicies: # log: |- # package system.log # mask["/input"] bootstrapPolicies: {} # To enforce mutating policies, change to MutatingWebhookConfiguration. admissionControllerKind: ValidatingWebhookConfiguration # To _fail closed_ on failures, change to Fail. During initial testing, we # recommend leaving the failure policy as Ignore. 
admissionControllerFailurePolicy: Ignore # Adds a namespace selector to the admission controller webhook admissionControllerNamespaceSelector: matchExpressions: - {key: openpolicyagent.org/webhook, operator: NotIn, values: [ignore]} # SideEffectClass for the webhook, setting to None enables dry-run admissionControllerSideEffect: Unknown # To restrict the kinds of operations and resources that are subject to OPA # policy checks, see the settings below. By default, all resources and # operations are subject to OPA policy checks. admissionControllerRules: - operations: ["*"] apiGroups: ["*"] apiVersions: ["*"] resources: ["*"] # Controls a PodDisruptionBudget for the OPA pod. Suggested use if having opa # always running for admission control is important podDisruptionBudget: enabled: false minAvailable: 1 # maxUnavailable: 1 # The helm Chart will automatically generate a CA and server certificate for # the OPA. If you want to supply your own certificates, set the field below to # false and add the PEM encoded CA certificate and server key pair below. # # WARNING: The common name name in the server certificate MUST match the # hostname of the service that exposes the OPA to the apiserver. For example. # if the service name is created in the "default" nanamespace with name "opa" # the common name MUST be set to "opa.default.svc". # # If the common name is not set correctly, the apiserver will refuse to # communicate with the OPA. generateAdmissionControllerCerts: true admissionControllerCA: "" admissionControllerCert: "" admissionControllerKey: "" authz: # Disable if you don't want authorization. # Mostly useful for debugging. enabled: true # Use hostNetwork setting on OPA pod hostNetwork: enabled: false # Docker image and tag to deploy. 
image: openpolicyagent/opa imageTag: 0.15.1 imagePullPolicy: IfNotPresent # One or more secrets to be used when pulling images imagePullSecrets: [] # - registrySecretName # Port to which the opa pod will bind itself # NOTE IF you use a different port make sure it maches the ones in the readinessProbe # and livenessProbe port: 443 extraArgs: [] mgmt: enabled: true image: openpolicyagent/kube-mgmt imageTag: "0.10" imagePullPolicy: IfNotPresent # NOTE insecure http port conjointly used for mgmt access and prometheus metrics export port: 8181 extraArgs: [] resources: {} data: enabled: false configmapPolicies: # NOTE IF you use these, remember to update the RBAC rules below to allow # permissions to get, list, watch, patch and update configmaps enabled: false namespaces: [opa, kube-federation-scheduling-policy] requireLabel: true replicate: # NOTE IF you use these, remember to update the RBAC rules below to allow # permissions to replicate these things cluster: [] # - [group/]version/resource namespace: [] # - [group/]version/resource path: kubernetes # Log level for OPA ('debug', 'info', 'error') (app default=info) logLevel: info # Log format for OPA ('text', 'json') (app default=text) logFormat: text # Number of OPA replicas to deploy. OPA maintains an eventually consistent # cache of policies and data. If you want high availability you can deploy two # or more replicas. replicas: 1 # To control how the OPA is scheduled on the cluster, set the affinity, # tolerations and nodeSelector values below. 
For example, to deploy OPA onto # the master nodes, 1 replica per node: # # affinity: # podAntiAffinity: # requiredDuringSchedulingIgnoredDuringExecution: # - labelSelector: # matchExpressions: # - key: "app" # operator: In # values: # - opa # topologyKey: "kubernetes.io/hostname" # tolerations: # - key: "node-role.kubernetes.io/master" # effect: NoSchedule # operator: Exists # nodeSelector: # kubernetes.io/role: "master" affinity: {} tolerations: [] nodeSelector: {} # To control the CPU and memory resource limits and requests for OPA, set the # field below. resources: {} rbac: # If true, create & use RBAC resources # create: true rules: cluster: [] # - apiGroups: # - "" # resources: # - namespaces # verbs: # - get # - list # - watch serviceAccount: # Specifies whether a ServiceAccount should be created create: true # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: # This proxy allows opa to make Kubernetes SubjectAccessReview checks against the # Kubernetes API. You can get a rego function at github.com/open-policy-agent/library sar: enabled: false image: lachlanevenson/k8s-kubectl imageTag: latest imagePullPolicy: IfNotPresent resources: {} # To control the liveness and readiness probes change the fields below. readinessProbe: httpGet: path: /health scheme: HTTPS port: 443 initialDelaySeconds: 3 periodSeconds: 5 livenessProbe: httpGet: path: /health scheme: HTTPS port: 443 initialDelaySeconds: 3 periodSeconds: 5 # Set a priorityClass using priorityClassName # priorityClassName: # Timeout for a webhook call in seconds. # Starting in kubernetes 1.14 you can set the timeout and it is # encouraged to use a small timeout for webhooks. If the webhook call times out, the request # the request is handled according to the webhook'sfailure policy. 
# timeoutSeconds: 20 securityContext: enabled: false runAsNonRoot: true runAsUser: 1 deploymentStrategy: {} # rollingUpdate: # maxSurge: 1 # maxUnavailable: 0 # type: RollingUpdate extraContainers: [] ## Additional containers to be added to the opa pod. # - name: example-app # image: example/example-app:latest # args: # - "run" # - "--port=11811" # - "--config=/etc/example-app-conf/config.yaml" # - "--opa-endpoint=https://localhost:443" # ports: # - name: http # containerPort: 11811 # protocol: TCP # volumeMounts: # - name: example-app-auth-config # mountPath: /etc/example-app-conf extraVolumes: [] ## Additional volumes to the opa pod. # - name: example-app-auth-config # secret: # secretName: example-app-auth-config extraPorts: [] ## Additional ports to the opa services. Useful to expose extra container ports. # - port: 11811 # protocol: TCP # name: http # targetPort: http
sematext-agent
["# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n(...TRUNCATED)
"agent:\n image:\n repository: sematext/agent\n tag: latest\n pullPolicy: Always\n servic(...TRUNCATED)
End of preview. Expand in Data Studio

Dataset Card for "helm-charts-uniform"

More Information needed

Downloads last month
12