From 3f0325fa9ffb05fbaf498ae5791caeefbab9d618 Mon Sep 17 00:00:00 2001
From: Peter Iglaev
Date: Thu, 4 Nov 2021 17:55:34 +0100
Subject: [PATCH] solution
---
README.md | 89 +-
docker-compose.yaml | 13 +
k8s/deploy/Chart.yaml | 3 +
k8s/deploy/templates/app-deployment.yaml | 22 +
k8s/deploy/templates/app-service.yaml | 11 +
k8s/deploy/templates/ingress.yaml | 18 +
k8s/deploy/templates/redis-deployment.yaml | 22 +
k8s/deploy/templates/redis-service.yaml | 11 +
k8s/deploy/values.yaml | 8 +
k8s/ingress-nginx/.helmignore | 22 +
k8s/ingress-nginx/CHANGELOG.md | 268 +
k8s/ingress-nginx/Chart.yaml | 22 +
k8s/ingress-nginx/OWNERS | 10 +
k8s/ingress-nginx/README.md | 227 +
.../controller-custom-ingressclass-flags.yaml | 7 +
.../ci/daemonset-customconfig-values.yaml | 14 +
.../ci/daemonset-customnodeport-values.yaml | 22 +
.../ci/daemonset-headers-values.yaml | 14 +
.../ci/daemonset-internal-lb-values.yaml | 14 +
.../ci/daemonset-nodeport-values.yaml | 10 +
.../ci/daemonset-podannotations-values.yaml | 17 +
...set-tcp-udp-configMapNamespace-values.yaml | 20 +
.../ci/daemonset-tcp-udp-values.yaml | 16 +
.../ci/daemonset-tcp-values.yaml | 14 +
.../ci/deamonset-default-values.yaml | 10 +
.../ci/deamonset-metrics-values.yaml | 12 +
.../ci/deamonset-psp-values.yaml | 13 +
.../ci/deamonset-webhook-and-psp-values.yaml | 13 +
.../ci/deamonset-webhook-values.yaml | 10 +
...eployment-autoscaling-behavior-values.yaml | 14 +
.../ci/deployment-autoscaling-values.yaml | 11 +
.../ci/deployment-customconfig-values.yaml | 12 +
.../ci/deployment-customnodeport-values.yaml | 20 +
.../ci/deployment-default-values.yaml | 8 +
.../ci/deployment-headers-values.yaml | 13 +
.../ci/deployment-internal-lb-values.yaml | 13 +
.../ci/deployment-metrics-values.yaml | 11 +
.../ci/deployment-nodeport-values.yaml | 9 +
.../ci/deployment-podannotations-values.yaml | 16 +
.../ci/deployment-psp-values.yaml | 10 +
...ent-tcp-udp-configMapNamespace-values.yaml | 19 +
.../ci/deployment-tcp-udp-values.yaml | 15 +
.../ci/deployment-tcp-values.yaml | 11 +
.../ci/deployment-webhook-and-psp-values.yaml | 12 +
.../deployment-webhook-resources-values.yaml | 23 +
.../ci/deployment-webhook-values.yaml | 9 +
k8s/ingress-nginx/templates/NOTES.txt | 71 +
k8s/ingress-nginx/templates/_helpers.tpl | 134 +
.../job-patch/clusterrole.yaml | 31 +
.../job-patch/clusterrolebinding.yaml | 20 +
.../job-patch/job-createSecret.yaml | 64 +
.../job-patch/job-patchWebhook.yaml | 66 +
.../admission-webhooks/job-patch/psp.yaml | 36 +
.../admission-webhooks/job-patch/role.yaml | 21 +
.../job-patch/rolebinding.yaml | 21 +
.../job-patch/serviceaccount.yaml | 13 +
.../validating-webhook.yaml | 45 +
k8s/ingress-nginx/templates/clusterrole.yaml | 81 +
.../templates/clusterrolebinding.yaml | 16 +
.../controller-configmap-addheaders.yaml | 11 +
.../controller-configmap-proxyheaders.yaml | 16 +
.../templates/controller-configmap-tcp.yaml | 14 +
.../templates/controller-configmap-udp.yaml | 14 +
.../templates/controller-configmap.yaml | 26 +
.../templates/controller-daemonset.yaml | 256 +
.../templates/controller-deployment.yaml | 257 +
.../templates/controller-hpa.yaml | 49 +
.../templates/controller-ingressclass.yaml | 21 +
.../templates/controller-keda.yaml | 39 +
.../controller-poddisruptionbudget.yaml | 16 +
.../templates/controller-prometheusrules.yaml | 21 +
.../templates/controller-psp.yaml | 86 +
.../templates/controller-role.yaml | 90 +
.../templates/controller-rolebinding.yaml | 18 +
.../controller-service-internal.yaml | 79 +
.../templates/controller-service-metrics.yaml | 44 +
.../templates/controller-service-webhook.yaml | 37 +
.../templates/controller-service.yaml | 91 +
.../templates/controller-serviceaccount.yaml | 11 +
.../templates/controller-servicemonitor.yaml | 45 +
.../templates/default-backend-deployment.yaml | 112 +
.../templates/default-backend-hpa.yaml | 30 +
.../default-backend-poddisruptionbudget.yaml | 16 +
.../templates/default-backend-psp.yaml | 33 +
.../templates/default-backend-role.yaml | 19 +
.../default-backend-rolebinding.yaml | 18 +
.../templates/default-backend-service.yaml | 38 +
.../default-backend-serviceaccount.yaml | 11 +
.../templates/dh-param-secret.yaml | 10 +
k8s/ingress-nginx/values.yaml | 856 ++
k8s/kube-prometheus-stack/.helmignore | 28 +
k8s/kube-prometheus-stack/CONTRIBUTING.md | 12 +
k8s/kube-prometheus-stack/Chart.lock | 12 +
k8s/kube-prometheus-stack/Chart.yaml | 50 +
k8s/kube-prometheus-stack/README.md | 480 ++
.../charts/grafana/.helmignore | 23 +
.../charts/grafana/Chart.yaml | 22 +
.../charts/grafana/README.md | 528 ++
.../charts/grafana/ci/default-values.yaml | 1 +
.../ci/with-dashboard-json-values.yaml | 53 +
.../grafana/ci/with-dashboard-values.yaml | 19 +
.../ci/with-image-renderer-values.yaml | 19 +
.../grafana/dashboards/custom-dashboard.json | 1 +
.../charts/grafana/templates/NOTES.txt | 54 +
.../charts/grafana/templates/_helpers.tpl | 163 +
.../charts/grafana/templates/_pod.tpl | 521 ++
.../charts/grafana/templates/clusterrole.yaml | 25 +
.../grafana/templates/clusterrolebinding.yaml | 24 +
.../configmap-dashboard-provider.yaml | 29 +
.../charts/grafana/templates/configmap.yaml | 82 +
.../templates/dashboards-json-configmap.yaml | 38 +
.../charts/grafana/templates/deployment.yaml | 50 +
.../grafana/templates/headless-service.yaml | 18 +
.../charts/grafana/templates/hpa.yaml | 20 +
.../templates/image-renderer-deployment.yaml | 119 +
.../image-renderer-network-policy.yaml | 76 +
.../templates/image-renderer-service.yaml | 30 +
.../charts/grafana/templates/ingress.yaml | 78 +
.../templates/poddisruptionbudget.yaml | 22 +
.../grafana/templates/podsecuritypolicy.yaml | 49 +
.../charts/grafana/templates/pvc.yaml | 33 +
.../charts/grafana/templates/role.yaml | 32 +
.../charts/grafana/templates/rolebinding.yaml | 25 +
.../charts/grafana/templates/secret-env.yaml | 14 +
.../charts/grafana/templates/secret.yaml | 26 +
.../charts/grafana/templates/service.yaml | 51 +
.../grafana/templates/serviceaccount.yaml | 13 +
.../grafana/templates/servicemonitor.yaml | 40 +
.../charts/grafana/templates/statefulset.yaml | 52 +
.../templates/tests/test-configmap.yaml | 17 +
.../tests/test-podsecuritypolicy.yaml | 29 +
.../grafana/templates/tests/test-role.yaml | 14 +
.../templates/tests/test-rolebinding.yaml | 17 +
.../templates/tests/test-serviceaccount.yaml | 9 +
.../charts/grafana/templates/tests/test.yaml | 48 +
.../charts/grafana/values.yaml | 749 ++
.../charts/kube-state-metrics/.helmignore | 21 +
.../charts/kube-state-metrics/Chart.yaml | 19 +
.../charts/kube-state-metrics/OWNERS | 6 +
.../charts/kube-state-metrics/README.md | 68 +
.../kube-state-metrics/templates/NOTES.txt | 10 +
.../kube-state-metrics/templates/_helpers.tpl | 47 +
.../templates/clusterrolebinding.yaml | 23 +
.../templates/deployment.yaml | 157 +
.../templates/kubeconfig-secret.yaml | 15 +
.../kube-state-metrics/templates/pdb.yaml | 20 +
.../templates/podsecuritypolicy.yaml | 42 +
.../templates/psp-clusterrole.yaml | 22 +
.../templates/psp-clusterrolebinding.yaml | 19 +
.../kube-state-metrics/templates/role.yaml | 190 +
.../templates/rolebinding.yaml | 27 +
.../kube-state-metrics/templates/service.yaml | 42 +
.../templates/serviceaccount.yaml | 18 +
.../templates/servicemonitor.yaml | 50 +
.../templates/stsdiscovery-role.yaml | 29 +
.../templates/stsdiscovery-rolebinding.yaml | 20 +
.../charts/kube-state-metrics/values.yaml | 214 +
.../prometheus-node-exporter/.helmignore | 21 +
.../prometheus-node-exporter/Chart.yaml | 18 +
.../charts/prometheus-node-exporter/README.md | 50 +
.../ci/port-values.yaml | 3 +
.../templates/NOTES.txt | 15 +
.../templates/_helpers.tpl | 66 +
.../templates/daemonset.yaml | 188 +
.../templates/endpoints.yaml | 18 +
.../templates/monitor.yaml | 35 +
.../templates/psp-clusterrole.yaml | 15 +
.../templates/psp-clusterrolebinding.yaml | 17 +
.../templates/psp.yaml | 56 +
.../templates/service.yaml | 23 +
.../templates/serviceaccount.yaml | 18 +
.../prometheus-node-exporter/values.yaml | 182 +
.../crds/crd-alertmanagerconfigs.yaml | 2441 ++++++
.../crds/crd-alertmanagers.yaml | 4899 ++++++++++++
.../crds/crd-podmonitors.yaml | 583 ++
.../crds/crd-probes.yaml | 569 ++
.../crds/crd-prometheuses.yaml | 7042 +++++++++++++++++
.../crds/crd-prometheusrules.yaml | 103 +
.../crds/crd-servicemonitors.yaml | 610 ++
.../crds/crd-thanosrulers.yaml | 5034 ++++++++++++
k8s/kube-prometheus-stack/templates/NOTES.txt | 4 +
.../templates/_helpers.tpl | 166 +
.../templates/alertmanager/alertmanager.yaml | 149 +
.../templates/alertmanager/extrasecret.yaml | 20 +
.../templates/alertmanager/ingress.yaml | 77 +
.../alertmanager/ingressperreplica.yaml | 67 +
.../alertmanager/podDisruptionBudget.yaml | 21 +
.../templates/alertmanager/psp-role.yaml | 21 +
.../alertmanager/psp-rolebinding.yaml | 18 +
.../templates/alertmanager/psp.yaml | 52 +
.../templates/alertmanager/secret.yaml | 27 +
.../templates/alertmanager/service.yaml | 50 +
.../alertmanager/serviceaccount.yaml | 20 +
.../alertmanager/servicemonitor.yaml | 45 +
.../alertmanager/serviceperreplica.yaml | 46 +
.../templates/exporters/core-dns/service.yaml | 24 +
.../exporters/core-dns/servicemonitor.yaml | 36 +
.../kube-api-server/servicemonitor.yaml | 39 +
.../kube-controller-manager/endpoints.yaml | 20 +
.../kube-controller-manager/service.yaml | 27 +
.../servicemonitor.yaml | 47 +
.../templates/exporters/kube-dns/service.yaml | 28 +
.../exporters/kube-dns/servicemonitor.yaml | 49 +
.../exporters/kube-etcd/endpoints.yaml | 20 +
.../exporters/kube-etcd/service.yaml | 27 +
.../exporters/kube-etcd/servicemonitor.yaml | 53 +
.../exporters/kube-proxy/endpoints.yaml | 20 +
.../exporters/kube-proxy/service.yaml | 27 +
.../exporters/kube-proxy/servicemonitor.yaml | 41 +
.../exporters/kube-scheduler/endpoints.yaml | 20 +
.../exporters/kube-scheduler/service.yaml | 27 +
.../kube-scheduler/servicemonitor.yaml | 47 +
.../kube-state-metrics/serviceMonitor.yaml | 63 +
.../exporters/kubelet/servicemonitor.yaml | 173 +
.../node-exporter/servicemonitor.yaml | 40 +
.../grafana/configmap-dashboards.yaml | 24 +
.../grafana/configmaps-datasources.yaml | 47 +
.../alertmanager-overview.yaml | 610 ++
.../grafana/dashboards-1.14/apiserver.yaml | 1747 ++++
.../dashboards-1.14/cluster-total.yaml | 1882 +++++
.../dashboards-1.14/controller-manager.yaml | 1177 +++
.../grafana/dashboards-1.14/etcd.yaml | 1116 +++
.../grafana/dashboards-1.14/k8s-coredns.yaml | 1531 ++++
.../k8s-resources-cluster.yaml | 3024 +++++++
.../k8s-resources-namespace.yaml | 2744 +++++++
.../dashboards-1.14/k8s-resources-node.yaml | 978 +++
.../dashboards-1.14/k8s-resources-pod.yaml | 2427 ++++++
.../k8s-resources-workload.yaml | 1986 +++++
.../k8s-resources-workloads-namespace.yaml | 2151 +++++
.../grafana/dashboards-1.14/kubelet.yaml | 2254 ++++++
.../dashboards-1.14/namespace-by-pod.yaml | 1464 ++++
.../namespace-by-workload.yaml | 1736 ++++
.../node-cluster-rsrc-use.yaml | 1063 +++
.../dashboards-1.14/node-rsrc-use.yaml | 1089 +++
.../grafana/dashboards-1.14/nodes.yaml | 991 +++
.../persistentvolumesusage.yaml | 577 ++
.../grafana/dashboards-1.14/pod-total.yaml | 1228 +++
.../prometheus-remote-write.yaml | 1670 ++++
.../grafana/dashboards-1.14/prometheus.yaml | 1235 +++
.../grafana/dashboards-1.14/proxy.yaml | 1257 +++
.../grafana/dashboards-1.14/scheduler.yaml | 1100 +++
.../grafana/dashboards-1.14/statefulset.yaml | 928 +++
.../dashboards-1.14/workload-total.yaml | 1438 ++++
.../templates/grafana/servicemonitor.yaml | 34 +
.../job-patch/clusterrole.yaml | 33 +
.../job-patch/clusterrolebinding.yaml | 20 +
.../job-patch/job-createSecret.yaml | 65 +
.../job-patch/job-patchWebhook.yaml | 66 +
.../admission-webhooks/job-patch/psp.yaml | 54 +
.../admission-webhooks/job-patch/role.yaml | 21 +
.../job-patch/rolebinding.yaml | 21 +
.../job-patch/serviceaccount.yaml | 17 +
.../mutatingWebhookConfiguration.yaml | 41 +
.../validatingWebhookConfiguration.yaml | 41 +
.../prometheus-operator/certmanager.yaml | 57 +
.../prometheus-operator/clusterrole.yaml | 80 +
.../clusterrolebinding.yaml | 17 +
.../prometheus-operator/deployment.yaml | 152 +
.../prometheus-operator/psp-clusterrole.yaml | 20 +
.../psp-clusterrolebinding.yaml | 17 +
.../templates/prometheus-operator/psp.yaml | 51 +
.../prometheus-operator/service.yaml | 55 +
.../prometheus-operator/serviceaccount.yaml | 16 +
.../prometheus-operator/servicemonitor.yaml | 44 +
.../templates/prometheus/_rules.tpl | 38 +
.../additionalAlertRelabelConfigs.yaml | 16 +
.../additionalAlertmanagerConfigs.yaml | 16 +
.../prometheus/additionalPrometheusRules.yaml | 43 +
.../prometheus/additionalScrapeConfigs.yaml | 16 +
.../templates/prometheus/clusterrole.yaml | 30 +
.../prometheus/clusterrolebinding.yaml | 18 +
.../templates/prometheus/csi-secret.yaml | 12 +
.../templates/prometheus/extrasecret.yaml | 20 +
.../templates/prometheus/ingress.yaml | 77 +
.../prometheus/ingressThanosSidecar.yaml | 76 +
.../prometheus/ingressperreplica.yaml | 67 +
.../prometheus/podDisruptionBudget.yaml | 21 +
.../templates/prometheus/podmonitors.yaml | 37 +
.../templates/prometheus/prometheus.yaml | 358 +
.../templates/prometheus/psp-clusterrole.yaml | 20 +
.../prometheus/psp-clusterrolebinding.yaml | 18 +
.../templates/prometheus/psp.yaml | 62 +
.../rules-1.14/alertmanager.rules.yaml | 175 +
.../templates/prometheus/rules-1.14/etcd.yaml | 179 +
.../prometheus/rules-1.14/general.rules.yaml | 60 +
.../prometheus/rules-1.14/k8s.rules.yaml | 163 +
.../kube-apiserver-availability.rules.yaml | 128 +
.../kube-apiserver-burnrate.rules.yaml | 328 +
.../kube-apiserver-histogram.rules.yaml | 49 +
.../rules-1.14/kube-apiserver-slos.yaml | 95 +
.../rules-1.14/kube-apiserver.rules.yaml | 358 +
.../kube-prometheus-general.rules.yaml | 31 +
.../kube-prometheus-node-recording.rules.yaml | 39 +
.../rules-1.14/kube-scheduler.rules.yaml | 63 +
.../rules-1.14/kube-state-metrics.yaml | 87 +
.../prometheus/rules-1.14/kubelet.rules.yaml | 39 +
.../rules-1.14/kubernetes-apps.yaml | 301 +
.../rules-1.14/kubernetes-resources.yaml | 159 +
.../rules-1.14/kubernetes-storage.yaml | 80 +
.../kubernetes-system-apiserver.yaml | 100 +
.../kubernetes-system-controller-manager.yaml | 41 +
.../rules-1.14/kubernetes-system-kubelet.yaml | 188 +
.../kubernetes-system-scheduler.yaml | 41 +
.../rules-1.14/kubernetes-system.yaml | 55 +
.../rules-1.14/node-exporter.rules.yaml | 79 +
.../prometheus/rules-1.14/node-exporter.yaml | 308 +
.../prometheus/rules-1.14/node-network.yaml | 39 +
.../prometheus/rules-1.14/node.rules.yaml | 51 +
.../rules-1.14/prometheus-operator.yaml | 113 +
.../prometheus/rules-1.14/prometheus.yaml | 307 +
.../templates/prometheus/service.yaml | 60 +
.../prometheus/serviceThanosSidecar.yaml | 36 +
.../serviceThanosSidecarExternal.yaml | 43 +
.../templates/prometheus/serviceaccount.yaml | 20 +
.../templates/prometheus/servicemonitor.yaml | 42 +
.../servicemonitorThanosSidecar.yaml | 41 +
.../templates/prometheus/servicemonitors.yaml | 38 +
.../prometheus/serviceperreplica.yaml | 46 +
k8s/kube-prometheus-stack/values.yaml | 2710 +++++++
redis-client | 1 +
src/Dockerfile | 24 +
go.mod => src/go.mod | 0
go.sum => src/go.sum | 0
main.go => src/main.go | 3 +-
324 files changed, 80730 insertions(+), 3 deletions(-)
create mode 100644 docker-compose.yaml
create mode 100644 k8s/deploy/Chart.yaml
create mode 100644 k8s/deploy/templates/app-deployment.yaml
create mode 100644 k8s/deploy/templates/app-service.yaml
create mode 100644 k8s/deploy/templates/ingress.yaml
create mode 100644 k8s/deploy/templates/redis-deployment.yaml
create mode 100644 k8s/deploy/templates/redis-service.yaml
create mode 100644 k8s/deploy/values.yaml
create mode 100644 k8s/ingress-nginx/.helmignore
create mode 100644 k8s/ingress-nginx/CHANGELOG.md
create mode 100644 k8s/ingress-nginx/Chart.yaml
create mode 100644 k8s/ingress-nginx/OWNERS
create mode 100644 k8s/ingress-nginx/README.md
create mode 100644 k8s/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml
create mode 100644 k8s/ingress-nginx/ci/daemonset-customconfig-values.yaml
create mode 100644 k8s/ingress-nginx/ci/daemonset-customnodeport-values.yaml
create mode 100644 k8s/ingress-nginx/ci/daemonset-headers-values.yaml
create mode 100644 k8s/ingress-nginx/ci/daemonset-internal-lb-values.yaml
create mode 100644 k8s/ingress-nginx/ci/daemonset-nodeport-values.yaml
create mode 100644 k8s/ingress-nginx/ci/daemonset-podannotations-values.yaml
create mode 100644 k8s/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml
create mode 100644 k8s/ingress-nginx/ci/daemonset-tcp-udp-values.yaml
create mode 100644 k8s/ingress-nginx/ci/daemonset-tcp-values.yaml
create mode 100644 k8s/ingress-nginx/ci/deamonset-default-values.yaml
create mode 100644 k8s/ingress-nginx/ci/deamonset-metrics-values.yaml
create mode 100644 k8s/ingress-nginx/ci/deamonset-psp-values.yaml
create mode 100644 k8s/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml
create mode 100644 k8s/ingress-nginx/ci/deamonset-webhook-values.yaml
create mode 100644 k8s/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml
create mode 100644 k8s/ingress-nginx/ci/deployment-autoscaling-values.yaml
create mode 100644 k8s/ingress-nginx/ci/deployment-customconfig-values.yaml
create mode 100644 k8s/ingress-nginx/ci/deployment-customnodeport-values.yaml
create mode 100644 k8s/ingress-nginx/ci/deployment-default-values.yaml
create mode 100644 k8s/ingress-nginx/ci/deployment-headers-values.yaml
create mode 100644 k8s/ingress-nginx/ci/deployment-internal-lb-values.yaml
create mode 100644 k8s/ingress-nginx/ci/deployment-metrics-values.yaml
create mode 100644 k8s/ingress-nginx/ci/deployment-nodeport-values.yaml
create mode 100644 k8s/ingress-nginx/ci/deployment-podannotations-values.yaml
create mode 100644 k8s/ingress-nginx/ci/deployment-psp-values.yaml
create mode 100644 k8s/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml
create mode 100644 k8s/ingress-nginx/ci/deployment-tcp-udp-values.yaml
create mode 100644 k8s/ingress-nginx/ci/deployment-tcp-values.yaml
create mode 100644 k8s/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml
create mode 100644 k8s/ingress-nginx/ci/deployment-webhook-resources-values.yaml
create mode 100644 k8s/ingress-nginx/ci/deployment-webhook-values.yaml
create mode 100644 k8s/ingress-nginx/templates/NOTES.txt
create mode 100644 k8s/ingress-nginx/templates/_helpers.tpl
create mode 100644 k8s/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
create mode 100644 k8s/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
create mode 100644 k8s/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
create mode 100644 k8s/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
create mode 100644 k8s/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml
create mode 100644 k8s/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
create mode 100644 k8s/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
create mode 100644 k8s/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
create mode 100644 k8s/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
create mode 100644 k8s/ingress-nginx/templates/clusterrole.yaml
create mode 100644 k8s/ingress-nginx/templates/clusterrolebinding.yaml
create mode 100644 k8s/ingress-nginx/templates/controller-configmap-addheaders.yaml
create mode 100644 k8s/ingress-nginx/templates/controller-configmap-proxyheaders.yaml
create mode 100644 k8s/ingress-nginx/templates/controller-configmap-tcp.yaml
create mode 100644 k8s/ingress-nginx/templates/controller-configmap-udp.yaml
create mode 100644 k8s/ingress-nginx/templates/controller-configmap.yaml
create mode 100644 k8s/ingress-nginx/templates/controller-daemonset.yaml
create mode 100644 k8s/ingress-nginx/templates/controller-deployment.yaml
create mode 100644 k8s/ingress-nginx/templates/controller-hpa.yaml
create mode 100644 k8s/ingress-nginx/templates/controller-ingressclass.yaml
create mode 100644 k8s/ingress-nginx/templates/controller-keda.yaml
create mode 100644 k8s/ingress-nginx/templates/controller-poddisruptionbudget.yaml
create mode 100644 k8s/ingress-nginx/templates/controller-prometheusrules.yaml
create mode 100644 k8s/ingress-nginx/templates/controller-psp.yaml
create mode 100644 k8s/ingress-nginx/templates/controller-role.yaml
create mode 100644 k8s/ingress-nginx/templates/controller-rolebinding.yaml
create mode 100644 k8s/ingress-nginx/templates/controller-service-internal.yaml
create mode 100644 k8s/ingress-nginx/templates/controller-service-metrics.yaml
create mode 100644 k8s/ingress-nginx/templates/controller-service-webhook.yaml
create mode 100644 k8s/ingress-nginx/templates/controller-service.yaml
create mode 100644 k8s/ingress-nginx/templates/controller-serviceaccount.yaml
create mode 100644 k8s/ingress-nginx/templates/controller-servicemonitor.yaml
create mode 100644 k8s/ingress-nginx/templates/default-backend-deployment.yaml
create mode 100644 k8s/ingress-nginx/templates/default-backend-hpa.yaml
create mode 100644 k8s/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml
create mode 100644 k8s/ingress-nginx/templates/default-backend-psp.yaml
create mode 100644 k8s/ingress-nginx/templates/default-backend-role.yaml
create mode 100644 k8s/ingress-nginx/templates/default-backend-rolebinding.yaml
create mode 100644 k8s/ingress-nginx/templates/default-backend-service.yaml
create mode 100644 k8s/ingress-nginx/templates/default-backend-serviceaccount.yaml
create mode 100644 k8s/ingress-nginx/templates/dh-param-secret.yaml
create mode 100644 k8s/ingress-nginx/values.yaml
create mode 100644 k8s/kube-prometheus-stack/.helmignore
create mode 100644 k8s/kube-prometheus-stack/CONTRIBUTING.md
create mode 100644 k8s/kube-prometheus-stack/Chart.lock
create mode 100644 k8s/kube-prometheus-stack/Chart.yaml
create mode 100644 k8s/kube-prometheus-stack/README.md
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/.helmignore
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/Chart.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/README.md
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/ci/default-values.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/ci/with-dashboard-json-values.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/ci/with-dashboard-values.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/ci/with-image-renderer-values.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/dashboards/custom-dashboard.json
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/NOTES.txt
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/_helpers.tpl
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/_pod.tpl
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/clusterrole.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/clusterrolebinding.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/configmap-dashboard-provider.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/configmap.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/dashboards-json-configmap.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/deployment.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/headless-service.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/hpa.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/image-renderer-deployment.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/image-renderer-network-policy.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/image-renderer-service.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/ingress.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/poddisruptionbudget.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/podsecuritypolicy.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/pvc.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/role.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/rolebinding.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/secret-env.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/secret.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/service.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/serviceaccount.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/servicemonitor.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/statefulset.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/tests/test-configmap.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/tests/test-podsecuritypolicy.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/tests/test-role.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/tests/test-rolebinding.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/tests/test-serviceaccount.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/templates/tests/test.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/grafana/values.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/kube-state-metrics/.helmignore
create mode 100644 k8s/kube-prometheus-stack/charts/kube-state-metrics/Chart.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/kube-state-metrics/OWNERS
create mode 100644 k8s/kube-prometheus-stack/charts/kube-state-metrics/README.md
create mode 100644 k8s/kube-prometheus-stack/charts/kube-state-metrics/templates/NOTES.txt
create mode 100644 k8s/kube-prometheus-stack/charts/kube-state-metrics/templates/_helpers.tpl
create mode 100644 k8s/kube-prometheus-stack/charts/kube-state-metrics/templates/clusterrolebinding.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/kube-state-metrics/templates/deployment.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/kube-state-metrics/templates/kubeconfig-secret.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/kube-state-metrics/templates/pdb.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/kube-state-metrics/templates/podsecuritypolicy.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/kube-state-metrics/templates/psp-clusterrole.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/kube-state-metrics/templates/role.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/kube-state-metrics/templates/rolebinding.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/kube-state-metrics/templates/service.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/kube-state-metrics/templates/serviceaccount.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/kube-state-metrics/templates/servicemonitor.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/kube-state-metrics/templates/stsdiscovery-role.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/kube-state-metrics/values.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/prometheus-node-exporter/.helmignore
create mode 100644 k8s/kube-prometheus-stack/charts/prometheus-node-exporter/Chart.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/prometheus-node-exporter/README.md
create mode 100644 k8s/kube-prometheus-stack/charts/prometheus-node-exporter/ci/port-values.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/prometheus-node-exporter/templates/NOTES.txt
create mode 100644 k8s/kube-prometheus-stack/charts/prometheus-node-exporter/templates/_helpers.tpl
create mode 100644 k8s/kube-prometheus-stack/charts/prometheus-node-exporter/templates/daemonset.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/prometheus-node-exporter/templates/endpoints.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/prometheus-node-exporter/templates/monitor.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/prometheus-node-exporter/templates/psp.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/prometheus-node-exporter/templates/service.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/prometheus-node-exporter/templates/serviceaccount.yaml
create mode 100644 k8s/kube-prometheus-stack/charts/prometheus-node-exporter/values.yaml
create mode 100644 k8s/kube-prometheus-stack/crds/crd-alertmanagerconfigs.yaml
create mode 100644 k8s/kube-prometheus-stack/crds/crd-alertmanagers.yaml
create mode 100644 k8s/kube-prometheus-stack/crds/crd-podmonitors.yaml
create mode 100644 k8s/kube-prometheus-stack/crds/crd-probes.yaml
create mode 100644 k8s/kube-prometheus-stack/crds/crd-prometheuses.yaml
create mode 100644 k8s/kube-prometheus-stack/crds/crd-prometheusrules.yaml
create mode 100644 k8s/kube-prometheus-stack/crds/crd-servicemonitors.yaml
create mode 100644 k8s/kube-prometheus-stack/crds/crd-thanosrulers.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/NOTES.txt
create mode 100644 k8s/kube-prometheus-stack/templates/_helpers.tpl
create mode 100644 k8s/kube-prometheus-stack/templates/alertmanager/alertmanager.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/alertmanager/extrasecret.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/alertmanager/ingress.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/alertmanager/ingressperreplica.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/alertmanager/podDisruptionBudget.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/alertmanager/psp-role.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/alertmanager/psp-rolebinding.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/alertmanager/psp.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/alertmanager/secret.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/alertmanager/service.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/alertmanager/serviceaccount.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/alertmanager/servicemonitor.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/alertmanager/serviceperreplica.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/exporters/core-dns/service.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/exporters/core-dns/servicemonitor.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/exporters/kube-api-server/servicemonitor.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/exporters/kube-controller-manager/endpoints.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/exporters/kube-controller-manager/service.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/exporters/kube-controller-manager/servicemonitor.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/exporters/kube-dns/service.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/exporters/kube-dns/servicemonitor.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/exporters/kube-etcd/endpoints.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/exporters/kube-etcd/service.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/exporters/kube-etcd/servicemonitor.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/exporters/kube-proxy/endpoints.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/exporters/kube-proxy/service.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/exporters/kube-proxy/servicemonitor.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/exporters/kube-scheduler/endpoints.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/exporters/kube-scheduler/service.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/exporters/kube-scheduler/servicemonitor.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/exporters/kube-state-metrics/serviceMonitor.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/exporters/kubelet/servicemonitor.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/exporters/node-exporter/servicemonitor.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/configmap-dashboards.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/configmaps-datasources.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/alertmanager-overview.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/apiserver.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/cluster-total.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/controller-manager.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/etcd.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/k8s-coredns.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/k8s-resources-cluster.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/k8s-resources-namespace.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/k8s-resources-node.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/k8s-resources-pod.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/k8s-resources-workload.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/k8s-resources-workloads-namespace.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/kubelet.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/namespace-by-pod.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/namespace-by-workload.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/node-cluster-rsrc-use.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/node-rsrc-use.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/nodes.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/persistentvolumesusage.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/pod-total.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/prometheus-remote-write.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/prometheus.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/proxy.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/scheduler.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/statefulset.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/dashboards-1.14/workload-total.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/grafana/servicemonitor.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/job-patch/clusterrole.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/job-patch/clusterrolebinding.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/job-patch/job-createSecret.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/job-patch/job-patchWebhook.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/job-patch/psp.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/job-patch/role.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/job-patch/rolebinding.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/job-patch/serviceaccount.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus-operator/certmanager.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus-operator/clusterrole.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus-operator/clusterrolebinding.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus-operator/deployment.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus-operator/psp-clusterrole.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus-operator/psp-clusterrolebinding.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus-operator/psp.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus-operator/service.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus-operator/serviceaccount.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus-operator/servicemonitor.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/_rules.tpl
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/additionalAlertRelabelConfigs.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/additionalAlertmanagerConfigs.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/additionalPrometheusRules.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/additionalScrapeConfigs.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/clusterrole.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/clusterrolebinding.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/csi-secret.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/extrasecret.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/ingress.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/ingressThanosSidecar.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/ingressperreplica.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/podDisruptionBudget.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/podmonitors.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/prometheus.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/psp-clusterrole.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/psp-clusterrolebinding.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/psp.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/alertmanager.rules.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/etcd.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/general.rules.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/k8s.rules.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/kube-apiserver-availability.rules.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/kube-apiserver-burnrate.rules.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/kube-apiserver-histogram.rules.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/kube-apiserver-slos.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/kube-apiserver.rules.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/kube-prometheus-general.rules.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/kube-prometheus-node-recording.rules.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/kube-scheduler.rules.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/kube-state-metrics.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/kubelet.rules.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-apps.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-resources.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-storage.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-system-apiserver.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-system-controller-manager.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-system-kubelet.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-system-scheduler.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-system.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/node-exporter.rules.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/node-exporter.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/node-network.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/node.rules.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/prometheus-operator.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/rules-1.14/prometheus.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/service.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/serviceThanosSidecar.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/serviceThanosSidecarExternal.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/serviceaccount.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/servicemonitor.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/servicemonitorThanosSidecar.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/servicemonitors.yaml
create mode 100644 k8s/kube-prometheus-stack/templates/prometheus/serviceperreplica.yaml
create mode 100644 k8s/kube-prometheus-stack/values.yaml
create mode 160000 redis-client
create mode 100644 src/Dockerfile
rename go.mod => src/go.mod (100%)
rename go.sum => src/go.sum (100%)
rename main.go => src/main.go (91%)
diff --git a/README.md b/README.md
index dd49286..8cd4b2b 100644
--- a/README.md
+++ b/README.md
@@ -13,7 +13,92 @@ NOTE: The following tasks should take no more than 1 hour total.
2. create docker-compose.yaml to replicate a full running environment
so that a developer can run the entire application locally without having
to run any dependencies (i.e. redis) in a separate process.
-3. explain how you would monitor this application in production. Please
-write code to do the monitoring.
+3. Explain how you would monitor this application in production. Please write code/scripts to do the monitoring.
+
+
+### Kubernetes(MiniKube) Tasks
+1. Prepare local Kubernetes environment (using MiniKube) to run our application in pod/container. Store all relevant scripts (kubectl commands etc) in your forked repository.
+2. Suggest & create minimal local infrastructure to perform functional testing/monitoring of our application pod. Demonstrate monitoring of relevant results & metrics for normal app behavior and failure(s).
+
+
Please fork this repository and make a pull request with your changes.
+
+Please provide test monitoring results in any convenient form (files, images, additional notes) as a separate archive.
+
+
+
+## Solution
+
+1. Dockerfile: ./src/Dockerfile
+2. For developer:
+```sh
+# To see the docker compose manifest
+cat docker-compose.yaml
+# Start
+docker compose up
+# Stop
+docker compose down
+```
+
+3. Application monitoring.
+It mostly depends on which metrics we want to collect.
+I suggest:
+ - add functionality to write logs to the file
+ - mount disk
+ - run a sidecar container with filebeat/metricbeat or fluentd and push logs to some storage (e.g. InfluxDB)
+ - Grafana as UI. Grafana might be used from kube-prometheus-stack(see below)
+
+
+4. Kubernetes.
+I implemented the configuration using Helm. Of course, plain kubectl manifests could be used instead.
+Helm chart: ./k8s
+
+4.1 Create namespace for application
+
+```sh
+kubectl create namespace test-app
+```
+
+4.2 Deploy ingress-nginx(or similar) if you don't have any ingress controllers
+```sh
+cd ./k8s/ingress-nginx/
+helm install -n test-app ingress-nginx .
+```
+
+4.3 Deploy application
+```sh
+cd ./k8s/deploy/
+helm install -n test-app test-app .
+# if you have some changes:
+helm upgrade -n test-app test-app .
+```
+
+You can specify the images in values file: ./k8s/deploy/values.yaml
+
+5. k8s cluster monitoring
+
+5.1 Create monitoring namespace:
+```sh
+kubectl create namespace monitoring
+```
+
+5.2 Deploy kube-prometheus-stack
+By default it will deploy prometheus, alert manager, grafana.
+You can flexibly customize the configuration and add additional alerts in the values file: ./k8s/kube-prometheus-stack/values.yaml
+
+```sh
+cd ./k8s/kube-prometheus-stack
+helm install -n monitoring monitoring .
+```
+
+6. Grafana
+There are a lot of pre-defined dashboards.
+
+For instance: http://localhost/grafana/d/6581e46e4e5c7ba40a07646395ef7b23/kubernetes-compute-resources-pod?orgId=1&refresh=10s&var-datasource=Prometheus&var-cluster=&var-namespace=test-app&var-pod=test-app-app-deployment-7ddd5f94fb-2b4j7&from=now-5m&to=now
+
+7. Routing
+"/" - application
+"/prometheous"
+"/grafana"
+
diff --git a/docker-compose.yaml b/docker-compose.yaml
new file mode 100644
index 0000000..32a86dc
--- /dev/null
+++ b/docker-compose.yaml
@@ -0,0 +1,13 @@
+services:
+ app:
+ build: src
+ ports:
+ - "8080:8080"
+ depends_on:
+ - redis
+ environment:
+ - REDIS_ADDR=redis:6379
+ redis:
+ image: redis
+ ports:
+ - "6379:6379"
diff --git a/k8s/deploy/Chart.yaml b/k8s/deploy/Chart.yaml
new file mode 100644
index 0000000..55c91ef
--- /dev/null
+++ b/k8s/deploy/Chart.yaml
@@ -0,0 +1,3 @@
+name: "DevOps Test"
+owner: Petr Iglaev
+version: 1.0
\ No newline at end of file
diff --git a/k8s/deploy/templates/app-deployment.yaml b/k8s/deploy/templates/app-deployment.yaml
new file mode 100644
index 0000000..6b85153
--- /dev/null
+++ b/k8s/deploy/templates/app-deployment.yaml
@@ -0,0 +1,22 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ .Release.Name }}-app-deployment
+ labels:
+ app: {{ .Release.Name }}-app-deployment
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: {{ .Release.Name }}-app-deployment
+ template:
+ metadata:
+ labels:
+ app: {{ .Release.Name }}-app-deployment
+ spec:
+ containers:
+ - name: app
+ image: {{ .Values.app_container }}
+ imagePullPolicy: {{ .Values.app_pull_policy }}
+ ports:
+ - containerPort: 8080
diff --git a/k8s/deploy/templates/app-service.yaml b/k8s/deploy/templates/app-service.yaml
new file mode 100644
index 0000000..23704bf
--- /dev/null
+++ b/k8s/deploy/templates/app-service.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: service-app
+spec:
+ selector:
+ app: {{ .Release.Name }}-app-deployment
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
\ No newline at end of file
diff --git a/k8s/deploy/templates/ingress.yaml b/k8s/deploy/templates/ingress.yaml
new file mode 100644
index 0000000..0a3f48c
--- /dev/null
+++ b/k8s/deploy/templates/ingress.yaml
@@ -0,0 +1,18 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: ingress-app
+ annotations:
+ # nginx.ingress.kubernetes.io/rewrite-target: /$2
+ kubernetes.io/ingress.class: "nginx"
+spec:
+ rules:
+ - http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: service-app
+ port:
+ number: 8080
\ No newline at end of file
diff --git a/k8s/deploy/templates/redis-deployment.yaml b/k8s/deploy/templates/redis-deployment.yaml
new file mode 100644
index 0000000..71fc934
--- /dev/null
+++ b/k8s/deploy/templates/redis-deployment.yaml
@@ -0,0 +1,22 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ .Release.Name }}-redis-deployment
+ labels:
+ app: {{ .Release.Name }}-redis-deployment
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: {{ .Release.Name }}-redis-deployment
+ template:
+ metadata:
+ labels:
+ app: {{ .Release.Name }}-redis-deployment
+ spec:
+ containers:
+ - name: redis
+ image: {{ .Values.redis_container }}
+ imagePullPolicy: IfNotPresent
+ ports:
+ - containerPort: 6379
\ No newline at end of file
diff --git a/k8s/deploy/templates/redis-service.yaml b/k8s/deploy/templates/redis-service.yaml
new file mode 100644
index 0000000..673c0e7
--- /dev/null
+++ b/k8s/deploy/templates/redis-service.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: service-redis
+spec:
+ selector:
+ app: {{ .Release.Name }}-redis-deployment
+ ports:
+ - protocol: TCP
+ port: 6379
+ targetPort: 6379
\ No newline at end of file
diff --git a/k8s/deploy/values.yaml b/k8s/deploy/values.yaml
new file mode 100644
index 0000000..70ae3a7
--- /dev/null
+++ b/k8s/deploy/values.yaml
@@ -0,0 +1,8 @@
+# Application
+app_container: devopstest:2.0
+# Set IfNotPresent if your image is remote
+app_pull_policy: Never
+
+
+# Redis
+redis_container: redis:latest
\ No newline at end of file
diff --git a/k8s/ingress-nginx/.helmignore b/k8s/ingress-nginx/.helmignore
new file mode 100644
index 0000000..50af031
--- /dev/null
+++ b/k8s/ingress-nginx/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/k8s/ingress-nginx/CHANGELOG.md b/k8s/ingress-nginx/CHANGELOG.md
new file mode 100644
index 0000000..e569bf6
--- /dev/null
+++ b/k8s/ingress-nginx/CHANGELOG.md
@@ -0,0 +1,268 @@
+# Changelog
+
+This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
+
+### 4.0.5
+
+- [7740] https://github.com/kubernetes/ingress-nginx/pull/7740 Release v1.0.3 of ingress-nginx
+
+
+### 4.0.3
+
+- [7707] https://github.com/kubernetes/ingress-nginx/pull/7707 Release v1.0.2 of ingress-nginx
+
+
+### 4.0.2
+
+- [7681] https://github.com/kubernetes/ingress-nginx/pull/7681 Release v1.0.1 of ingress-nginx
+
+### 4.0.1
+
+- [7535] https://github.com/kubernetes/ingress-nginx/pull/7535 Release v1.0.0 ingress-nginx
+
+### 3.34.0
+
+- [7256] https://github.com/kubernetes/ingress-nginx/pull/7256 Add namespace field in the namespace scoped resource templates
+
+### 3.33.0
+
+- [7164] https://github.com/kubernetes/ingress-nginx/pull/7164 Update nginx to v1.20.1
+
+### 3.32.0
+
+- [7117] https://github.com/kubernetes/ingress-nginx/pull/7117 Add annotations for HPA
+
+### 3.31.0
+
+- [7137] https://github.com/kubernetes/ingress-nginx/pull/7137 Add support for custom probes
+
+### 3.30.0
+
+- [#7092](https://github.com/kubernetes/ingress-nginx/pull/7092) Removes the possibility of using localhost in ExternalNames as endpoints
+
+### 3.29.0
+
+- [X] [#6945](https://github.com/kubernetes/ingress-nginx/pull/7020) Add option to specify job label for ServiceMonitor
+
+### 3.28.0
+
+- [ ] [#6900](https://github.com/kubernetes/ingress-nginx/pull/6900) Support existing PSPs
+
+### 3.27.0
+
+- Update ingress-nginx v0.45.0
+
+### 3.26.0
+
+- [X] [#6979](https://github.com/kubernetes/ingress-nginx/pull/6979) Changed servicePort value for metrics
+
+### 3.25.0
+
+- [X] [#6957](https://github.com/kubernetes/ingress-nginx/pull/6957) Add ability to specify automountServiceAccountToken
+
+### 3.24.0
+
+- [X] [#6908](https://github.com/kubernetes/ingress-nginx/pull/6908) Add volumes to default-backend deployment
+
+### 3.23.0
+
+- Update ingress-nginx v0.44.0
+
+### 3.22.0
+
+- [X] [#6802](https://github.com/kubernetes/ingress-nginx/pull/6802) Add value for configuring a custom Diffie-Hellman parameters file
+- [X] [#6815](https://github.com/kubernetes/ingress-nginx/pull/6815) Allow use of numeric namespaces in helm chart
+
+### 3.21.0
+
+- [X] [#6783](https://github.com/kubernetes/ingress-nginx/pull/6783) Add custom annotations to ScaledObject
+- [X] [#6761](https://github.com/kubernetes/ingress-nginx/pull/6761) Adding quotes in the serviceAccount name in Helm values
+- [X] [#6767](https://github.com/kubernetes/ingress-nginx/pull/6767) Remove ClusterRole when scope option is enabled
+- [X] [#6785](https://github.com/kubernetes/ingress-nginx/pull/6785) Update kube-webhook-certgen image to v1.5.1
+
+### 3.20.1
+
+- Do not create KEDA in case of DaemonSets.
+- Fix KEDA v2 definition
+
+### 3.20.0
+
+- [X] [#6730](https://github.com/kubernetes/ingress-nginx/pull/6730) Do not create HPA for defaultBackend if not enabled.
+
+### 3.19.0
+
+- Update ingress-nginx v0.43.0
+
+### 3.18.0
+
+- [X] [#6688](https://github.com/kubernetes/ingress-nginx/pull/6688) Allow volume-type emptyDir in controller podsecuritypolicy
+- [X] [#6691](https://github.com/kubernetes/ingress-nginx/pull/6691) Improve parsing of helm parameters
+
+### 3.17.0
+
+- Update ingress-nginx v0.42.0
+
+### 3.16.1
+
+- Fix chart-releaser action
+
+### 3.16.0
+
+- [X] [#6646](https://github.com/kubernetes/ingress-nginx/pull/6646) Added LoadBalancerIP value for internal service
+
+### 3.15.1
+
+- Fix chart-releaser action
+
+### 3.15.0
+
+- [X] [#6586](https://github.com/kubernetes/ingress-nginx/pull/6586) Fix 'maxmindLicenseKey' location in values.yaml
+
+### 3.14.0
+
+- [X] [#6469](https://github.com/kubernetes/ingress-nginx/pull/6469) Allow custom service names for controller and backend
+
+### 3.13.0
+
+- [X] [#6544](https://github.com/kubernetes/ingress-nginx/pull/6544) Fix default backend HPA name variable
+
+### 3.12.0
+
+- [X] [#6514](https://github.com/kubernetes/ingress-nginx/pull/6514) Remove helm2 support and update docs
+
+### 3.11.1
+
+- [X] [#6505](https://github.com/kubernetes/ingress-nginx/pull/6505) Reorder HPA resource list to work with GitOps tooling
+
+### 3.11.0
+
+- Support Keda Autoscaling
+
+### 3.10.1
+
+- Fix regression introduced in 0.41.0 with external authentication
+
+### 3.10.0
+
+- Fix routing regression introduced in 0.41.0 with PathType Exact
+
+### 3.9.0
+
+- [X] [#6423](https://github.com/kubernetes/ingress-nginx/pull/6423) Add Default backend HPA autoscaling
+
+### 3.8.0
+
+- [X] [#6395](https://github.com/kubernetes/ingress-nginx/pull/6395) Update jettech/kube-webhook-certgen image
+- [X] [#6377](https://github.com/kubernetes/ingress-nginx/pull/6377) Added loadBalancerSourceRanges for internal lbs
+- [X] [#6356](https://github.com/kubernetes/ingress-nginx/pull/6356) Add securitycontext settings on defaultbackend
+- [X] [#6401](https://github.com/kubernetes/ingress-nginx/pull/6401) Fix controller service annotations
+- [X] [#6403](https://github.com/kubernetes/ingress-nginx/pull/6403) Initial helm chart changelog
+
+### 3.7.1
+
+- [X] [#6326](https://github.com/kubernetes/ingress-nginx/pull/6326) Fix liveness and readiness probe path in daemonset chart
+
+### 3.7.0
+
+- [X] [#6316](https://github.com/kubernetes/ingress-nginx/pull/6316) Numerals in podAnnotations in quotes [#6315](https://github.com/kubernetes/ingress-nginx/issues/6315)
+
+### 3.6.0
+
+- [X] [#6305](https://github.com/kubernetes/ingress-nginx/pull/6305) Add default linux nodeSelector
+
+### 3.5.1
+
+- [X] [#6299](https://github.com/kubernetes/ingress-nginx/pull/6299) Fix helm chart release
+
+### 3.5.0
+
+- [X] [#6260](https://github.com/kubernetes/ingress-nginx/pull/6260) Allow Helm Chart to customize admission webhook's annotations, timeoutSeconds, namespaceSelector, objectSelector and cert files locations
+
+### 3.4.0
+
+- [X] [#6268](https://github.com/kubernetes/ingress-nginx/pull/6268) Update to 0.40.2 in helm chart #6288
+
+### 3.3.1
+
+- [X] [#6259](https://github.com/kubernetes/ingress-nginx/pull/6259) Release helm chart
+- [X] [#6258](https://github.com/kubernetes/ingress-nginx/pull/6258) Fix chart markdown link
+- [X] [#6253](https://github.com/kubernetes/ingress-nginx/pull/6253) Release v0.40.0
+
+### 3.3.1
+
+- [X] [#6233](https://github.com/kubernetes/ingress-nginx/pull/6233) Add admission controller e2e test
+
+### 3.3.0
+
+- [X] [#6203](https://github.com/kubernetes/ingress-nginx/pull/6203) Refactor parsing of key values
+- [X] [#6162](https://github.com/kubernetes/ingress-nginx/pull/6162) Add helm chart options to expose metrics service as NodePort
+- [X] [#6180](https://github.com/kubernetes/ingress-nginx/pull/6180) Fix helm chart admissionReviewVersions regression
+- [X] [#6169](https://github.com/kubernetes/ingress-nginx/pull/6169) Fix Typo in example prometheus rules
+
+### 3.0.0
+
+- [X] [#6167](https://github.com/kubernetes/ingress-nginx/pull/6167) Update chart requirements
+
+### 2.16.0
+
+- [X] [#6154](https://github.com/kubernetes/ingress-nginx/pull/6154) add `topologySpreadConstraint` to controller
+
+### 2.15.0
+
+- [X] [#6087](https://github.com/kubernetes/ingress-nginx/pull/6087) Adding parameter for externalTrafficPolicy in internal controller service spec
+
+### 2.14.0
+
+- [X] [#6104](https://github.com/kubernetes/ingress-nginx/pull/6104) Misc fixes for nginx-ingress chart for better keel and prometheus-operator integration
+
+### 2.13.0
+
+- [X] [#6093](https://github.com/kubernetes/ingress-nginx/pull/6093) Release v0.35.0
+
+### 2.13.0
+
+- [X] [#6093](https://github.com/kubernetes/ingress-nginx/pull/6093) Release v0.35.0
+- [X] [#6080](https://github.com/kubernetes/ingress-nginx/pull/6080) Switch images to k8s.gcr.io after Vanity Domain Flip
+
+### 2.12.1
+
+- [X] [#6075](https://github.com/kubernetes/ingress-nginx/pull/6075) Sync helm chart affinity examples
+
+### 2.12.0
+
+- [X] [#6039](https://github.com/kubernetes/ingress-nginx/pull/6039) Add configurable serviceMonitor metricRelabelling and targetLabels
+- [X] [#6044](https://github.com/kubernetes/ingress-nginx/pull/6044) Fix YAML linting
+
+### 2.11.3
+
+- [X] [#6038](https://github.com/kubernetes/ingress-nginx/pull/6038) Bump chart version PATCH
+
+### 2.11.2
+
+- [X] [#5951](https://github.com/kubernetes/ingress-nginx/pull/5951) Bump chart patch version
+
+### 2.11.1
+
+- [X] [#5900](https://github.com/kubernetes/ingress-nginx/pull/5900) Release helm chart for v0.34.1
+
+### 2.11.0
+
+- [X] [#5879](https://github.com/kubernetes/ingress-nginx/pull/5879) Update helm chart for v0.34.0
+- [X] [#5671](https://github.com/kubernetes/ingress-nginx/pull/5671) Make liveness probe more fault tolerant than readiness probe
+
+### 2.10.0
+
+- [X] [#5843](https://github.com/kubernetes/ingress-nginx/pull/5843) Update jettech/kube-webhook-certgen image
+
+### 2.9.1
+
+- [X] [#5823](https://github.com/kubernetes/ingress-nginx/pull/5823) Add quoting to sysctls because numeric values need to be presented as strings (#5823)
+
+### 2.9.0
+
+- [X] [#5795](https://github.com/kubernetes/ingress-nginx/pull/5795) Use fully qualified images to avoid cri-o issues
+
+
+### TODO
+
+Keep building the changelog using *git log charts* checking the tag
diff --git a/k8s/ingress-nginx/Chart.yaml b/k8s/ingress-nginx/Chart.yaml
new file mode 100644
index 0000000..3729343
--- /dev/null
+++ b/k8s/ingress-nginx/Chart.yaml
@@ -0,0 +1,22 @@
+annotations:
+ artifacthub.io/changes: |
+ - Bump various libraries
+ - Fix selector for shutting down Pods
+ - Fix overlap check when ingress is configured as canary
+ artifacthub.io/prerelease: "false"
+apiVersion: v2
+appVersion: 1.0.3
+description: Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer
+home: https://github.com/kubernetes/ingress-nginx
+icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png
+keywords:
+- ingress
+- nginx
+kubeVersion: '>=1.19.0-0'
+maintainers:
+- name: ChiefAlexander
+name: ingress-nginx
+sources:
+- https://github.com/kubernetes/ingress-nginx
+type: application
+version: 4.0.5
diff --git a/k8s/ingress-nginx/OWNERS b/k8s/ingress-nginx/OWNERS
new file mode 100644
index 0000000..6b7e049
--- /dev/null
+++ b/k8s/ingress-nginx/OWNERS
@@ -0,0 +1,10 @@
+# See the OWNERS docs: https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md
+
+approvers:
+- ingress-nginx-helm-maintainers
+
+reviewers:
+- ingress-nginx-helm-reviewers
+
+labels:
+- area/helm
diff --git a/k8s/ingress-nginx/README.md b/k8s/ingress-nginx/README.md
new file mode 100644
index 0000000..fecbbcd
--- /dev/null
+++ b/k8s/ingress-nginx/README.md
@@ -0,0 +1,227 @@
+# ingress-nginx
+
+[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer
+
+To use, add the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources.
+
+This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+## Prerequisites
+
+- Kubernetes v1.16+
+
+## Get Repo Info
+
+```console
+helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
+helm repo update
+```
+
+## Install Chart
+
+**Important:** only Helm 3 is supported
+
+```console
+helm install [RELEASE_NAME] ingress-nginx/ingress-nginx
+```
+
+The command deploys ingress-nginx on the Kubernetes cluster in the default configuration.
+
+_See [configuration](#configuration) below._
+
+_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
+
+## Uninstall Chart
+
+```console
+helm uninstall [RELEASE_NAME]
+```
+
+This removes all the Kubernetes components associated with the chart and deletes the release.
+
+_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
+
+## Upgrading Chart
+
+```console
+helm upgrade [RELEASE_NAME] [CHART] --install
+```
+
+_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
+
+### Upgrading With Zero Downtime in Production
+
+By default the ingress-nginx controller has service interruptions whenever its pods are restarted or redeployed. In order to fix that, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8).
+
+### Migrating from stable/nginx-ingress
+
+There are two main ways to migrate a release from `stable/nginx-ingress` to `ingress-nginx/ingress-nginx` chart:
+
+1. For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one
+1. For critical services in production that require zero-downtime, you will want to:
+ 1. [Install](#install-chart) a second Ingress controller
+ 1. Redirect your DNS traffic from the old controller to the new controller
+ 1. Log traffic from both controllers during this changeover
+ 1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it
+ 1. For details on all of these steps see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production)
+
+Note that there are some different and upgraded configurations between the two charts, described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check current differences by running [helm configuration](#configuration) commands on both charts.
+
+## Configuration
+
+See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands:
+
+```console
+helm show values ingress-nginx/ingress-nginx
+```
+
+### PodDisruptionBudget
+
+Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one,
+else it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info.
+
+### Prometheus Metrics
+
+The Nginx ingress controller can export Prometheus metrics, by setting `controller.metrics.enabled` to `true`.
+
+You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`.
+Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled`. And set `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`. "release=prometheus" should match the label configured in the prometheus servicemonitor ( see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`)
+
+### ingress-nginx nginx\_status page/stats server
+
+Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller:
+
+- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed
+- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost.
+ You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230) to re-enable the http server
+
+### ExternalDNS Service Configuration
+
+Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service:
+
+```yaml
+controller:
+ service:
+ annotations:
+ external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com.
+```
+
+### AWS L7 ELB with SSL Termination
+
+Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/main/deploy/aws/l7/service-l7.yaml):
+
+```yaml
+controller:
+ service:
+ targetPorts:
+ http: http
+ https: http
+ annotations:
+ service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX
+ service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http"
+ service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https"
+ service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600'
+```
+
+### AWS route53-mapper
+
+To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/tree/master/addons/route53-mapper), add the `domainName` annotation and `dns` label:
+
+```yaml
+controller:
+ service:
+ labels:
+ dns: "route53"
+ annotations:
+ domainName: "kubernetes-example.com"
+```
+
+### Additional Internal Load Balancer
+
+This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application.
+
+By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL.
+
+You'll need to set both the following values:
+
+`controller.service.internal.enabled`
+`controller.service.internal.annotations`
+
+If one of them is missing, the internal load balancer will not be deployed. For example, if you set `controller.service.internal.enabled=true` but provide no annotations, no action will be taken.
+
+`controller.service.internal.annotations` varies with the cloud service you're using.
+
+Example for AWS:
+
+```yaml
+controller:
+ service:
+ internal:
+ enabled: true
+ annotations:
+ # Create internal ELB
+ service.beta.kubernetes.io/aws-load-balancer-internal: "true"
+ # Any other annotation can be declared here.
+```
+
+Example for GCE:
+
+```yaml
+controller:
+ service:
+ internal:
+ enabled: true
+ annotations:
+      # Create internal LB. More information: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing
+ # For GKE versions 1.17 and later
+ networking.gke.io/load-balancer-type: "Internal"
+ # For earlier versions
+ # cloud.google.com/load-balancer-type: "Internal"
+
+ # Any other annotation can be declared here.
+```
+
+Example for Azure:
+
+```yaml
+controller:
+ service:
+ annotations:
+ # Create internal LB
+ service.beta.kubernetes.io/azure-load-balancer-internal: "true"
+ # Any other annotation can be declared here.
+```
+
+Example for Oracle Cloud Infrastructure:
+
+```yaml
+controller:
+ service:
+ annotations:
+ # Create internal LB
+ service.beta.kubernetes.io/oci-load-balancer-internal: "true"
+ # Any other annotation can be declared here.
+```
+
+A use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one ingress kubernetes object.
+
+Optionally you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`.
+
+### Ingress Admission Webhooks
+
+With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster.
+**This feature is enabled by default since 0.31.0.**
+
+nginx-ingress-controller 0.25.* works only with Kubernetes 1.14+; 0.26 fixes [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521)
+
+### Helm Error When Upgrading: spec.clusterIP: Invalid value: ""
+
+If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this:
+
+```console
+Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable
+```
+
+Detail of how and why are in [this issue](https://github.com/helm/charts/pull/13646) but to resolve this you can set `xxxx.service.omitClusterIP` to `true` where `xxxx` is the service referenced in the error.
+
+As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered.
diff --git a/k8s/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml b/k8s/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml
new file mode 100644
index 0000000..b28a232
--- /dev/null
+++ b/k8s/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml
@@ -0,0 +1,7 @@
+controller:
+ watchIngressWithoutClass: true
+ ingressClassResource:
+ name: custom-nginx
+ enabled: true
+ default: true
+ controllerValue: "k8s.io/custom-nginx"
diff --git a/k8s/ingress-nginx/ci/daemonset-customconfig-values.yaml b/k8s/ingress-nginx/ci/daemonset-customconfig-values.yaml
new file mode 100644
index 0000000..4393a5b
--- /dev/null
+++ b/k8s/ingress-nginx/ci/daemonset-customconfig-values.yaml
@@ -0,0 +1,14 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ kind: DaemonSet
+ allowSnippetAnnotations: false
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
+
+ config:
+ use-proxy-protocol: "true"
diff --git a/k8s/ingress-nginx/ci/daemonset-customnodeport-values.yaml b/k8s/ingress-nginx/ci/daemonset-customnodeport-values.yaml
new file mode 100644
index 0000000..1d94be2
--- /dev/null
+++ b/k8s/ingress-nginx/ci/daemonset-customnodeport-values.yaml
@@ -0,0 +1,22 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+
+ service:
+ type: NodePort
+ nodePorts:
+ tcp:
+ 9000: 30090
+ udp:
+ 9001: 30091
+
+tcp:
+ 9000: "default/test:8080"
+
+udp:
+ 9001: "default/test:8080"
diff --git a/k8s/ingress-nginx/ci/daemonset-headers-values.yaml b/k8s/ingress-nginx/ci/daemonset-headers-values.yaml
new file mode 100644
index 0000000..ab7d47b
--- /dev/null
+++ b/k8s/ingress-nginx/ci/daemonset-headers-values.yaml
@@ -0,0 +1,14 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ addHeaders:
+ X-Frame-Options: deny
+ proxySetHeaders:
+ X-Forwarded-Proto: https
+ service:
+ type: ClusterIP
diff --git a/k8s/ingress-nginx/ci/daemonset-internal-lb-values.yaml b/k8s/ingress-nginx/ci/daemonset-internal-lb-values.yaml
new file mode 100644
index 0000000..0a200a7
--- /dev/null
+++ b/k8s/ingress-nginx/ci/daemonset-internal-lb-values.yaml
@@ -0,0 +1,14 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
+ internal:
+ enabled: true
+ annotations:
+ service.beta.kubernetes.io/aws-load-balancer-internal: "true"
diff --git a/k8s/ingress-nginx/ci/daemonset-nodeport-values.yaml b/k8s/ingress-nginx/ci/daemonset-nodeport-values.yaml
new file mode 100644
index 0000000..3b7aa2f
--- /dev/null
+++ b/k8s/ingress-nginx/ci/daemonset-nodeport-values.yaml
@@ -0,0 +1,10 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: NodePort
diff --git a/k8s/ingress-nginx/ci/daemonset-podannotations-values.yaml b/k8s/ingress-nginx/ci/daemonset-podannotations-values.yaml
new file mode 100644
index 0000000..0b55306
--- /dev/null
+++ b/k8s/ingress-nginx/ci/daemonset-podannotations-values.yaml
@@ -0,0 +1,17 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ metrics:
+ enabled: true
+ service:
+ type: ClusterIP
+ podAnnotations:
+ prometheus.io/path: /metrics
+ prometheus.io/port: "10254"
+ prometheus.io/scheme: http
+ prometheus.io/scrape: "true"
diff --git a/k8s/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml b/k8s/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml
new file mode 100644
index 0000000..acd86a7
--- /dev/null
+++ b/k8s/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml
@@ -0,0 +1,20 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
+ tcp:
+ configMapNamespace: default
+ udp:
+ configMapNamespace: default
+
+tcp:
+ 9000: "default/test:8080"
+
+udp:
+ 9001: "default/test:8080"
diff --git a/k8s/ingress-nginx/ci/daemonset-tcp-udp-values.yaml b/k8s/ingress-nginx/ci/daemonset-tcp-udp-values.yaml
new file mode 100644
index 0000000..25ee64d
--- /dev/null
+++ b/k8s/ingress-nginx/ci/daemonset-tcp-udp-values.yaml
@@ -0,0 +1,16 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
+
+tcp:
+ 9000: "default/test:8080"
+
+udp:
+ 9001: "default/test:8080"
diff --git a/k8s/ingress-nginx/ci/daemonset-tcp-values.yaml b/k8s/ingress-nginx/ci/daemonset-tcp-values.yaml
new file mode 100644
index 0000000..380c8b4
--- /dev/null
+++ b/k8s/ingress-nginx/ci/daemonset-tcp-values.yaml
@@ -0,0 +1,14 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
+
+tcp:
+ 9000: "default/test:8080"
+ 9001: "default/test:8080"
diff --git a/k8s/ingress-nginx/ci/deamonset-default-values.yaml b/k8s/ingress-nginx/ci/deamonset-default-values.yaml
new file mode 100644
index 0000000..82fa23e
--- /dev/null
+++ b/k8s/ingress-nginx/ci/deamonset-default-values.yaml
@@ -0,0 +1,10 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
diff --git a/k8s/ingress-nginx/ci/deamonset-metrics-values.yaml b/k8s/ingress-nginx/ci/deamonset-metrics-values.yaml
new file mode 100644
index 0000000..cb3cb54
--- /dev/null
+++ b/k8s/ingress-nginx/ci/deamonset-metrics-values.yaml
@@ -0,0 +1,12 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ metrics:
+ enabled: true
+ service:
+ type: ClusterIP
diff --git a/k8s/ingress-nginx/ci/deamonset-psp-values.yaml b/k8s/ingress-nginx/ci/deamonset-psp-values.yaml
new file mode 100644
index 0000000..8026a63
--- /dev/null
+++ b/k8s/ingress-nginx/ci/deamonset-psp-values.yaml
@@ -0,0 +1,13 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
+
+podSecurityPolicy:
+ enabled: true
diff --git a/k8s/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml b/k8s/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml
new file mode 100644
index 0000000..fccdb13
--- /dev/null
+++ b/k8s/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml
@@ -0,0 +1,13 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: true
+ service:
+ type: ClusterIP
+
+podSecurityPolicy:
+ enabled: true
diff --git a/k8s/ingress-nginx/ci/deamonset-webhook-values.yaml b/k8s/ingress-nginx/ci/deamonset-webhook-values.yaml
new file mode 100644
index 0000000..54d364d
--- /dev/null
+++ b/k8s/ingress-nginx/ci/deamonset-webhook-values.yaml
@@ -0,0 +1,10 @@
+controller:
+ kind: DaemonSet
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: true
+ service:
+ type: ClusterIP
diff --git a/k8s/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml b/k8s/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml
new file mode 100644
index 0000000..dca3f35
--- /dev/null
+++ b/k8s/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml
@@ -0,0 +1,14 @@
+controller:
+ autoscaling:
+ enabled: true
+ behavior:
+ scaleDown:
+ stabilizationWindowSeconds: 300
+ policies:
+ - type: Pods
+ value: 1
+ periodSeconds: 180
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
diff --git a/k8s/ingress-nginx/ci/deployment-autoscaling-values.yaml b/k8s/ingress-nginx/ci/deployment-autoscaling-values.yaml
new file mode 100644
index 0000000..b8b3ac6
--- /dev/null
+++ b/k8s/ingress-nginx/ci/deployment-autoscaling-values.yaml
@@ -0,0 +1,11 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ autoscaling:
+ enabled: true
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
diff --git a/k8s/ingress-nginx/ci/deployment-customconfig-values.yaml b/k8s/ingress-nginx/ci/deployment-customconfig-values.yaml
new file mode 100644
index 0000000..1749418
--- /dev/null
+++ b/k8s/ingress-nginx/ci/deployment-customconfig-values.yaml
@@ -0,0 +1,12 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ config:
+ use-proxy-protocol: "true"
+ allowSnippetAnnotations: false
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
diff --git a/k8s/ingress-nginx/ci/deployment-customnodeport-values.yaml b/k8s/ingress-nginx/ci/deployment-customnodeport-values.yaml
new file mode 100644
index 0000000..a564eaf
--- /dev/null
+++ b/k8s/ingress-nginx/ci/deployment-customnodeport-values.yaml
@@ -0,0 +1,20 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: NodePort
+ nodePorts:
+ tcp:
+ 9000: 30090
+ udp:
+ 9001: 30091
+
+tcp:
+ 9000: "default/test:8080"
+
+udp:
+ 9001: "default/test:8080"
diff --git a/k8s/ingress-nginx/ci/deployment-default-values.yaml b/k8s/ingress-nginx/ci/deployment-default-values.yaml
new file mode 100644
index 0000000..9f46b4e
--- /dev/null
+++ b/k8s/ingress-nginx/ci/deployment-default-values.yaml
@@ -0,0 +1,8 @@
+# Left blank to test default values
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ service:
+ type: ClusterIP
diff --git a/k8s/ingress-nginx/ci/deployment-headers-values.yaml b/k8s/ingress-nginx/ci/deployment-headers-values.yaml
new file mode 100644
index 0000000..17a11ac
--- /dev/null
+++ b/k8s/ingress-nginx/ci/deployment-headers-values.yaml
@@ -0,0 +1,13 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ addHeaders:
+ X-Frame-Options: deny
+ proxySetHeaders:
+ X-Forwarded-Proto: https
+ service:
+ type: ClusterIP
diff --git a/k8s/ingress-nginx/ci/deployment-internal-lb-values.yaml b/k8s/ingress-nginx/ci/deployment-internal-lb-values.yaml
new file mode 100644
index 0000000..fd8df8d
--- /dev/null
+++ b/k8s/ingress-nginx/ci/deployment-internal-lb-values.yaml
@@ -0,0 +1,13 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
+ internal:
+ enabled: true
+ annotations:
+ service.beta.kubernetes.io/aws-load-balancer-internal: "true"
diff --git a/k8s/ingress-nginx/ci/deployment-metrics-values.yaml b/k8s/ingress-nginx/ci/deployment-metrics-values.yaml
new file mode 100644
index 0000000..9209ad5
--- /dev/null
+++ b/k8s/ingress-nginx/ci/deployment-metrics-values.yaml
@@ -0,0 +1,11 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ metrics:
+ enabled: true
+ service:
+ type: ClusterIP
diff --git a/k8s/ingress-nginx/ci/deployment-nodeport-values.yaml b/k8s/ingress-nginx/ci/deployment-nodeport-values.yaml
new file mode 100644
index 0000000..cd9b323
--- /dev/null
+++ b/k8s/ingress-nginx/ci/deployment-nodeport-values.yaml
@@ -0,0 +1,9 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: NodePort
diff --git a/k8s/ingress-nginx/ci/deployment-podannotations-values.yaml b/k8s/ingress-nginx/ci/deployment-podannotations-values.yaml
new file mode 100644
index 0000000..b48d93c
--- /dev/null
+++ b/k8s/ingress-nginx/ci/deployment-podannotations-values.yaml
@@ -0,0 +1,16 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ metrics:
+ enabled: true
+ service:
+ type: ClusterIP
+ podAnnotations:
+ prometheus.io/path: /metrics
+ prometheus.io/port: "10254"
+ prometheus.io/scheme: http
+ prometheus.io/scrape: "true"
diff --git a/k8s/ingress-nginx/ci/deployment-psp-values.yaml b/k8s/ingress-nginx/ci/deployment-psp-values.yaml
new file mode 100644
index 0000000..2f332a7
--- /dev/null
+++ b/k8s/ingress-nginx/ci/deployment-psp-values.yaml
@@ -0,0 +1,10 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ service:
+ type: ClusterIP
+
+podSecurityPolicy:
+ enabled: true
diff --git a/k8s/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml b/k8s/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml
new file mode 100644
index 0000000..c51a4e9
--- /dev/null
+++ b/k8s/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml
@@ -0,0 +1,19 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
+ tcp:
+ configMapNamespace: default
+ udp:
+ configMapNamespace: default
+
+tcp:
+ 9000: "default/test:8080"
+
+udp:
+ 9001: "default/test:8080"
diff --git a/k8s/ingress-nginx/ci/deployment-tcp-udp-values.yaml b/k8s/ingress-nginx/ci/deployment-tcp-udp-values.yaml
new file mode 100644
index 0000000..5b45b69
--- /dev/null
+++ b/k8s/ingress-nginx/ci/deployment-tcp-udp-values.yaml
@@ -0,0 +1,15 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: false
+ service:
+ type: ClusterIP
+
+tcp:
+ 9000: "default/test:8080"
+
+udp:
+ 9001: "default/test:8080"
diff --git a/k8s/ingress-nginx/ci/deployment-tcp-values.yaml b/k8s/ingress-nginx/ci/deployment-tcp-values.yaml
new file mode 100644
index 0000000..ac0b6e6
--- /dev/null
+++ b/k8s/ingress-nginx/ci/deployment-tcp-values.yaml
@@ -0,0 +1,11 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ service:
+ type: ClusterIP
+
+tcp:
+ 9000: "default/test:8080"
+ 9001: "default/test:8080"
diff --git a/k8s/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml b/k8s/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml
new file mode 100644
index 0000000..6195bb3
--- /dev/null
+++ b/k8s/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml
@@ -0,0 +1,12 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: true
+ service:
+ type: ClusterIP
+
+podSecurityPolicy:
+ enabled: true
diff --git a/k8s/ingress-nginx/ci/deployment-webhook-resources-values.yaml b/k8s/ingress-nginx/ci/deployment-webhook-resources-values.yaml
new file mode 100644
index 0000000..49ebbb0
--- /dev/null
+++ b/k8s/ingress-nginx/ci/deployment-webhook-resources-values.yaml
@@ -0,0 +1,23 @@
+controller:
+ service:
+ type: ClusterIP
+ admissionWebhooks:
+ enabled: true
+ createSecretJob:
+ resources:
+ limits:
+ cpu: 10m
+ memory: 20Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ patchWebhookJob:
+ resources:
+ limits:
+ cpu: 10m
+ memory: 20Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ patch:
+ enabled: true
diff --git a/k8s/ingress-nginx/ci/deployment-webhook-values.yaml b/k8s/ingress-nginx/ci/deployment-webhook-values.yaml
new file mode 100644
index 0000000..76669a5
--- /dev/null
+++ b/k8s/ingress-nginx/ci/deployment-webhook-values.yaml
@@ -0,0 +1,9 @@
+controller:
+ image:
+ repository: ingress-controller/controller
+ tag: 1.0.0-dev
+ digest: null
+ admissionWebhooks:
+ enabled: true
+ service:
+ type: ClusterIP
diff --git a/k8s/ingress-nginx/templates/NOTES.txt b/k8s/ingress-nginx/templates/NOTES.txt
new file mode 100644
index 0000000..03ece9c
--- /dev/null
+++ b/k8s/ingress-nginx/templates/NOTES.txt
@@ -0,0 +1,71 @@
+The ingress-nginx controller has been installed.
+
+{{- if contains "NodePort" .Values.controller.service.type }}
+Get the application URL by running these commands:
+
+{{- if (not (empty .Values.controller.service.nodePorts.http)) }}
+ export HTTP_NODE_PORT={{ .Values.controller.service.nodePorts.http }}
+{{- else }}
+ export HTTP_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[0].nodePort}" {{ include "ingress-nginx.controller.fullname" . }})
+{{- end }}
+{{- if (not (empty .Values.controller.service.nodePorts.https)) }}
+ export HTTPS_NODE_PORT={{ .Values.controller.service.nodePorts.https }}
+{{- else }}
+ export HTTPS_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[1].nodePort}" {{ include "ingress-nginx.controller.fullname" . }})
+{{- end }}
+ export NODE_IP=$(kubectl --namespace {{ .Release.Namespace }} get nodes -o jsonpath="{.items[0].status.addresses[1].address}")
+
+ echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP."
+ echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS."
+{{- else if contains "LoadBalancer" .Values.controller.service.type }}
+It may take a few minutes for the LoadBalancer IP to be available.
+You can watch the status by running 'kubectl --namespace {{ .Release.Namespace }} get services -o wide -w {{ include "ingress-nginx.controller.fullname" . }}'
+{{- else if contains "ClusterIP" .Values.controller.service.type }}
+Get the application URL by running these commands:
+ export POD_NAME=$(kubectl --namespace {{ .Release.Namespace }} get pods -o jsonpath="{.items[0].metadata.name}" -l "app={{ template "ingress-nginx.name" . }},component={{ .Values.controller.name }},release={{ .Release.Name }}")
+ kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
+ echo "Visit http://127.0.0.1:8080 to access your application."
+{{- end }}
+
+An example Ingress that makes use of the controller:
+
+ apiVersion: networking.k8s.io/v1
+ kind: Ingress
+ metadata:
+ annotations:
+ kubernetes.io/ingress.class: {{ .Values.controller.ingressClassResource.name }}
+ name: example
+ namespace: foo
+ spec:
+ rules:
+ - host: www.example.com
+ http:
+ paths:
+ - backend:
+ serviceName: exampleService
+ servicePort: 80
+ path: /
+ # This section is only required if TLS is to be enabled for the Ingress
+ tls:
+ - hosts:
+ - www.example.com
+ secretName: example-tls
+
+If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:
+
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: example-tls
+ namespace: foo
+ data:
+ tls.crt:
+ tls.key:
+ type: kubernetes.io/tls
+
+{{- if .Values.controller.headers }}
+#################################################################################
+###### WARNING: `controller.headers` has been deprecated! #####
+###### It has been renamed to `controller.proxySetHeaders`. #####
+#################################################################################
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/_helpers.tpl b/k8s/ingress-nginx/templates/_helpers.tpl
new file mode 100644
index 0000000..8b1fd09
--- /dev/null
+++ b/k8s/ingress-nginx/templates/_helpers.tpl
@@ -0,0 +1,134 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "ingress-nginx.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "ingress-nginx.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If the release name already contains the chart name it is used as-is instead of being prefixed again.
+*/}}
+{{- define "ingress-nginx.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified controller name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "ingress-nginx.controller.fullname" -}}
+{{- printf "%s-%s" (include "ingress-nginx.fullname" .) .Values.controller.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Construct the path for the publish-service.
+
+By convention this will simply use the <namespace>/<controller fullname> to match the name of the
+service generated.
+
+Users can provide an override for an explicit service they want bound via `.Values.controller.publishService.pathOverride`
+
+*/}}
+{{- define "ingress-nginx.controller.publishServicePath" -}}
+{{- $defServiceName := printf "%s/%s" "$(POD_NAMESPACE)" (include "ingress-nginx.controller.fullname" .) -}}
+{{- $servicePath := default $defServiceName .Values.controller.publishService.pathOverride }}
+{{- print $servicePath | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified default backend name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "ingress-nginx.defaultBackend.fullname" -}}
+{{- printf "%s-%s" (include "ingress-nginx.fullname" .) .Values.defaultBackend.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "ingress-nginx.labels" -}}
+helm.sh/chart: {{ include "ingress-nginx.chart" . }}
+{{ include "ingress-nginx.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "ingress-nginx.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "ingress-nginx.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Create the name of the controller service account to use
+*/}}
+{{- define "ingress-nginx.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+    {{ default (include "ingress-nginx.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the backend service account to use - only used when podsecuritypolicy is also enabled
+*/}}
+{{- define "ingress-nginx.defaultBackend.serviceAccountName" -}}
+{{- if .Values.defaultBackend.serviceAccount.create -}}
+    {{ default (printf "%s-backend" (include "ingress-nginx.fullname" .)) .Values.defaultBackend.serviceAccount.name }}
+{{- else -}}
+    {{ default "default-backend" .Values.defaultBackend.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiGroup for PodSecurityPolicy.
+("policy" on Kubernetes >= 1.14, "extensions" before that.)
+*/}}
+{{- define "podSecurityPolicy.apiGroup" -}}
+{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "policy" -}}
+{{- else -}}
+{{- print "extensions" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Check the ingress controller version tag is at most three versions behind the last release
+(fails template rendering outright for tags older than 0.27.0).
+*/}}
+{{- define "isControllerTagValid" -}}
+{{- if not (semverCompare ">=0.27.0-0" .Values.controller.image.tag) -}}
+{{- fail "Controller container image tag should be 0.27.0 or higher" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+IngressClass parameters.
+*/}}
+{{- define "ingressClass.parameters" -}}
+  {{- if .Values.controller.ingressClassResource.parameters -}}
+  parameters:
+{{ toYaml .Values.controller.ingressClassResource.parameters | indent 4}}
+  {{ end }}
+{{- end -}}
diff --git a/k8s/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml b/k8s/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
new file mode 100644
index 0000000..fd762f9
--- /dev/null
+++ b/k8s/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
@@ -0,0 +1,31 @@
+{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}}
+{{/*
+ClusterRole for the admission-webhook patch hook jobs: they must read and
+update the ValidatingWebhookConfiguration to inject the generated certificate.
+*/}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: {{ include "ingress-nginx.fullname" . }}-admission
+  annotations:
+    "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
+    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: admission-webhook
+rules:
+  - apiGroups:
+      - admissionregistration.k8s.io
+    resources:
+      - validatingwebhookconfigurations
+    verbs:
+      - get
+      - update
+{{- if .Values.podSecurityPolicy.enabled }}
+  {{- /* Use the version-appropriate PSP API group ("policy" on k8s >= 1.14) instead of hard-coding "extensions". */}}
+  - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}]
+    resources: ['podsecuritypolicies']
+    verbs: ['use']
+    resourceNames:
+    {{- with .Values.controller.admissionWebhooks.existingPsp }}
+      - {{ . }}
+    {{- else }}
+      - {{ include "ingress-nginx.fullname" . }}-admission
+    {{- end }}
+{{- end }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml b/k8s/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
new file mode 100644
index 0000000..4990fb1
--- /dev/null
+++ b/k8s/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
@@ -0,0 +1,20 @@
+{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}}
+{{/*
+Binds the admission ClusterRole to the patch jobs' ServiceAccount.
+Created as a Helm hook alongside the jobs and deleted once they succeed.
+*/}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ include "ingress-nginx.fullname" . }}-admission
+  annotations:
+    "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
+    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: admission-webhook
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ include "ingress-nginx.fullname" . }}-admission
+subjects:
+  - kind: ServiceAccount
+    name: {{ include "ingress-nginx.fullname" . }}-admission
+    namespace: {{ .Release.Namespace | quote }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml b/k8s/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
new file mode 100644
index 0000000..1f58bdc
--- /dev/null
+++ b/k8s/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
@@ -0,0 +1,64 @@
+{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}}
+{{/*
+Pre-install/pre-upgrade hook Job: runs the webhook-certgen "create" command to
+generate a certificate for the admission service and store it in the Secret
+named <fullname>-admission, later consumed when patching the webhook config.
+*/}}
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: {{ include "ingress-nginx.fullname" . }}-admission-create
+  namespace: {{ .Release.Namespace }}
+  annotations:
+    "helm.sh/hook": pre-install,pre-upgrade
+    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: admission-webhook
+spec:
+{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }}
+  # Alpha feature since k8s 1.12
+  ttlSecondsAfterFinished: 0
+{{- end }}
+  template:
+    metadata:
+      name: {{ include "ingress-nginx.fullname" . }}-admission-create
+      {{- if .Values.controller.admissionWebhooks.patch.podAnnotations }}
+      annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }}
+      {{- end }}
+      labels:
+        {{- include "ingress-nginx.labels" . | nindent 8 }}
+        app.kubernetes.io/component: admission-webhook
+    spec:
+      {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }}
+      priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }}
+      {{- end }}
+      {{- if .Values.imagePullSecrets }}
+      imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }}
+      {{- end }}
+      containers:
+        - name: create
+          {{- with .Values.controller.admissionWebhooks.patch.image }}
+          # Image is either a full "repository" or registry/image; an optional digest is appended after the tag.
+          image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}"
+          {{- end }}
+          imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }}
+          args:
+            - create
+            - --host={{ include "ingress-nginx.controller.fullname" . }}-admission,{{ include "ingress-nginx.controller.fullname" . }}-admission.$(POD_NAMESPACE).svc
+            - --namespace=$(POD_NAMESPACE)
+            - --secret-name={{ include "ingress-nginx.fullname" . }}-admission
+          env:
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+          {{- if .Values.controller.admissionWebhooks.createSecretJob.resources }}
+          resources: {{ toYaml .Values.controller.admissionWebhooks.createSecretJob.resources | nindent 12 }}
+          {{- end }}
+      restartPolicy: OnFailure
+      serviceAccountName: {{ include "ingress-nginx.fullname" . }}-admission
+      {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }}
+      nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }}
+      {{- end }}
+      {{- if .Values.controller.admissionWebhooks.patch.tolerations }}
+      tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }}
+      {{- end }}
+      securityContext:
+        runAsNonRoot: true
+        runAsUser: {{ .Values.controller.admissionWebhooks.patch.runAsUser }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml b/k8s/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
new file mode 100644
index 0000000..6d01ad2
--- /dev/null
+++ b/k8s/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
@@ -0,0 +1,66 @@
+{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}}
+{{/*
+Post-install/post-upgrade hook Job: runs the webhook-certgen "patch" command to
+update the ValidatingWebhookConfiguration using the Secret produced by the
+companion "create" Job (mutating webhooks are explicitly not patched).
+*/}}
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: {{ include "ingress-nginx.fullname" . }}-admission-patch
+  namespace: {{ .Release.Namespace }}
+  annotations:
+    "helm.sh/hook": post-install,post-upgrade
+    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: admission-webhook
+spec:
+{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }}
+  # Alpha feature since k8s 1.12
+  ttlSecondsAfterFinished: 0
+{{- end }}
+  template:
+    metadata:
+      name: {{ include "ingress-nginx.fullname" . }}-admission-patch
+      {{- if .Values.controller.admissionWebhooks.patch.podAnnotations }}
+      annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }}
+      {{- end }}
+      labels:
+        {{- include "ingress-nginx.labels" . | nindent 8 }}
+        app.kubernetes.io/component: admission-webhook
+    spec:
+      {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }}
+      priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }}
+      {{- end }}
+      {{- if .Values.imagePullSecrets }}
+      imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }}
+      {{- end }}
+      containers:
+        - name: patch
+          {{- with .Values.controller.admissionWebhooks.patch.image }}
+          # Image is either a full "repository" or registry/image; an optional digest is appended after the tag.
+          image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}"
+          {{- end }}
+          imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }}
+          args:
+            - patch
+            - --webhook-name={{ include "ingress-nginx.fullname" . }}-admission
+            - --namespace=$(POD_NAMESPACE)
+            - --patch-mutating=false
+            - --secret-name={{ include "ingress-nginx.fullname" . }}-admission
+            - --patch-failure-policy={{ .Values.controller.admissionWebhooks.failurePolicy }}
+          env:
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+          {{- if .Values.controller.admissionWebhooks.patchWebhookJob.resources }}
+          resources: {{ toYaml .Values.controller.admissionWebhooks.patchWebhookJob.resources | nindent 12 }}
+          {{- end }}
+      restartPolicy: OnFailure
+      serviceAccountName: {{ include "ingress-nginx.fullname" . }}-admission
+      {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }}
+      nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }}
+      {{- end }}
+      {{- if .Values.controller.admissionWebhooks.patch.tolerations }}
+      tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }}
+      {{- end }}
+      securityContext:
+        runAsNonRoot: true
+        runAsUser: {{ .Values.controller.admissionWebhooks.patch.runAsUser }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml b/k8s/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml
new file mode 100644
index 0000000..d2c7de6
--- /dev/null
+++ b/k8s/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml
@@ -0,0 +1,36 @@
+{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled .Values.podSecurityPolicy.enabled (empty .Values.controller.admissionWebhooks.existingPsp) -}}
+{{/*
+Restrictive PodSecurityPolicy for the admission patch jobs.
+Skipped when an existing PSP name is supplied via
+controller.admissionWebhooks.existingPsp (the ClusterRole references it instead).
+*/}}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: {{ include "ingress-nginx.fullname" . }}-admission
+  annotations:
+    "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
+    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: admission-webhook
+spec:
+  allowPrivilegeEscalation: false
+  fsGroup:
+    ranges:
+      - max: 65535
+        min: 1
+    rule: MustRunAs
+  requiredDropCapabilities:
+    - ALL
+  runAsUser:
+    rule: MustRunAsNonRoot
+  seLinux:
+    rule: RunAsAny
+  supplementalGroups:
+    ranges:
+      - max: 65535
+        min: 1
+    rule: MustRunAs
+  volumes:
+    - configMap
+    - emptyDir
+    - projected
+    - secret
+    - downwardAPI
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml b/k8s/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
new file mode 100644
index 0000000..9b083ee
--- /dev/null
+++ b/k8s/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
@@ -0,0 +1,21 @@
+{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}}
+{{/*
+Namespaced Role for the admission patch jobs: lets them read and create the
+Secret that holds the generated webhook certificate.
+*/}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: {{ include "ingress-nginx.fullname" . }}-admission
+  namespace: {{ .Release.Namespace }}
+  annotations:
+    "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
+    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: admission-webhook
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - secrets
+    verbs:
+      - get
+      - create
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml b/k8s/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
new file mode 100644
index 0000000..edda07f
--- /dev/null
+++ b/k8s/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
@@ -0,0 +1,21 @@
+{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}}
+{{/*
+Binds the admission Role (Secret get/create) to the patch jobs' ServiceAccount.
+*/}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ include "ingress-nginx.fullname" . }}-admission
+  namespace: {{ .Release.Namespace }}
+  annotations:
+    "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
+    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: admission-webhook
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ include "ingress-nginx.fullname" . }}-admission
+subjects:
+  - kind: ServiceAccount
+    name: {{ include "ingress-nginx.fullname" . }}-admission
+    namespace: {{ .Release.Namespace | quote }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml b/k8s/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
new file mode 100644
index 0000000..1ff0f7f
--- /dev/null
+++ b/k8s/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
@@ -0,0 +1,13 @@
+{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}}
+{{/*
+ServiceAccount shared by both admission patch hook Jobs (create and patch).
+*/}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "ingress-nginx.fullname" . }}-admission
+  namespace: {{ .Release.Namespace }}
+  annotations:
+    "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
+    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: admission-webhook
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml b/k8s/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
new file mode 100644
index 0000000..712f74f
--- /dev/null
+++ b/k8s/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
@@ -0,0 +1,45 @@
+{{- if .Values.controller.admissionWebhooks.enabled -}}
+{{/*
+ValidatingWebhookConfiguration that routes Ingress CREATE/UPDATE requests to
+the controller's admission service at /networking/v1/ingresses.
+NOTE(review): clientConfig carries no caBundle here — presumably injected by
+the post-install patch Job; confirm against the patch job's behavior.
+*/}}
+# before changing this value, check the required kubernetes version
+# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+  {{- if .Values.controller.admissionWebhooks.annotations }}
+  annotations: {{ toYaml .Values.controller.admissionWebhooks.annotations | nindent 4 }}
+  {{- end }}
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: admission-webhook
+  name: {{ include "ingress-nginx.fullname" . }}-admission
+webhooks:
+  - name: validate.nginx.ingress.kubernetes.io
+    matchPolicy: Equivalent
+    rules:
+      - apiGroups:
+          - networking.k8s.io
+        apiVersions:
+          - v1
+        operations:
+          - CREATE
+          - UPDATE
+        resources:
+          - ingresses
+    failurePolicy: {{ .Values.controller.admissionWebhooks.failurePolicy | default "Fail" }}
+    sideEffects: None
+    admissionReviewVersions:
+      - v1
+    clientConfig:
+      service:
+        namespace: {{ .Release.Namespace | quote }}
+        name: {{ include "ingress-nginx.controller.fullname" . }}-admission
+        path: /networking/v1/ingresses
+    {{- if .Values.controller.admissionWebhooks.timeoutSeconds }}
+    timeoutSeconds: {{ .Values.controller.admissionWebhooks.timeoutSeconds }}
+    {{- end }}
+    {{- if .Values.controller.admissionWebhooks.namespaceSelector }}
+    namespaceSelector: {{ toYaml .Values.controller.admissionWebhooks.namespaceSelector | nindent 6 }}
+    {{- end }}
+    {{- if .Values.controller.admissionWebhooks.objectSelector }}
+    objectSelector: {{ toYaml .Values.controller.admissionWebhooks.objectSelector | nindent 6 }}
+    {{- end }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/clusterrole.yaml b/k8s/ingress-nginx/templates/clusterrole.yaml
new file mode 100644
index 0000000..c1f901d
--- /dev/null
+++ b/k8s/ingress-nginx/templates/clusterrole.yaml
@@ -0,0 +1,81 @@
+{{- if .Values.rbac.create }}
+{{/*
+Cluster-wide RBAC for the controller. The ClusterRole is only rendered when
+rbac.scope is false; rbac.scope must agree with controller.scope.enabled or
+rendering fails via the `required` trick below (indexing an empty dict yields
+nil, which `required` rejects with the given message).
+*/}}
+
+{{- if and .Values.rbac.scope (not .Values.controller.scope.enabled) -}}
+  {{ required "Invalid configuration: 'rbac.scope' should be equal to 'controller.scope.enabled' (true/false)." (index (dict) ".") }}
+{{- end }}
+
+{{- if not .Values.rbac.scope -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+  name: {{ include "ingress-nginx.fullname" . }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+      - endpoints
+      - nodes
+      - pods
+      - secrets
+    verbs:
+      - list
+      - watch
+{{- if and .Values.controller.scope.enabled .Values.controller.scope.namespace }}
+  - apiGroups:
+      - ""
+    resources:
+      - namespaces
+    resourceNames:
+      - "{{ .Values.controller.scope.namespace }}"
+    verbs:
+      - get
+{{- end }}
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+    verbs:
+      - get
+  - apiGroups:
+      - ""
+    resources:
+      - services
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - ingresses
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - create
+      - patch
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - ingresses/status
+    verbs:
+      - update
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - ingressclasses
+    verbs:
+      - get
+      - list
+      - watch
+{{- end }}
+
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/clusterrolebinding.yaml b/k8s/ingress-nginx/templates/clusterrolebinding.yaml
new file mode 100644
index 0000000..81be52b
--- /dev/null
+++ b/k8s/ingress-nginx/templates/clusterrolebinding.yaml
@@ -0,0 +1,16 @@
+{{- if and .Values.rbac.create (not .Values.rbac.scope) -}}
+{{/*
+Binds the controller ClusterRole to the controller ServiceAccount.
+Skipped when RBAC is namespace-scoped (rbac.scope = true).
+*/}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+  name: {{ include "ingress-nginx.fullname" . }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ include "ingress-nginx.fullname" . }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ template "ingress-nginx.serviceAccountName" . }}
+    namespace: {{ .Release.Namespace | quote }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/controller-configmap-addheaders.yaml b/k8s/ingress-nginx/templates/controller-configmap-addheaders.yaml
new file mode 100644
index 0000000..e0b7a0f
--- /dev/null
+++ b/k8s/ingress-nginx/templates/controller-configmap-addheaders.yaml
@@ -0,0 +1,11 @@
+{{- if .Values.controller.addHeaders -}}
+{{/*
+ConfigMap holding custom response headers; referenced from the main controller
+ConfigMap via its add-headers key.
+*/}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: controller
+  name: {{ include "ingress-nginx.fullname" . }}-custom-add-headers
+  namespace: {{ .Release.Namespace }}
+data: {{ toYaml .Values.controller.addHeaders | nindent 2 }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/controller-configmap-proxyheaders.yaml b/k8s/ingress-nginx/templates/controller-configmap-proxyheaders.yaml
new file mode 100644
index 0000000..91f22f0
--- /dev/null
+++ b/k8s/ingress-nginx/templates/controller-configmap-proxyheaders.yaml
@@ -0,0 +1,16 @@
+{{- if or .Values.controller.proxySetHeaders .Values.controller.headers -}}
+{{/*
+ConfigMap holding proxy-set-headers entries; referenced from the main
+controller ConfigMap. `controller.headers` is the deprecated alias of
+`controller.proxySetHeaders` and is only consulted when the latter is unset.
+*/}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: controller
+  name: {{ include "ingress-nginx.fullname" . }}-custom-proxy-headers
+  namespace: {{ .Release.Namespace }}
+data:
+{{- if .Values.controller.proxySetHeaders }}
+{{ toYaml .Values.controller.proxySetHeaders | indent 2 }}
+{{- else if .Values.controller.headers }}
+{{ toYaml .Values.controller.headers | indent 2 }}
+{{- end }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/controller-configmap-tcp.yaml b/k8s/ingress-nginx/templates/controller-configmap-tcp.yaml
new file mode 100644
index 0000000..aaf336f
--- /dev/null
+++ b/k8s/ingress-nginx/templates/controller-configmap-tcp.yaml
@@ -0,0 +1,14 @@
+{{- if .Values.tcp -}}
+{{/*
+ConfigMap mapping exposed TCP ports to backend services; consumed through the
+controller's --tcp-services-configmap flag. Entries are run through tpl so
+values may contain template expressions.
+*/}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: controller
+{{- if .Values.controller.tcp.annotations }}
+  annotations: {{ toYaml .Values.controller.tcp.annotations | nindent 4 }}
+{{- end }}
+  name: {{ include "ingress-nginx.fullname" . }}-tcp
+  namespace: {{ .Release.Namespace }}
+data: {{ tpl (toYaml .Values.tcp) . | nindent 2 }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/controller-configmap-udp.yaml b/k8s/ingress-nginx/templates/controller-configmap-udp.yaml
new file mode 100644
index 0000000..7f46791
--- /dev/null
+++ b/k8s/ingress-nginx/templates/controller-configmap-udp.yaml
@@ -0,0 +1,14 @@
+{{- if .Values.udp -}}
+{{/*
+ConfigMap mapping exposed UDP ports to backend services; consumed through the
+controller's --udp-services-configmap flag. Entries are run through tpl so
+values may contain template expressions.
+*/}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: controller
+{{- if .Values.controller.udp.annotations }}
+  annotations: {{ toYaml .Values.controller.udp.annotations | nindent 4 }}
+{{- end }}
+  name: {{ include "ingress-nginx.fullname" . }}-udp
+  namespace: {{ .Release.Namespace }}
+data: {{ tpl (toYaml .Values.udp) . | nindent 2 }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/controller-configmap.yaml b/k8s/ingress-nginx/templates/controller-configmap.yaml
new file mode 100644
index 0000000..6973892
--- /dev/null
+++ b/k8s/ingress-nginx/templates/controller-configmap.yaml
@@ -0,0 +1,26 @@
+{{/*
+Main nginx configuration ConfigMap for the controller (--configmap flag).
+allow-snippet-annotations is always emitted; add-headers / proxy-set-headers /
+ssl-dh-param keys are only emitted when the corresponding values are set, and
+any controller.config entries are appended verbatim (values quoted).
+*/}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: controller
+{{- if .Values.controller.configAnnotations }}
+  annotations: {{ toYaml .Values.controller.configAnnotations | nindent 4 }}
+{{- end }}
+  name: {{ include "ingress-nginx.controller.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+data:
+  allow-snippet-annotations: "{{ .Values.controller.allowSnippetAnnotations }}"
+{{- if .Values.controller.addHeaders }}
+  add-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-add-headers
+{{- end }}
+{{- if or .Values.controller.proxySetHeaders .Values.controller.headers }}
+  proxy-set-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-proxy-headers
+{{- end }}
+{{- if .Values.dhParam }}
+  ssl-dh-param: {{ printf "%s/%s" .Release.Namespace (include "ingress-nginx.controller.fullname" .) }}
+{{- end }}
+{{- range $key, $value := .Values.controller.config }}
+  {{ $key | nindent 2 }}: {{ $value | quote }}
+{{- end }}
+
diff --git a/k8s/ingress-nginx/templates/controller-daemonset.yaml b/k8s/ingress-nginx/templates/controller-daemonset.yaml
new file mode 100644
index 0000000..68291ed
--- /dev/null
+++ b/k8s/ingress-nginx/templates/controller-daemonset.yaml
@@ -0,0 +1,256 @@
+{{- if or (eq .Values.controller.kind "DaemonSet") (eq .Values.controller.kind "Both") -}}
+{{- include "isControllerTagValid" . -}}
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- with .Values.controller.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ name: {{ include "ingress-nginx.controller.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ {{- if .Values.controller.annotations }}
+ annotations: {{ toYaml .Values.controller.annotations | nindent 4 }}
+ {{- end }}
+spec:
+ selector:
+ matchLabels:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 6 }}
+ app.kubernetes.io/component: controller
+ revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
+ {{- if .Values.controller.updateStrategy }}
+ updateStrategy: {{ toYaml .Values.controller.updateStrategy | nindent 4 }}
+ {{- end }}
+ minReadySeconds: {{ .Values.controller.minReadySeconds }}
+ template:
+ metadata:
+ {{- if .Values.controller.podAnnotations }}
+ annotations:
+ {{- range $key, $value := .Values.controller.podAnnotations }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ {{- end }}
+ labels:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 8 }}
+ app.kubernetes.io/component: controller
+ {{- if .Values.controller.podLabels }}
+ {{- toYaml .Values.controller.podLabels | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- if .Values.controller.dnsConfig }}
+ dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.hostname }}
+ hostname: {{ toYaml .Values.controller.hostname | nindent 8 }}
+ {{- end }}
+ dnsPolicy: {{ .Values.controller.dnsPolicy }}
+ {{- if .Values.imagePullSecrets }}
+ imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.priorityClassName }}
+ priorityClassName: {{ .Values.controller.priorityClassName }}
+ {{- end }}
+ {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }}
+ securityContext:
+ {{- end }}
+ {{- if .Values.controller.podSecurityContext }}
+ {{- toYaml .Values.controller.podSecurityContext | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.sysctls }}
+ sysctls:
+ {{- range $sysctl, $value := .Values.controller.sysctls }}
+ - name: {{ $sysctl | quote }}
+ value: {{ $value | quote }}
+ {{- end }}
+ {{- end }}
+ containers:
+ - name: {{ .Values.controller.containerName }}
+ {{- with .Values.controller.image }}
+ image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}"
+ {{- end }}
+ imagePullPolicy: {{ .Values.controller.image.pullPolicy }}
+ {{- if .Values.controller.lifecycle }}
+ lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }}
+ {{- end }}
+ args:
+ - /nginx-ingress-controller
+ {{- if .Values.defaultBackend.enabled }}
+ - --default-backend-service=$(POD_NAMESPACE)/{{ include "ingress-nginx.defaultBackend.fullname" . }}
+ {{- end }}
+ {{- if .Values.controller.publishService.enabled }}
+ - --publish-service={{ template "ingress-nginx.controller.publishServicePath" . }}
+ {{- end }}
+ - --election-id={{ .Values.controller.electionID }}
+ - --controller-class={{ .Values.controller.ingressClassResource.controllerValue }}
+ - --configmap={{ default "$(POD_NAMESPACE)" .Values.controller.configMapNamespace }}/{{ include "ingress-nginx.controller.fullname" . }}
+ {{- if .Values.tcp }}
+ - --tcp-services-configmap={{ default "$(POD_NAMESPACE)" .Values.controller.tcp.configMapNamespace }}/{{ include "ingress-nginx.fullname" . }}-tcp
+ {{- end }}
+ {{- if .Values.udp }}
+ - --udp-services-configmap={{ default "$(POD_NAMESPACE)" .Values.controller.udp.configMapNamespace }}/{{ include "ingress-nginx.fullname" . }}-udp
+ {{- end }}
+ {{- if .Values.controller.scope.enabled }}
+ - --watch-namespace={{ default "$(POD_NAMESPACE)" .Values.controller.scope.namespace }}
+ {{- end }}
+ {{- if and .Values.controller.reportNodeInternalIp .Values.controller.hostNetwork }}
+ - --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }}
+ {{- end }}
+ {{- if .Values.controller.admissionWebhooks.enabled }}
+ - --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }}
+ - --validating-webhook-certificate={{ .Values.controller.admissionWebhooks.certificate }}
+ - --validating-webhook-key={{ .Values.controller.admissionWebhooks.key }}
+ {{- end }}
+ {{- if .Values.controller.maxmindMirror }}
+ - --maxmind-mirror={{ .Values.controller.maxmindMirror }}
+ {{- end}}
+ {{- if .Values.controller.maxmindLicenseKey }}
+ - --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }}
+ {{- end }}
+ {{- if not (eq .Values.controller.healthCheckPath "/healthz") }}
+ - --health-check-path={{ .Values.controller.healthCheckPath }}
+ {{- end }}
+ {{- if .Values.controller.healthCheckHost }}
+ - --healthz-host={{ .Values.controller.healthCheckHost }}
+ {{- end }}
+ {{- if .Values.controller.ingressClassByName }}
+ - --ingress-class-by-name=true
+ {{- end }}
+ {{- if .Values.controller.watchIngressWithoutClass }}
+ - --watch-ingress-without-class=true
+ {{- end }}
+ {{- range $key, $value := .Values.controller.extraArgs }}
+ {{- /* Accept keys without values or with false as value */}}
+ {{- if eq ($value | quote | len) 2 }}
+ - --{{ $key }}
+ {{- else }}
+ - --{{ $key }}={{ $value }}
+ {{- end }}
+ {{- end }}
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ add:
+ - NET_BIND_SERVICE
+ runAsUser: {{ .Values.controller.image.runAsUser }}
+ allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }}
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ {{- if .Values.controller.enableMimalloc }}
+ - name: LD_PRELOAD
+ value: /usr/local/lib/libmimalloc.so
+ {{- end }}
+ {{- if .Values.controller.extraEnvs }}
+ {{- toYaml .Values.controller.extraEnvs | nindent 12 }}
+ {{- end }}
+ {{- if .Values.controller.startupProbe }}
+ startupProbe: {{ toYaml .Values.controller.startupProbe | nindent 12 }}
+ {{- end }}
+ livenessProbe: {{ toYaml .Values.controller.livenessProbe | nindent 12 }}
+ readinessProbe: {{ toYaml .Values.controller.readinessProbe | nindent 12 }}
+ ports:
+ {{- range $key, $value := .Values.controller.containerPort }}
+ - name: {{ $key }}
+ containerPort: {{ $value }}
+ protocol: TCP
+ {{- if $.Values.controller.hostPort.enabled }}
+ hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.controller.metrics.enabled }}
+ - name: metrics
+ containerPort: {{ .Values.controller.metrics.port }}
+ protocol: TCP
+ {{- end }}
+ {{- if .Values.controller.admissionWebhooks.enabled }}
+ - name: webhook
+ containerPort: {{ .Values.controller.admissionWebhooks.port }}
+ protocol: TCP
+ {{- end }}
+ {{- range $key, $value := .Values.tcp }}
+ - name: {{ $key }}-tcp
+ containerPort: {{ $key }}
+ protocol: TCP
+ {{- if $.Values.controller.hostPort.enabled }}
+ hostPort: {{ $key }}
+ {{- end }}
+ {{- end }}
+ {{- range $key, $value := .Values.udp }}
+ - name: {{ $key }}-udp
+ containerPort: {{ $key }}
+ protocol: UDP
+ {{- if $.Values.controller.hostPort.enabled }}
+ hostPort: {{ $key }}
+ {{- end }}
+ {{- end }}
+ {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled) }}
+ volumeMounts:
+ {{- if .Values.controller.customTemplate.configMapName }}
+ - mountPath: /etc/nginx/template
+ name: nginx-template-volume
+ readOnly: true
+ {{- end }}
+ {{- if .Values.controller.admissionWebhooks.enabled }}
+ - name: webhook-cert
+ mountPath: /usr/local/certificates/
+ readOnly: true
+ {{- end }}
+ {{- if .Values.controller.extraVolumeMounts }}
+ {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.controller.resources }}
+ resources: {{ toYaml .Values.controller.resources | nindent 12 }}
+ {{- end }}
+ {{- if .Values.controller.extraContainers }}
+ {{ toYaml .Values.controller.extraContainers | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.extraInitContainers }}
+ initContainers: {{ toYaml .Values.controller.extraInitContainers | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.hostNetwork }}
+ hostNetwork: {{ .Values.controller.hostNetwork }}
+ {{- end }}
+ {{- if .Values.controller.nodeSelector }}
+ nodeSelector: {{ toYaml .Values.controller.nodeSelector | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.tolerations }}
+ tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.affinity }}
+ affinity: {{ toYaml .Values.controller.affinity | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.topologySpreadConstraints }}
+ topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . }}
+ terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }}
+ {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes) }}
+ volumes:
+ {{- if .Values.controller.customTemplate.configMapName }}
+ - name: nginx-template-volume
+ configMap:
+ name: {{ .Values.controller.customTemplate.configMapName }}
+ items:
+ - key: {{ .Values.controller.customTemplate.configMapKey }}
+ path: nginx.tmpl
+ {{- end }}
+ {{- if .Values.controller.admissionWebhooks.enabled }}
+ - name: webhook-cert
+ secret:
+ secretName: {{ include "ingress-nginx.fullname" . }}-admission
+ {{- end }}
+ {{- if .Values.controller.extraVolumes }}
+ {{ toYaml .Values.controller.extraVolumes | nindent 8 }}
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/controller-deployment.yaml b/k8s/ingress-nginx/templates/controller-deployment.yaml
new file mode 100644
index 0000000..24714a5
--- /dev/null
+++ b/k8s/ingress-nginx/templates/controller-deployment.yaml
@@ -0,0 +1,257 @@
+{{- if or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both") -}}
+{{- include "isControllerTagValid" . -}}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- with .Values.controller.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ name: {{ include "ingress-nginx.controller.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ {{- if .Values.controller.annotations }}
+ annotations: {{ toYaml .Values.controller.annotations | nindent 4 }}
+ {{- end }}
+spec:
+ selector:
+ matchLabels:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 6 }}
+ app.kubernetes.io/component: controller
+ {{- if not .Values.controller.autoscaling.enabled }}
+ replicas: {{ .Values.controller.replicaCount }}
+ {{- end }}
+ revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
+ {{- if .Values.controller.updateStrategy }}
+ strategy:
+ {{ toYaml .Values.controller.updateStrategy | nindent 4 }}
+ {{- end }}
+ minReadySeconds: {{ .Values.controller.minReadySeconds }}
+ template:
+ metadata:
+ {{- if .Values.controller.podAnnotations }}
+ annotations:
+ {{- range $key, $value := .Values.controller.podAnnotations }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ {{- end }}
+ labels:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 8 }}
+ app.kubernetes.io/component: controller
+ {{- if .Values.controller.podLabels }}
+ {{- toYaml .Values.controller.podLabels | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- if .Values.controller.dnsConfig }}
+ dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.hostname }}
+ hostname: {{ toYaml .Values.controller.hostname | nindent 8 }}
+ {{- end }}
+ dnsPolicy: {{ .Values.controller.dnsPolicy }}
+ {{- if .Values.imagePullSecrets }}
+ imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.priorityClassName }}
+ priorityClassName: {{ .Values.controller.priorityClassName }}
+ {{- end }}
+ {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }}
+ securityContext:
+ {{- end }}
+ {{- if .Values.controller.podSecurityContext }}
+ {{- toYaml .Values.controller.podSecurityContext | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.sysctls }}
+ sysctls:
+ {{- range $sysctl, $value := .Values.controller.sysctls }}
+ - name: {{ $sysctl | quote }}
+ value: {{ $value | quote }}
+ {{- end }}
+ {{- end }}
+ containers:
+ - name: {{ .Values.controller.containerName }}
+ {{- with .Values.controller.image }}
+ image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}"
+ {{- end }}
+ imagePullPolicy: {{ .Values.controller.image.pullPolicy }}
+ {{- if .Values.controller.lifecycle }}
+ lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }}
+ {{- end }}
+ args:
+ - /nginx-ingress-controller
+ {{- if .Values.defaultBackend.enabled }}
+ - --default-backend-service=$(POD_NAMESPACE)/{{ include "ingress-nginx.defaultBackend.fullname" . }}
+ {{- end }}
+ {{- if .Values.controller.publishService.enabled }}
+ - --publish-service={{ template "ingress-nginx.controller.publishServicePath" . }}
+ {{- end }}
+ - --election-id={{ .Values.controller.electionID }}
+ - --controller-class={{ .Values.controller.ingressClassResource.controllerValue }}
+ - --configmap={{ default "$(POD_NAMESPACE)" .Values.controller.configMapNamespace }}/{{ include "ingress-nginx.controller.fullname" . }}
+ {{- if .Values.tcp }}
+ - --tcp-services-configmap={{ default "$(POD_NAMESPACE)" .Values.controller.tcp.configMapNamespace }}/{{ include "ingress-nginx.fullname" . }}-tcp
+ {{- end }}
+ {{- if .Values.udp }}
+ - --udp-services-configmap={{ default "$(POD_NAMESPACE)" .Values.controller.udp.configMapNamespace }}/{{ include "ingress-nginx.fullname" . }}-udp
+ {{- end }}
+ {{- if .Values.controller.scope.enabled }}
+ - --watch-namespace={{ default "$(POD_NAMESPACE)" .Values.controller.scope.namespace }}
+ {{- end }}
+ {{- if and .Values.controller.reportNodeInternalIp .Values.controller.hostNetwork }}
+ - --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }}
+ {{- end }}
+ {{- if .Values.controller.admissionWebhooks.enabled }}
+ - --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }}
+ - --validating-webhook-certificate={{ .Values.controller.admissionWebhooks.certificate }}
+ - --validating-webhook-key={{ .Values.controller.admissionWebhooks.key }}
+ {{- end }}
+ {{- if .Values.controller.maxmindLicenseKey }}
+ - --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }}
+ {{- end }}
+ {{- if .Values.controller.healthCheckHost }}
+ - --healthz-host={{ .Values.controller.healthCheckHost }}
+ {{- end }}
+ {{- if not (eq .Values.controller.healthCheckPath "/healthz") }}
+ - --health-check-path={{ .Values.controller.healthCheckPath }}
+ {{- end }}
+ {{- if .Values.controller.ingressClassByName }}
+ - --ingress-class-by-name=true
+ {{- end }}
+ {{- if .Values.controller.watchIngressWithoutClass }}
+ - --watch-ingress-without-class=true
+ {{- end }}
+ {{- range $key, $value := .Values.controller.extraArgs }}
+ {{- /* Accept keys without values or with false as value */}}
+ {{- if eq ($value | quote | len) 2 }}
+ - --{{ $key }}
+ {{- else }}
+ - --{{ $key }}={{ $value }}
+ {{- end }}
+ {{- end }}
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ add:
+ - NET_BIND_SERVICE
+ runAsUser: {{ .Values.controller.image.runAsUser }}
+ allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }}
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ {{- if .Values.controller.enableMimalloc }}
+ - name: LD_PRELOAD
+ value: /usr/local/lib/libmimalloc.so
+ {{- end }}
+ {{- if .Values.controller.extraEnvs }}
+ {{- toYaml .Values.controller.extraEnvs | nindent 12 }}
+ {{- end }}
+ {{- if .Values.controller.startupProbe }}
+ startupProbe: {{ toYaml .Values.controller.startupProbe | nindent 12 }}
+ {{- end }}
+ livenessProbe: {{ toYaml .Values.controller.livenessProbe | nindent 12 }}
+ readinessProbe: {{ toYaml .Values.controller.readinessProbe | nindent 12 }}
+ ports:
+ {{- range $key, $value := .Values.controller.containerPort }}
+ - name: {{ $key }}
+ containerPort: {{ $value }}
+ protocol: TCP
+ {{- if $.Values.controller.hostPort.enabled }}
+ hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.controller.metrics.enabled }}
+ - name: metrics
+ containerPort: {{ .Values.controller.metrics.port }}
+ protocol: TCP
+ {{- end }}
+ {{- if .Values.controller.admissionWebhooks.enabled }}
+ - name: webhook
+ containerPort: {{ .Values.controller.admissionWebhooks.port }}
+ protocol: TCP
+ {{- end }}
+ {{- range $key, $value := .Values.tcp }}
+ - name: {{ $key }}-tcp
+ containerPort: {{ $key }}
+ protocol: TCP
+ {{- if $.Values.controller.hostPort.enabled }}
+ hostPort: {{ $key }}
+ {{- end }}
+ {{- end }}
+ {{- range $key, $value := .Values.udp }}
+ - name: {{ $key }}-udp
+ containerPort: {{ $key }}
+ protocol: UDP
+ {{- if $.Values.controller.hostPort.enabled }}
+ hostPort: {{ $key }}
+ {{- end }}
+ {{- end }}
+ {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled) }}
+ volumeMounts:
+ {{- if .Values.controller.customTemplate.configMapName }}
+ - mountPath: /etc/nginx/template
+ name: nginx-template-volume
+ readOnly: true
+ {{- end }}
+ {{- if .Values.controller.admissionWebhooks.enabled }}
+ - name: webhook-cert
+ mountPath: /usr/local/certificates/
+ readOnly: true
+ {{- end }}
+ {{- if .Values.controller.extraVolumeMounts }}
+ {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.controller.resources }}
+ resources: {{ toYaml .Values.controller.resources | nindent 12 }}
+ {{- end }}
+ {{- if .Values.controller.extraContainers }}
+ {{ toYaml .Values.controller.extraContainers | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.extraInitContainers }}
+ initContainers: {{ toYaml .Values.controller.extraInitContainers | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.hostNetwork }}
+ hostNetwork: {{ .Values.controller.hostNetwork }}
+ {{- end }}
+ {{- if .Values.controller.nodeSelector }}
+ nodeSelector: {{ toYaml .Values.controller.nodeSelector | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.tolerations }}
+ tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.affinity }}
+ affinity: {{ toYaml .Values.controller.affinity | nindent 8 }}
+ {{- end }}
+ {{- if .Values.controller.topologySpreadConstraints }}
+ topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . }}
+ terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }}
+ {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes) }}
+ volumes:
+ {{- if .Values.controller.customTemplate.configMapName }}
+ - name: nginx-template-volume
+ configMap:
+ name: {{ .Values.controller.customTemplate.configMapName }}
+ items:
+ - key: {{ .Values.controller.customTemplate.configMapKey }}
+ path: nginx.tmpl
+ {{- end }}
+ {{- if .Values.controller.admissionWebhooks.enabled }}
+ - name: webhook-cert
+ secret:
+ secretName: {{ include "ingress-nginx.fullname" . }}-admission
+ {{- end }}
+ {{- if .Values.controller.extraVolumes }}
+ {{ toYaml .Values.controller.extraVolumes | nindent 8 }}
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/controller-hpa.yaml b/k8s/ingress-nginx/templates/controller-hpa.yaml
new file mode 100644
index 0000000..876315f
--- /dev/null
+++ b/k8s/ingress-nginx/templates/controller-hpa.yaml
@@ -0,0 +1,49 @@
+{{- if and .Values.controller.autoscaling.enabled (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}}
+{{- if not .Values.controller.keda.enabled }}
+
+apiVersion: {{ ternary "autoscaling/v2" "autoscaling/v2beta2" (semverCompare ">=1.23.0-0" .Capabilities.KubeVersion.Version) }}
+kind: HorizontalPodAutoscaler
+metadata:
+  {{- with .Values.controller.autoscaling.annotations }}
+  annotations:
+    {{- toYaml . | trimSuffix "\n" | nindent 4 }}
+  {{- end }}
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ name: {{ include "ingress-nginx.controller.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+spec:
+ scaleTargetRef:
+ apiVersion: apps/v1
+ kind: Deployment
+ name: {{ include "ingress-nginx.controller.fullname" . }}
+ minReplicas: {{ .Values.controller.autoscaling.minReplicas }}
+ maxReplicas: {{ .Values.controller.autoscaling.maxReplicas }}
+ metrics:
+ {{- with .Values.controller.autoscaling.targetMemoryUtilizationPercentage }}
+ - type: Resource
+ resource:
+ name: memory
+ target:
+ type: Utilization
+ averageUtilization: {{ . }}
+ {{- end }}
+ {{- with .Values.controller.autoscaling.targetCPUUtilizationPercentage }}
+ - type: Resource
+ resource:
+ name: cpu
+ target:
+ type: Utilization
+ averageUtilization: {{ . }}
+ {{- end }}
+ {{- with .Values.controller.autoscalingTemplate }}
+ {{- toYaml . | nindent 2 }}
+ {{- end }}
+ {{- with .Values.controller.autoscaling.behavior }}
+ behavior:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+{{- end }}
+{{- end }}
+
diff --git a/k8s/ingress-nginx/templates/controller-ingressclass.yaml b/k8s/ingress-nginx/templates/controller-ingressclass.yaml
new file mode 100644
index 0000000..9492784
--- /dev/null
+++ b/k8s/ingress-nginx/templates/controller-ingressclass.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.controller.ingressClassResource.enabled -}}
+# We don't support namespaced ingressClass yet
+# So a ClusterRole and a ClusterRoleBinding is required
+apiVersion: networking.k8s.io/v1
+kind: IngressClass
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- with .Values.controller.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ name: {{ .Values.controller.ingressClassResource.name }}
+{{- if .Values.controller.ingressClassResource.default }}
+ annotations:
+ ingressclass.kubernetes.io/is-default-class: "true"
+{{- end }}
+spec:
+ controller: {{ .Values.controller.ingressClassResource.controllerValue }}
+ {{ template "ingressClass.parameters" . }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/controller-keda.yaml b/k8s/ingress-nginx/templates/controller-keda.yaml
new file mode 100644
index 0000000..c7eebf5
--- /dev/null
+++ b/k8s/ingress-nginx/templates/controller-keda.yaml
@@ -0,0 +1,39 @@
+{{- if and .Values.controller.keda.enabled (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}}
+# https://keda.sh/docs/
+
+apiVersion: {{ .Values.controller.keda.apiVersion }}
+kind: ScaledObject
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ name: {{ include "ingress-nginx.controller.fullname" . }}
+ {{- if .Values.controller.keda.scaledObject.annotations }}
+ annotations: {{ toYaml .Values.controller.keda.scaledObject.annotations | nindent 4 }}
+ {{- end }}
+spec:
+ scaleTargetRef:
+{{- if eq .Values.controller.keda.apiVersion "keda.k8s.io/v1alpha1" }}
+ deploymentName: {{ include "ingress-nginx.controller.fullname" . }}
+{{- else if eq .Values.controller.keda.apiVersion "keda.sh/v1alpha1" }}
+ name: {{ include "ingress-nginx.controller.fullname" . }}
+{{- end }}
+ pollingInterval: {{ .Values.controller.keda.pollingInterval }}
+ cooldownPeriod: {{ .Values.controller.keda.cooldownPeriod }}
+ minReplicaCount: {{ .Values.controller.keda.minReplicas }}
+ maxReplicaCount: {{ .Values.controller.keda.maxReplicas }}
+ triggers:
+{{- with .Values.controller.keda.triggers }}
+{{ toYaml . | indent 2 }}
+{{ end }}
+ advanced:
+ restoreToOriginalReplicaCount: {{ .Values.controller.keda.restoreToOriginalReplicaCount }}
+{{- if .Values.controller.keda.behavior }}
+ horizontalPodAutoscalerConfig:
+ behavior:
+{{ with .Values.controller.keda.behavior -}}
+{{ toYaml . | indent 8 }}
+{{ end }}
+
+{{- end }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/controller-poddisruptionbudget.yaml b/k8s/ingress-nginx/templates/controller-poddisruptionbudget.yaml
new file mode 100644
index 0000000..9556f58
--- /dev/null
+++ b/k8s/ingress-nginx/templates/controller-poddisruptionbudget.yaml
@@ -0,0 +1,16 @@
+{{- if or (and .Values.controller.autoscaling.enabled (gt (.Values.controller.autoscaling.minReplicas | int) 1)) (and (not .Values.controller.autoscaling.enabled) (gt (.Values.controller.replicaCount | int) 1)) }}
+apiVersion: {{ ternary "policy/v1" "policy/v1beta1" (semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version) }}
+kind: PodDisruptionBudget
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ name: {{ include "ingress-nginx.controller.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 6 }}
+ app.kubernetes.io/component: controller
+ minAvailable: {{ .Values.controller.minAvailable }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/controller-prometheusrules.yaml b/k8s/ingress-nginx/templates/controller-prometheusrules.yaml
new file mode 100644
index 0000000..ca54275
--- /dev/null
+++ b/k8s/ingress-nginx/templates/controller-prometheusrules.yaml
@@ -0,0 +1,21 @@
+{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.prometheusRule.enabled -}}
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+ name: {{ include "ingress-nginx.controller.fullname" . }}
+{{- if .Values.controller.metrics.prometheusRule.namespace }}
+ namespace: {{ .Values.controller.metrics.prometheusRule.namespace | quote }}
+{{- end }}
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- if .Values.controller.metrics.prometheusRule.additionalLabels }}
+ {{- toYaml .Values.controller.metrics.prometheusRule.additionalLabels | nindent 4 }}
+ {{- end }}
+spec:
+{{- if .Values.controller.metrics.prometheusRule.rules }}
+ groups:
+ - name: {{ template "ingress-nginx.name" . }}
+ rules: {{- toYaml .Values.controller.metrics.prometheusRule.rules | nindent 4 }}
+{{- end }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/controller-psp.yaml b/k8s/ingress-nginx/templates/controller-psp.yaml
new file mode 100644
index 0000000..bdb8563
--- /dev/null
+++ b/k8s/ingress-nginx/templates/controller-psp.yaml
@@ -0,0 +1,86 @@
+{{- if and .Values.podSecurityPolicy.enabled (empty .Values.controller.existingPsp) -}}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: {{ include "ingress-nginx.fullname" . }}
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+spec:
+ allowedCapabilities:
+ - NET_BIND_SERVICE
+{{- if .Values.controller.sysctls }}
+ allowedUnsafeSysctls:
+ {{- range $sysctl, $value := .Values.controller.sysctls }}
+ - {{ $sysctl }}
+ {{- end }}
+{{- end }}
+ privileged: false
+ allowPrivilegeEscalation: true
+ # Allow core volume types.
+ volumes:
+ - 'configMap'
+ - 'emptyDir'
+ #- 'projected'
+ - 'secret'
+ #- 'downwardAPI'
+{{- if .Values.controller.hostNetwork }}
+ hostNetwork: {{ .Values.controller.hostNetwork }}
+{{- end }}
+{{- if or .Values.controller.hostNetwork .Values.controller.hostPort.enabled }}
+ hostPorts:
+{{- if .Values.controller.hostNetwork }}
+{{- range $key, $value := .Values.controller.containerPort }}
+ # {{ $key }}
+ - min: {{ $value }}
+ max: {{ $value }}
+{{- end }}
+{{- else if .Values.controller.hostPort.enabled }}
+{{- range $key, $value := .Values.controller.hostPort.ports }}
+ # {{ $key }}
+ - min: {{ $value }}
+ max: {{ $value }}
+{{- end }}
+{{- end }}
+{{- if .Values.controller.metrics.enabled }}
+ # metrics
+ - min: {{ .Values.controller.metrics.port }}
+ max: {{ .Values.controller.metrics.port }}
+{{- end }}
+{{- if .Values.controller.admissionWebhooks.enabled }}
+ # admission webhooks
+ - min: {{ .Values.controller.admissionWebhooks.port }}
+ max: {{ .Values.controller.admissionWebhooks.port }}
+{{- end }}
+{{- range $key, $value := .Values.tcp }}
+ # {{ $key }}-tcp
+ - min: {{ $key }}
+ max: {{ $key }}
+{{- end }}
+{{- range $key, $value := .Values.udp }}
+ # {{ $key }}-udp
+ - min: {{ $key }}
+ max: {{ $key }}
+{{- end }}
+{{- end }}
+ hostIPC: false
+ hostPID: false
+ runAsUser:
+ # Require the container to run without root privileges.
+ rule: 'MustRunAsNonRoot'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ # Forbid adding the root group.
+ - min: 1
+ max: 65535
+ fsGroup:
+ rule: 'MustRunAs'
+ ranges:
+ # Forbid adding the root group.
+ - min: 1
+ max: 65535
+ readOnlyRootFilesystem: false
+ seLinux:
+ rule: 'RunAsAny'
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/controller-role.yaml b/k8s/ingress-nginx/templates/controller-role.yaml
new file mode 100644
index 0000000..97c627d
--- /dev/null
+++ b/k8s/ingress-nginx/templates/controller-role.yaml
@@ -0,0 +1,90 @@
+{{- if .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ name: {{ include "ingress-nginx.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - pods
+ - secrets
+ - endpoints
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingresses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingresses/status
+ verbs:
+ - update
+ - apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingressclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ resourceNames:
+ - {{ .Values.controller.electionID }}
+ verbs:
+ - get
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+{{- if .Values.podSecurityPolicy.enabled }}
+ - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}]
+ resources: ['podsecuritypolicies']
+ verbs: ['use']
+ {{- with .Values.controller.existingPsp }}
+ resourceNames: [{{ . }}]
+ {{- else }}
+ resourceNames: [{{ include "ingress-nginx.fullname" . }}]
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/controller-rolebinding.yaml b/k8s/ingress-nginx/templates/controller-rolebinding.yaml
new file mode 100644
index 0000000..5ec3bc7
--- /dev/null
+++ b/k8s/ingress-nginx/templates/controller-rolebinding.yaml
@@ -0,0 +1,18 @@
+{{- if .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ name: {{ include "ingress-nginx.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ include "ingress-nginx.fullname" . }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "ingress-nginx.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace | quote }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/controller-service-internal.yaml b/k8s/ingress-nginx/templates/controller-service-internal.yaml
new file mode 100644
index 0000000..5994498
--- /dev/null
+++ b/k8s/ingress-nginx/templates/controller-service-internal.yaml
@@ -0,0 +1,79 @@
+{{- if and .Values.controller.service.enabled .Values.controller.service.internal.enabled .Values.controller.service.internal.annotations }}
+apiVersion: v1
+kind: Service
+metadata:
+ annotations:
+ {{- range $key, $value := .Values.controller.service.internal.annotations }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- if .Values.controller.service.labels }}
+ {{- toYaml .Values.controller.service.labels | nindent 4 }}
+ {{- end }}
+ name: {{ include "ingress-nginx.controller.fullname" . }}-internal
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: "{{ .Values.controller.service.type }}"
+{{- if .Values.controller.service.internal.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.controller.service.internal.loadBalancerIP }}
+{{- end }}
+{{- if .Values.controller.service.internal.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges: {{ toYaml .Values.controller.service.internal.loadBalancerSourceRanges | nindent 4 }}
+{{- end }}
+{{- if .Values.controller.service.internal.externalTrafficPolicy }}
+ externalTrafficPolicy: {{ .Values.controller.service.internal.externalTrafficPolicy }}
+{{- end }}
+ ports:
+ {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }}
+ {{- if .Values.controller.service.enableHttp }}
+ - name: http
+ port: {{ .Values.controller.service.ports.http }}
+ protocol: TCP
+ targetPort: {{ .Values.controller.service.targetPorts.http }}
+ {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }}
+ appProtocol: http
+ {{- end }}
+ {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }}
+ nodePort: {{ .Values.controller.service.nodePorts.http }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.controller.service.enableHttps }}
+ - name: https
+ port: {{ .Values.controller.service.ports.https }}
+ protocol: TCP
+ targetPort: {{ .Values.controller.service.targetPorts.https }}
+ {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }}
+ appProtocol: https
+ {{- end }}
+ {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }}
+ nodePort: {{ .Values.controller.service.nodePorts.https }}
+ {{- end }}
+ {{- end }}
+ {{- range $key, $value := .Values.tcp }}
+ - name: {{ $key }}-tcp
+ port: {{ $key }}
+ protocol: TCP
+ targetPort: {{ $key }}-tcp
+ {{- if $.Values.controller.service.nodePorts.tcp }}
+ {{- if index $.Values.controller.service.nodePorts.tcp $key }}
+ nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- range $key, $value := .Values.udp }}
+ - name: {{ $key }}-udp
+ port: {{ $key }}
+ protocol: UDP
+ targetPort: {{ $key }}-udp
+ {{- if $.Values.controller.service.nodePorts.udp }}
+ {{- if index $.Values.controller.service.nodePorts.udp $key }}
+ nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ selector:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/controller-service-metrics.yaml b/k8s/ingress-nginx/templates/controller-service-metrics.yaml
new file mode 100644
index 0000000..1b69019
--- /dev/null
+++ b/k8s/ingress-nginx/templates/controller-service-metrics.yaml
@@ -0,0 +1,44 @@
+{{- if .Values.controller.metrics.enabled -}}
+apiVersion: v1
+kind: Service
+metadata:
+{{- if .Values.controller.metrics.service.annotations }}
+ annotations: {{ toYaml .Values.controller.metrics.service.annotations | nindent 4 }}
+{{- end }}
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- if .Values.controller.metrics.service.labels }}
+ {{- toYaml .Values.controller.metrics.service.labels | nindent 4 }}
+ {{- end }}
+ name: {{ include "ingress-nginx.controller.fullname" . }}-metrics
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: {{ .Values.controller.metrics.service.type }}
+{{- if .Values.controller.metrics.service.clusterIP }}
+ clusterIP: {{ .Values.controller.metrics.service.clusterIP }}
+{{- end }}
+{{- if .Values.controller.metrics.service.externalIPs }}
+ externalIPs: {{ toYaml .Values.controller.metrics.service.externalIPs | nindent 4 }}
+{{- end }}
+{{- if .Values.controller.metrics.service.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.controller.metrics.service.loadBalancerIP }}
+{{- end }}
+{{- if .Values.controller.metrics.service.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges: {{ toYaml .Values.controller.metrics.service.loadBalancerSourceRanges | nindent 4 }}
+{{- end }}
+{{- if .Values.controller.metrics.service.externalTrafficPolicy }}
+ externalTrafficPolicy: {{ .Values.controller.metrics.service.externalTrafficPolicy }}
+{{- end }}
+ ports:
+ - name: metrics
+ port: {{ .Values.controller.metrics.service.servicePort }}
+ targetPort: metrics
+ {{- $setNodePorts := (or (eq .Values.controller.metrics.service.type "NodePort") (eq .Values.controller.metrics.service.type "LoadBalancer")) }}
+ {{- if (and $setNodePorts (not (empty .Values.controller.metrics.service.nodePort))) }}
+ nodePort: {{ .Values.controller.metrics.service.nodePort }}
+ {{- end }}
+ selector:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/controller-service-webhook.yaml b/k8s/ingress-nginx/templates/controller-service-webhook.yaml
new file mode 100644
index 0000000..ae3b1fc
--- /dev/null
+++ b/k8s/ingress-nginx/templates/controller-service-webhook.yaml
@@ -0,0 +1,37 @@
+{{- if .Values.controller.admissionWebhooks.enabled -}}
+apiVersion: v1
+kind: Service
+metadata:
+{{- if .Values.controller.admissionWebhooks.service.annotations }}
+ annotations: {{ toYaml .Values.controller.admissionWebhooks.service.annotations | nindent 4 }}
+{{- end }}
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ name: {{ include "ingress-nginx.controller.fullname" . }}-admission
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: {{ .Values.controller.admissionWebhooks.service.type }}
+{{- if .Values.controller.admissionWebhooks.service.clusterIP }}
+ clusterIP: {{ .Values.controller.admissionWebhooks.service.clusterIP }}
+{{- end }}
+{{- if .Values.controller.admissionWebhooks.service.externalIPs }}
+ externalIPs: {{ toYaml .Values.controller.admissionWebhooks.service.externalIPs | nindent 4 }}
+{{- end }}
+{{- if .Values.controller.admissionWebhooks.service.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.controller.admissionWebhooks.service.loadBalancerIP }}
+{{- end }}
+{{- if .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges: {{ toYaml .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges | nindent 4 }}
+{{- end }}
+ ports:
+ - name: https-webhook
+ port: 443
+ targetPort: webhook
+  {{- if semverCompare ">=1.20.0-0" .Capabilities.KubeVersion.Version }}
+ appProtocol: https
+ {{- end }}
+ selector:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/controller-service.yaml b/k8s/ingress-nginx/templates/controller-service.yaml
new file mode 100644
index 0000000..9248818
--- /dev/null
+++ b/k8s/ingress-nginx/templates/controller-service.yaml
@@ -0,0 +1,91 @@
+{{- if .Values.controller.service.enabled -}}
+apiVersion: v1
+kind: Service
+metadata:
+ annotations:
+ {{- range $key, $value := .Values.controller.service.annotations }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- if .Values.controller.service.labels }}
+ {{- toYaml .Values.controller.service.labels | nindent 4 }}
+ {{- end }}
+ name: {{ include "ingress-nginx.controller.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: {{ .Values.controller.service.type }}
+{{- if .Values.controller.service.clusterIP }}
+ clusterIP: {{ .Values.controller.service.clusterIP }}
+{{- end }}
+{{- if .Values.controller.service.externalIPs }}
+ externalIPs: {{ toYaml .Values.controller.service.externalIPs | nindent 4 }}
+{{- end }}
+{{- if .Values.controller.service.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.controller.service.loadBalancerIP }}
+{{- end }}
+{{- if .Values.controller.service.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges: {{ toYaml .Values.controller.service.loadBalancerSourceRanges | nindent 4 }}
+{{- end }}
+{{- if .Values.controller.service.externalTrafficPolicy }}
+ externalTrafficPolicy: {{ .Values.controller.service.externalTrafficPolicy }}
+{{- end }}
+{{- if .Values.controller.service.sessionAffinity }}
+ sessionAffinity: {{ .Values.controller.service.sessionAffinity }}
+{{- end }}
+{{- if .Values.controller.service.healthCheckNodePort }}
+ healthCheckNodePort: {{ .Values.controller.service.healthCheckNodePort }}
+{{- end }}
+ ports:
+ {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }}
+ {{- if .Values.controller.service.enableHttp }}
+ - name: http
+ port: {{ .Values.controller.service.ports.http }}
+ protocol: TCP
+ targetPort: {{ .Values.controller.service.targetPorts.http }}
+    {{- if semverCompare ">=1.20.0-0" .Capabilities.KubeVersion.Version }}
+ appProtocol: http
+ {{- end }}
+ {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }}
+ nodePort: {{ .Values.controller.service.nodePorts.http }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.controller.service.enableHttps }}
+ - name: https
+ port: {{ .Values.controller.service.ports.https }}
+ protocol: TCP
+ targetPort: {{ .Values.controller.service.targetPorts.https }}
+    {{- if semverCompare ">=1.20.0-0" .Capabilities.KubeVersion.Version }}
+ appProtocol: https
+ {{- end }}
+ {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }}
+ nodePort: {{ .Values.controller.service.nodePorts.https }}
+ {{- end }}
+ {{- end }}
+ {{- range $key, $value := .Values.tcp }}
+ - name: {{ $key }}-tcp
+ port: {{ $key }}
+ protocol: TCP
+ targetPort: {{ $key }}-tcp
+ {{- if $.Values.controller.service.nodePorts.tcp }}
+ {{- if index $.Values.controller.service.nodePorts.tcp $key }}
+ nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- range $key, $value := .Values.udp }}
+ - name: {{ $key }}-udp
+ port: {{ $key }}
+ protocol: UDP
+ targetPort: {{ $key }}-udp
+ {{- if $.Values.controller.service.nodePorts.udp }}
+ {{- if index $.Values.controller.service.nodePorts.udp $key }}
+ nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ selector:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/controller-serviceaccount.yaml b/k8s/ingress-nginx/templates/controller-serviceaccount.yaml
new file mode 100644
index 0000000..50a718d
--- /dev/null
+++ b/k8s/ingress-nginx/templates/controller-serviceaccount.yaml
@@ -0,0 +1,11 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: controller
+  name: {{ template "ingress-nginx.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace }}
+automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/controller-servicemonitor.yaml b/k8s/ingress-nginx/templates/controller-servicemonitor.yaml
new file mode 100644
index 0000000..17894c8
--- /dev/null
+++ b/k8s/ingress-nginx/templates/controller-servicemonitor.yaml
@@ -0,0 +1,45 @@
+{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) .Values.controller.metrics.enabled .Values.controller.metrics.serviceMonitor.enabled -}}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: {{ include "ingress-nginx.controller.fullname" . }}
+{{- if .Values.controller.metrics.serviceMonitor.namespace }}
+ namespace: {{ .Values.controller.metrics.serviceMonitor.namespace | quote }}
+{{- end }}
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: controller
+ {{- if .Values.controller.metrics.serviceMonitor.additionalLabels }}
+ {{- toYaml .Values.controller.metrics.serviceMonitor.additionalLabels | nindent 4 }}
+ {{- end }}
+spec:
+ endpoints:
+ - port: metrics
+ interval: {{ .Values.controller.metrics.serviceMonitor.scrapeInterval }}
+ {{- if .Values.controller.metrics.serviceMonitor.honorLabels }}
+ honorLabels: true
+ {{- end }}
+ {{- if .Values.controller.metrics.serviceMonitor.metricRelabelings }}
+ metricRelabelings: {{ toYaml .Values.controller.metrics.serviceMonitor.metricRelabelings | nindent 8 }}
+ {{- end }}
+{{- if .Values.controller.metrics.serviceMonitor.jobLabel }}
+ jobLabel: {{ .Values.controller.metrics.serviceMonitor.jobLabel | quote }}
+{{- end }}
+{{- if .Values.controller.metrics.serviceMonitor.namespaceSelector }}
+ namespaceSelector: {{ toYaml .Values.controller.metrics.serviceMonitor.namespaceSelector | nindent 4 }}
+{{ else }}
+ namespaceSelector:
+ matchNames:
+ - {{ .Release.Namespace }}
+{{- end }}
+{{- if .Values.controller.metrics.serviceMonitor.targetLabels }}
+ targetLabels:
+ {{- range .Values.controller.metrics.serviceMonitor.targetLabels }}
+ - {{ . }}
+ {{- end }}
+{{- end }}
+ selector:
+ matchLabels:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 6 }}
+ app.kubernetes.io/component: controller
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/default-backend-deployment.yaml b/k8s/ingress-nginx/templates/default-backend-deployment.yaml
new file mode 100644
index 0000000..9934526
--- /dev/null
+++ b/k8s/ingress-nginx/templates/default-backend-deployment.yaml
@@ -0,0 +1,112 @@
+{{- if .Values.defaultBackend.enabled -}}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: default-backend
+ name: {{ include "ingress-nginx.defaultBackend.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 6 }}
+ app.kubernetes.io/component: default-backend
+{{- if not .Values.defaultBackend.autoscaling.enabled }}
+ replicas: {{ .Values.defaultBackend.replicaCount }}
+{{- end }}
+ revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
+ template:
+ metadata:
+ {{- if .Values.defaultBackend.podAnnotations }}
+ annotations: {{ toYaml .Values.defaultBackend.podAnnotations | nindent 8 }}
+ {{- end }}
+ labels:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 8 }}
+ app.kubernetes.io/component: default-backend
+ {{- if .Values.defaultBackend.podLabels }}
+ {{- toYaml .Values.defaultBackend.podLabels | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- if .Values.imagePullSecrets }}
+ imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }}
+ {{- end }}
+ {{- if .Values.defaultBackend.priorityClassName }}
+ priorityClassName: {{ .Values.defaultBackend.priorityClassName }}
+ {{- end }}
+ {{- if .Values.defaultBackend.podSecurityContext }}
+ securityContext: {{ toYaml .Values.defaultBackend.podSecurityContext | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: {{ template "ingress-nginx.name" . }}-default-backend
+ {{- with .Values.defaultBackend.image }}
+ image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}"
+ {{- end }}
+ imagePullPolicy: {{ .Values.defaultBackend.image.pullPolicy }}
+ {{- if .Values.defaultBackend.extraArgs }}
+ args:
+ {{- range $key, $value := .Values.defaultBackend.extraArgs }}
+ {{- /* Accept keys without values or with false as value */}}
+ {{- if eq ($value | quote | len) 2 }}
+ - --{{ $key }}
+ {{- else }}
+ - --{{ $key }}={{ $value }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ runAsUser: {{ .Values.defaultBackend.image.runAsUser }}
+ runAsNonRoot: {{ .Values.defaultBackend.image.runAsNonRoot }}
+ allowPrivilegeEscalation: {{ .Values.defaultBackend.image.allowPrivilegeEscalation }}
+            readOnlyRootFilesystem: {{ .Values.defaultBackend.image.readOnlyRootFilesystem }}
+ {{- if .Values.defaultBackend.extraEnvs }}
+ env: {{ toYaml .Values.defaultBackend.extraEnvs | nindent 12 }}
+ {{- end }}
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: {{ .Values.defaultBackend.port }}
+ scheme: HTTP
+ initialDelaySeconds: {{ .Values.defaultBackend.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.defaultBackend.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.defaultBackend.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.defaultBackend.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.defaultBackend.livenessProbe.failureThreshold }}
+ readinessProbe:
+ httpGet:
+ path: /healthz
+ port: {{ .Values.defaultBackend.port }}
+ scheme: HTTP
+ initialDelaySeconds: {{ .Values.defaultBackend.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.defaultBackend.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.defaultBackend.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.defaultBackend.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.defaultBackend.readinessProbe.failureThreshold }}
+ ports:
+ - name: http
+ containerPort: {{ .Values.defaultBackend.port }}
+ protocol: TCP
+ {{- if .Values.defaultBackend.extraVolumeMounts }}
+ volumeMounts: {{- toYaml .Values.defaultBackend.extraVolumeMounts | nindent 12 }}
+ {{- end }}
+ {{- if .Values.defaultBackend.resources }}
+ resources: {{ toYaml .Values.defaultBackend.resources | nindent 12 }}
+ {{- end }}
+ {{- if .Values.defaultBackend.nodeSelector }}
+ nodeSelector: {{ toYaml .Values.defaultBackend.nodeSelector | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }}
+ {{- if .Values.defaultBackend.tolerations }}
+ tolerations: {{ toYaml .Values.defaultBackend.tolerations | nindent 8 }}
+ {{- end }}
+ {{- if .Values.defaultBackend.affinity }}
+ affinity: {{ toYaml .Values.defaultBackend.affinity | nindent 8 }}
+ {{- end }}
+ terminationGracePeriodSeconds: 60
+ {{- if .Values.defaultBackend.extraVolumes }}
+ volumes: {{ toYaml .Values.defaultBackend.extraVolumes | nindent 8 }}
+ {{- end }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/default-backend-hpa.yaml b/k8s/ingress-nginx/templates/default-backend-hpa.yaml
new file mode 100644
index 0000000..e31fda3
--- /dev/null
+++ b/k8s/ingress-nginx/templates/default-backend-hpa.yaml
@@ -0,0 +1,30 @@
+{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.autoscaling.enabled }}
+apiVersion: autoscaling/v2beta1 # NOTE(review): removed in Kubernetes 1.25 — migrating to autoscaling/v2 also requires replacing targetAverageUtilization with target.averageUtilization below
+kind: HorizontalPodAutoscaler
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: default-backend
+ name: {{ template "ingress-nginx.defaultBackend.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+spec:
+ scaleTargetRef:
+ apiVersion: apps/v1
+ kind: Deployment
+ name: {{ template "ingress-nginx.defaultBackend.fullname" . }}
+ minReplicas: {{ .Values.defaultBackend.autoscaling.minReplicas }}
+ maxReplicas: {{ .Values.defaultBackend.autoscaling.maxReplicas }}
+ metrics:
+{{- with .Values.defaultBackend.autoscaling.targetCPUUtilizationPercentage }}
+ - type: Resource
+ resource:
+ name: cpu
+ targetAverageUtilization: {{ . }}
+{{- end }}
+{{- with .Values.defaultBackend.autoscaling.targetMemoryUtilizationPercentage }}
+ - type: Resource
+ resource:
+ name: memory
+ targetAverageUtilization: {{ . }}
+{{- end }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml b/k8s/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml
new file mode 100644
index 0000000..9e586aa
--- /dev/null
+++ b/k8s/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml
@@ -0,0 +1,16 @@
+{{- if or (gt (.Values.defaultBackend.replicaCount | int) 1) (gt (.Values.defaultBackend.autoscaling.minReplicas | int) 1) }}
+apiVersion: {{ ternary "policy/v1" "policy/v1beta1" (semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version) }}
+kind: PodDisruptionBudget
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: default-backend
+ name: {{ include "ingress-nginx.defaultBackend.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+spec:
+ selector:
+ matchLabels:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 6 }}
+ app.kubernetes.io/component: default-backend
+ minAvailable: {{ .Values.defaultBackend.minAvailable }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/default-backend-psp.yaml b/k8s/ingress-nginx/templates/default-backend-psp.yaml
new file mode 100644
index 0000000..716dbf1
--- /dev/null
+++ b/k8s/ingress-nginx/templates/default-backend-psp.yaml
@@ -0,0 +1,33 @@
+{{- if and .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled (empty .Values.defaultBackend.existingPsp) -}}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: {{ include "ingress-nginx.fullname" . }}-backend
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: default-backend
+spec:
+ allowPrivilegeEscalation: false
+ fsGroup:
+ ranges:
+ - max: 65535
+ min: 1
+ rule: MustRunAs
+ requiredDropCapabilities:
+ - ALL
+ runAsUser:
+ rule: MustRunAsNonRoot
+ seLinux:
+ rule: RunAsAny
+ supplementalGroups:
+ ranges:
+ - max: 65535
+ min: 1
+ rule: MustRunAs
+ volumes:
+ - configMap
+ - emptyDir
+ - projected
+ - secret
+ - downwardAPI
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/default-backend-role.yaml b/k8s/ingress-nginx/templates/default-backend-role.yaml
new file mode 100644
index 0000000..5d29a2d
--- /dev/null
+++ b/k8s/ingress-nginx/templates/default-backend-role.yaml
@@ -0,0 +1,19 @@
+{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: default-backend
+ name: {{ include "ingress-nginx.fullname" . }}-backend
+ namespace: {{ .Release.Namespace }}
+rules:
+ - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}]
+ resources: ['podsecuritypolicies']
+ verbs: ['use']
+ {{- with .Values.defaultBackend.existingPsp }}
+ resourceNames: [{{ . }}]
+ {{- else }}
+ resourceNames: [{{ include "ingress-nginx.fullname" . }}-backend]
+ {{- end }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/default-backend-rolebinding.yaml b/k8s/ingress-nginx/templates/default-backend-rolebinding.yaml
new file mode 100644
index 0000000..4a9cb92
--- /dev/null
+++ b/k8s/ingress-nginx/templates/default-backend-rolebinding.yaml
@@ -0,0 +1,18 @@
+{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: default-backend
+ name: {{ include "ingress-nginx.fullname" . }}-backend
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ include "ingress-nginx.fullname" . }}-backend
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace | quote }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/default-backend-service.yaml b/k8s/ingress-nginx/templates/default-backend-service.yaml
new file mode 100644
index 0000000..f59eb1e
--- /dev/null
+++ b/k8s/ingress-nginx/templates/default-backend-service.yaml
@@ -0,0 +1,38 @@
+{{- if .Values.defaultBackend.enabled -}}
+apiVersion: v1
+kind: Service
+metadata:
+{{- if .Values.defaultBackend.service.annotations }}
+ annotations: {{ toYaml .Values.defaultBackend.service.annotations | nindent 4 }}
+{{- end }}
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: default-backend
+ name: {{ include "ingress-nginx.defaultBackend.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+spec:
+ type: {{ .Values.defaultBackend.service.type }}
+{{- if .Values.defaultBackend.service.clusterIP }}
+ clusterIP: {{ .Values.defaultBackend.service.clusterIP }}
+{{- end }}
+{{- if .Values.defaultBackend.service.externalIPs }}
+ externalIPs: {{ toYaml .Values.defaultBackend.service.externalIPs | nindent 4 }}
+{{- end }}
+{{- if .Values.defaultBackend.service.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.defaultBackend.service.loadBalancerIP }}
+{{- end }}
+{{- if .Values.defaultBackend.service.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges: {{ toYaml .Values.defaultBackend.service.loadBalancerSourceRanges | nindent 4 }}
+{{- end }}
+ ports:
+ - name: http
+ port: {{ .Values.defaultBackend.service.servicePort }}
+ protocol: TCP
+ targetPort: http
+ {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }}
+ appProtocol: http
+ {{- end }}
+ selector:
+ {{- include "ingress-nginx.selectorLabels" . | nindent 4 }}
+ app.kubernetes.io/component: default-backend
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/default-backend-serviceaccount.yaml b/k8s/ingress-nginx/templates/default-backend-serviceaccount.yaml
new file mode 100644
index 0000000..0c00e93
--- /dev/null
+++ b/k8s/ingress-nginx/templates/default-backend-serviceaccount.yaml
@@ -0,0 +1,11 @@
+{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ {{- include "ingress-nginx.labels" . | nindent 4 }}
+ app.kubernetes.io/component: default-backend
+ name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+automountServiceAccountToken: {{ .Values.defaultBackend.serviceAccount.automountServiceAccountToken }}
+{{- end }}
diff --git a/k8s/ingress-nginx/templates/dh-param-secret.yaml b/k8s/ingress-nginx/templates/dh-param-secret.yaml
new file mode 100644
index 0000000..12e7a4f
--- /dev/null
+++ b/k8s/ingress-nginx/templates/dh-param-secret.yaml
@@ -0,0 +1,10 @@
+{{- with .Values.dhParam -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "ingress-nginx.controller.fullname" $ }}
+ labels:
+ {{- include "ingress-nginx.labels" $ | nindent 4 }}
+data:
+ dhparam.pem: {{ . }}
+{{- end }}
diff --git a/k8s/ingress-nginx/values.yaml b/k8s/ingress-nginx/values.yaml
new file mode 100644
index 0000000..c4d3ef3
--- /dev/null
+++ b/k8s/ingress-nginx/values.yaml
@@ -0,0 +1,856 @@
+## nginx configuration
+## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/index.md
+##
+
+## Overrides for generated resource names
+# See templates/_helpers.tpl
+# nameOverride:
+# fullnameOverride:
+
+controller:
+ name: controller
+ image:
+ registry: k8s.gcr.io
+ image: ingress-nginx/controller
+ # for backwards compatibility consider setting the full image url via the repository value below
+ # use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail
+ # repository:
+ tag: "v1.0.3"
+ digest: sha256:4ade87838eb8256b094fbb5272d7dda9b6c7fa8b759e6af5383c1300996a7452
+ pullPolicy: IfNotPresent
+ # www-data -> uid 101
+ runAsUser: 101
+ allowPrivilegeEscalation: true
+
+ # Use an existing PSP instead of creating one
+ existingPsp: ""
+
+ # Configures the controller container name
+ containerName: controller
+
+ # Configures the ports the nginx-controller listens on
+ containerPort:
+ http: 80
+ https: 443
+
+ # Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/
+ config: {}
+
+ ## Annotations to be added to the controller config configuration configmap
+ ##
+ configAnnotations: {}
+
+ # Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers
+ proxySetHeaders: {}
+
+ # Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers
+ addHeaders: {}
+
+ # Optionally customize the pod dnsConfig.
+ dnsConfig: {}
+
+ # Optionally customize the pod hostname.
+ hostname: {}
+
+ # Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'.
+ # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller
+ # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet.
+ dnsPolicy: ClusterFirst
+
+ # Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network
+ # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply
+ reportNodeInternalIp: false
+
+ # Process Ingress objects without ingressClass annotation/ingressClassName field
+ # Overrides value for --watch-ingress-without-class flag of the controller binary
+ # Defaults to false
+ watchIngressWithoutClass: false
+
+ # Process IngressClass per name (additionally as per spec.controller)
+ ingressClassByName: false
+
+ # This configuration defines if Ingress Controller should allow users to set
+ # their own *-snippet annotations, otherwise this is forbidden / dropped
+ # when users add those annotations.
+ # Global snippets in ConfigMap are still respected
+ allowSnippetAnnotations: true
+
+ # Required for use with CNI based kubernetes installations (such as ones set up by kubeadm),
+ # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920
+ # is merged
+ hostNetwork: false
+
+ ## Use host ports 80 and 443
+ ## Disabled by default
+ ##
+ hostPort:
+ enabled: false
+ ports:
+ http: 80
+ https: 443
+
+ ## Election ID to use for status update
+ ##
+ electionID: ingress-controller-leader
+
+ # This section refers to the creation of the IngressClass resource
+ # IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19
+ ingressClassResource:
+ name: nginx
+ enabled: true
+ default: false
+ controllerValue: "k8s.io/ingress-nginx"
+
+ # Parameters is a link to a custom resource containing additional
+ # configuration for the controller. This is optional if the controller
+ # does not require extra parameters.
+ parameters: {}
+
+ # labels to add to the pod container metadata
+ podLabels: {}
+ # key: value
+
+ ## Security Context policies for controller pods
+ ##
+ podSecurityContext: {}
+
+ ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for
+ ## notes on enabling and using sysctls
+ ###
+ sysctls: {}
+ # sysctls:
+ # "net.core.somaxconn": "8192"
+
+ ## Allows customization of the source of the IP address or FQDN to report
+ ## in the ingress status field. By default, it reads the information provided
+ ## by the service. If disable, the status field reports the IP address of the
+ ## node or nodes where an ingress controller pod is running.
+ publishService:
+ enabled: true
+ ## Allows overriding of the publish service to bind to
+ ## Must be /
+ ##
+ pathOverride: ""
+
+ ## Limit the scope of the controller
+ ##
+ scope:
+ enabled: false
+ namespace: "" # defaults to $(POD_NAMESPACE)
+
+ ## Allows customization of the configmap / nginx-configmap namespace
+ ##
+ configMapNamespace: "" # defaults to $(POD_NAMESPACE)
+
+ ## Allows customization of the tcp-services-configmap
+ ##
+ tcp:
+ configMapNamespace: "" # defaults to $(POD_NAMESPACE)
+ ## Annotations to be added to the tcp config configmap
+ annotations: {}
+
+ ## Allows customization of the udp-services-configmap
+ ##
+ udp:
+ configMapNamespace: "" # defaults to $(POD_NAMESPACE)
+ ## Annotations to be added to the udp config configmap
+ annotations: {}
+
+ # Maxmind license key to download GeoLite2 Databases
+ # https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases
+ maxmindLicenseKey: ""
+
+ ## Additional command line arguments to pass to nginx-ingress-controller
+ ## E.g. to specify the default SSL certificate you can use
+ ## extraArgs:
+ ## default-ssl-certificate: "/"
+ extraArgs: {}
+
+ ## Additional environment variables to set
+ extraEnvs: []
+ # extraEnvs:
+ # - name: FOO
+ # valueFrom:
+ # secretKeyRef:
+ # key: FOO
+ # name: secret-resource
+
+ ## DaemonSet or Deployment
+ ##
+ kind: Deployment
+
+ ## Annotations to be added to the controller Deployment or DaemonSet
+ ##
+ annotations: {}
+ # keel.sh/pollSchedule: "@every 60m"
+
+ ## Labels to be added to the controller Deployment or DaemonSet
+ ##
+ labels: {}
+ # keel.sh/policy: patch
+ # keel.sh/trigger: poll
+
+
+ # The update strategy to apply to the Deployment or DaemonSet
+ ##
+ updateStrategy: {}
+ # rollingUpdate:
+ # maxUnavailable: 1
+ # type: RollingUpdate
+
+ # minReadySeconds to avoid killing pods before we are ready
+ ##
+ minReadySeconds: 0
+
+
+ ## Node tolerations for server scheduling to nodes with taints
+ ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ ##
+ tolerations: []
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+ ## Affinity and anti-affinity
+ ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ##
+ affinity: {}
+ # # An example of preferred pod anti-affinity, weight is in the range 1-100
+ # podAntiAffinity:
+ # preferredDuringSchedulingIgnoredDuringExecution:
+ # - weight: 100
+ # podAffinityTerm:
+ # labelSelector:
+ # matchExpressions:
+ # - key: app.kubernetes.io/name
+ # operator: In
+ # values:
+ # - ingress-nginx
+ # - key: app.kubernetes.io/instance
+ # operator: In
+ # values:
+ # - ingress-nginx
+ # - key: app.kubernetes.io/component
+ # operator: In
+ # values:
+ # - controller
+ # topologyKey: kubernetes.io/hostname
+
+ # # An example of required pod anti-affinity
+ # podAntiAffinity:
+ # requiredDuringSchedulingIgnoredDuringExecution:
+ # - labelSelector:
+ # matchExpressions:
+ # - key: app.kubernetes.io/name
+ # operator: In
+ # values:
+ # - ingress-nginx
+ # - key: app.kubernetes.io/instance
+ # operator: In
+ # values:
+ # - ingress-nginx
+ # - key: app.kubernetes.io/component
+ # operator: In
+ # values:
+ # - controller
+ # topologyKey: "kubernetes.io/hostname"
+
+ ## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in.
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+ ##
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: failure-domain.beta.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+ # labelSelector:
+ # matchLabels:
+ # app.kubernetes.io/instance: ingress-nginx-internal
+
+ ## terminationGracePeriodSeconds
+ ## wait up to five minutes for the drain of connections
+ ##
+ terminationGracePeriodSeconds: 300
+
+ ## Node labels for controller pod assignment
+ ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ ## Liveness and readiness probe values
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
+ ##
+ # startupProbe:
+ # httpGet:
+ # # should match container.healthCheckPath
+ # path: "/healthz"
+ # port: 10254
+ # scheme: HTTP
+ # initialDelaySeconds: 5
+ # periodSeconds: 5
+ # timeoutSeconds: 2
+ # successThreshold: 1
+ # failureThreshold: 5
+ livenessProbe:
+ httpGet:
+ # should match container.healthCheckPath
+ path: "/healthz"
+ port: 10254
+ scheme: HTTP
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 5
+ readinessProbe:
+ httpGet:
+ # should match container.healthCheckPath
+ path: "/healthz"
+ port: 10254
+ scheme: HTTP
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+
+
+ # Path of the health check endpoint. All requests received on the port defined by
+ # the healthz-port parameter are forwarded internally to this path.
+ healthCheckPath: "/healthz"
+
+ # Address to bind the health check endpoint.
+ # It is better to set this option to the internal node address
+ # if the ingress nginx controller is running in the hostNetwork: true mode.
+ healthCheckHost: ""
+
+ ## Annotations to be added to controller pods
+ ##
+ podAnnotations: {}
+
+ replicaCount: 1
+
+ minAvailable: 1
+
+ # Define requests resources to avoid probe issues due to CPU utilization in busy nodes
+ # ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903
+ # Ideally, there should be no limits.
+ # https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/
+ resources:
+ # limits:
+ # cpu: 100m
+ # memory: 90Mi
+ requests:
+ cpu: 100m
+ memory: 90Mi
+
+ # Mutually exclusive with keda autoscaling
+ autoscaling:
+ enabled: false
+ minReplicas: 1
+ maxReplicas: 11
+ targetCPUUtilizationPercentage: 50
+ targetMemoryUtilizationPercentage: 50
+ behavior: {}
+ # scaleDown:
+ # stabilizationWindowSeconds: 300
+ # policies:
+ # - type: Pods
+ # value: 1
+ # periodSeconds: 180
+ # scaleUp:
+ # stabilizationWindowSeconds: 300
+ # policies:
+ # - type: Pods
+ # value: 2
+ # periodSeconds: 60
+
+ autoscalingTemplate: []
+ # Custom or additional autoscaling metrics
+ # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics
+ # - type: Pods
+ # pods:
+ # metric:
+ # name: nginx_ingress_controller_nginx_process_requests_total
+ # target:
+ # type: AverageValue
+ # averageValue: 10000m
+
+ # Mutually exclusive with hpa autoscaling
+ keda:
+ apiVersion: "keda.sh/v1alpha1"
+ # apiVersion changes with keda 1.x vs 2.x
+ # 2.x = keda.sh/v1alpha1
+ # 1.x = keda.k8s.io/v1alpha1
+ enabled: false
+ minReplicas: 1
+ maxReplicas: 11
+ pollingInterval: 30
+ cooldownPeriod: 300
+ restoreToOriginalReplicaCount: false
+ scaledObject:
+ annotations: {}
+ # Custom annotations for ScaledObject resource
+ # annotations:
+ # key: value
+ triggers: []
+ # - type: prometheus
+ # metadata:
+ # serverAddress: http://:9090
+ # metricName: http_requests_total
+ # threshold: '100'
+ # query: sum(rate(http_requests_total{deployment="my-deployment"}[2m]))
+
+ behavior: {}
+ # scaleDown:
+ # stabilizationWindowSeconds: 300
+ # policies:
+ # - type: Pods
+ # value: 1
+ # periodSeconds: 180
+ # scaleUp:
+ # stabilizationWindowSeconds: 300
+ # policies:
+ # - type: Pods
+ # value: 2
+ # periodSeconds: 60
+
+ ## Enable mimalloc as a drop-in replacement for malloc.
+ ## ref: https://github.com/microsoft/mimalloc
+ ##
+ enableMimalloc: true
+
+ ## Override NGINX template
+ customTemplate:
+ configMapName: ""
+ configMapKey: ""
+
+ service:
+ enabled: true
+
+ annotations: {}
+ labels: {}
+ # clusterIP: ""
+
+ ## List of IP addresses at which the controller services are available
+ ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+ ##
+ externalIPs: []
+
+ # loadBalancerIP: ""
+ loadBalancerSourceRanges: []
+
+ enableHttp: true
+ enableHttps: true
+
+ ## Set external traffic policy to: "Local" to preserve source IP on
+ ## providers supporting it
+ ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
+ # externalTrafficPolicy: ""
+
+ # Must be either "None" or "ClientIP" if set. Kubernetes will default to "None".
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ # sessionAffinity: ""
+
+ # specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified,
+ # the service controller allocates a port from your cluster’s NodePort range.
+ # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ # healthCheckNodePort: 0
+
+ ports:
+ http: 80
+ https: 443
+
+ targetPorts:
+ http: http
+ https: https
+
+ type: LoadBalancer
+
+ # type: NodePort
+ # nodePorts:
+ # http: 32080
+ # https: 32443
+ # tcp:
+ # 8080: 32808
+ nodePorts:
+ http: ""
+ https: ""
+ tcp: {}
+ udp: {}
+
+ ## Enables an additional internal load balancer (besides the external one).
+ ## Annotations are mandatory for the load balancer to come up. Varies with the cloud service.
+ internal:
+ enabled: false
+ annotations: {}
+
+ # loadBalancerIP: ""
+
+ ## Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0.
+ loadBalancerSourceRanges: []
+
+ ## Set external traffic policy to: "Local" to preserve source IP on
+ ## providers supporting it
+ ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
+ # externalTrafficPolicy: ""
+
+ extraContainers: []
+ ## Additional containers to be added to the controller pod.
+ ## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example.
+ # - name: my-sidecar
+ # image: nginx:latest
+ # - name: lemonldap-ng-controller
+ # image: lemonldapng/lemonldap-ng-controller:0.2.0
+ # args:
+ # - /lemonldap-ng-controller
+ # - --alsologtostderr
+ # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration
+ # env:
+ # - name: POD_NAME
+ # valueFrom:
+ # fieldRef:
+ # fieldPath: metadata.name
+ # - name: POD_NAMESPACE
+ # valueFrom:
+ # fieldRef:
+ # fieldPath: metadata.namespace
+ # volumeMounts:
+ # - name: copy-portal-skins
+ # mountPath: /srv/var/lib/lemonldap-ng/portal/skins
+
+ extraVolumeMounts: []
+ ## Additional volumeMounts to the controller main container.
+ # - name: copy-portal-skins
+ # mountPath: /var/lib/lemonldap-ng/portal/skins
+
+ extraVolumes: []
+ ## Additional volumes to the controller pod.
+ # - name: copy-portal-skins
+ # emptyDir: {}
+
+ extraInitContainers: []
+ ## Containers, which are run before the app containers are started.
+ # - name: init-myservice
+ # image: busybox
+ # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;']
+
+ admissionWebhooks:
+ annotations: {}
+ enabled: true
+ failurePolicy: Fail
+ # timeoutSeconds: 10
+ port: 8443
+ certificate: "/usr/local/certificates/cert"
+ key: "/usr/local/certificates/key"
+ namespaceSelector: {}
+ objectSelector: {}
+
+ # Use an existing PSP instead of creating one
+ existingPsp: ""
+
+ service:
+ annotations: {}
+ # clusterIP: ""
+ externalIPs: []
+ # loadBalancerIP: ""
+ loadBalancerSourceRanges: []
+ servicePort: 443
+ type: ClusterIP
+
+ createSecretJob:
+ resources: {}
+ # limits:
+ # cpu: 10m
+ # memory: 20Mi
+ # requests:
+ # cpu: 10m
+ # memory: 20Mi
+
+ patchWebhookJob:
+ resources: {}
+
+ patch:
+ enabled: true
+ image:
+ registry: k8s.gcr.io
+ image: ingress-nginx/kube-webhook-certgen
+ # for backwards compatibility consider setting the full image url via the repository value below
+ # use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail
+ # repository:
+ tag: v1.0
+ digest: sha256:f3b6b39a6062328c095337b4cadcefd1612348fdd5190b1dcbcb9b9e90bd8068
+ pullPolicy: IfNotPresent
+ ## Provide a priority class name to the webhook patching job
+ ##
+ priorityClassName: ""
+ podAnnotations: {}
+ nodeSelector:
+ kubernetes.io/os: linux
+ tolerations: []
+ runAsUser: 2000
+
+ metrics:
+ port: 10254
+ # if this port is changed, change healthz-port: in extraArgs: accordingly
+ enabled: false
+
+ service:
+ annotations: {}
+ # prometheus.io/scrape: "true"
+ # prometheus.io/port: "10254"
+
+ # clusterIP: ""
+
+ ## List of IP addresses at which the stats-exporter service is available
+ ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+ ##
+ externalIPs: []
+
+ # loadBalancerIP: ""
+ loadBalancerSourceRanges: []
+ servicePort: 10254
+ type: ClusterIP
+ # externalTrafficPolicy: ""
+ # nodePort: ""
+
+ serviceMonitor:
+ enabled: false
+ additionalLabels: {}
+ # The label to use to retrieve the job name from.
+ # jobLabel: "app.kubernetes.io/name"
+ namespace: ""
+ namespaceSelector: {}
+ # Default: scrape .Release.Namespace only
+ # To scrape all, use the following:
+ # namespaceSelector:
+ # any: true
+ scrapeInterval: 30s
+ # honorLabels: true
+ targetLabels: []
+ metricRelabelings: []
+
+ prometheusRule:
+ enabled: false
+ additionalLabels: {}
+ # namespace: ""
+ rules: []
+ # # These are just examples rules, please adapt them to your needs
+ # - alert: NGINXConfigFailed
+ # expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0
+ # for: 1s
+ # labels:
+ # severity: critical
+ # annotations:
+ # description: bad ingress config - nginx config test failed
+ # summary: uninstall the latest ingress changes to allow config reloads to resume
+ # - alert: NGINXCertificateExpiry
+ # expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800
+ # for: 1s
+ # labels:
+ # severity: critical
+ # annotations:
+ # description: ssl certificate(s) will expire in less than a week
+ # summary: renew expiring certificates to avoid downtime
+ # - alert: NGINXTooMany500s
+ # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
+ # for: 1m
+ # labels:
+ # severity: warning
+ # annotations:
+ # description: Too many 5XXs
+ # summary: More than 5% of all requests returned 5XX, this requires your attention
+ # - alert: NGINXTooMany400s
+ # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
+ # for: 1m
+ # labels:
+ # severity: warning
+ # annotations:
+ # description: Too many 4XXs
+ # summary: More than 5% of all requests returned 4XX, this requires your attention
+
+ ## Improve connection draining when ingress controller pod is deleted using a lifecycle hook:
+ ## With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds
+ ## to 300, allowing the draining of connections up to five minutes.
+ ## If the active connections end before that, the pod will terminate gracefully at that time.
+ ## To effectively take advantage of this feature, the ConfigMap option
+ ## worker-shutdown-timeout has a new value of 240s instead of 10s.
+ ##
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /wait-shutdown
+
+ priorityClassName: ""
+
+## Rollback limit
+##
+revisionHistoryLimit: 10
+
+## Default 404 backend
+##
+defaultBackend:
+ ##
+ enabled: false
+
+ name: defaultbackend
+ image:
+ registry: k8s.gcr.io
+ image: defaultbackend-amd64
+ # for backwards compatibility consider setting the full image url via the repository value below
+ # use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail
+ # repository:
+ tag: "1.5"
+ pullPolicy: IfNotPresent
+ # nobody user -> uid 65534
+ runAsUser: 65534
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+
+ # Use an existing PSP instead of creating one
+ existingPsp: ""
+
+ extraArgs: {}
+
+ serviceAccount:
+ create: true
+ name: ""
+ automountServiceAccountToken: true
+ ## Additional environment variables to set for defaultBackend pods
+ extraEnvs: []
+
+ port: 8080
+
+ ## Readiness and liveness probes for default backend
+ ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+ ##
+ livenessProbe:
+ failureThreshold: 3
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+ readinessProbe:
+ failureThreshold: 6
+ initialDelaySeconds: 0
+ periodSeconds: 5
+ successThreshold: 1
+ timeoutSeconds: 5
+
+ ## Node tolerations for server scheduling to nodes with taints
+ ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ ##
+ tolerations: []
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+ affinity: {}
+
+ ## Security Context policies for controller pods
+ ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for
+ ## notes on enabling and using sysctls
+ ##
+ podSecurityContext: {}
+
+ # labels to add to the pod container metadata
+ podLabels: {}
+ # key: value
+
+ ## Node labels for default backend pod assignment
+ ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ ## Annotations to be added to default backend pods
+ ##
+ podAnnotations: {}
+
+ replicaCount: 1
+
+ minAvailable: 1
+
+ resources: {}
+ # limits:
+ # cpu: 10m
+ # memory: 20Mi
+ # requests:
+ # cpu: 10m
+ # memory: 20Mi
+
+ extraVolumeMounts: []
+ ## Additional volumeMounts to the default backend container.
+ # - name: copy-portal-skins
+ # mountPath: /var/lib/lemonldap-ng/portal/skins
+
+ extraVolumes: []
+ ## Additional volumes to the default backend pod.
+ # - name: copy-portal-skins
+ # emptyDir: {}
+
+ autoscaling:
+ annotations: {}
+ enabled: false
+ minReplicas: 1
+ maxReplicas: 2
+ targetCPUUtilizationPercentage: 50
+ targetMemoryUtilizationPercentage: 50
+
+ service:
+ annotations: {}
+
+ # clusterIP: ""
+
+ ## List of IP addresses at which the default backend service is available
+ ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+ ##
+ externalIPs: []
+
+ # loadBalancerIP: ""
+ loadBalancerSourceRanges: []
+ servicePort: 80
+ type: ClusterIP
+
+ priorityClassName: ""
+
+## Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266
+rbac:
+ create: true
+ scope: false
+
+# If true, create & use Pod Security Policy resources
+# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+podSecurityPolicy:
+ enabled: false
+
+serviceAccount:
+ create: true
+ name: ""
+ automountServiceAccountToken: true
+
+## Optional array of imagePullSecrets containing private registry credentials
+## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+imagePullSecrets: []
+# - name: secretName
+
+# TCP service key:value pairs
+# Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md
+##
+tcp: {}
+# 8080: "default/example-tcp-svc:9000"
+
+# UDP service key:value pairs
+# Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md
+##
+udp: {}
+# 53: "kube-system/kube-dns:53"
+
+# A base64ed Diffie-Hellman parameter
+# This can be generated with: openssl dhparam 4096 2> /dev/null | base64
+# Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param
+dhParam:
diff --git a/k8s/kube-prometheus-stack/.helmignore b/k8s/kube-prometheus-stack/.helmignore
new file mode 100644
index 0000000..1937f42
--- /dev/null
+++ b/k8s/kube-prometheus-stack/.helmignore
@@ -0,0 +1,28 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+# helm/charts
+OWNERS
+hack/
+ci/
+kube-prometheus-*.tgz
+
+unittests/
diff --git a/k8s/kube-prometheus-stack/CONTRIBUTING.md b/k8s/kube-prometheus-stack/CONTRIBUTING.md
new file mode 100644
index 0000000..f6ce2a3
--- /dev/null
+++ b/k8s/kube-prometheus-stack/CONTRIBUTING.md
@@ -0,0 +1,12 @@
+# Contributing Guidelines
+
+## How to contribute to this chart
+
+1. Fork this repository, develop and test your Chart.
+1. Bump the chart version for every change.
+1. Ensure PR title has the prefix `[kube-prometheus-stack]`
+1. When making changes to rules or dashboards, see the README.md section on how to sync data from upstream repositories
+1. Check that the `hack/minikube` folder has scripts to set up minikube and components of this chart that will allow all components to be scraped. You can use this configuration when validating your changes.
+1. Check for changes of RBAC rules.
+1. Check for changes in CRD specs.
+1. PR must pass the linter (`helm lint`)
diff --git a/k8s/kube-prometheus-stack/Chart.lock b/k8s/kube-prometheus-stack/Chart.lock
new file mode 100644
index 0000000..1ec7bcf
--- /dev/null
+++ b/k8s/kube-prometheus-stack/Chart.lock
@@ -0,0 +1,12 @@
+dependencies:
+- name: kube-state-metrics
+ repository: https://prometheus-community.github.io/helm-charts
+ version: 3.5.2
+- name: prometheus-node-exporter
+ repository: https://prometheus-community.github.io/helm-charts
+ version: 2.0.4
+- name: grafana
+ repository: https://grafana.github.io/helm-charts
+ version: 6.17.2
+digest: sha256:18d49ac2a5c01b00dd2bab9fe35d7a77999a06f7f36e03ab5ca5529a0c596896
+generated: "2021-10-19T15:36:25.610092181+03:00"
diff --git a/k8s/kube-prometheus-stack/Chart.yaml b/k8s/kube-prometheus-stack/Chart.yaml
new file mode 100644
index 0000000..4795d57
--- /dev/null
+++ b/k8s/kube-prometheus-stack/Chart.yaml
@@ -0,0 +1,50 @@
+annotations:
+ artifacthub.io/links: |
+ - name: Chart Source
+ url: https://github.com/prometheus-community/helm-charts
+ - name: Upstream Project
+ url: https://github.com/prometheus-operator/kube-prometheus
+ artifacthub.io/operator: "true"
+apiVersion: v2
+appVersion: 0.50.0
+dependencies:
+- condition: kubeStateMetrics.enabled
+ name: kube-state-metrics
+ repository: https://prometheus-community.github.io/helm-charts
+ version: 3.5.*
+- condition: nodeExporter.enabled
+ name: prometheus-node-exporter
+ repository: https://prometheus-community.github.io/helm-charts
+ version: 2.0.*
+- condition: grafana.enabled
+ name: grafana
+ repository: https://grafana.github.io/helm-charts
+ version: 6.17.*
+description: kube-prometheus-stack collects Kubernetes manifests, Grafana dashboards,
+ and Prometheus rules combined with documentation and scripts to provide easy to
+ operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus
+ Operator.
+home: https://github.com/prometheus-operator/kube-prometheus
+icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png
+keywords:
+- operator
+- prometheus
+- kube-prometheus
+kubeVersion: '>=1.16.0-0'
+maintainers:
+- name: vsliouniaev
+- name: bismarck
+- email: gianrubio@gmail.com
+ name: gianrubio
+- email: github.gkarthiks@gmail.com
+ name: gkarthiks
+- email: scott@r6by.com
+ name: scottrigby
+- email: miroslav.hadzhiev@gmail.com
+ name: Xtigyro
+name: kube-prometheus-stack
+sources:
+- https://github.com/prometheus-community/helm-charts
+- https://github.com/prometheus-operator/kube-prometheus
+type: application
+version: 19.2.2
diff --git a/k8s/kube-prometheus-stack/README.md b/k8s/kube-prometheus-stack/README.md
new file mode 100644
index 0000000..371d3a5
--- /dev/null
+++ b/k8s/kube-prometheus-stack/README.md
@@ -0,0 +1,480 @@
+# kube-prometheus-stack
+
+Installs the [kube-prometheus stack](https://github.com/prometheus-operator/kube-prometheus), a collection of Kubernetes manifests, [Grafana](http://grafana.com/) dashboards, and [Prometheus rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) combined with documentation and scripts to provide easy to operate end-to-end Kubernetes cluster monitoring with [Prometheus](https://prometheus.io/) using the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator).
+
+See the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) README for details about components, dashboards, and alerts.
+
+_Note: This chart was formerly named `prometheus-operator` chart, now renamed to more clearly reflect that it installs the `kube-prometheus` project stack, within which Prometheus Operator is only one component._
+
+## Prerequisites
+
+- Kubernetes 1.16+
+- Helm 3+
+
+## Get Repo Info
+
+```console
+helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+helm repo update
+```
+
+_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._
+
+## Install Chart
+
+```console
+# Helm
+$ helm install [RELEASE_NAME] prometheus-community/kube-prometheus-stack
+```
+
+_See [configuration](#configuration) below._
+
+_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
+
+## Dependencies
+
+By default this chart installs additional, dependent charts:
+
+- [prometheus-community/kube-state-metrics](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics)
+- [prometheus-community/prometheus-node-exporter](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter)
+- [grafana/grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana)
+
+To disable dependencies during installation, see [multiple releases](#multiple-releases) below.
+
+_See [helm dependency](https://helm.sh/docs/helm/helm_dependency/) for command documentation._
+
+## Uninstall Chart
+
+```console
+# Helm
+$ helm uninstall [RELEASE_NAME]
+```
+
+This removes all the Kubernetes components associated with the chart and deletes the release.
+
+_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
+
+CRDs created by this chart are not removed by default and should be manually cleaned up:
+
+```console
+kubectl delete crd alertmanagerconfigs.monitoring.coreos.com
+kubectl delete crd alertmanagers.monitoring.coreos.com
+kubectl delete crd podmonitors.monitoring.coreos.com
+kubectl delete crd probes.monitoring.coreos.com
+kubectl delete crd prometheuses.monitoring.coreos.com
+kubectl delete crd prometheusrules.monitoring.coreos.com
+kubectl delete crd servicemonitors.monitoring.coreos.com
+kubectl delete crd thanosrulers.monitoring.coreos.com
+```
+
+## Upgrading Chart
+
+```console
+# Helm
+$ helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack
+```
+
+With Helm v3, CRDs created by this chart are not updated by default and should be manually updated.
+Consult also the [Helm Documentation on CRDs](https://helm.sh/docs/chart_best_practices/custom_resource_definitions).
+
+_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
+
+### Upgrading an existing Release to a new major version
+
+A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions.
+
+### From 18.x to 19.x
+
+`kubeStateMetrics.serviceMonitor.namespaceOverride` was removed.
+Please use `kube-state-metrics.namespaceOverride` instead.
+
+### From 17.x to 18.x
+
+Version 18 upgrades prometheus-operator from 0.49.x to 0.50.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating:
+
+```console
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
+```
+
+### From 16.x to 17.x
+
+Version 17 upgrades prometheus-operator from 0.48.x to 0.49.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating:
+
+```console
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
+```
+
+### From 15.x to 16.x
+
+Version 16 upgrades kube-state-metrics to v2.0.0. This includes changed command-line arguments and removed metrics, see this [blog post](https://kubernetes.io/blog/2021/04/13/kube-state-metrics-v-2-0/). This version also removes Grafana dashboards that supported Kubernetes 1.14 or earlier.
+
+### From 14.x to 15.x
+
+Version 15 upgrades prometheus-operator from 0.46.x to 0.47.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating:
+
+```console
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
+```
+
+### From 13.x to 14.x
+
+Version 14 upgrades prometheus-operator from 0.45.x to 0.46.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating:
+
+```console
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
+```
+
+### From 12.x to 13.x
+
+Version 13 upgrades prometheus-operator from 0.44.x to 0.45.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating:
+
+```console
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.45.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.45.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.45.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
+```
+
+### From 11.x to 12.x
+
+Version 12 upgrades prometheus-operator from 0.43.x to 0.44.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating:
+
+```console
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.44/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
+```
+
+The chart was migrated to support only helm v3 and later.
+
+### From 10.x to 11.x
+
+Version 11 upgrades prometheus-operator from 0.42.x to 0.43.x. Starting with 0.43.x an additional `AlertmanagerConfigs` CRD is introduced. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating:
+
+```console
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.43/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
+```
+
+Version 11 removes the deprecated tlsProxy via ghostunnel in favor of native TLS support the prometheus-operator gained with v0.39.0.
+
+### From 9.x to 10.x
+
+Version 10 upgrades prometheus-operator from 0.38.x to 0.42.x. Starting with 0.40.x an additional `Probes` CRD is introduced. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating:
+
+```console
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.42/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
+```
+
+### From 8.x to 9.x
+
+Version 9 of the helm chart removes the existing `additionalScrapeConfigsExternal` in favour of `additionalScrapeConfigsSecret`. This change lets users specify the secret name and secret key to use for the additional scrape configuration of prometheus. This is useful for users that have prometheus-operator as a subchart and also have a template that creates the additional scrape configuration.
+
+### From 7.x to 8.x
+
+Due to new template functions being used in the rules in version 8.x.x of the chart, an upgrade to Prometheus Operator and Prometheus is necessary in order to support them. First, upgrade to the latest version of 7.x.x
+
+```console
+helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack --version 7.5.0
+```
+
+Then upgrade to 8.x.x
+
+```console
+helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack --version [8.x.x]
+```
+
+Minimal recommended Prometheus version for this chart release is `2.12.x`
+
+### From 6.x to 7.x
+
+Due to a change in grafana subchart, version 7.x.x now requires Helm >= 2.12.0.
+
+### From 5.x to 6.x
+
+Due to a change in deployment labels of kube-state-metrics, the upgrade requires `helm upgrade --force` in order to re-create the deployment. If this is not done an error will occur indicating that the deployment cannot be modified:
+
+```console
+invalid: spec.selector: Invalid value: v1.LabelSelector{MatchLabels:map[string]string{"app.kubernetes.io/name":"kube-state-metrics"}, MatchExpressions:[]v1.LabelSelectorRequirement(nil)}: field is immutable
+```
+
+If this error has already been encountered, a `helm history` command can be used to determine which release has worked, then `helm rollback` to the release, then `helm upgrade --force` to this new one
+
+## Configuration
+
+See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments:
+
+```console
+helm show values prometheus-community/kube-prometheus-stack
+```
+
+You may also `helm show values` on this chart's [dependencies](#dependencies) for additional options.
+
+### Multiple releases
+
+The same chart can be used to run multiple Prometheus instances in the same cluster if required. To achieve this, it is necessary to run only one instance of prometheus-operator and a pair of alertmanager pods for an HA configuration, while all other components need to be disabled. To disable a dependency during installation, set `kubeStateMetrics.enabled`, `nodeExporter.enabled` and `grafana.enabled` to `false`.
+
+## Work-Arounds for Known Issues
+
+### Running on private GKE clusters
+
+When Google configures the control plane for private clusters, they automatically configure VPC peering between your Kubernetes cluster’s network and a separate Google managed project. In order to restrict what Google is able to access within your cluster, the firewall rules configured restrict access to your Kubernetes pods. This means that in order to use the webhook component with a GKE private cluster, you must configure an additional firewall rule to allow the GKE control plane access to your webhook pod.
+
+You can read more information on how to add firewall rules for the GKE control plane nodes in the [GKE docs](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#add_firewall_rules)
+
+Alternatively, you can disable the hooks by setting `prometheusOperator.admissionWebhooks.enabled=false`.
+
+## PrometheusRules Admission Webhooks
+
+With Prometheus Operator version 0.30+, the core Prometheus Operator pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent malformed rules from being added to the cluster.
+
+### How the Chart Configures the Hooks
+
+A validating and mutating webhook configuration requires the endpoint to which the request is sent to use TLS. It is possible to set up custom certificates to do this, but in most cases, a self-signed certificate is enough. The setup of this component requires some more complex orchestration when using helm. The steps are created to be idempotent and to allow turning the feature on and off without running into helm quirks.
+
+1. A pre-install hook provisions a certificate into the same namespace using a format compatible with provisioning using end-user certificates. If the certificate already exists, the hook exits.
+2. The prometheus operator pod is configured to use a TLS proxy container, which will load that certificate.
+3. Validating and Mutating webhook configurations are created in the cluster, with their failure mode set to Ignore. This allows rules to be created by the same chart at the same time, even though the webhook has not yet been fully set up - it does not have the correct CA field set.
+4. A post-install hook reads the CA from the secret created by step 1 and patches the Validating and Mutating webhook configurations. This process will allow a custom CA provisioned by some other process to also be patched into the webhook configurations. The chosen failure policy is also patched into the webhook configurations
+
+### Alternatives
+
+It should be possible to use [jetstack/cert-manager](https://github.com/jetstack/cert-manager) if a more complete solution is required, but it has not been tested.
+
+You can enable automatic self-signed TLS certificate provisioning via cert-manager by setting the `prometheusOperator.admissionWebhooks.certManager.enabled` value to true.
+
+### Limitations
+
+Because the operator can only run as a single pod, there is potential for this component failure to cause rule deployment failure. Because this risk is outweighed by the benefit of having validation, the feature is enabled by default.
+
+## Developing Prometheus Rules and Grafana Dashboards
+
+This chart's Grafana Dashboards and Prometheus Rules are just a copy from [prometheus-operator/prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) and other sources, synced (with alterations) by scripts in the [hack](hack) folder. In order to introduce any changes you need to first [add them to the original repo](https://github.com/prometheus-operator/kube-prometheus/blob/master/docs/developing-prometheus-rules-and-grafana-dashboards.md) and then sync them here using the scripts.
+
+## Further Information
+
+For more in-depth documentation of configuration options meanings, please see
+
+- [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)
+- [Prometheus](https://prometheus.io/docs/introduction/overview/)
+- [Grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana#grafana-helm-chart)
+
+## prometheus.io/scrape
+
+The prometheus operator does not support annotation-based discovery of services, using the `PodMonitor` or `ServiceMonitor` CRD in its place as they provide far more configuration options.
+For information on how to use PodMonitors/ServiceMonitors, please see the documentation on the `prometheus-operator/prometheus-operator` documentation here:
+
+- [ServiceMonitors](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/getting-started.md#include-servicemonitors)
+- [PodMonitors](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/getting-started.md#include-podmonitors)
+- [Running Exporters](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/running-exporters.md)
+
+By default, Prometheus discovers PodMonitors and ServiceMonitors within its namespace, that are labeled with the same release tag as the prometheus-operator release.
+Sometimes, you may need to discover custom PodMonitors/ServiceMonitors, for example used to scrape data from third-party applications.
+An easy way of doing this, without compromising the default PodMonitors/ServiceMonitors discovery, is allowing Prometheus to discover all PodMonitors/ServiceMonitors within its namespace, without applying label filtering.
+To do so, you can set `prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues` and `prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues` to `false`.
+
+## Migrating from stable/prometheus-operator chart
+
+## Zero downtime
+
+Since `kube-prometheus-stack` is fully compatible with the `stable/prometheus-operator` chart, a migration without downtime can be achieved.
+However, the old name prefix needs to be kept. If you want the new name please follow the step by step guide below (with downtime).
+
+You can override the name to achieve this:
+
+```console
+helm upgrade prometheus-operator prometheus-community/kube-prometheus-stack -n monitoring --reuse-values --set nameOverride=prometheus-operator
+```
+
+**Note**: It is recommended to run this first with `--dry-run --debug`.
+
+## Redeploy with new name (downtime)
+
+If the **prometheus-operator** values are compatible with the new **kube-prometheus-stack** chart, please follow the below steps for migration:
+
+> The guide presumes that chart is deployed in `monitoring` namespace and the deployments are running there. If in other namespace, please replace the `monitoring` to the deployed namespace.
+
+1. Patch the PersistentVolume created/used by the prometheus-operator chart to `Retain` claim policy:
+
+ ```console
+    kubectl patch pv/<pv-name> -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
+ ```
+
+ **Note:** To execute the above command, the user must have a cluster wide permission. Please refer [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
+
+2. Uninstall the **prometheus-operator** release and delete the existing PersistentVolumeClaim, and verify the PV becomes Released.
+
+ ```console
+ helm uninstall prometheus-operator -n monitoring
+    kubectl delete pvc/<pvc-name> -n monitoring
+ ```
+
+ Additionally, you have to manually remove the remaining `prometheus-operator-kubelet` service.
+
+ ```console
+ kubectl delete service/prometheus-operator-kubelet -n kube-system
+ ```
+
+    You can choose to remove all your existing CRDs (ServiceMonitors, PodMonitors, etc.) if you want to.
+
+3. Remove current `spec.claimRef` values to change the PV's status from Released to Available.
+
+ ```console
+    kubectl patch pv/<pv-name> --type json -p='[{"op": "remove", "path": "/spec/claimRef"}]' -n monitoring
+ ```
+
+**Note:** To execute the above command, the user must have a cluster wide permission. Please refer to [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
+
+After these steps, proceed to a fresh **kube-prometheus-stack** installation and make sure the current release of **kube-prometheus-stack** matches the `volumeClaimTemplate` values in the `values.yaml`.
+
+The binding is done via matching a specific amount of storage requested and with certain access modes.
+
+For example, if you had storage specified as this with **prometheus-operator**:
+
+```yaml
+volumeClaimTemplate:
+ spec:
+ storageClassName: gp2
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: 50Gi
+```
+
+You have to specify matching `volumeClaimTemplate` with 50Gi storage and `ReadWriteOnce` access mode.
+
+Additionally, you should check the current AZ of your legacy installation's PV, and configure the fresh release to use the same AZ as the old one. If the pods are in a different AZ than the PV, the release will fail to bind the existing one, hence creating a new PV.
+
+This can be achieved either by specifying the labels through `values.yaml`, e.g. setting `prometheus.prometheusSpec.nodeSelector` to:
+
+```yaml
+nodeSelector:
+ failure-domain.beta.kubernetes.io/zone: east-west-1a
+```
+
+or passing these values as `--set` overrides during installation.
+
+The new release should now re-attach your previously released PV with its content.
+
+## Migrating from coreos/prometheus-operator chart
+
+The multiple charts have been combined into a single chart that installs prometheus operator, prometheus, alertmanager, grafana as well as the multitude of exporters necessary to monitor a cluster.
+
+There is no simple and direct migration path between the charts as the changes are extensive and intended to make the chart easier to support.
+
+The capabilities of the old chart are all available in the new chart, including the ability to run multiple prometheus instances on a single cluster - you will need to disable the parts of the chart you do not wish to deploy.
+
+You can check out the tickets for this change [here](https://github.com/prometheus-operator/prometheus-operator/issues/592) and [here](https://github.com/helm/charts/pull/6765).
+
+### High-level overview of Changes
+
+#### Added dependencies
+
+The chart has added 3 [dependencies](#dependencies).
+
+- Node-Exporter, Kube-State-Metrics: These components are loaded as dependencies into the chart, and are relatively simple components
+- Grafana: The Grafana chart is more feature-rich than this chart - it contains a sidecar that is able to load data sources and dashboards from configmaps deployed into the same cluster. For more information check out the [documentation for the chart](https://github.com/grafana/helm-charts/blob/main/charts/grafana/README.md)
+
+#### Kubelet Service
+
+Because the kubelet service has a new name in the chart, make sure to clean up the old kubelet service in the `kube-system` namespace to prevent counting container metrics twice.
+
+#### Persistent Volumes
+
+If you would like to keep the data of the current persistent volumes, it should be possible to attach existing volumes to new PVCs and PVs that are created using the conventions in the new chart. For example, in order to use an existing Azure disk for a helm release called `prometheus-migration` the following resources can be created:
+
+```yaml
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: pvc-prometheus-migration-prometheus-0
+spec:
+ accessModes:
+ - ReadWriteOnce
+ azureDisk:
+ cachingMode: None
+ diskName: pvc-prometheus-migration-prometheus-0
+ diskURI: /subscriptions/f5125d82-2622-4c50-8d25-3f7ba3e9ac4b/resourceGroups/sample-migration-resource-group/providers/Microsoft.Compute/disks/pvc-prometheus-migration-prometheus-0
+ fsType: ""
+ kind: Managed
+ readOnly: false
+ capacity:
+ storage: 1Gi
+ persistentVolumeReclaimPolicy: Delete
+ storageClassName: prometheus
+ volumeMode: Filesystem
+```
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ labels:
+ app.kubernetes.io/name: prometheus
+ prometheus: prometheus-migration-prometheus
+ name: prometheus-prometheus-migration-prometheus-db-prometheus-prometheus-migration-prometheus-0
+ namespace: monitoring
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+ storageClassName: prometheus
+ volumeMode: Filesystem
+ volumeName: pvc-prometheus-migration-prometheus-0
+```
+
+The PVC will take ownership of the PV and when you create a release using a persistent volume claim template it will use the existing PVCs as they match the naming convention used by the chart. For other cloud providers similar approaches can be used.
+
+#### KubeProxy
+
+The metrics bind address of kube-proxy defaults to `127.0.0.1:10249`, which Prometheus instances **cannot** access. You should expose metrics by changing the `metricsBindAddress` field value to `0.0.0.0:10249` if you want to collect them.
+
+Depending on the cluster, the relevant part `config.conf` will be in ConfigMap `kube-system/kube-proxy` or `kube-system/kube-proxy-config`. For example:
+
+```console
+kubectl -n kube-system edit cm kube-proxy
+```
+
+```yaml
+apiVersion: v1
+data:
+ config.conf: |-
+ apiVersion: kubeproxy.config.k8s.io/v1alpha1
+ kind: KubeProxyConfiguration
+ # ...
+ # metricsBindAddress: 127.0.0.1:10249
+ metricsBindAddress: 0.0.0.0:10249
+ # ...
+ kubeconfig.conf: |-
+ # ...
+kind: ConfigMap
+metadata:
+ labels:
+ app: kube-proxy
+ name: kube-proxy
+ namespace: kube-system
+```
diff --git a/k8s/kube-prometheus-stack/charts/grafana/.helmignore b/k8s/kube-prometheus-stack/charts/grafana/.helmignore
new file mode 100644
index 0000000..8cade13
--- /dev/null
+++ b/k8s/kube-prometheus-stack/charts/grafana/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.vscode
+.project
+.idea/
+*.tmproj
+OWNERS
diff --git a/k8s/kube-prometheus-stack/charts/grafana/Chart.yaml b/k8s/kube-prometheus-stack/charts/grafana/Chart.yaml
new file mode 100644
index 0000000..1ecd4f7
--- /dev/null
+++ b/k8s/kube-prometheus-stack/charts/grafana/Chart.yaml
@@ -0,0 +1,22 @@
+apiVersion: v2
+appVersion: 8.2.1
+description: The leading tool for querying and visualizing time series and metrics.
+home: https://grafana.net
+icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png
+kubeVersion: ^1.8.0-0
+maintainers:
+- email: zanhsieh@gmail.com
+ name: zanhsieh
+- email: rluckie@cisco.com
+ name: rtluckie
+- email: maor.friedman@redhat.com
+ name: maorfr
+- email: miroslav.hadzhiev@gmail.com
+ name: Xtigyro
+- email: mail@torstenwalter.de
+ name: torstenwalter
+name: grafana
+sources:
+- https://github.com/grafana/grafana
+type: application
+version: 6.17.2
diff --git a/k8s/kube-prometheus-stack/charts/grafana/README.md b/k8s/kube-prometheus-stack/charts/grafana/README.md
new file mode 100644
index 0000000..4fc35d9
--- /dev/null
+++ b/k8s/kube-prometheus-stack/charts/grafana/README.md
@@ -0,0 +1,528 @@
+# Grafana Helm Chart
+
+* Installs the web dashboarding system [Grafana](http://grafana.org/)
+
+## Get Repo Info
+
+```console
+helm repo add grafana https://grafana.github.io/helm-charts
+helm repo update
+```
+
+_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+helm install my-release grafana/grafana
+```
+
+## Uninstalling the Chart
+
+To uninstall/delete the my-release deployment:
+
+```console
+helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Upgrading an existing Release to a new major version
+
+A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an
+incompatible breaking change needing manual actions.
+
+### To 4.0.0 (And 3.12.1)
+
+This version requires Helm >= 2.12.0.
+
+### To 5.0.0
+
+You have to add --force to your helm upgrade command as the labels of the chart have changed.
+
+### To 6.0.0
+
+This version requires Helm >= 3.1.0.
+
+## Configuration
+
+| Parameter | Description | Default |
+|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------|
+| `replicas` | Number of nodes | `1` |
+| `podDisruptionBudget.minAvailable` | Pod disruption minimum available | `nil` |
+| `podDisruptionBudget.maxUnavailable` | Pod disruption maximum unavailable | `nil` |
+| `deploymentStrategy` | Deployment strategy | `{ "type": "RollingUpdate" }` |
+| `livenessProbe`                           | Liveness Probe settings                       | `{ "httpGet": { "path": "/api/health", "port": 3000 }, "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10 }` |
+| `readinessProbe` | Readiness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }`|
+| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "runAsGroup": 472, "fsGroup": 472}` |
+| `priorityClassName` | Name of Priority Class to assign pods | `nil` |
+| `image.repository` | Image repository | `grafana/grafana` |
+| `image.tag` | Image tag (`Must be >= 5.0.0`) | `8.0.3` |
+| `image.sha` | Image sha (optional) | `80c6d6ac633ba5ab3f722976fb1d9a138f87ca6a9934fcd26a5fc28cbde7dbfa` |
+| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
+| `image.pullSecrets` | Image pull secrets | `{}` |
+| `service.enabled` | Enable grafana service | `true` |
+| `service.type` | Kubernetes service type | `ClusterIP` |
+| `service.port` | Kubernetes port where service is exposed | `80` |
+| `service.portName` | Name of the port on the service | `service` |
+| `service.targetPort`                      | Internal port the service targets             | `3000`                                                   |
+| `service.nodePort` | Kubernetes service nodePort | `nil` |
+| `service.annotations` | Service annotations | `{}` |
+| `service.labels` | Custom labels | `{}` |
+| `service.clusterIP` | internal cluster service IP | `nil` |
+| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `nil` |
+| `service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to lb (if supported) | `[]` |
+| `service.externalIPs` | service external IP addresses | `[]` |
+| `extraExposePorts` | Additional service ports for sidecar containers| `[]` |
+| `hostAliases` | adds rules to the pod's /etc/hosts | `[]` |
+| `ingress.enabled` | Enables Ingress | `false` |
+| `ingress.annotations` | Ingress annotations (values are templated) | `{}` |
+| `ingress.labels` | Custom labels | `{}` |
+| `ingress.path` | Ingress accepted path | `/` |
+| `ingress.pathType` | Ingress type of path | `Prefix` |
+| `ingress.hosts` | Ingress accepted hostnames | `["chart-example.local"]` |
+| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions). Requires `ingress.hosts` to have one or more host entries. | `[]` |
+| `ingress.tls` | Ingress TLS configuration | `[]` |
+| `resources` | CPU/Memory resource requests/limits | `{}` |
+| `nodeSelector` | Node labels for pod assignment | `{}` |
+| `tolerations` | Toleration labels for pod assignment | `[]` |
+| `affinity` | Affinity settings for pod assignment | `{}` |
+| `extraInitContainers` | Init containers to add to the grafana pod | `{}` |
+| `extraContainers` | Sidecar containers to add to the grafana pod | `""` |
+| `extraContainerVolumes` | Volumes that can be mounted in sidecar containers | `[]` |
+| `extraLabels` | Custom labels for all manifests | `{}` |
+| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` |
+| `persistence.enabled` | Use persistent volume to store data | `false` |
+| `persistence.type` | Type of persistence (`pvc` or `statefulset`) | `pvc` |
+| `persistence.size` | Size of persistent volume claim | `10Gi` |
+| `persistence.existingClaim` | Use an existing PVC to persist data | `nil` |
+| `persistence.storageClassName` | Type of persistent volume claim | `nil` |
+| `persistence.accessModes` | Persistence access modes | `[ReadWriteOnce]` |
+| `persistence.annotations` | PersistentVolumeClaim annotations | `{}` |
+| `persistence.finalizers` | PersistentVolumeClaim finalizers | `[ "kubernetes.io/pvc-protection" ]` |
+| `persistence.subPath` | Mount a sub dir of the persistent volume | `nil` |
+| `persistence.inMemory.enabled` | If persistence is not enabled, whether to mount the local storage in-memory to improve performance | `false` |
+| `persistence.inMemory.sizeLimit` | SizeLimit for the in-memory local storage | `nil` |
+| `initChownData.enabled` | If false, don't reset data ownership at startup | true |
+| `initChownData.image.repository` | init-chown-data container image repository | `busybox` |
+| `initChownData.image.tag` | init-chown-data container image tag | `1.31.1` |
+| `initChownData.image.sha` | init-chown-data container image sha (optional)| `""` |
+| `initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` |
+| `initChownData.resources` | init-chown-data pod resource requests & limits | `{}` |
+| `schedulerName` | Alternate scheduler name | `nil` |
+| `env` | Extra environment variables passed to pods | `{}` |
+| `envValueFrom` | Environment variables from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. | `{}` |
+| `envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` |
+| `envRenderSecret` | Sensible environment variables passed to pods and stored as secret | `{}` |
+| `enableServiceLinks` | Inject Kubernetes services as environment variables. | `true` |
+| `extraSecretMounts` | Additional grafana server secret mounts | `[]` |
+| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` |
+| `extraConfigmapMounts` | Additional grafana server configMap volume mounts | `[]` |
+| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` |
+| `plugins` | Plugins to be loaded along with Grafana | `[]` |
+| `datasources` | Configure grafana datasources (passed through tpl) | `{}` |
+| `notifiers` | Configure grafana notifiers | `{}` |
+| `dashboardProviders` | Configure grafana dashboard providers | `{}` |
+| `dashboards` | Dashboards to import | `{}` |
+| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` |
+| `grafana.ini` | Grafana's primary configuration | `{}` |
+| `ldap.enabled` | Enable LDAP authentication | `false` |
+| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file, this must have the key `ldap-toml`. | `""` |
+| `ldap.config` | Grafana's LDAP configuration | `""` |
+| `annotations` | Deployment annotations | `{}` |
+| `labels` | Deployment labels | `{}` |
+| `podAnnotations` | Pod annotations | `{}` |
+| `podLabels` | Pod labels | `{}` |
+| `podPortName` | Name of the grafana port on the pod | `grafana` |
+| `sidecar.image.repository` | Sidecar image repository | `quay.io/kiwigrid/k8s-sidecar` |
+| `sidecar.image.tag` | Sidecar image tag | `1.12.3` |
+| `sidecar.image.sha` | Sidecar image sha (optional) | `""` |
+| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` |
+| `sidecar.resources` | Sidecar resources | `{}` |
+| `sidecar.enableUniqueFilenames` | Sets the kiwigrid/k8s-sidecar UNIQUE_FILENAMES environment variable | `false` |
+| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` |
+| `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` |
+| `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` |
+| `sidecar.dashboards.provider.orgid` | Id of the organisation, to which the dashboards should be added | `1` |
+| `sidecar.dashboards.provider.folder` | Logical folder in which grafana groups dashboards | `""` |
+| `sidecar.dashboards.provider.disableDelete` | Activate to avoid the deletion of imported dashboards | `false` |
+| `sidecar.dashboards.provider.allowUiUpdates` | Allow updating provisioned dashboards from the UI | `false` |
+| `sidecar.dashboards.provider.type` | Provider type | `file` |
+| `sidecar.dashboards.provider.foldersFromFilesStructure` | Allow Grafana to replicate dashboard structure from filesystem. | `false` |
+| `sidecar.dashboards.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
+| `sidecar.skipTlsVerify` | Set to true to skip tls verification for kube api calls | `nil` |
+| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` |
+| `sidecar.dashboards.labelValue` | Label value that config maps with dashboards should have to be added | `nil` |
+| `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted. | `/tmp/dashboards` |
+| `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` |
+| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` |
+| `sidecar.dashboards.searchNamespace` | Namespaces list. If specified, the sidecar will search for dashboards config-maps inside these namespaces.Otherwise the namespace in which the sidecar is running will be used.It's also possible to specify ALL to search in all namespaces. | `nil` |
+| `sidecar.dashboards.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` |
+| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` |
+| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` |
+| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `nil` |
+| `sidecar.datasources.searchNamespace` | Namespaces list. If specified, the sidecar will search for datasources config-maps inside these namespaces.Otherwise the namespace in which the sidecar is running will be used.It's also possible to specify ALL to search in all namespaces. | `nil` |
+| `sidecar.datasources.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` |
+| `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` |
+| `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` |
+| `sidecar.notifiers.searchNamespace` | Namespaces list. If specified, the sidecar will search for notifiers config-maps (or secrets) inside these namespaces.Otherwise the namespace in which the sidecar is running will be used.It's also possible to specify ALL to search in all namespaces. | `nil` |
+| `sidecar.notifiers.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` |
+| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` |
+| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` |
+| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` |
+| `admin.existingSecret` | The name of an existing secret containing the admin credentials. | `""` |
+| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` |
+| `admin.passwordKey` | The key in the existing admin secret containing the password. | `"admin-password"` |
+| `serviceAccount.autoMount` | Automount the service account token in the pod| `true` |
+| `serviceAccount.annotations` | ServiceAccount annotations | |
+| `serviceAccount.create` | Create service account | `true` |
+| `serviceAccount.name` | Service account name to use, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `` |
+| `serviceAccount.nameTest` | Service account name to use for test, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `nil` |
+| `rbac.create` | Create and use RBAC resources | `true` |
+| `rbac.namespaced`                         | Creates Role and RoleBinding instead of the default ClusterRole and ClusterRoleBinding for the grafana instance  | `false` |
+| `rbac.useExistingRole` | Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to the rolename set here. | `nil` |
+| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `true` |
+| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `true` |
+| `rbac.extraRoleRules` | Additional rules to add to the Role | [] |
+| `rbac.extraClusterRoleRules` | Additional rules to add to the ClusterRole | [] |
+| `command` | Define command to be executed by grafana container at startup | `nil` |
+| `testFramework.enabled` | Whether to create test-related resources | `true` |
+| `testFramework.image` | `test-framework` image repository. | `bats/bats` |
+| `testFramework.tag` | `test-framework` image tag. | `v1.4.1` |
+| `testFramework.imagePullPolicy` | `test-framework` image pull policy. | `IfNotPresent` |
+| `testFramework.securityContext` | `test-framework` securityContext | `{}` |
+| `downloadDashboards.env` | Environment variables to be passed to the `download-dashboards` container | `{}` |
+| `downloadDashboards.envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` |
+| `downloadDashboards.resources` | Resources of `download-dashboards` container | `{}` |
+| `downloadDashboardsImage.repository` | Curl docker image repo | `curlimages/curl` |
+| `downloadDashboardsImage.tag` | Curl docker image tag | `7.73.0` |
+| `downloadDashboardsImage.sha` | Curl docker image sha (optional) | `""` |
+| `downloadDashboardsImage.pullPolicy` | Curl docker image pull policy | `IfNotPresent` |
+| `namespaceOverride` | Override the deployment namespace | `""` (`Release.Namespace`) |
+| `serviceMonitor.enabled` | Use servicemonitor from prometheus operator | `false` |
+| `serviceMonitor.namespace` | Namespace this servicemonitor is installed in | |
+| `serviceMonitor.interval` | How frequently Prometheus should scrape | `1m` |
+| `serviceMonitor.path` | Path to scrape | `/metrics` |
+| `serviceMonitor.scheme` | Scheme to use for metrics scraping | `http` |
+| `serviceMonitor.tlsConfig` | TLS configuration block for the endpoint | `{}` |
+| `serviceMonitor.labels` | Labels for the servicemonitor passed to Prometheus Operator | `{}` |
+| `serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `30s` |
+| `serviceMonitor.relabelings` | MetricRelabelConfigs to apply to samples before ingestion. | `[]` |
+| `revisionHistoryLimit` | Number of old ReplicaSets to retain | `10` |
+| `imageRenderer.enabled` | Enable the image-renderer deployment & service | `false` |
+| `imageRenderer.image.repository` | image-renderer Image repository | `grafana/grafana-image-renderer` |
+| `imageRenderer.image.tag` | image-renderer Image tag | `latest` |
+| `imageRenderer.image.sha` | image-renderer Image sha (optional) | `""` |
+| `imageRenderer.image.pullPolicy` | image-renderer ImagePullPolicy | `Always` |
+| `imageRenderer.env` | extra env-vars for image-renderer | `{}` |
+| `imageRenderer.serviceAccountName` | image-renderer deployment serviceAccountName | `""` |
+| `imageRenderer.securityContext` | image-renderer deployment securityContext | `{}` |
+| `imageRenderer.hostAliases` | image-renderer deployment Host Aliases | `[]` |
+| `imageRenderer.priorityClassName` | image-renderer deployment priority class | `''` |
+| `imageRenderer.service.enabled` | Enable the image-renderer service | `true` |
+| `imageRenderer.service.portName` | image-renderer service port name | `'http'` |
+| `imageRenderer.service.port` | image-renderer service port used by both service and deployment | `8081` |
+| `imageRenderer.grafanaSubPath` | Grafana sub path to use for image renderer callback url | `''` |
+| `imageRenderer.podPortName` | name of the image-renderer port on the pod | `http` |
+| `imageRenderer.revisionHistoryLimit` | number of image-renderer replica sets to keep | `10` |
+| `imageRenderer.networkPolicy.limitIngress` | Enable a NetworkPolicy to limit inbound traffic from only the created grafana pods | `true` |
+| `imageRenderer.networkPolicy.limitEgress` | Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods | `false` |
+| `imageRenderer.resources` | Set resource limits for image-renderer pods | `{}` |
+
+### Example ingress with path
+
+With grafana 6.3 and above
+```yaml
+grafana.ini:
+ server:
+ domain: monitoring.example.com
+ root_url: "%(protocol)s://%(domain)s/grafana"
+ serve_from_sub_path: true
+ingress:
+ enabled: true
+ hosts:
+ - "monitoring.example.com"
+ path: "/grafana"
+```
+
+### Example of extraVolumeMounts
+
+A volume can be of type persistentVolumeClaim or hostPath, but not both at the same time.
+If neither an existingClaim nor a hostPath argument is given, the volume type defaults to emptyDir.
+
+```yaml
+- extraVolumeMounts:
+ - name: plugins
+ mountPath: /var/lib/grafana/plugins
+ subPath: configs/grafana/plugins
+ existingClaim: existing-grafana-claim
+ readOnly: false
+ - name: dashboards
+ mountPath: /var/lib/grafana/dashboards
+ hostPath: /usr/shared/grafana/dashboards
+ readOnly: false
+```
+
+## Import dashboards
+
+There are a few methods to import dashboards to Grafana. Below are some examples and explanations as to how to use each method:
+
+```yaml
+dashboards:
+ default:
+ some-dashboard:
+ json: |
+ {
+ "annotations":
+
+ ...
+ # Complete json file here
+ ...
+
+ "title": "Some Dashboard",
+ "uid": "abcd1234",
+ "version": 1
+ }
+ custom-dashboard:
+ # This is a path to a file inside the dashboards directory inside the chart directory
+ file: dashboards/custom-dashboard.json
+ prometheus-stats:
+ # Ref: https://grafana.com/dashboards/2
+ gnetId: 2
+ revision: 2
+ datasource: Prometheus
+ local-dashboard:
+ url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json
+```
+
+## BASE64 dashboards
+
+Dashboards could be stored on a server that does not return JSON directly and instead returns a Base64-encoded file (e.g. Gerrit).
+A new parameter has been added to the url use case: if you specify a b64content value equal to true after the url entry, Base64 decoding is applied before the file is saved to disk.
+If this entry is not set, or is equal to false, no decoding is applied to the file before saving it to disk.
+
+### Gerrit use case
+
+The Gerrit API for downloading files has the following schema: `https://yourgerritserver/a/{project-name}/branches/{branch-id}/files/{file-id}/content`, where {project-name} and
+{file-id} usually have '/' in their values, so they MUST be replaced by %2F. For example, if project-name is user/repo, branch-id is master and file-id is dir1/dir2/dashboard,
+the url value is `https://yourgerritserver/a/user%2Frepo/branches/master/files/dir1%2Fdir2%2Fdashboard/content`.
+
+## Sidecar for dashboards
+
+If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana
+pod. This container watches all configmaps (or secrets) in the cluster and filters out the ones with
+a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written
+to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported
+dashboards are deleted/updated.
+
+A recommendation is to use one configmap per dashboard, as removing one of several dashboards stored in a single
+configmap is currently not properly mirrored in grafana.
+
+Example dashboard config:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: sample-grafana-dashboard
+ labels:
+ grafana_dashboard: "1"
+data:
+ k8s-dashboard.json: |-
+ [...]
+```
+
+## Sidecar for datasources
+
+If the parameter `sidecar.datasources.enabled` is set, an init container is deployed in the grafana
+pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and
+filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in
+those secrets are written to a folder and accessed by grafana on startup. Using these yaml files,
+the data sources in grafana can be imported.
+
+Secrets are recommended over configmaps for this use case because datasources usually contain private
+data like usernames and passwords. Secrets are the more appropriate cluster resource to manage those.
+
+Example values to add a datasource adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file):
+
+```yaml
+datasources:
+ datasources.yaml:
+ apiVersion: 1
+ datasources:
+ # name of the datasource. Required
+ - name: Graphite
+ # datasource type. Required
+ type: graphite
+ # access mode. proxy or direct (Server or Browser in the UI). Required
+ access: proxy
+ # org id. will default to orgId 1 if not specified
+ orgId: 1
+ # url
+ url: http://localhost:8080
+ # database password, if used
+ password:
+ # database user, if used
+ user:
+ # database name, if used
+ database:
+ # enable/disable basic auth
+ basicAuth:
+ # basic auth username
+ basicAuthUser:
+ # basic auth password
+ basicAuthPassword:
+ # enable/disable with credentials headers
+ withCredentials:
+ # mark as default datasource. Max one per org
+ isDefault:
+ #