diff --git a/charts/.helmignore b/charts/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/charts/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/Chart.yaml b/charts/Chart.yaml
new file mode 100644
index 00000000..2ed480c6
--- /dev/null
+++ b/charts/Chart.yaml
@@ -0,0 +1,12 @@
+apiVersion: v2
+name: cht-interop
+description: A Helm chart for CHT Interoperability Stack
+type: application
+version: 1.0.0
+appVersion: "1.0"
+keywords:
+ - health
+ - interoperability
+ - openhim
+ - cht
+ - fhir
diff --git a/charts/templates/01-namespace.yaml b/charts/templates/01-namespace.yaml
new file mode 100644
index 00000000..91324fd1
--- /dev/null
+++ b/charts/templates/01-namespace.yaml
@@ -0,0 +1,8 @@
+{{- if .Values.createNamespace }}
+apiVersion: v1
+kind: Namespace # rendered only when createNamespace=true; set false when the namespace is managed outside this chart
+metadata:
+ name: {{ .Values.global.namespace }}
+ labels:
+ name: {{ .Values.global.namespace }}
+{{- end }}
diff --git a/charts/templates/02-configmap.yaml b/charts/templates/02-configmap.yaml
new file mode 100644
index 00000000..79ddc300
--- /dev/null
+++ b/charts/templates/02-configmap.yaml
@@ -0,0 +1,26 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: openhim-console-config # mounted by the openhim-console deployment at /usr/share/nginx/html/config/default.json
+ namespace: {{ .Values.global.namespace }}
+data: # each key below becomes a file when the ConfigMap is mounted as a volume
+ default.json: |
+ {
+ "version": "1.10.0",
+ "minimumCoreVersion": "3.4.0",
+{{- if eq .Values.cluster_type "eks" }}
+ "protocol": "https",
+ "host": "{{ .Values.ingress.openhimCoreHost }}",
+ "port": 443,
+{{- else }}
+ "protocol": "{{ .Values.openhim.console.config.protocol }}",
+ "host": "{{ .Values.openhim.console.config.host }}",
+ "port": {{ .Values.openhim.console.config.port }},
+{{- end }}
+ "title": "Admin Console",
+ "footerTitle": "OpenHIM Administration Console",
+ "footerPoweredBy": "Powered by OpenHIM",
+ "loginBanner": "",
+ "mediatorLastHeartbeatWarningSeconds": 60,
+ "mediatorLastHeartbeatDangerSeconds": 120
+ }
diff --git a/charts/templates/03-secrets.yaml b/charts/templates/03-secrets.yaml
new file mode 100644
index 00000000..d008b4eb
--- /dev/null
+++ b/charts/templates/03-secrets.yaml
@@ -0,0 +1,41 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: database-credentials
+ namespace: {{ .Values.global.namespace }}
+type: Opaque
+stringData:
+ couchdb-user: {{ .Values.couchdb.credentials.username | quote }}
+ couchdb-password: {{ .Values.couchdb.credentials.password | quote }}
+ couchdb-secret: {{ .Values.couchdb.credentials.secret | quote }}
+ postgres-user: {{ .Values.postgresql.credentials.username | quote }}
+ postgres-password: {{ .Values.postgresql.credentials.password | quote }}
+ postgres-db: {{ .Values.postgresql.credentials.database | quote }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: openhim-credentials
+ namespace: {{ .Values.global.namespace }}
+type: Opaque
+stringData:
+ openhim-username: "root@openhim.org" # default OpenHIM root account
+ openhim-password: "openhim-password" # CHANGEME: hardcoded dev default — move to values/external secret before production
+ openhim-client-password: "interop-password" # CHANGEME: hardcoded dev default
+ openhim-user-password: "interop-password" # CHANGEME: hardcoded dev default
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: mediator-credentials
+ namespace: {{ .Values.global.namespace }}
+type: Opaque
+stringData:
+ openhim-username: {{ .Values.mediator.credentials.openhimUsername | quote }}
+ openhim-password: {{ .Values.mediator.credentials.openhimPassword | quote }}
+ fhir-username: {{ .Values.mediator.credentials.fhirUsername | quote }}
+ fhir-password: {{ .Values.mediator.credentials.fhirPassword | quote }}
+ cht-username: {{ .Values.mediator.credentials.chtUsername | quote }}
+ cht-password: {{ .Values.mediator.credentials.chtPassword | quote }}
+ openimis-username: "Admin" # CHANGEME: hardcoded openIMIS demo credentials — unlike the values-driven entries above
+ openimis-password: "admin123" # CHANGEME: hardcoded openIMIS demo credentials
new file mode 100644
index 00000000..9d76752b
--- /dev/null
+++ b/charts/templates/04-persistent-volumes.yaml
@@ -0,0 +1,68 @@
+{{- if .Values.couchdb.enabled }}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: couchdb-data
+ namespace: {{ .Values.global.namespace }}
+spec:
+ accessModes:
+ - ReadWriteOnce
+ storageClassName: {{ .Values.persistence.storageClass }}
+ resources:
+ requests:
+ storage: {{ .Values.couchdb.storage }}
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: cht-credentials
+ namespace: {{ .Values.global.namespace }}
+spec:
+ accessModes:
+ - ReadWriteOnce
+ storageClassName: {{ .Values.persistence.storageClass }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.chtCredentials }}
+{{- end }}
+---
+{{- if .Values.cht.enabled }}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: cht-ssl
+ namespace: {{ .Values.global.namespace }}
+spec:
+ accessModes:
+ - ReadWriteOnce
+ storageClassName: {{ .Values.persistence.storageClass }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.chtSsl }}
+{{- end }}
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: hapi-db-volume
+ namespace: {{ .Values.global.namespace }}
+spec:
+ accessModes:
+ - ReadWriteOnce
+ storageClassName: {{ .Values.persistence.storageClass }}
+ resources:
+ requests:
+ storage: {{ .Values.postgresql.storage }}
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: mongo-data
+ namespace: {{ .Values.global.namespace }}
+spec:
+ accessModes:
+ - ReadWriteOnce
+ storageClassName: {{ .Values.persistence.storageClass }}
+ resources:
+ requests:
+ storage: {{ .Values.mongodb.storage }}
diff --git a/charts/templates/05-databases.yaml b/charts/templates/05-databases.yaml
new file mode 100644
index 00000000..9995327d
--- /dev/null
+++ b/charts/templates/05-databases.yaml
@@ -0,0 +1,200 @@
+# MongoDB for OpenHIM
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: mongo
+ namespace: {{ .Values.global.namespace }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: mongo
+ template:
+ metadata:
+ labels:
+ app: mongo
+ spec:
+ automountServiceAccountToken: false
+ containers:
+ - name: mongo
+ image: {{ .Values.mongodb.image }}
+ resources:
+ requests:
+ memory: {{ .Values.mongodb.resources.requests.memory | quote }}
+ cpu: {{ .Values.mongodb.resources.requests.cpu | quote }}
+ ephemeral-storage: {{ index .Values.mongodb.resources.requests "ephemeral-storage" | quote }}
+ limits:
+ memory: {{ .Values.mongodb.resources.limits.memory | quote }}
+ cpu: {{ .Values.mongodb.resources.limits.cpu | quote }}
+ ephemeral-storage: {{ index .Values.mongodb.resources.limits "ephemeral-storage" | quote }}
+ ports:
+ - containerPort: 27017
+ volumeMounts:
+ - name: mongo-storage
+ mountPath: /data/db
+ volumes:
+ - name: mongo-storage
+ persistentVolumeClaim:
+ claimName: mongo-data
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: mongo
+ namespace: {{ .Values.global.namespace }}
+spec:
+ selector:
+ app: mongo
+ ports:
+ - port: 27017
+ targetPort: 27017
+---
+# PostgreSQL for HAPI FHIR
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: hapi-db
+ namespace: {{ .Values.global.namespace }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: hapi-db
+ template:
+ metadata:
+ labels:
+ app: hapi-db
+ spec:
+ automountServiceAccountToken: false
+ containers:
+ - name: hapi-db
+ image: {{ .Values.postgresql.image }}
+ resources:
+ requests:
+ memory: {{ .Values.postgresql.resources.requests.memory | quote }}
+ cpu: {{ .Values.postgresql.resources.requests.cpu | quote }}
+ ephemeral-storage: {{ index .Values.postgresql.resources.requests "ephemeral-storage" | quote }}
+ limits:
+ memory: {{ .Values.postgresql.resources.limits.memory | quote }}
+ cpu: {{ .Values.postgresql.resources.limits.cpu | quote }}
+ ephemeral-storage: {{ index .Values.postgresql.resources.limits "ephemeral-storage" | quote }}
+ env:
+ - name: POSTGRES_USER
+ valueFrom:
+ secretKeyRef:
+ name: database-credentials
+ key: postgres-user
+ - name: POSTGRES_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: database-credentials
+ key: postgres-password
+ - name: POSTGRES_DB
+ valueFrom:
+ secretKeyRef:
+ name: database-credentials
+ key: postgres-db
+ - name: PGDATA
+ value: /var/lib/postgresql/data/pgdata
+ ports:
+ - containerPort: 5432
+ volumeMounts:
+ - name: hapi-db-storage
+ mountPath: /var/lib/postgresql/data
+ volumes:
+ - name: hapi-db-storage
+ persistentVolumeClaim:
+ claimName: hapi-db-volume
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: hapi-db
+ namespace: {{ .Values.global.namespace }}
+spec:
+ selector:
+ app: hapi-db
+ ports:
+ - port: 5432
+ targetPort: 5432
+---
+{{- if .Values.couchdb.enabled }}
+# CouchDB for CHT
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: couchdb
+ namespace: {{ .Values.global.namespace }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: couchdb
+ template:
+ metadata:
+ labels:
+ app: couchdb
+ spec:
+ automountServiceAccountToken: false
+ containers:
+ - name: couchdb
+ image: {{ .Values.couchdb.image }}
+ resources:
+ requests:
+ memory: {{ .Values.couchdb.resources.requests.memory | quote }}
+ cpu: {{ .Values.couchdb.resources.requests.cpu | quote }}
+ ephemeral-storage: {{ index .Values.couchdb.resources.requests "ephemeral-storage" | quote }}
+ limits:
+ memory: {{ .Values.couchdb.resources.limits.memory | quote }}
+ cpu: {{ .Values.couchdb.resources.limits.cpu | quote }}
+ ephemeral-storage: {{ index .Values.couchdb.resources.limits "ephemeral-storage" | quote }}
+ env:
+ - name: COUCHDB_USER
+ valueFrom:
+ secretKeyRef:
+ name: database-credentials
+ key: couchdb-user
+ - name: COUCHDB_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: database-credentials
+ key: couchdb-password
+ - name: COUCHDB_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: database-credentials
+ key: couchdb-secret
+ - name: COUCHDB_UUID
+ value: {{ .Values.couchdb.credentials.uuid | quote }}
+ - name: SVC_NAME
+ value: "couchdb"
+ - name: COUCHDB_LOG_LEVEL
+ value: "error"
+ ports:
+ - containerPort: 5984
+ volumeMounts:
+ - name: couchdb-data-storage
+ mountPath: /opt/couchdb/data
+ - name: cht-credentials-storage
+ mountPath: /opt/couchdb/etc/local.d/
+ volumes:
+ - name: couchdb-data-storage
+ persistentVolumeClaim:
+ claimName: couchdb-data
+ - name: cht-credentials-storage
+ persistentVolumeClaim:
+ claimName: cht-credentials
+ restartPolicy: Always
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: couchdb
+ namespace: {{ .Values.global.namespace }}
+spec:
+ selector:
+ app: couchdb
+ ports:
+ - port: 5984
+ targetPort: 5984
+{{- end }}
diff --git a/charts/templates/06-openhim-core.yaml b/charts/templates/06-openhim-core.yaml
new file mode 100644
index 00000000..f3600ccf
--- /dev/null
+++ b/charts/templates/06-openhim-core.yaml
@@ -0,0 +1,121 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: openhim-core
+ namespace: {{ .Values.global.namespace }}
+spec:
+ replicas: {{ .Values.openhim.core.replicas }}
+ selector:
+ matchLabels:
+ app: openhim-core
+ template:
+ metadata:
+ labels:
+ app: openhim-core
+ spec:
+ automountServiceAccountToken: false
+ containers:
+ - name: openhim-core
+ image: {{ .Values.openhim.core.image }}
+ env:
+ - name: mongo_url
+ value: "mongodb://mongo/openhim"
+ - name: mongo_atnaUrl
+ value: "mongodb://mongo/openhim"
+ ports:
+ - containerPort: 8080
+ - containerPort: 5000
+ - containerPort: 5001
+ - containerPort: 5050
+ - containerPort: 5051
+ - containerPort: 5052
+ - containerPort: 7788
+ resources:
+ requests:
+ memory: {{ .Values.openhim.core.resources.requests.memory | quote }}
+ cpu: {{ .Values.openhim.core.resources.requests.cpu | quote }}
+ ephemeral-storage: {{ index .Values.openhim.core.resources.requests "ephemeral-storage" | quote }}
+ limits:
+ memory: {{ .Values.openhim.core.resources.limits.memory | quote }}
+ cpu: {{ .Values.openhim.core.resources.limits.cpu | quote }}
+ ephemeral-storage: {{ index .Values.openhim.core.resources.limits "ephemeral-storage" | quote }}
+ readinessProbe:
+ httpGet:
+ path: /heartbeat
+ port: 8080
+ scheme: HTTPS # OpenHIM core's API on 8080 is served over TLS (mediator also targets https://openhim-core:8080)
+ initialDelaySeconds: 60
+ periodSeconds: 20
+ timeoutSeconds: 10
+ failureThreshold: 5
+ livenessProbe:
+ httpGet:
+ path: /heartbeat
+ port: 8080
+ scheme: HTTPS # HTTPS required here too; a plain HTTP probe against the TLS port would fail
+ initialDelaySeconds: 120
+ periodSeconds: 60
+ timeoutSeconds: 10
+ failureThreshold: 3
+ initContainers:
+ - name: wait-for-mongo
+ image: busybox:1.35
+ command: ['sh', '-c', 'until nc -z mongo 27017; do echo waiting for mongo; sleep 2; done']
+ restartPolicy: Always
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: openhim-core
+ namespace: {{ .Values.global.namespace }}
+spec:
+ selector:
+ app: openhim-core
+ ports:
+ - name: api
+ port: 8080
+ targetPort: 8080
+ - name: router-http
+ port: 5000
+ targetPort: 5000
+ - name: router-https
+ port: 5001
+ targetPort: 5001
+ - name: router-tcp
+ port: 5050
+ targetPort: 5050
+ - name: router-tls
+ port: 5051
+ targetPort: 5051
+ - name: router-polling
+ port: 5052
+ targetPort: 5052
+ - name: tcp-adapter
+ port: 7788
+ targetPort: 7788
+---
+# Expose OpenHIM Core API externally
+{{- if ne .Values.cluster_type "eks" }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: openhim-core-external
+ namespace: {{ .Values.global.namespace }}
+spec:
+ type: NodePort
+ selector:
+ app: openhim-core
+ ports:
+ - name: api
+ port: 8080
+ targetPort: 8080
+ nodePort: {{ .Values.services.nodePort.openhimCore | default 30081 }}
+ - name: router-http
+ port: 5000
+ targetPort: 5000
+ nodePort: {{ .Values.services.nodePort.openhimRouterHttp | default 30500 }}
+ - name: router-https
+ port: 5001
+ targetPort: 5001
+ nodePort: {{ .Values.services.nodePort.openhimRouterHttps | default 30501 }}
+{{- end }}
diff --git a/charts/templates/07-openhim-console.yaml b/charts/templates/07-openhim-console.yaml
new file mode 100644
index 00000000..d4fcfb7b
--- /dev/null
+++ b/charts/templates/07-openhim-console.yaml
@@ -0,0 +1,84 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: openhim-console
+ namespace: {{ .Values.global.namespace }}
+spec:
+ replicas: {{ .Values.openhim.console.replicas }}
+ selector:
+ matchLabels:
+ app: openhim-console
+ template:
+ metadata:
+ labels:
+ app: openhim-console
+ spec:
+ automountServiceAccountToken: false
+ containers:
+ - name: openhim-console
+ image: {{ .Values.openhim.console.image }}
+ ports:
+ - containerPort: 80
+ resources:
+ requests:
+ memory: {{ .Values.openhim.console.resources.requests.memory | quote }}
+ cpu: {{ .Values.openhim.console.resources.requests.cpu | quote }}
+ ephemeral-storage: {{ index .Values.openhim.console.resources.requests "ephemeral-storage" | quote }}
+ limits:
+ memory: {{ .Values.openhim.console.resources.limits.memory | quote }}
+ cpu: {{ .Values.openhim.console.resources.limits.cpu | quote }}
+ ephemeral-storage: {{ index .Values.openhim.console.resources.limits "ephemeral-storage" | quote }}
+ volumeMounts:
+ - name: console-config
+ mountPath: /usr/share/nginx/html/config/default.json
+ subPath: default.json
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 80
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 80
+ initialDelaySeconds: 30
+ periodSeconds: 30
+ volumes:
+ - name: console-config
+ configMap:
+ name: openhim-console-config
+ initContainers:
+ - name: wait-for-openhim-core
+ image: busybox:1.35
+ command: ['sh', '-c', 'until nc -z openhim-core 8080; do echo waiting for openhim-core; sleep 2; done']
+ restartPolicy: Always
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: openhim-console
+ namespace: {{ .Values.global.namespace }}
+spec:
+ selector:
+ app: openhim-console
+ ports:
+ - port: 80
+ targetPort: 80
+---
+# Expose OpenHIM Console externally via NodePort (for local k3d development)
+{{- if ne .Values.cluster_type "eks" }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: openhim-console-external
+ namespace: {{ .Values.global.namespace }}
+spec:
+ type: NodePort
+ selector:
+ app: openhim-console
+ ports:
+ - port: 80
+ targetPort: 80
+ nodePort: {{ .Values.services.nodePort.console }}
+{{- end }}
diff --git a/charts/templates/08-hapi-fhir.yaml b/charts/templates/08-hapi-fhir.yaml
new file mode 100644
index 00000000..aaf3a1de
--- /dev/null
+++ b/charts/templates/08-hapi-fhir.yaml
@@ -0,0 +1,88 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: hapi-fhir
+ namespace: {{ .Values.global.namespace }}
+spec:
+ replicas: {{ .Values.hapiFhir.replicas }}
+ selector:
+ matchLabels:
+ app: hapi-fhir
+ template:
+ metadata:
+ labels:
+ app: hapi-fhir
+ spec:
+ automountServiceAccountToken: false
+ containers:
+ - name: hapi-fhir
+ image: {{ .Values.hapiFhir.image }}
+ env:
+ - name: spring.datasource.url
+ value: "jdbc:postgresql://hapi-db:5432/hapi"
+ - name: spring.datasource.username
+ valueFrom:
+ secretKeyRef:
+ name: database-credentials
+ key: postgres-user
+ - name: spring.datasource.password
+ valueFrom:
+ secretKeyRef:
+ name: database-credentials
+ key: postgres-password
+ - name: spring.datasource.driverClassName
+ value: "org.postgresql.Driver"
+ - name: spring.jpa.properties.hibernate.dialect
+ value: "org.hibernate.dialect.PostgreSQL95Dialect"
+ - name: hapi.fhir.allow_external_references
+ value: "true"
+ - name: hapi.fhir.bulk_export_enabled
+ value: "true"
+ - name: hapi.fhir.subscription.resthook_enabled
+ value: "true"
+ - name: JAVA_TOOL_OPTIONS
+ value: "-Xmx2g"
+ - name: CATALINA_OPTS
+ value: "-Xmx2g"
+ ports:
+ - containerPort: 8080
+ resources:
+ requests:
+ memory: {{ .Values.hapiFhir.resources.requests.memory | quote }}
+ cpu: {{ .Values.hapiFhir.resources.requests.cpu | quote }}
+ ephemeral-storage: {{ index .Values.hapiFhir.resources.requests "ephemeral-storage" | quote }}
+ limits:
+ memory: {{ .Values.hapiFhir.resources.limits.memory | quote }}
+ cpu: {{ .Values.hapiFhir.resources.limits.cpu | quote }}
+ ephemeral-storage: {{ index .Values.hapiFhir.resources.limits "ephemeral-storage" | quote }}
+ readinessProbe:
+ httpGet:
+ path: /fhir/metadata
+ port: 8080
+ initialDelaySeconds: 60
+ periodSeconds: 30
+ timeoutSeconds: 10
+ livenessProbe:
+ httpGet:
+ path: /fhir/metadata
+ port: 8080
+ initialDelaySeconds: 120
+ periodSeconds: 60
+ timeoutSeconds: 10
+ initContainers:
+ - name: wait-for-hapi-db
+ image: postgres:14.1
+ command: ['sh', '-c', 'until pg_isready -h hapi-db -p 5432; do echo waiting for database; sleep 2; done']
+ restartPolicy: Always
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: hapi-fhir
+ namespace: {{ .Values.global.namespace }}
+spec:
+ selector:
+ app: hapi-fhir
+ ports:
+ - port: 8080
+ targetPort: 8080
diff --git a/charts/templates/09-cht-services.yaml b/charts/templates/09-cht-services.yaml
new file mode 100644
index 00000000..8216837c
--- /dev/null
+++ b/charts/templates/09-cht-services.yaml
@@ -0,0 +1,391 @@
+{{- if .Values.cht.enabled }}
+# HAProxy for CouchDB load balancing
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: haproxy
+  namespace: {{ .Values.global.namespace }}
+spec:
+  replicas: {{ .Values.cht.haproxy.replicas }}
+  selector:
+    matchLabels:
+      app: haproxy
+  template:
+    metadata:
+      labels:
+        app: haproxy
+    spec:
+      automountServiceAccountToken: false
+      containers:
+        - name: haproxy
+          image: {{ .Values.cht.haproxy.image }}
+          resources:
+            requests:
+              memory: {{ .Values.cht.haproxy.resources.requests.memory | quote }}
+              cpu: {{ .Values.cht.haproxy.resources.requests.cpu | quote }}
+              ephemeral-storage: {{ index .Values.cht.haproxy.resources.requests "ephemeral-storage" | quote }}
+            limits:
+              memory: {{ .Values.cht.haproxy.resources.limits.memory | quote }}
+              cpu: {{ .Values.cht.haproxy.resources.limits.cpu | quote }}
+              ephemeral-storage: {{ index .Values.cht.haproxy.resources.limits "ephemeral-storage" | quote }}
+          env:
+            - name: HAPROXY_IP
+              value: "0.0.0.0"
+            - name: COUCHDB_USER
+              valueFrom:
+                secretKeyRef:
+                  name: database-credentials
+                  key: couchdb-user
+            - name: COUCHDB_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: database-credentials
+                  key: couchdb-password
+            - name: COUCHDB_SERVERS
+              value: "couchdb"
+            - name: HAPROXY_PORT
+              value: "5984"
+            - name: HEALTHCHECK_ADDR
+              value: "healthcheck"
+          ports:
+            - containerPort: 5984
+      restartPolicy: Always
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: haproxy
+  namespace: {{ .Values.global.namespace }}
+spec:
+  selector:
+    app: haproxy
+  ports:
+    - port: 5984
+      targetPort: 5984
+---
+# CHT HAProxy Healthcheck
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: healthcheck
+  namespace: {{ .Values.global.namespace }}
+spec:
+  replicas: {{ .Values.cht.healthcheck.replicas }}
+  selector:
+    matchLabels:
+      app: healthcheck
+  template:
+    metadata:
+      labels:
+        app: healthcheck
+    spec:
+      automountServiceAccountToken: false
+      containers:
+        - name: healthcheck
+          image: {{ .Values.cht.healthcheck.image }}
+          resources:
+            requests:
+              memory: {{ .Values.cht.healthcheck.resources.requests.memory | quote }}
+              cpu: {{ .Values.cht.healthcheck.resources.requests.cpu | quote }}
+              ephemeral-storage: {{ index .Values.cht.healthcheck.resources.requests "ephemeral-storage" | quote }}
+            limits:
+              memory: {{ .Values.cht.healthcheck.resources.limits.memory | quote }}
+              cpu: {{ .Values.cht.healthcheck.resources.limits.cpu | quote }}
+              ephemeral-storage: {{ index .Values.cht.healthcheck.resources.limits "ephemeral-storage" | quote }}
+          env:
+            - name: COUCHDB_SERVERS
+              value: "couchdb"
+            - name: COUCHDB_USER
+              valueFrom:
+                secretKeyRef:
+                  name: database-credentials
+                  key: couchdb-user
+            - name: COUCHDB_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: database-credentials
+                  key: couchdb-password
+      restartPolicy: Always
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: healthcheck
+  namespace: {{ .Values.global.namespace }}
+spec:
+  selector:
+    app: healthcheck
+  ports:
+    - port: 80
+      targetPort: 80
+---
+# CHT API
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: api
+  namespace: {{ .Values.global.namespace }}
+spec:
+  replicas: {{ .Values.cht.api.replicas }}
+  selector:
+    matchLabels:
+      app: api
+  template:
+    metadata:
+      labels:
+        app: api
+    spec:
+      automountServiceAccountToken: false
+      hostname: localhost
+      containers:
+        - name: api
+          image: {{ .Values.cht.api.image }}
+          resources:
+            requests:
+              memory: {{ .Values.cht.api.resources.requests.memory | quote }}
+              cpu: {{ .Values.cht.api.resources.requests.cpu | quote }}
+              ephemeral-storage: {{ index .Values.cht.api.resources.requests "ephemeral-storage" | quote }}
+            limits:
+              memory: {{ .Values.cht.api.resources.limits.memory | quote }}
+              cpu: {{ .Values.cht.api.resources.limits.cpu | quote }}
+              ephemeral-storage: {{ index .Values.cht.api.resources.limits "ephemeral-storage" | quote }}
+          env:
+            # CouchDB login comes from the shared database-credentials Secret so it
+            # always matches the CouchDB/HAProxy deployments (was hardcoded admin:password).
+            - name: COUCHDB_USER
+              valueFrom:
+                secretKeyRef:
+                  name: database-credentials
+                  key: couchdb-user
+            - name: COUCHDB_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: database-credentials
+                  key: couchdb-password
+            # Kubernetes expands $(VAR) because both vars are declared earlier in this list.
+            - name: COUCH_URL
+              value: "http://$(COUCHDB_USER):$(COUCHDB_PASSWORD)@haproxy:5984/medic"
+            - name: BUILDS_URL
+              value: "https://staging.dev.medicmobile.org/_couch/builds_4"
+            - name: UPGRADE_SERVICE_URL
+              value: "http://localhost:5100"
+            - name: HOST
+              value: "0.0.0.0"
+            - name: HOSTNAME
+              value: "0.0.0.0"
+            - name: API_HOST
+              value: "0.0.0.0"
+            - name: BIND_ADDRESS
+              value: "0.0.0.0"
+            - name: API_PORT
+              value: "5988"
+            - name: PORT
+              value: "5988"
+          ports:
+            - containerPort: 5988
+          readinessProbe:
+            httpGet:
+              path: /api/info
+              port: 5988
+            initialDelaySeconds: 30
+            periodSeconds: 10
+          livenessProbe:
+            httpGet:
+              path: /api/info
+              port: 5988
+            initialDelaySeconds: 60
+            periodSeconds: 30
+      initContainers:
+        - name: wait-for-haproxy
+          image: busybox:1.35
+          command: ['sh', '-c', 'until nc -z haproxy 5984; do echo waiting for haproxy; sleep 2; done']
+      restartPolicy: Always
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: api
+  namespace: {{ .Values.global.namespace }}
+spec:
+  selector:
+    app: api
+  ports:
+    - port: 5988
+      targetPort: 5988
+---
+# CHT Sentinel
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: sentinel
+  namespace: {{ .Values.global.namespace }}
+spec:
+  replicas: {{ .Values.cht.sentinel.replicas }}
+  selector:
+    matchLabels:
+      app: sentinel
+  template:
+    metadata:
+      labels:
+        app: sentinel
+    spec:
+      automountServiceAccountToken: false
+      containers:
+        - name: sentinel
+          image: {{ .Values.cht.sentinel.image }}
+          resources:
+            requests:
+              memory: {{ .Values.cht.sentinel.resources.requests.memory | quote }}
+              cpu: {{ .Values.cht.sentinel.resources.requests.cpu | quote }}
+              ephemeral-storage: {{ index .Values.cht.sentinel.resources.requests "ephemeral-storage" | quote }}
+            limits:
+              memory: {{ .Values.cht.sentinel.resources.limits.memory | quote }}
+              cpu: {{ .Values.cht.sentinel.resources.limits.cpu | quote }}
+              ephemeral-storage: {{ index .Values.cht.sentinel.resources.limits "ephemeral-storage" | quote }}
+          env:
+            # Same secret-backed CouchDB login as the api deployment
+            # (was hardcoded admin:password, which desyncs from the Secret).
+            - name: COUCHDB_USER
+              valueFrom:
+                secretKeyRef:
+                  name: database-credentials
+                  key: couchdb-user
+            - name: COUCHDB_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: database-credentials
+                  key: couchdb-password
+            # Kubernetes expands $(VAR) because both vars are declared earlier in this list.
+            - name: COUCH_URL
+              value: "http://$(COUCHDB_USER):$(COUCHDB_PASSWORD)@haproxy:5984/medic"
+            - name: API_HOST
+              value: "api"
+      initContainers:
+        - name: wait-for-haproxy
+          image: busybox:1.35
+          command: ['sh', '-c', 'until nc -z haproxy 5984; do echo waiting for haproxy; sleep 2; done']
+      restartPolicy: Always
+---
+# CHT Nginx
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nginx
+  namespace: {{ .Values.global.namespace }}
+spec:
+  replicas: {{ .Values.cht.nginx.replicas }}
+  selector:
+    matchLabels:
+      app: nginx
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      automountServiceAccountToken: false
+      containers:
+        - name: nginx
+          image: {{ .Values.cht.nginx.image }}
+          resources:
+            requests:
+              memory: {{ .Values.cht.nginx.resources.requests.memory | quote }}
+              cpu: {{ .Values.cht.nginx.resources.requests.cpu | quote }}
+              ephemeral-storage: {{ index .Values.cht.nginx.resources.requests "ephemeral-storage" | quote }}
+            limits:
+              memory: {{ .Values.cht.nginx.resources.limits.memory | quote }}
+              cpu: {{ .Values.cht.nginx.resources.limits.cpu | quote }}
+              ephemeral-storage: {{ index .Values.cht.nginx.resources.limits "ephemeral-storage" | quote }}
+          env:
+            - name: API_HOST
+              value: "api"
+            - name: API_PORT
+              value: "5988"
+            - name: CERTIFICATE_MODE
+              value: "SELF_SIGNED"
+            - name: SSL_CERT_FILE_PATH
+              value: "/etc/nginx/private/cert.pem"
+            - name: SSL_KEY_FILE_PATH
+              value: "/etc/nginx/private/key.pem"
+            - name: COMMON_NAME
+              value: "test-nginx.dev.medicmobile.org"
+            - name: EMAIL
+              value: "domains@medic.org"
+            - name: COUNTRY
+              value: "US"
+            - name: STATE
+              value: "California"
+            - name: LOCALITY
+              value: "San_Francisco"
+            - name: ORGANISATION
+              value: "medic"
+            - name: DEPARTMENT
+              value: "Information_Security"
+          ports:
+            - containerPort: 80
+            - containerPort: 443
+          volumeMounts:
+            - name: cht-ssl-storage
+              mountPath: /etc/nginx/private/
+          readinessProbe:
+            httpGet:
+              path: /
+              port: 80
+            initialDelaySeconds: 20
+            periodSeconds: 10
+          livenessProbe:
+            httpGet:
+              path: /
+              port: 80
+            initialDelaySeconds: 40
+            periodSeconds: 30
+      volumes:
+        - name: cht-ssl-storage
+          persistentVolumeClaim:
+            claimName: cht-ssl
+      initContainers:
+        - name: wait-for-api
+          image: busybox:1.35
+          command: ['sh', '-c', 'until nc -z api 5988; do echo waiting for api; sleep 2; done']
+        - name: wait-for-haproxy
+          image: busybox:1.35
+          command: ['sh', '-c', 'until nc -z haproxy 5984; do echo waiting for haproxy; sleep 2; done']
+      restartPolicy: Always
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: nginx
+  namespace: {{ .Values.global.namespace }}
+spec:
+  selector:
+    app: nginx
+  ports:
+    - name: http
+      port: 80
+      targetPort: 80
+    - name: https
+      port: 443
+      targetPort: 443
+---
+# Expose CHT Nginx externally via NodePort (for local k3d development)
+{{- if ne .Values.cluster_type "eks" }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: nginx-external
+  namespace: {{ .Values.global.namespace }}
+spec:
+  type: NodePort
+  selector:
+    app: nginx
+  ports:
+    - name: http
+      port: 80
+      targetPort: 80
+      nodePort: {{ .Values.services.nodePort.nginx }}
+    - name: https
+      port: 443
+      targetPort: 443
+      nodePort: {{ .Values.services.nodePort.nginxHttps }}
+{{- end }}
+{{- end }}
diff --git a/charts/templates/10-mediator-services.yaml b/charts/templates/10-mediator-services.yaml
new file mode 100644
index 00000000..72e094fb
--- /dev/null
+++ b/charts/templates/10-mediator-services.yaml
@@ -0,0 +1,206 @@
+# Configurator - runs as a Job to set up initial configuration
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: configurator
+ namespace: {{ .Values.global.namespace }}
+spec:
+ template:
+ spec:
+ automountServiceAccountToken: false
+ containers:
+ - name: configurator
+ image: {{ .Values.configurator.image }}
+ imagePullPolicy: {{ .Values.configurator.imagePullPolicy }}
+ resources:
+ requests:
+ memory: {{ .Values.configurator.resources.requests.memory | quote }}
+ cpu: {{ .Values.configurator.resources.requests.cpu | quote }}
+ ephemeral-storage: {{ index .Values.configurator.resources.requests "ephemeral-storage" | quote }}
+ limits:
+ memory: {{ .Values.configurator.resources.limits.memory | quote }}
+ cpu: {{ .Values.configurator.resources.limits.cpu | quote }}
+ ephemeral-storage: {{ index .Values.configurator.resources.limits "ephemeral-storage" | quote }}
+ env:
+ - name: COUCHDB_USER
+ valueFrom:
+ secretKeyRef:
+ name: database-credentials
+ key: couchdb-user
+ - name: COUCHDB_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: database-credentials
+ key: couchdb-password
+ - name: OPENHIM_API_HOSTNAME
+ value: "openhim-core"
+ - name: OPENHIM_API_PORT
+ value: "8080"
+ - name: OPENHIM_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: openhim-credentials
+ key: openhim-password
+ - name: OPENHIM_USERNAME
+ valueFrom:
+ secretKeyRef:
+ name: openhim-credentials
+ key: openhim-username
+ - name: OPENHIM_CLIENT_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: openhim-credentials
+ key: openhim-client-password
+ - name: OPENHIM_USER_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: openhim-credentials
+ key: openhim-user-password
+ initContainers:
+ - name: wait-for-openhim-core
+ image: busybox:1.35
+ command: ['sh', '-c', 'until nc -z openhim-core 8080; do echo waiting for openhim-core; sleep 5; done']
+ restartPolicy: OnFailure
+ backoffLimit: 3
+---
+# Mediator Service
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: mediator
+ namespace: {{ .Values.global.namespace }}
+spec:
+ replicas: {{ .Values.mediator.replicas }}
+ selector:
+ matchLabels:
+ app: mediator
+ template:
+ metadata:
+ labels:
+ app: mediator
+ spec:
+ automountServiceAccountToken: false
+ containers:
+ - name: mediator
+ image: {{ .Values.mediator.image }}
+ imagePullPolicy: {{ .Values.mediator.imagePullPolicy }}
+ resources:
+ requests:
+ memory: {{ .Values.mediator.resources.requests.memory | quote }}
+ cpu: {{ .Values.mediator.resources.requests.cpu | quote }}
+ ephemeral-storage: {{ index .Values.mediator.resources.requests "ephemeral-storage" | quote }}
+ limits:
+ memory: {{ .Values.mediator.resources.limits.memory | quote }}
+ cpu: {{ .Values.mediator.resources.limits.cpu | quote }}
+ ephemeral-storage: {{ index .Values.mediator.resources.limits "ephemeral-storage" | quote }}
+ ports:
+ - containerPort: 6000
+ env:
+ - name: OPENHIM_USERNAME
+ valueFrom:
+ secretKeyRef:
+ name: mediator-credentials
+ key: openhim-username
+ - name: OPENHIM_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: mediator-credentials
+ key: openhim-password
+ - name: OPENHIM_API_URL
+ value: "https://openhim-core:8080"
+ - name: PORT
+ value: "6000"
+ - name: FHIR_URL
+ value: "http://openhim-core:5001/fhir" # NOTE(review): 5001 is openhim-core's router-https port — confirm plain http is intended here
+ - name: FHIR_USERNAME
+ valueFrom:
+ secretKeyRef:
+ name: mediator-credentials
+ key: fhir-username
+ - name: FHIR_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: mediator-credentials
+ key: fhir-password
+ - name: CHT_URL
+ {{- if .Values.cht.enabled }}
+ value: "https://nginx"
+ {{- else }}
+ value: {{ .Values.cht.external.url | quote }}
+ {{- end }}
+ - name: CHT_USERNAME
+ {{- if .Values.cht.enabled }}
+ valueFrom:
+ secretKeyRef:
+ name: mediator-credentials
+ key: cht-username
+ {{- else }}
+ value: {{ .Values.cht.external.username | quote }}
+ {{- end }}
+ - name: CHT_PASSWORD
+ {{- if .Values.cht.enabled }}
+ valueFrom:
+ secretKeyRef:
+ name: mediator-credentials
+ key: cht-password
+ {{- else }}
+ value: {{ .Values.cht.external.password | quote }}
+ {{- end }}
+ - name: OPENIMIS_API_URL
+ value: "https://openimis.s2.openimis.org"
+ - name: OPENIMIS_USERNAME
+ valueFrom:
+ secretKeyRef:
+ name: mediator-credentials
+ key: openimis-username
+ - name: OPENIMIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: mediator-credentials
+ key: openimis-password
+ readinessProbe:
+ httpGet:
+ path: /health
+ port: 6000
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ livenessProbe:
+ httpGet:
+ path: /health
+ port: 6000
+ initialDelaySeconds: 60
+ periodSeconds: 30
+ initContainers:
+ - name: wait-for-configurator
+ image: busybox:1.35
+ command: ['sh', '-c', 'echo "Configurator job completed, starting mediator"; sleep 5'] # NOTE(review): fixed 5s sleep only — does not actually verify the configurator Job finished
+ restartPolicy: Always
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: mediator
+ namespace: {{ .Values.global.namespace }}
+spec:
+ selector:
+ app: mediator
+ ports:
+ - port: 6000
+ targetPort: 6000
+---
+# Expose Mediator externally via NodePort (for local k3d development)
+{{- if ne .Values.cluster_type "eks" }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: mediator-external
+ namespace: {{ .Values.global.namespace }}
+spec:
+ type: NodePort
+ selector:
+ app: mediator
+ ports:
+ - port: 6000
+ targetPort: 6000
+ nodePort: {{ .Values.services.nodePort.mediator }}
+{{- end }}
diff --git a/charts/templates/eks-cht-ingress.yaml b/charts/templates/eks-cht-ingress.yaml
new file mode 100644
index 00000000..442aa109
--- /dev/null
+++ b/charts/templates/eks-cht-ingress.yaml
@@ -0,0 +1,38 @@
+{{- if and (eq .Values.cluster_type "eks") .Values.cht.enabled }}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: cht-ingress
+ namespace: {{ .Values.global.namespace }}
+ annotations:
+ # ALB Configuration
+ alb.ingress.kubernetes.io/scheme: internet-facing
+ alb.ingress.kubernetes.io/tags: {{ .Values.ingress.annotations.tags }}
+ alb.ingress.kubernetes.io/group.name: {{ .Values.ingress.annotations.groupname }}
+ alb.ingress.kubernetes.io/target-type: ip
+
+ # Backend uses HTTPS (nginx's self-signed cert)
+ alb.ingress.kubernetes.io/backend-protocol: HTTPS
+ alb.ingress.kubernetes.io/backend-protocol-version: HTTP1
+ alb.ingress.kubernetes.io/healthcheck-path: /
+ alb.ingress.kubernetes.io/healthcheck-protocol: HTTPS
+
+ # SSL/TLS Configuration
+ alb.ingress.kubernetes.io/certificate-arn: {{ .Values.ingress.annotations.certificate }}
+ alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS": 443}]'
+ alb.ingress.kubernetes.io/ssl-policy: ELBSecurityPolicy-TLS-1-2-2017-01
+spec:
+ ingressClassName: alb
+ rules:
+ # ONLY CHT Application
+ - host: {{ .Values.ingress.chtHost }}
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: nginx
+ port:
+ number: 443
+{{- end }}
diff --git a/charts/templates/eks-openhim-console-ingress.yaml b/charts/templates/eks-openhim-console-ingress.yaml
new file mode 100644
index 00000000..348ab484
--- /dev/null
+++ b/charts/templates/eks-openhim-console-ingress.yaml
@@ -0,0 +1,36 @@
+{{- if eq .Values.cluster_type "eks" }}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: openhim-console-ingress
+ namespace: {{ .Values.global.namespace }}
+ annotations:
+ # ALB Configuration
+ alb.ingress.kubernetes.io/scheme: internet-facing
+ alb.ingress.kubernetes.io/tags: {{ .Values.ingress.annotations.tags }}
+ alb.ingress.kubernetes.io/group.name: {{ .Values.ingress.annotations.groupname }}
+ alb.ingress.kubernetes.io/target-type: ip
+
+ # Backend uses HTTP
+ alb.ingress.kubernetes.io/backend-protocol: HTTP
+ alb.ingress.kubernetes.io/healthcheck-path: /
+ alb.ingress.kubernetes.io/healthcheck-protocol: HTTP
+
+ # SSL/TLS Configuration
+ alb.ingress.kubernetes.io/certificate-arn: {{ .Values.ingress.annotations.certificate }}
+ alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS": 443}]'
+ alb.ingress.kubernetes.io/ssl-policy: ELBSecurityPolicy-TLS-1-2-2017-01
+spec:
+ ingressClassName: alb
+ rules:
+ - host: {{ .Values.ingress.openhimConsoleHost }}
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: openhim-console
+ port:
+ number: 80
+{{- end }}
diff --git a/charts/templates/eks-openhim-core-ingress.yaml b/charts/templates/eks-openhim-core-ingress.yaml
new file mode 100644
index 00000000..319c413b
--- /dev/null
+++ b/charts/templates/eks-openhim-core-ingress.yaml
@@ -0,0 +1,37 @@
+{{- if eq .Values.cluster_type "eks" }}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: openhim-core-ingress
+ namespace: {{ .Values.global.namespace }}
+ annotations:
+ # ALB Configuration
+ alb.ingress.kubernetes.io/scheme: internet-facing
+ alb.ingress.kubernetes.io/tags: {{ .Values.ingress.annotations.tags }}
+ alb.ingress.kubernetes.io/group.name: {{ .Values.ingress.annotations.groupname }}
+ alb.ingress.kubernetes.io/target-type: ip
+
+ # Backend uses HTTPS (OpenHIM Core uses self-signed cert)
+ alb.ingress.kubernetes.io/backend-protocol: HTTPS
+ alb.ingress.kubernetes.io/backend-protocol-version: HTTP1
+ alb.ingress.kubernetes.io/healthcheck-path: /heartbeat
+ alb.ingress.kubernetes.io/healthcheck-protocol: HTTPS
+
+ # SSL/TLS Configuration
+ alb.ingress.kubernetes.io/certificate-arn: {{ .Values.ingress.annotations.certificate }}
+ alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS": 443}]'
+ alb.ingress.kubernetes.io/ssl-policy: ELBSecurityPolicy-TLS-1-2-2017-01
+spec:
+ ingressClassName: alb
+ rules:
+ - host: {{ .Values.ingress.openhimCoreHost }}
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: openhim-core
+ port:
+ number: 8080
+{{- end }}
diff --git a/charts/templates/eks-openhim-router-ingress.yaml b/charts/templates/eks-openhim-router-ingress.yaml
new file mode 100644
index 00000000..bf716a17
--- /dev/null
+++ b/charts/templates/eks-openhim-router-ingress.yaml
@@ -0,0 +1,41 @@
+{{- if eq .Values.cluster_type "eks" }}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: openhim-router-ingress
+ namespace: {{ .Values.global.namespace }}
+ annotations:
+ # ALB Configuration
+ alb.ingress.kubernetes.io/scheme: internet-facing
+ alb.ingress.kubernetes.io/tags: {{ .Values.ingress.annotations.tags }}
+ alb.ingress.kubernetes.io/group.name: {{ .Values.ingress.annotations.groupname }}
+ alb.ingress.kubernetes.io/target-type: ip
+
+ # Backend uses HTTPS (OpenHIM Router uses self-signed cert)
+ alb.ingress.kubernetes.io/backend-protocol: HTTPS
+ alb.ingress.kubernetes.io/backend-protocol-version: HTTP1
+ alb.ingress.kubernetes.io/healthcheck-path: /
+ alb.ingress.kubernetes.io/healthcheck-protocol: HTTPS
+ alb.ingress.kubernetes.io/healthcheck-interval-seconds: '30'
+ alb.ingress.kubernetes.io/healthcheck-timeout-seconds: '5'
+ alb.ingress.kubernetes.io/healthy-threshold-count: '2'
+ alb.ingress.kubernetes.io/unhealthy-threshold-count: '3'
+
+ # SSL/TLS Configuration
+ alb.ingress.kubernetes.io/certificate-arn: {{ .Values.ingress.annotations.certificate }}
+ alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS": 443}]'
+ alb.ingress.kubernetes.io/ssl-policy: ELBSecurityPolicy-TLS-1-2-2017-01
+spec:
+ ingressClassName: alb
+ rules:
+ - host: {{ .Values.ingress.openhimRouterHost }}
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: openhim-core
+ port:
+ number: 5000
+{{- end }}
diff --git a/charts/values-eks.yaml b/charts/values-eks.yaml
new file mode 100644
index 00000000..bbde92b5
--- /dev/null
+++ b/charts/values-eks.yaml
@@ -0,0 +1,55 @@
+global:
+ namespace: # Override namespace (e.g., "my-namespace")
+
+createNamespace: false
+
+persistence:
+ storageClass: # Override storage (e.g., "gp2" for AWS)
+
+configurator:
+ image: 720541322708.dkr.ecr.eu-west-2.amazonaws.com/cht-interop/configurator:latest
+ imagePullPolicy: Always # Override pull policy
+
+mediator:
+ image: 720541322708.dkr.ecr.eu-west-2.amazonaws.com/cht-interop/mediator:latest
+ imagePullPolicy: Always
+
+service:
+ type: ClusterIP # Override service type
+
+ingress:
+ annotations:
+ groupname: "dev-cht-alb"
+ tags: "Environment=dev,Team=QA"
+ certificate: "arn:aws:iam::720541322708:server-certificate/2026-q1-wildcard-dev-medicmobile-org-letsencrypt"
+ # Ensure the host is not already taken. Valid characters for a subdomain are:
+ # a-z, 0-9, and - (but not as first or last character).
+ chtHost: "cht-interop.dev.medicmobile.org" # e.g. "mrjones.dev.medicmobile.org"
+ openhimConsoleHost: "openhim-cht-interop.dev.medicmobile.org"
+ openhimCoreHost: "openhim-api-cht-interop.dev.medicmobile.org"
+ openhimRouterHost: "openhim-router-cht-interop.dev.medicmobile.org"
+ hosted_zone_id: "Z3304WUAJTCM7P"
+ load_balancer: "dualstack.k8s-devchtalb-3eb0781cbb-694321496.eu-west-2.elb.amazonaws.com"
+
+cluster_type: "eks"
+
+# CHT and CouchDB configuration
+# Set to false to use an external CHT instance (common for EKS deployments)
+couchdb:
+ enabled: true
+
+cht:
+ enabled: true
+ # External CHT configuration (used when cht.enabled=false)
+ external:
+ url: "" # e.g., https://your-cht-instance.example.com
+ username: ""
+ password: ""
+
+resources:
+ requests:
+ memory: "512Mi" # Override resources
+ cpu: "250m"
+ limits:
+ memory: "2Gi"
+ cpu: "1000m"
diff --git a/charts/values.yaml b/charts/values.yaml
new file mode 100644
index 00000000..755b98a9
--- /dev/null
+++ b/charts/values.yaml
@@ -0,0 +1,224 @@
+createNamespace: true # For templates/01-namespace.yaml
+cluster_type: "k3d" # Default to k3d for local development
+
+# Default values for health-interop
+global:
+ namespace: cht-interop
+
+# Database configurations
+couchdb:
+ enabled: true # Set to false to use external CHT/CouchDB
+ image: public.ecr.aws/medic/cht-couchdb:4.1.0-alpha
+ storage: 10Gi
+ credentials:
+ username: admin
+    password: password # NOTE: default dev credential — override for any shared or production deployment
+ secret: secret
+ uuid: CC0C3BA1-88EE-4AE3-BFD3-6E0EE56ED534
+ resources:
+ requests:
+ memory: "512Mi"
+ cpu: "250m"
+ ephemeral-storage: "500Mi"
+ limits:
+ memory: "2Gi"
+ cpu: "1000m"
+ ephemeral-storage: "2Gi"
+
+mongodb:
+ image: mongo:4.2
+ storage: 5Gi
+ resources:
+ requests:
+ memory: "256Mi"
+ cpu: "200m"
+ ephemeral-storage: "256Mi"
+ limits:
+ memory: "1Gi"
+ cpu: "500m"
+ ephemeral-storage: "1Gi"
+
+postgresql:
+ image: postgres:14.1
+ storage: 5Gi
+ credentials:
+ username: admin
+    password: instant101 # NOTE: default dev credential — override for any shared or production deployment
+ database: hapi
+ resources:
+ requests:
+ memory: "256Mi"
+ cpu: "200m"
+ ephemeral-storage: "256Mi"
+ limits:
+ memory: "1Gi"
+ cpu: "500m"
+ ephemeral-storage: "1Gi"
+
+# OpenHIM configuration
+openhim:
+ core:
+ image: jembi/openhim-core:7
+ replicas: 1
+ resources:
+ requests:
+ memory: "256Mi"
+ cpu: "100m"
+ ephemeral-storage: "100Mi"
+ limits:
+ memory: "512Mi"
+ cpu: "500m"
+ ephemeral-storage: "500Mi"
+ console:
+ image: jembi/openhim-console:1.14.4
+ replicas: 1
+ resources:
+ requests:
+ memory: "64Mi"
+ cpu: "50m"
+ ephemeral-storage: "50Mi"
+ limits:
+ memory: "128Mi"
+ cpu: "200m"
+ ephemeral-storage: "200Mi"
+ config:
+ protocol: https
+ host: localhost
+ port: 8080
+
+# CHT configuration
+cht:
+ enabled: true # Set to false to use external CHT instance
+ # External CHT configuration (used when cht.enabled=false)
+ external:
+ url: "" # e.g., https://external-cht.example.com
+ username: ""
+ password: ""
+ api:
+ image: public.ecr.aws/medic/cht-api:4.1.0-alpha
+ replicas: 1
+ resources:
+ requests:
+ memory: "256Mi"
+ cpu: "100m"
+ ephemeral-storage: "100Mi"
+ limits:
+ memory: "512Mi"
+ cpu: "500m"
+ ephemeral-storage: "500Mi"
+ nginx:
+ image: public.ecr.aws/medic/cht-nginx:4.1.0-alpha
+ replicas: 1
+ resources:
+ requests:
+ memory: "64Mi"
+ cpu: "50m"
+ ephemeral-storage: "50Mi"
+ limits:
+ memory: "256Mi"
+ cpu: "200m"
+ ephemeral-storage: "200Mi"
+ haproxy:
+ image: public.ecr.aws/medic/cht-haproxy:4.1.0-alpha
+ replicas: 1
+ resources:
+ requests:
+ memory: "64Mi"
+ cpu: "50m"
+ ephemeral-storage: "50Mi"
+ limits:
+ memory: "256Mi"
+ cpu: "200m"
+ ephemeral-storage: "200Mi"
+ sentinel:
+ image: public.ecr.aws/medic/cht-sentinel:4.1.0-alpha
+ replicas: 1
+ resources:
+ requests:
+ memory: "128Mi"
+ cpu: "100m"
+ ephemeral-storage: "100Mi"
+ limits:
+ memory: "256Mi"
+ cpu: "300m"
+ ephemeral-storage: "300Mi"
+ healthcheck:
+ image: public.ecr.aws/medic/cht-haproxy-healthcheck:4.1.0-alpha
+ replicas: 1
+ resources:
+ requests:
+ memory: "32Mi"
+ cpu: "25m"
+ ephemeral-storage: "50Mi"
+ limits:
+ memory: "64Mi"
+ cpu: "100m"
+ ephemeral-storage: "100Mi"
+
+# HAPI FHIR configuration
+hapiFhir:
+ image: hapiproject/hapi:v5.5.1
+ replicas: 1
+ resources:
+ requests:
+ memory: "2Gi"
+ cpu: "500m"
+ ephemeral-storage: "500Mi"
+ limits:
+ memory: "3Gi"
+ cpu: "1000m"
+ ephemeral-storage: "2Gi"
+
+# Mediator configuration
+mediator:
+ image: mediator:local
+ imagePullPolicy: Never
+ replicas: 1
+ resources:
+ requests:
+ memory: "128Mi"
+ cpu: "100m"
+ ephemeral-storage: "100Mi"
+ limits:
+ memory: "256Mi"
+ cpu: "300m"
+ ephemeral-storage: "300Mi"
+ credentials:
+ openhimUsername: interop@openhim.org
+ openhimPassword: interop-password
+ fhirUsername: interop-client
+ fhirPassword: interop-password
+ chtUsername: admin
+ chtPassword: password
+
+# Configurator configuration
+configurator:
+ image: configurator:local
+ imagePullPolicy: Never
+ resources:
+ requests:
+ memory: "128Mi"
+ cpu: "100m"
+ ephemeral-storage: "100Mi"
+ limits:
+ memory: "256Mi"
+ cpu: "300m"
+ ephemeral-storage: "300Mi"
+
+# Storage configuration
+persistence:
+ storageClass: local-path # k3d default storage class
+ chtCredentials: 1Gi
+ chtSsl: 1Gi
+
+# Service configuration
+services:
+ nodePort:
+ nginx: 30080
+ nginxHttps: 30443
+ console: 30900
+ mediator: 30600
+ openhimCore: 30081
+ openhimRouterHttp: 30500
+ openhimRouterHttps: 30501
+
diff --git a/remove_local_kubernetes.sh b/remove_local_kubernetes.sh
new file mode 100755
index 00000000..d21cd6ca
--- /dev/null
+++ b/remove_local_kubernetes.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+set -e # Exit on any error
+
+echo "CHT Interoperability Stack Cleanup (k3d)"
+echo ""
+
+# Check if cluster exists
+CLUSTER_EXISTS=$(k3d cluster list | awk 'NR>1 {print $1}' | grep -x "cht-interop" || echo "")
+
+if [[ -z "$CLUSTER_EXISTS" ]]; then
+ echo "No k3d cluster 'cht-interop' found. Nothing to clean up."
+ exit 0
+fi
+
+echo "This will delete:"
+echo " - k3d cluster 'cht-interop'"
+echo " - All pods, services, and data in the cluster"
+echo ""
+read -p "Are you sure you want to proceed? (y/N): " -n 1 -r
+echo
+
+if [[ ! $REPLY =~ ^[Yy]$ ]]; then
+ echo "Cleanup cancelled."
+ exit 0
+fi
+
+echo ""
+echo "Deleting k3d cluster 'cht-interop'..."
+k3d cluster delete cht-interop
+
+if [[ $? -eq 0 ]]; then # NOTE: with 'set -e', a failed delete exits above, so this branch is always taken
+ echo ""
+ echo "Cleanup complete!"
+ echo ""
+ echo "The following have been removed:"
+ echo " - k3d cluster 'cht-interop'"
+ echo " - All associated containers and volumes"
+ echo ""
+ echo "To redeploy, run: ./start_local_kubernetes.sh"
+else
+ echo ""
+ echo "Failed to delete cluster"
+ exit 1
+fi
diff --git a/start_local_kubernetes.sh b/start_local_kubernetes.sh
new file mode 100755
index 00000000..288c31b1
--- /dev/null
+++ b/start_local_kubernetes.sh
@@ -0,0 +1,141 @@
+#!/bin/bash
+
+set -e # Exit on any error (note: the explicit '$? -ne 0' checks below are unreachable on failure because of this)
+
+echo "CHT Interoperability Stack Deployment (k3d)"
+echo ""
+
+# Check if cluster exists
+CLUSTER_EXISTS=$(k3d cluster list | awk 'NR>1 {print $1}' | grep -x "cht-interop" || echo "")
+
+if [[ -n "$CLUSTER_EXISTS" ]]; then
+ echo "k3d cluster 'cht-interop' already exists."
+ read -p "Do you want to delete and recreate it? (y/N): " -n 1 -r
+ echo
+ if [[ $REPLY =~ ^[Yy]$ ]]; then
+ echo "Deleting existing k3d cluster..."
+ k3d cluster delete cht-interop
+ CREATE_CLUSTER=true
+ else
+ echo "Keeping existing cluster..."
+ CREATE_CLUSTER=false
+ fi
+else
+ echo "No existing cluster found."
+ CREATE_CLUSTER=true
+fi
+
+# Create cluster if needed
+if [[ "$CREATE_CLUSTER" == true ]]; then
+ echo "Creating fresh k3d cluster with port mappings..."
+ k3d cluster create cht-interop --agents 1 \
+ --port "8080:30081@loadbalancer" \
+ --port "9000:30900@loadbalancer" \
+ --port "6000:30600@loadbalancer" \
+ --port "5001:30501@loadbalancer" \
+ --port "80:30080@loadbalancer" \
+ --port "443:30443@loadbalancer"
+ if [[ $? -ne 0 ]]; then
+ echo "Failed to create k3d cluster"
+ exit 1
+ fi
+fi
+
+# Build and load custom images
+echo ""
+echo "Building and loading custom images..."
+
+# Build configurator
+if [[ -d "./configurator" ]]; then
+ echo "Building configurator image..."
+ docker build -f configurator/Dockerfile -t configurator:local .
+ if [[ $? -ne 0 ]]; then
+ echo "Failed to build configurator image"
+ exit 1
+ fi
+ k3d image import configurator:local -c cht-interop
+else
+ echo "Configurator directory not found, skipping build"
+fi
+
+# Build mediator
+if [[ -d "./mediator" ]]; then
+ echo "Building mediator image..."
+ docker build -t mediator:local ./mediator
+ if [[ $? -ne 0 ]]; then
+ echo "Failed to build mediator image"
+ exit 1
+ fi
+ k3d image import mediator:local -c cht-interop
+else
+ echo "Mediator directory not found, skipping build"
+fi
+
+# Check if Helm release exists
+RELEASE_EXISTS=$(helm list -n cht-interop -q | grep -x "cht-interop" || echo "")
+
+if [[ -n "$RELEASE_EXISTS" ]]; then
+ echo ""
+ echo "Helm release 'cht-interop' already exists."
+ read -p "Do you want to upgrade it? (Y/n): " -n 1 -r
+ echo
+ if [[ ! $REPLY =~ ^[Nn]$ ]]; then
+ echo "Upgrading Helm release..."
+ helm upgrade cht-interop ./charts
+ if [[ $? -ne 0 ]]; then
+ echo "Failed to upgrade Helm release"
+ exit 1
+ fi
+ else
+ echo "Skipping Helm deployment..."
+ fi
+else
+ # Deploy using Helm
+ echo ""
+ echo "Deploying with Helm..."
+ helm install cht-interop ./charts
+ if [[ $? -ne 0 ]]; then
+ echo "Failed to install Helm release"
+ exit 1
+ fi
+fi
+
+# Wait for pods to be ready
+echo ""
+echo "Waiting for pods to be ready..."
+echo "You can monitor progress with: kubectl get pods -n cht-interop -w"
+echo "Or use K9s: k9s --context k3d-cht-interop"
+
+# Wait for critical services with timeout
+echo ""
+echo "Waiting for databases to be ready..."
+kubectl wait --for=condition=ready pod -l app=mongo -n cht-interop --timeout=300s || echo "MongoDB not ready yet"
+kubectl wait --for=condition=ready pod -l app=couchdb -n cht-interop --timeout=300s || echo "CouchDB not ready yet"
+kubectl wait --for=condition=ready pod -l app=hapi-db -n cht-interop --timeout=300s || echo "PostgreSQL not ready yet"
+
+echo "Waiting for OpenHIM Core to be ready..."
+kubectl wait --for=condition=ready pod -l app=openhim-core -n cht-interop --timeout=300s || echo "OpenHIM Core not ready yet"
+
+echo ""
+echo "Deployment complete!"
+
+echo ""
+echo "CHT Interoperability Stack is ready!"
+echo ""
+echo "Access services at (via k3d port mappings - no port-forward needed):"
+echo " OpenHIM Console: http://localhost:9000"
+echo " OpenHIM Core API: https://localhost:8080"
+echo " OpenHIM Router: https://localhost:5001"
+echo " CHT: http://localhost:80 or https://localhost:443"
+echo " Mediator: http://localhost:6000"
+echo ""
+echo "Default credentials:"
+echo " OpenHIM: root@openhim.org / openhim-password"
+echo " CHT: medic / password"
+echo ""
+echo "Useful commands:"
+echo " View pods: kubectl get pods -n cht-interop"
+echo "  View logs:     kubectl logs -n cht-interop <pod-name>"
+echo " Use K9s: k9s --context k3d-cht-interop"
+echo " Helm status: helm status cht-interop"
+echo ""