From 9bc8f75526858957a0a53cabf371a92e7bb302c6 Mon Sep 17 00:00:00 2001 From: Theo Date: Tue, 17 Feb 2026 15:35:59 +0000 Subject: [PATCH 1/6] docs(deployment): add local k3d integration test setup instructions Document critical gotchas discovered when running integration tests against a local k3d cluster: - CoreDNS fix for host.k3d.internal DNS resolution from pods (S3/MinIO pre-signed URLs are unreachable from pods without this override) - Anaconda Python version conflict with deploy.py - Disk pressure thresholds causing pod scheduling failures - CLI venv setup: keyrings.alt installation and PATH configuration - Updated step-by-step instructions with correct Helm deploy flags - Added troubleshooting section for common failure modes Generated with [Claude Code](https://claude.ai/code) via [Happy](https://happy.engineering) Co-Authored-By: Claude Co-Authored-By: Happy --- AGENTS.md | 34 +++++++++++++++ integration-tests/AGENTS.md | 86 ++++++++++++++++++++++++++++++++----- 2 files changed, 109 insertions(+), 11 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index 978ed3e902..31d94f2a91 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -15,6 +15,40 @@ When adding or modifying interactive components in the website, ensure they are These wrappers automatically disable components until client-side hydration is complete, preventing Playwright from interacting with them before they're ready. +## Running Integration Tests Locally (k3d) + +See `integration-tests/AGENTS.md` for full instructions. Key gotchas: + +### Anaconda Python conflict +If Anaconda is in your PATH, `deploy.py` will fail with "Python 3.9 or higher is required". Use `/usr/bin/python3 ./deploy.py` instead. + +### Disk pressure (k3d nodes won't schedule pods) +k3d nodes inherit the host's disk. If disk usage is above ~85%, kubelet marks nodes with `DiskPressure` taint and pods stay `Pending`. Free up disk space or create the cluster with relaxed eviction thresholds: +```sh +k3d cluster create testCluster ... 
\ + --k3s-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%@server:*' \ + --k3s-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%@agent:*' +``` + +### Critical: Fix `host.k3d.internal` DNS for pods (S3/MinIO connectivity) +After deploying with `localHost: host.k3d.internal`, pods **cannot** reach `host.k3d.internal:8084` (MinIO/S3) because that hostname resolves to the Docker gateway IP which doesn't forward host ports. Pre-signed S3 URLs generated by the backend contain `host.k3d.internal:8084` and are used by preprocessing pods, causing `ConnectionError` / `ConnectTimeoutError`. + +**Fix**: Override CoreDNS to point `host.k3d.internal` to the MinIO service ClusterIP: +```sh +MINIO_IP=$(kubectl get svc loculus-minio-service -o jsonpath='{.spec.clusterIP}') +kubectl patch configmap coredns -n kube-system --type=json \ + -p="[{\"op\":\"replace\",\"path\":\"/data/NodeHosts\",\"value\":\"${MINIO_IP} host.k3d.internal\n$(kubectl get configmap coredns -n kube-system -o jsonpath='{.data.NodeHosts}' | grep -v host.k3d.internal)\"}]" +kubectl rollout restart deployment coredns -n kube-system +# Restart preprocessing pods to pick up DNS change +kubectl get deployments | grep preprocessing | awk '{print $1}' | xargs -I {} kubectl rollout restart deployment {} +``` + +### CLI tests require `keyrings.alt` +Install it in the CLI venv, not system-wide: `uv pip install --python cli/.venv/bin/python keyrings.alt` + +### CLI must be on PATH +The integration tests invoke `loculus` directly. Symlink it: `ln -sf $(pwd)/cli/.venv/bin/loculus ~/bin/loculus` + ## Updating Conda Environment Dependencies Conda dependencies in `environment.yml` files are not automatically updated by dependabot. 
diff --git a/integration-tests/AGENTS.md b/integration-tests/AGENTS.md index 72e1a68228..45d4d13502 100644 --- a/integration-tests/AGENTS.md +++ b/integration-tests/AGENTS.md @@ -42,27 +42,65 @@ npm --version # Create k3d cluster with port bindings ./deploy.py --verbose cluster --bind-all -# Deploy with Helm (use main branch to avoid image pull issues) -./deploy.py --verbose helm --branch main --for-e2e --enablePreprocessing --use-localhost-ip +# Create values file to use host.k3d.internal +echo 'localHost: host.k3d.internal' > /tmp/k3d-values.yaml + +# Deploy with Helm +SHA=$(git rev-parse HEAD | cut -c1-7) +./deploy.py --verbose helm --branch main --sha $SHA --for-e2e --enablePreprocessing --values /tmp/k3d-values.yaml + +# Add host entry so the browser can resolve host.k3d.internal +# (requires sudo - add this line to /etc/hosts if not already present) +grep -q 'host.k3d.internal' /etc/hosts || echo '127.0.0.1 host.k3d.internal' | sudo tee -a /etc/hosts +``` + +> **Why `host.k3d.internal`?** Services like MinIO generate pre-signed URLs that both browsers and internal pods need to access. Using `host.k3d.internal` works because: +> - From the browser: The `/etc/hosts` entry points to `127.0.0.1` where ports are forwarded +> - From inside pods: After the DNS fix below, it resolves to the MinIO service ClusterIP + +> **Python version note:** If anaconda shadows system python, use `/usr/bin/python3 ./deploy.py` instead. + +> **Disk pressure note:** If disk usage is above ~85%, k3d nodes get `DiskPressure` taint and pods stay `Pending`. Free up disk space or add `--k3s-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%@server:*' --k3s-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%@agent:*'` to the `k3d cluster create` command. + +### 2. Fix DNS for pods (critical) + +By default, `host.k3d.internal` resolves to the Docker gateway IP inside pods, but host ports (8084 for MinIO, etc.) aren't accessible from that route. 
This causes preprocessing pods to fail with `ConnectionError` when trying to use pre-signed S3 URLs. + +Fix by overriding CoreDNS to point `host.k3d.internal` to the MinIO service ClusterIP: + +```sh +MINIO_IP=$(kubectl get svc loculus-minio-service -o jsonpath='{.spec.clusterIP}') +kubectl patch configmap coredns -n kube-system --type=json \ + -p="[{\"op\":\"replace\",\"path\":\"/data/NodeHosts\",\"value\":\"${MINIO_IP} host.k3d.internal\n$(kubectl get configmap coredns -n kube-system -o jsonpath='{.data.NodeHosts}' | grep -v host.k3d.internal)\"}]" +kubectl rollout restart deployment coredns -n kube-system +sleep 10 + +# Restart preprocessing pods to pick up DNS change +kubectl get deployments | grep preprocessing | awk '{print $1}' | xargs -I {} kubectl rollout restart deployment {} ``` -### 2. Install dependencies +### 3. Install dependencies ```sh # Install Node.js dependencies cd integration-tests && npm ci -# Install Python CLI -cd ../cli && uv sync && uv build && uv pip install --system dist/*.whl +# Install Python CLI (use the venv, not --system, to avoid permission issues) +cd ../cli && uv sync && uv build + +# Install keyring backend for CLI tests +uv pip install --python .venv/bin/python keyrings.alt -# Install keyring backend for CLI -uv pip install --system keyrings.alt +# Make the CLI available on PATH +mkdir -p ~/bin && ln -sf $(pwd)/.venv/bin/loculus ~/bin/loculus +export PATH="$HOME/bin:$PATH" -# Install Playwright browsers -cd ../integration-tests && npx playwright install --with-deps +# Install Playwright browsers (use --with-deps if you have sudo, otherwise just the browser) +cd ../integration-tests && npx playwright install chromium +# Or with system deps: npx playwright install --with-deps ``` -### 3. Wait for pods to be ready +### 4. Wait for pods to be ready ```sh # Wait for all pods to be ready (from repository root) @@ -72,7 +110,7 @@ cd ../integration-tests && npx playwright install --with-deps sleep 10 ``` -### 4. Run the tests +### 5. 
Run the tests ```sh cd integration-tests @@ -87,6 +125,10 @@ npx playwright test tests/specs/cli/auth.spec.ts --reporter=list BROWSER=firefox npx playwright test --workers=4 --reporter=list ``` +**Important:** Do NOT set `PLAYWRIGHT_TEST_BASE_URL=http://host.k3d.internal:3000` when running tests against local k3d. The default `localhost:3000` works correctly because: +- Ports are forwarded from k3d to localhost +- Keycloak is configured to allow redirects to `localhost:3000`, not `host.k3d.internal:3000` + #### Controlling Test Execution Test execution can be controlled using the `BROWSER` and `TEST_SUITE` environment variables. @@ -108,6 +150,28 @@ PLAYWRIGHT_TEST_BASE_URL=https://main.loculus.org npx playwright test --reporter PLAYWRIGHT_TEST_BASE_URL=https://preview-123.loculus.org npx playwright test --reporter=list ``` +## Troubleshooting + +**Preprocessing pods crash-loop with `ConnectionError` to `host.k3d.internal:8084`:** +- You need the DNS fix from step 2 above. The pre-signed S3 URLs contain `host.k3d.internal:8084` which isn't reachable from pods by default. + +**CLI tests fail with `No module named 'keyring'`:** +- Install `keyrings.alt` in the CLI venv: `uv pip install --python cli/.venv/bin/python keyrings.alt` + +**CLI tests fail with `loculus: command not found`:** +- Ensure the CLI is on PATH: `ln -sf $(pwd)/cli/.venv/bin/loculus ~/bin/loculus && export PATH="$HOME/bin:$PATH"` + +**`deploy.py` fails with "Python 3.9 or higher is required":** +- Anaconda Python is too old. Use: `/usr/bin/python3 ./deploy.py` + +**Pods stuck in `Pending` with `DiskPressure` taint:** +- Free disk space below 85% usage, or recreate cluster with relaxed eviction thresholds (see step 1 notes). + +**Cleanup:** +```sh +k3d cluster delete testCluster +``` + ## Checklist before committing code Run `npm run format` to ensure proper formatting and linting before committing. 
From b0d9fe0468a5e1e93d817960f4b42269810c2d7d Mon Sep 17 00:00:00 2001 From: Theo Date: Tue, 17 Feb 2026 16:53:46 +0000 Subject: [PATCH 2/6] demo(deployment): replace Helm with CDK8s for Kubernetes manifest generation This is a proof of concept demonstrating CDK8s (TypeScript) as a replacement for Helm chart templating. The CDK8s implementation generates functionally identical Kubernetes manifests, validated by passing 92/98 integration tests (the 6 failures are pre-existing issues unrelated to the migration). Co-Authored-By: Claude Opus 4.6 --- .github/workflows/integration-tests.yml | 22 +- deploy.py | 339 ++++----- kubernetes/cdk8s/cdk8s.yaml | 2 + kubernetes/cdk8s/package-lock.json | 427 +++++++++++ kubernetes/cdk8s/package.json | 29 + kubernetes/cdk8s/src/chart.ts | 55 ++ kubernetes/cdk8s/src/config-generation.ts | 709 ++++++++++++++++++ kubernetes/cdk8s/src/config-processor.ts | 53 ++ kubernetes/cdk8s/src/constructs/backend.ts | 196 +++++ kubernetes/cdk8s/src/constructs/database.ts | 88 +++ kubernetes/cdk8s/src/constructs/docs.ts | 79 ++ .../cdk8s/src/constructs/ena-submission.ts | 221 ++++++ kubernetes/cdk8s/src/constructs/ingest.ts | 251 +++++++ kubernetes/cdk8s/src/constructs/ingress.ts | 203 +++++ kubernetes/cdk8s/src/constructs/keycloak.ts | 493 ++++++++++++ kubernetes/cdk8s/src/constructs/lapis.ts | 228 ++++++ kubernetes/cdk8s/src/constructs/minio.ts | 116 +++ .../cdk8s/src/constructs/preprocessing.ts | 128 ++++ kubernetes/cdk8s/src/constructs/secrets.ts | 80 ++ kubernetes/cdk8s/src/constructs/silo.ts | 360 +++++++++ kubernetes/cdk8s/src/constructs/website.ts | 108 +++ kubernetes/cdk8s/src/docker-tag.ts | 12 + kubernetes/cdk8s/src/main.ts | 42 ++ kubernetes/cdk8s/src/organisms.ts | 135 ++++ kubernetes/cdk8s/src/resources.ts | 25 + kubernetes/cdk8s/src/urls.ts | 75 ++ kubernetes/cdk8s/src/values.ts | 355 +++++++++ kubernetes/cdk8s/tsconfig.json | 19 + 28 files changed, 4655 insertions(+), 195 deletions(-) create mode 100644 kubernetes/cdk8s/cdk8s.yaml 
create mode 100644 kubernetes/cdk8s/package-lock.json create mode 100644 kubernetes/cdk8s/package.json create mode 100644 kubernetes/cdk8s/src/chart.ts create mode 100644 kubernetes/cdk8s/src/config-generation.ts create mode 100644 kubernetes/cdk8s/src/config-processor.ts create mode 100644 kubernetes/cdk8s/src/constructs/backend.ts create mode 100644 kubernetes/cdk8s/src/constructs/database.ts create mode 100644 kubernetes/cdk8s/src/constructs/docs.ts create mode 100644 kubernetes/cdk8s/src/constructs/ena-submission.ts create mode 100644 kubernetes/cdk8s/src/constructs/ingest.ts create mode 100644 kubernetes/cdk8s/src/constructs/ingress.ts create mode 100644 kubernetes/cdk8s/src/constructs/keycloak.ts create mode 100644 kubernetes/cdk8s/src/constructs/lapis.ts create mode 100644 kubernetes/cdk8s/src/constructs/minio.ts create mode 100644 kubernetes/cdk8s/src/constructs/preprocessing.ts create mode 100644 kubernetes/cdk8s/src/constructs/secrets.ts create mode 100644 kubernetes/cdk8s/src/constructs/silo.ts create mode 100644 kubernetes/cdk8s/src/constructs/website.ts create mode 100644 kubernetes/cdk8s/src/docker-tag.ts create mode 100644 kubernetes/cdk8s/src/main.ts create mode 100644 kubernetes/cdk8s/src/organisms.ts create mode 100644 kubernetes/cdk8s/src/resources.ts create mode 100644 kubernetes/cdk8s/src/urls.ts create mode 100644 kubernetes/cdk8s/src/values.ts create mode 100644 kubernetes/cdk8s/tsconfig.json diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index bb91acc1bd..a106afb666 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -74,10 +74,20 @@ jobs: - uses: azure/setup-helm@v4 with: version: v3.18.3 + - uses: actions/setup-node@v6 + with: + node-version-file: ./integration-tests/.nvmrc + - name: Cache .npm + uses: actions/cache@v5 + with: + path: ~/.npm + key: ${{ runner.os }}-node-${{ hashFiles('integration-tests/**/package-lock.json', 
'kubernetes/cdk8s/package-lock.json') }} + - name: Install CDK8s dependencies + run: cd kubernetes/cdk8s && npm ci - name: Create k3d cluster run: | ./deploy.py --verbose cluster --bind-all - - name: Deploy with Helm + - name: Deploy with CDK8s run: | ./deploy.py --verbose helm \ --branch ${{ github.ref_name }} \ @@ -85,15 +95,7 @@ jobs: --for-e2e \ --enablePreprocessing \ --use-localhost-ip - - uses: actions/setup-node@v6 - with: - node-version-file: ./integration-tests/.nvmrc - - name: Cache .npm - uses: actions/cache@v5 - with: - path: ~/.npm - key: ${{ runner.os }}-node-${{ hashFiles('integration-tests/**/package-lock.json') }} - - name: Install dependencies + - name: Install integration test dependencies run: cd integration-tests && npm ci - name: Install Python uses: actions/setup-python@v6 diff --git a/deploy.py b/deploy.py index c0aab20f42..ab6e5cd09e 100755 --- a/deploy.py +++ b/deploy.py @@ -30,8 +30,8 @@ CLUSTER_NAME = "testCluster" -HELM_RELEASE_NAME = "preview" -HELM_CHART_DIR = ROOT_DIR / "kubernetes" / "loculus" +CDK8S_DIR = ROOT_DIR / "kubernetes" / "cdk8s" +HELM_CHART_DIR = ROOT_DIR / "kubernetes" / "loculus" # Still used for values files and secret generator WEBSITE_PORT_MAPPING = "-p 127.0.0.1:3000:30081@agent:0" BACKEND_PORT_MAPPING = "-p 127.0.0.1:8079:30082@agent:0" @@ -216,66 +216,84 @@ def cluster_exists(cluster_name): def handle_helm(): # noqa: C901 if args.uninstall: - run_command(["helm", "uninstall", HELM_RELEASE_NAME]) + # Delete all resources created by cdk8s + output_file = CDK8S_DIR / "dist" / "loculus.k8s.yaml" + if output_file.exists(): + run_command(["kubectl", "delete", "-f", str(output_file), "--ignore-not-found"]) + else: + print("No cdk8s output file found to uninstall.") return branch = args.branch or "latest" - parameters = [ - "helm", - "template" if args.template else "install", - HELM_RELEASE_NAME, - HELM_CHART_DIR, - ] + # Build the cdk8s CLI args + cdk8s_args = [] if args.values: for values_file in args.values: - 
parameters += ["-f", values_file] + cdk8s_args += ["--values", values_file] - parameters += [ - "--set", - "environment=local", - "--set", - f"branch={branch}", - ] + cdk8s_args += ["--set", "environment=local", "--set", f"branch={branch}"] if args.for_e2e or args.dev: - parameters += ["-f", HELM_CHART_DIR / "values_e2e_and_dev.yaml"] - parameters += ["--skip-schema-validation"] + cdk8s_args += ["--values", str(HELM_CHART_DIR / "values_e2e_and_dev.yaml")] if args.sha: - parameters += ["--set", f"sha={args.sha[:7]}"] + cdk8s_args += ["--set", f"sha={args.sha[:7]}"] if args.dev: - parameters += ["--set", "disableBackend=true"] - parameters += ["--set", "disableWebsite=true"] + cdk8s_args += ["--set", "disableBackend=true", "--set", "disableWebsite=true"] if not args.enablePreprocessing: - parameters += ["--set", "disablePreprocessing=true"] + cdk8s_args += ["--set", "disablePreprocessing=true"] if not args.enableIngest: - parameters += ["--set", "disableIngest=true"] + cdk8s_args += ["--set", "disableIngest=true"] if args.enableEnaSubmission: - parameters += ["--set", "disableEnaSubmission=false"] + cdk8s_args += ["--set", "disableEnaSubmission=false"] if args.use_localhost_ip: - parameters += ["--set", f"localHost={get_local_ip()}"] + cdk8s_args += ["--set", f"localHost={get_local_ip()}"] elif get_codespace_name(): - parameters += get_codespace_params(get_codespace_name()) + codespace_name = get_codespace_name() + public_runtime_config = { + "websiteUrl": f"https://{codespace_name}-3000.app.github.dev", + "backendUrl": f"https://{codespace_name}-8079.app.github.dev", + "lapisUrlTemplate": f"https://{codespace_name}-8080.app.github.dev/%organism%", + "keycloakUrl": f"https://{codespace_name}-8083.app.github.dev", + } + cdk8s_args += ["--set-json", f"public={json.dumps(public_runtime_config)}"] + + cdk8s_args += ["--base-dir", str(HELM_CHART_DIR)] + + # Synth YAML using cdk8s + synth_command = [ + "npx", "ts-node", "src/main.ts", + ] + cdk8s_args + + 
run_command(synth_command, cwd=str(CDK8S_DIR)) + + output_file = CDK8S_DIR / "dist" / "loculus.k8s.yaml" - output = run_command(parameters) if args.template: - print(output.stdout) + with open(output_file) as f: + print(f.read()) + return + + # Apply the generated YAML + run_command(["kubectl", "apply", "-f", str(output_file), "--server-side", "--force-conflicts"]) def handle_helm_upgrade(): - parameters = [ - "helm", - "upgrade", - HELM_RELEASE_NAME, - HELM_CHART_DIR, + # Re-synth and re-apply (cdk8s is declarative, so upgrade = synth + apply) + synth_command = [ + "npx", "ts-node", "src/main.ts", + "--base-dir", str(HELM_CHART_DIR), ] - run_command(parameters) + run_command(synth_command, cwd=str(CDK8S_DIR)) + + output_file = CDK8S_DIR / "dist" / "loculus.k8s.yaml" + run_command(["kubectl", "apply", "-f", str(output_file), "--server-side", "--force-conflicts"]) def get_local_ip_mac() -> str: @@ -328,86 +346,118 @@ def generate_configs(from_live, live_host, enable_ena, values_files=None): print(f"Unprocessed config available in temp dir: {temp_dir_path}") - helm_chart = str(HELM_CHART_DIR) codespace_name = get_codespace_name() output_dir = ROOT_DIR / "website" / "tests" / "config" - backend_config_path = temp_dir_path / "backend_config.json" - generate_config( - helm_chart, - "templates/loculus-backend-config.yaml", - backend_config_path, - codespace_name, - from_live, - live_host, - values_files=values_files, - ) + # Build cdk8s args for config generation + cdk8s_args = [] - website_config_path = temp_dir_path / "website_config.json" - generate_config( - helm_chart, - "templates/loculus-website-config.yaml", - website_config_path, - codespace_name, - from_live, - live_host, - values_files=values_files, - ) + if values_files: + for values_file in values_files: + cdk8s_args += ["--values", values_file] - runtime_config_path = temp_dir_path / "runtime_config.json" - generate_config( - helm_chart, - "templates/loculus-website-config.yaml", - runtime_config_path, - 
codespace_name, - from_live, - live_host, - values_files=values_files, - ) + cdk8s_args += ["--set", "disableWebsite=true", "--set", "disableBackend=true"] + + if from_live: + if live_host: + number_of_dots = live_host.count(".") + if number_of_dots < 2: + raise ValueError("Currently only subdomains are supported as live-hosts") + cdk8s_args += ["--set", "environment=server"] + cdk8s_args += ["--set", f"host={live_host}"] + cdk8s_args += ["--set", "usePublicRuntimeConfigAsServerSide=true"] + else: + cdk8s_args += ["--set", "environment=local"] + cdk8s_args += ["--set", "testconfig=true"] if enable_ena: - ena_submission_configmap_path = temp_dir_path / "config.yaml" - ena_submission_configout_path = temp_dir_path / "ena-submission-config.yaml" - generate_config( - helm_chart, - "templates/ena-submission-config.yaml", - ena_submission_configmap_path, - codespace_name, - from_live, - live_host, - ena_submission_configout_path, - values_files=values_files, - enableEnaSubmission=True, - ) + cdk8s_args += ["--set", "disableEnaSubmission=false"] - ingest_configmap_path = temp_dir_path / "config.yaml" - ingest_template_path = "templates/ingest-config.yaml" - ingest_configout_path = temp_dir_path / "ingest-config.yaml" - generate_config( - helm_chart, - ingest_template_path, - ingest_configmap_path, - codespace_name, - from_live, - live_host, - ingest_configout_path, - values_files=values_files, - ) + if codespace_name: + public_runtime_config = { + "websiteUrl": f"https://{codespace_name}-3000.app.github.dev", + "backendUrl": f"https://{codespace_name}-8079.app.github.dev", + "lapisUrlTemplate": f"https://{codespace_name}-8080.app.github.dev/%organism%", + "keycloakUrl": f"https://{codespace_name}-8083.app.github.dev", + } + cdk8s_args += ["--set-json", f"public={json.dumps(public_runtime_config)}"] - prepro_configmap_path = temp_dir_path / "preprocessing-config.yaml" - prepro_template_path = "templates/loculus-preprocessing-config.yaml" - prepro_configout_path = 
temp_dir_path / "preprocessing-config.yaml" - generate_config( - helm_chart, - prepro_template_path, - prepro_configmap_path, - codespace_name, - from_live, - live_host, - prepro_configout_path, - values_files=values_files, - ) + cdk8s_args += ["--base-dir", str(HELM_CHART_DIR)] + + # Synth all resources + synth_command = ["npx", "ts-node", "src/main.ts"] + cdk8s_args + run_command(synth_command, cwd=str(CDK8S_DIR)) + + if args.dry_run: + return + + # Parse the generated YAML and extract configs + output_file = CDK8S_DIR / "dist" / "loculus.k8s.yaml" + with open(output_file) as f: + all_docs = list(yaml.full_load_all(f.read())) + + # Extract ConfigMaps by name + configmaps = {} + for doc in all_docs: + if doc and doc.get("kind") == "ConfigMap": + name = doc.get("metadata", {}).get("name", "") + configmaps[name] = doc + + # Backend config + if "loculus-backend-config" in configmaps: + config_data = configmaps["loculus-backend-config"]["data"]["backend_config.json"] + backend_config_path = temp_dir_path / "backend_config.json" + with open(backend_config_path, "w") as f: + f.write(config_data) + print(f"Wrote config to {backend_config_path}") + + # Website config + if "loculus-website-config" in configmaps: + config_data = configmaps["loculus-website-config"]["data"]["website_config.json"] + website_config_path = temp_dir_path / "website_config.json" + with open(website_config_path, "w") as f: + f.write(config_data) + print(f"Wrote config to {website_config_path}") + + # Runtime config + if "loculus-website-config" in configmaps: + config_data = configmaps["loculus-website-config"]["data"]["runtime_config.json"] + runtime_config_path = temp_dir_path / "runtime_config.json" + with open(runtime_config_path, "w") as f: + f.write(config_data) + print(f"Wrote config to {runtime_config_path}") + + # ENA submission config + if enable_ena: + for name, doc in configmaps.items(): + if "ena-submission" in name and "config" in name: + if "config.yaml" in doc.get("data", {}): + 
config_data = doc["data"]["config.yaml"] + ena_path = temp_dir_path / "ena-submission-config.yaml" + with open(ena_path, "w") as f: + f.write(config_data) + print(f"Wrote config to {ena_path}") + + # Ingest configs (per-organism) + for name, doc in configmaps.items(): + if name.startswith("ingest-config-") and "config.yaml" in doc.get("data", {}): + config_data = yaml.safe_load(doc["data"]["config.yaml"]) + organism = config_data.get("organism", name.replace("ingest-config-", "")) + ingest_path = temp_dir_path / f"ingest-config.{organism}.yaml" + with open(ingest_path, "w") as f: + yaml.dump(config_data, f) + print(f"Wrote config to {ingest_path}") + + # Preprocessing configs (per-organism) + for name, doc in configmaps.items(): + if name.startswith("loculus-preprocessing-config-") and "preprocessing-config.yaml" in doc.get("data", {}): + config_data = yaml.safe_load(doc["data"]["preprocessing-config.yaml"]) + organism = config_data.get("organism", name.replace("loculus-preprocessing-config-", "")) + prepro_path = temp_dir_path / f"preprocessing-config.{organism}.yaml" + with open(prepro_path, "w") as f: + yaml.dump(config_data, f) + print(f"Wrote config to {prepro_path}") run_command( [ @@ -420,87 +470,6 @@ def generate_configs(from_live, live_host, enable_ena, values_files=None): print(f"Config generation succeeded, processed config files available in {output_dir}") -def generate_config( - helm_chart, - template, - configmap_path, - codespace_name=None, - from_live=False, - live_host=None, - output_path=None, - values_files=None, - enableEnaSubmission=False, -): - if from_live and live_host: - number_of_dots = live_host.count(".") - if number_of_dots < 2: # this is an imperfect hack - raise ValueError("Currently only subdomains are supported as live-hosts") - # To be able to cope with top level domains we need more logic to use the right subdomain separator - but we should probably avoid this anyway as we shouldn't use production domains - helm_template_cmd = [ - 
"helm", - "template", - "name-does-not-matter", - helm_chart, - "--show-only", - template, - ] - - if values_files: - for values_file in values_files: - helm_template_cmd.extend(["-f", values_file]) - - helm_template_cmd.append("--skip-schema-validation") - - if not output_path: - output_path = configmap_path - - if codespace_name: - helm_template_cmd.extend(get_codespace_params(codespace_name)) - - helm_template_cmd.extend(["--set", "disableWebsite=true"]) - helm_template_cmd.extend(["--set", "disableBackend=true"]) - if from_live: - helm_template_cmd.extend(["--set", "environment=server"]) - helm_template_cmd.extend(["--set", f"host={live_host}"]) - helm_template_cmd.extend(["--set", "usePublicRuntimeConfigAsServerSide=true"]) - else: - helm_template_cmd.extend(["--set", "environment=local"]) - helm_template_cmd.extend(["--set", "testconfig=true"]) - if enableEnaSubmission: - helm_template_cmd.extend(["--set", "disableEnaSubmission=false"]) - helm_output = run_command(helm_template_cmd, capture_output=True, text=True).stdout - if args.dry_run: - return - - parsed_yaml = list(yaml.full_load_all(helm_output)) - if len(parsed_yaml) == 1: - config_data = parsed_yaml[0]["data"][configmap_path.name] - - with open(output_path, "w") as f: - f.write(config_data) - - print(f"Wrote config to {output_path}") - elif any(substring in template for substring in ["ingest", "preprocessing"]): - for doc in parsed_yaml: - config_data = yaml.safe_load(doc["data"][configmap_path.name]) - with open(output_path.with_suffix(f".{config_data['organism']}.yaml"), "w") as f: - yaml.dump(config_data, f) - print(f"Wrote config to {f.name}") - - -def get_codespace_params(codespace_name): - public_runtime_config = { - "websiteUrl": f"https://{codespace_name}-3000.app.github.dev", - "backendUrl": f"https://{codespace_name}-8079.app.github.dev", - "lapisUrlTemplate": f"https://{codespace_name}-8080.app.github.dev/%organism%", - "keycloakUrl": f"https://{codespace_name}-8083.app.github.dev", - } - 
return [ - "--set-json", - f"public={json.dumps(public_runtime_config)}", - ] - - def install_secret_generator(): add_helm_repo_command = [ "helm", diff --git a/kubernetes/cdk8s/cdk8s.yaml b/kubernetes/cdk8s/cdk8s.yaml new file mode 100644 index 0000000000..7abd7ede78 --- /dev/null +++ b/kubernetes/cdk8s/cdk8s.yaml @@ -0,0 +1,2 @@ +language: typescript +app: npx ts-node src/main.ts diff --git a/kubernetes/cdk8s/package-lock.json b/kubernetes/cdk8s/package-lock.json new file mode 100644 index 0000000000..6f3be2f54d --- /dev/null +++ b/kubernetes/cdk8s/package-lock.json @@ -0,0 +1,427 @@ +{ + "name": "loculus-cdk8s", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "loculus-cdk8s", + "version": "1.0.0", + "dependencies": { + "apache-md5": "^1.1.8", + "cdk8s": "^2.68.0", + "cdk8s-plus-29": "^2.6.0", + "constructs": "^10.3.0", + "js-yaml": "^4.1.0", + "minimist": "^1.2.8" + }, + "devDependencies": { + "@types/js-yaml": "^4.0.9", + "@types/minimist": "^1.2.5", + "@types/node": "^22.0.0", + "prettier": "3.8.1", + "ts-node": "^10.9.2", + "typescript": "^5.5.0" + } + }, + "node_modules/@cspotcode/source-map-support": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", + "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "0.3.9" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": 
"https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", + "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + }, + "node_modules/@tsconfig/node10": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.12.tgz", + "integrity": "sha512-UCYBaeFvM11aU2y3YPZ//O5Rhj+xKyzy7mvcIoAjASbigy8mHMryP5cK7dgjlz2hWxh1g5pLw084E0a/wlUSFQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@tsconfig/node12": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz", + "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==", + "dev": true, + "license": "MIT" + }, + "node_modules/@tsconfig/node14": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz", + "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==", + "dev": true, + "license": "MIT" + }, + "node_modules/@tsconfig/node16": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz", + "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/js-yaml": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/@types/js-yaml/-/js-yaml-4.0.9.tgz", + "integrity": 
"sha512-k4MGaQl5TGo/iipqb2UDG2UwjXziSWkh0uysQelTlJpX1qGlpUZYm8PnO4DxG1qBomtJUdYJ6qR6xdIah10JLg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/minimist": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/@types/minimist/-/minimist-1.2.5.tgz", + "integrity": "sha512-hov8bUuiLiyFPGyFPE1lwWhmzYbirOXQNNo40+y3zow8aFVTeyn3VWL0VFFfdNddA8S4Vf0Tc062rzyNr7Paag==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "22.19.11", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.19.11.tgz", + "integrity": "sha512-BH7YwL6rA93ReqeQS1c4bsPpcfOmJasG+Fkr6Y59q83f9M1WcBRHR2vM+P9eOisYRcN3ujQoiZY8uk5W+1WL8w==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "dev": true, + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.4", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", + "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "acorn": "^8.11.0" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/apache-md5": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/apache-md5/-/apache-md5-1.1.8.tgz", + "integrity": "sha512-FCAJojipPn0bXjuEpjOOOMN8FZDkxfWWp4JGN9mifU2IhxvKyXZYqpzPHdnTSUpmPDy+tsslB6Z1g+Vg6nVbYA==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/arg": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", + "integrity": 
"sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==", + "dev": true, + "license": "MIT" + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "license": "Python-2.0" + }, + "node_modules/cdk8s": { + "version": "2.70.47", + "resolved": "https://registry.npmjs.org/cdk8s/-/cdk8s-2.70.47.tgz", + "integrity": "sha512-9T3+bq9qqEt9/MLufPxxuggYY5npqsRRlFzCmq/VzfiBPoCMt7CPVw/ksl7XldzsjJeSfMUMb3lXc94RwOLBmQ==", + "bundleDependencies": [ + "fast-json-patch", + "follow-redirects", + "yaml" + ], + "license": "Apache-2.0", + "dependencies": { + "fast-json-patch": "^3.1.1", + "follow-redirects": "^1.15.11", + "yaml": "2.8.2" + }, + "engines": { + "node": ">= 16.20.0" + }, + "peerDependencies": { + "constructs": "^10" + } + }, + "node_modules/cdk8s-plus-29": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/cdk8s-plus-29/-/cdk8s-plus-29-2.6.0.tgz", + "integrity": "sha512-H8JuwF+6ZQCHrlPHstTwNGsQ0MRE9qpJDUKlrMDJAgEJSiBUa3Qeca609TuVN07/7Mxj2kFmVkvVjUeGZa0N9Q==", + "bundleDependencies": [ + "minimatch" + ], + "license": "Apache-2.0", + "dependencies": { + "minimatch": "^3.1.2" + }, + "engines": { + "node": ">= 16.20.0" + }, + "peerDependencies": { + "cdk8s": "^2.68.11", + "constructs": "^10.3.0" + } + }, + "node_modules/cdk8s-plus-29/node_modules/balanced-match": { + "version": "1.0.2", + "inBundle": true, + "license": "MIT" + }, + "node_modules/cdk8s-plus-29/node_modules/concat-map": { + "version": "0.0.1", + "inBundle": true, + "license": "MIT" + }, + "node_modules/cdk8s-plus-29/node_modules/minimatch": { + "version": "3.1.2", + "inBundle": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + 
"node_modules/cdk8s-plus-29/node_modules/minimatch/node_modules/brace-expansion": { + "version": "1.1.11", + "inBundle": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/cdk8s/node_modules/fast-json-patch": { + "version": "3.1.1", + "inBundle": true, + "license": "MIT" + }, + "node_modules/cdk8s/node_modules/follow-redirects": { + "version": "1.15.11", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/cdk8s/node_modules/yaml": { + "version": "2.8.2", + "inBundle": true, + "license": "ISC", + "bin": { + "yaml": "bin.mjs" + }, + "engines": { + "node": ">= 14.6" + }, + "funding": { + "url": "https://github.com/sponsors/eemeli" + } + }, + "node_modules/constructs": { + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/constructs/-/constructs-10.5.0.tgz", + "integrity": "sha512-zWjwqIgk4nAWmQGrPnDWv+M2Yly6m7ROyqmmQVLgJiHnWP762It22uWFaF2Pu/sSx0u8WsoUcvt0PZ4DQIQYwQ==", + "license": "Apache-2.0" + }, + "node_modules/create-require": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", + "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/diff": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.4.tgz", + "integrity": "sha512-X07nttJQkwkfKfvTPG/KSnE2OMdcUCao6+eXF3wmnIQRn2aPAHH3VxDbDOdegkd6JbPsXqShpvEOHfAT+nCNwQ==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": 
"sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/make-error": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", + "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", + "dev": true, + "license": "ISC" + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/prettier": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.8.1.tgz", + "integrity": "sha512-UOnG6LftzbdaHZcKoPFtOcCKztrQ57WkHDeRD9t/PTQtmT0NHSeWWepj6pS0z/N7+08BHFDQVUrfmfMRcZwbMg==", + "dev": true, + "license": "MIT", + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, + "node_modules/ts-node": { + "version": "10.9.2", + "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz", + "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@cspotcode/source-map-support": "^0.8.0", + "@tsconfig/node10": "^1.0.7", + "@tsconfig/node12": "^1.0.7", + "@tsconfig/node14": "^1.0.0", + "@tsconfig/node16": "^1.0.2", + "acorn": "^8.4.1", + "acorn-walk": "^8.1.1", + "arg": "^4.1.0", + "create-require": "^1.1.0", + "diff": "^4.0.1", + "make-error": "^1.1.1", + "v8-compile-cache-lib": "^3.0.1", + "yn": "3.1.1" + }, + "bin": { + "ts-node": "dist/bin.js", + "ts-node-cwd": 
"dist/bin-cwd.js", + "ts-node-esm": "dist/bin-esm.js", + "ts-node-script": "dist/bin-script.js", + "ts-node-transpile-only": "dist/bin-transpile.js", + "ts-script": "dist/bin-script-deprecated.js" + }, + "peerDependencies": { + "@swc/core": ">=1.2.50", + "@swc/wasm": ">=1.2.50", + "@types/node": "*", + "typescript": ">=2.7" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "@swc/wasm": { + "optional": true + } + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/v8-compile-cache-lib": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", + "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==", + "dev": true, + "license": "MIT" + }, + "node_modules/yn": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", + "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + } + } +} diff --git a/kubernetes/cdk8s/package.json b/kubernetes/cdk8s/package.json new file mode 100644 index 0000000000..75097d05e5 --- /dev/null +++ b/kubernetes/cdk8s/package.json @@ -0,0 +1,29 @@ +{ + "name": "loculus-cdk8s", + "version": "1.0.0", + 
"description": "CDK8s deployment for Loculus", + "main": "src/main.ts", + "scripts": { + "build": "tsc", + "synth": "npx ts-node src/main.ts", + "format": "prettier --write \"src/**/*.ts\"", + "check-format": "prettier --check \"src/**/*.ts\"", + "test": "echo \"No tests yet\"" + }, + "dependencies": { + "cdk8s": "^2.68.0", + "cdk8s-plus-29": "^2.6.0", + "constructs": "^10.3.0", + "js-yaml": "^4.1.0", + "apache-md5": "^1.1.8", + "minimist": "^1.2.8" + }, + "devDependencies": { + "@types/js-yaml": "^4.0.9", + "@types/minimist": "^1.2.5", + "@types/node": "^22.0.0", + "prettier": "3.8.1", + "ts-node": "^10.9.2", + "typescript": "^5.5.0" + } +} diff --git a/kubernetes/cdk8s/src/chart.ts b/kubernetes/cdk8s/src/chart.ts new file mode 100644 index 0000000000..f631498878 --- /dev/null +++ b/kubernetes/cdk8s/src/chart.ts @@ -0,0 +1,55 @@ +import { Chart } from 'cdk8s'; +import { Construct } from 'constructs'; +import { LoculusValues } from './values'; +import { getEnabledOrganisms } from './organisms'; + +import { Secrets } from './constructs/secrets'; +import { Database } from './constructs/database'; +import { Minio } from './constructs/minio'; +import { Keycloak } from './constructs/keycloak'; +import { Backend } from './constructs/backend'; +import { Website } from './constructs/website'; +import { Silo } from './constructs/silo'; +import { Lapis, LapisIngress } from './constructs/lapis'; +import { Preprocessing } from './constructs/preprocessing'; +import { Ingest } from './constructs/ingest'; +import { EnaSubmission } from './constructs/ena-submission'; +import { Docs } from './constructs/docs'; +import { MainIngress } from './constructs/ingress'; + +export class LoculusChart extends Chart { + constructor(scope: Construct, id: string, values: LoculusValues) { + super(scope, id); + + // Secrets + new Secrets(this, 'secrets', values); + + // Core infrastructure + new Database(this, 'database', values); + new Minio(this, 'minio', values); + new Keycloak(this, 
'keycloak', values); + + // Main services + new Backend(this, 'backend', values); + new Website(this, 'website', values); + + // Per-organism services + const organisms = getEnabledOrganisms(values); + for (const org of organisms) { + new Silo(this, `silo-${org.key}`, values, org); + new Lapis(this, `lapis-${org.key}`, values, org); + new Preprocessing(this, `prepro-${org.key}`, values, org); + new Ingest(this, `ingest-${org.key}`, values, org); + } + + // LAPIS shared ingress (always created - handles local and server) + new LapisIngress(this, 'lapis-ingress', values); + + // Optional services + new EnaSubmission(this, 'ena-submission', values); + new Docs(this, 'docs', values); + + // Server-only ingress + new MainIngress(this, 'main-ingress', values); + } +} diff --git a/kubernetes/cdk8s/src/config-generation.ts b/kubernetes/cdk8s/src/config-generation.ts new file mode 100644 index 0000000000..578ade644f --- /dev/null +++ b/kubernetes/cdk8s/src/config-generation.ts @@ -0,0 +1,709 @@ +/** + * Config generation functions. + * Replaces _common-metadata.tpl and related helpers. + * Generates the JSON content for backend-config, website-config, etc. 
+ */ + +import { LoculusValues, MetadataField, OrganismConfig, ReferenceGenomeSegment } from './values'; +import { + getEnabledOrganisms, + patchMetadataSchema, + getNucleotideSegmentNames, + isSegmented, + mergeReferenceGenomes, + lineageSystemForOrganism, + flattenPreprocessingVersions, +} from './organisms'; +import { + backendUrl, + websiteUrl, + keycloakUrl, + lapisUrlTemplate, + generateExternalLapisUrls, + generateInternalLapisUrls, +} from './urls'; + +/** Common metadata fields required for all organisms */ +function commonMetadataFields(values: LoculusValues): MetadataField[] { + const fields: MetadataField[] = [ + { + name: 'accessionVersion', + type: 'string', + notSearchable: true, + hideOnSequenceDetailsPage: true, + includeInDownloadsByDefault: true, + }, + { name: 'accession', type: 'string', notSearchable: true, hideOnSequenceDetailsPage: true }, + { name: 'version', type: 'int', hideOnSequenceDetailsPage: true }, + { + name: 'submissionId', + displayName: 'Submission ID', + type: 'string', + header: 'Submission details', + orderOnDetailsPage: 5000, + enableSubstringSearch: true, + includeInDownloadsByDefault: true, + }, + { + name: 'isRevocation', + displayName: 'Is revocation', + type: 'boolean', + autocomplete: true, + hideOnSequenceDetailsPage: true, + }, + { + name: 'submitter', + type: 'string', + generateIndex: true, + autocomplete: true, + hideOnSequenceDetailsPage: true, + header: 'Submission details', + orderOnDetailsPage: 5010, + }, + { + name: 'groupName', + type: 'string', + generateIndex: true, + autocomplete: true, + header: 'Submission details', + displayName: 'Submitting group', + includeInDownloadsByDefault: true, + orderOnDetailsPage: 5020, + customDisplay: { type: 'submittingGroup', displayGroup: 'group' }, + }, + { + name: 'groupId', + type: 'int', + autocomplete: true, + header: 'Submission details', + displayName: 'Submitting group (numeric ID)', + orderOnDetailsPage: 5030, + customDisplay: { type: 'submittingGroup', 
displayGroup: 'group' }, + }, + { + name: 'submittedAtTimestamp', + type: 'timestamp', + displayName: 'Date submitted', + header: 'Submission details', + orderOnDetailsPage: 5040, + }, + { + name: 'submittedDate', + type: 'string', + hideOnSequenceDetailsPage: true, + generateIndex: true, + autocomplete: true, + displayName: 'Date submitted (exact)', + orderOnDetailsPage: 5050, + }, + { + name: 'releasedAtTimestamp', + type: 'timestamp', + displayName: 'Date released', + header: 'Submission details', + columnWidth: 100, + orderOnDetailsPage: 5060, + }, + { + name: 'releasedDate', + type: 'string', + hideOnSequenceDetailsPage: true, + generateIndex: true, + autocomplete: true, + displayName: 'Date released (exact)', + columnWidth: 100, + orderOnDetailsPage: 5070, + }, + ]; + + if (values.dataUseTerms.enabled) { + fields.push( + { + name: 'dataUseTerms', + type: 'string', + generateIndex: true, + autocomplete: true, + displayName: 'Data use terms', + initiallyVisible: true, + includeInDownloadsByDefault: true, + customDisplay: { type: 'dataUseTerms' }, + header: 'Data use terms', + orderOnDetailsPage: 610, + }, + { + name: 'dataUseTermsRestrictedUntil', + type: 'date', + displayName: 'Data use terms restricted until', + hideOnSequenceDetailsPage: true, + header: 'Data use terms', + orderOnDetailsPage: 620, + }, + { + name: 'dataBecameOpenAt', + type: 'date', + displayName: 'Date data became open', + hideOnSequenceDetailsPage: true, + header: 'Data use terms', + orderOnDetailsPage: 625, + }, + ); + if (values.dataUseTerms.urls) { + fields.push({ + name: 'dataUseTermsUrl', + displayName: 'Data use terms URL', + type: 'string', + notSearchable: true, + header: 'Data use terms', + includeInDownloadsByDefault: true, + customDisplay: { type: 'link', url: '__value__' }, + orderOnDetailsPage: 630, + }); + } + } + + fields.push( + { + name: 'versionStatus', + displayName: 'Version status', + type: 'string', + autocomplete: true, + hideOnSequenceDetailsPage: true, + }, + { + 
name: 'versionComment', + type: 'string', + displayName: 'Version comment', + header: 'Submission details', + orderOnDetailsPage: 5000, + }, + { name: 'pipelineVersion', type: 'int', notSearchable: true, hideOnSequenceDetailsPage: true }, + ); + + return fields; +} + +/** Generate standard website metadata entry */ +function standardWebsiteMetadata(field: MetadataField): any { + const entry: any = { + type: field.type || 'string', + }; + if (field.autocomplete) entry.autocomplete = field.autocomplete; + if (field.enableSubstringSearch) entry.substringSearch = field.enableSubstringSearch; + if (field.notSearchable) entry.notSearchable = field.notSearchable; + if (field.initiallyVisible) entry.initiallyVisible = field.initiallyVisible; + if (field.hideInSearchResultsTable) entry.hideInSearchResultsTable = field.hideInSearchResultsTable; + if (field.type === 'timestamp' || field.type === 'date' || field.rangeSearch) entry.rangeSearch = true; + if (field.rangeOverlapSearch) entry.rangeOverlapSearch = field.rangeOverlapSearch; + if (field.lineageSystem) entry.lineageSearch = true; + if (field.hideOnSequenceDetailsPage) entry.hideOnSequenceDetailsPage = field.hideOnSequenceDetailsPage; + if (field.columnWidth) entry.columnWidth = field.columnWidth; + if (field.order) entry.order = field.order; + if (field.orderOnDetailsPage) entry.orderOnDetailsPage = field.orderOnDetailsPage; + if (field.includeInDownloadsByDefault) entry.includeInDownloadsByDefault = field.includeInDownloadsByDefault; + if (field.onlyForReference) entry.onlyForReference = field.onlyForReference; + if (field.customDisplay) { + entry.customDisplay = { type: field.customDisplay.type }; + if (field.customDisplay.url) entry.customDisplay.url = field.customDisplay.url; + if (field.customDisplay.linkMenuItems) entry.customDisplay.linkMenuItems = field.customDisplay.linkMenuItems; + if (field.customDisplay.displayGroup) entry.customDisplay.displayGroup = field.customDisplay.displayGroup; + if 
(field.customDisplay.label) entry.customDisplay.label = field.customDisplay.label; + if (field.customDisplay.html) entry.customDisplay.html = field.customDisplay.html; + } + return entry; +} + +/** Generate website metadata from metadata array + reference genomes */ +function generateWebsiteMetadata(metadata: MetadataField[], referenceGenomes: ReferenceGenomeSegment[]): any[] { + const segments = getNucleotideSegmentNames(referenceGenomes); + const segmented = isSegmented(referenceGenomes); + const fields: any[] = []; + + for (const field of metadata) { + if (segmented && field.perSegment) { + for (const segment of segments) { + const entry = standardWebsiteMetadata(field); + entry.name = `${field.name}_${segment}`; + if (field.displayName) entry.displayName = `${field.displayName} ${segment}`; + if (field.oneHeader) { + entry.header = field.header || 'Other'; + } else { + entry.header = `${field.header || 'Other'} ${segment}`; + } + if (field.customDisplay?.displayGroup) { + entry.customDisplay = { + type: field.customDisplay.type, + displayGroup: `${field.customDisplay.displayGroup}_${segment}`, + }; + if (field.customDisplay.label) { + entry.customDisplay.label = `${field.customDisplay.label} ${segment}`; + } + } + fields.push(entry); + } + } else { + const entry = standardWebsiteMetadata(field); + entry.name = field.name; + if (field.displayName) entry.displayName = field.displayName; + entry.header = field.header || 'Other'; + fields.push(entry); + } + } + + return fields; +} + +/** Generate backend metadata */ +function generateBackendMetadata(metadata: MetadataField[], referenceGenomes: ReferenceGenomeSegment[]): any[] { + const segments = getNucleotideSegmentNames(referenceGenomes); + const segmented = isSegmented(referenceGenomes); + const fields: any[] = []; + + for (const field of metadata) { + if (segmented && field.perSegment) { + for (const segment of segments) { + fields.push({ name: `${field.name}_${segment}`, type: field.type || 'string' }); + } + 
} else { + fields.push({ name: field.name, type: field.type || 'string' }); + } + } + fields.push({ name: 'versionComment', type: 'string' }); + return fields; +} + +/** Generate backend external metadata (INSDC header fields) */ +function generateBackendExternalMetadata(metadata: MetadataField[], referenceGenomes: ReferenceGenomeSegment[]): any[] { + const segments = getNucleotideSegmentNames(referenceGenomes); + const segmented = isSegmented(referenceGenomes); + const fields: any[] = []; + + for (const field of metadata) { + if (field.header === 'INSDC') { + if (segmented && field.perSegment) { + for (const segment of segments) { + const entry: any = { + name: `${field.name}_${segment}`, + type: field.type || 'string', + externalMetadataUpdater: 'ena', + }; + if (field.required) entry.required = field.required; + fields.push(entry); + } + } else { + const entry: any = { name: field.name, type: field.type || 'string', externalMetadataUpdater: 'ena' }; + if (field.required) entry.required = field.required; + fields.push(entry); + } + } + } + return fields; +} + +/** Generate submission data types config */ +function submissionDataTypes(schema: any): any { + const result: any = {}; + if (schema.submissionDataTypes) { + result.consensusSequences = + schema.submissionDataTypes.consensusSequences !== undefined + ? 
schema.submissionDataTypes.consensusSequences + : true; + if (schema.submissionDataTypes.maxSequencesPerEntry !== undefined) { + result.maxSequencesPerEntry = schema.submissionDataTypes.maxSequencesPerEntry; + } + if (schema.submissionDataTypes.files !== undefined) { + result.files = schema.submissionDataTypes.files; + } + } else { + result.consensusSequences = true; + } + return result; +} + +/** Generate input fields (filtering out noInput and ordering with extraInputFields) */ +function generateInputFields(schema: any): any[] { + const metadata = schema.metadata || []; + const extraFields = schema.extraInputFields || []; + const TO_KEEP = [ + 'name', + 'displayName', + 'definition', + 'guidance', + 'example', + 'required', + 'noEdit', + 'desired', + 'options', + ]; + + const orderedFields: any[] = []; + + // Add fields with position "first" + for (const field of extraFields) { + if (field.position === 'first') orderedFields.push(field); + } + + // Add filtered metadata fields (exclude noInput) + for (const field of metadata) { + if (!field.noInput) orderedFields.push(field); + } + + // Add fields with position "last" + for (const field of extraFields) { + if (field.position === 'last') orderedFields.push(field); + } + + // Filter to only keep allowed keys + return orderedFields.map((field) => { + const filtered: any = {}; + for (const key of TO_KEEP) { + if (key in field) filtered[key] = field[key]; + } + return filtered; + }); +} + +/** Generate the full website config JSON */ +export function generateWebsiteConfig(values: LoculusValues): any { + const common = commonMetadataFields(values); + const config: any = { + name: values.name, + logo: values.logo, + }; + + if (values.sequenceFlagging) config.sequenceFlagging = values.sequenceFlagging; + if (values.gitHubMainUrl) config.gitHubMainUrl = values.gitHubMainUrl; + if (values.bannerMessageURL) config.bannerMessageURL = values.bannerMessageURL; + if (values.bannerMessage) { + config.bannerMessage = 
values.bannerMessage; + } else if (values.runDevelopmentMainDatabase || values.runDevelopmentKeycloakDatabase) { + config.bannerMessage = 'Warning: Development or Keycloak main database is enabled. Development environment only.'; + } + if (values.submissionBannerMessageURL) config.submissionBannerMessageURL = values.submissionBannerMessageURL; + if (values.submissionBannerMessage) config.submissionBannerMessage = values.submissionBannerMessage; + if (values.gitHubEditLink) config.gitHubEditLink = values.gitHubEditLink; + if (values.welcomeMessageHTML) config.welcomeMessageHTML = values.welcomeMessageHTML; + if (values.additionalHeadHTML) config.additionalHeadHTML = values.additionalHeadHTML; + + config.enableLoginNavigationItem = values.website.websiteConfig.enableLoginNavigationItem; + config.enableSubmissionNavigationItem = values.website.websiteConfig.enableSubmissionNavigationItem; + config.enableSubmissionPages = values.website.websiteConfig.enableSubmissionPages; + config.enableSeqSets = values.seqSets.enabled; + if (values.seqSets.fieldsToDisplay) config.seqSetsFieldsToDisplay = values.seqSets.fieldsToDisplay; + config.enableDataUseTerms = values.dataUseTerms.enabled; + config.accessionPrefix = values.accessionPrefix; + + config.organisms = {}; + for (const org of getEnabledOrganisms(values)) { + const instance = org.contents; + const patchedSchema = patchMetadataSchema(instance.schema); + const allMetadata = [...common, ...patchedSchema.metadata]; + const websiteMetadata = generateWebsiteMetadata(allMetadata, instance.referenceGenomes); + + const organismConfig: any = { + schema: { + organismName: patchedSchema.organismName, + }, + }; + + if (patchedSchema.linkOuts) { + organismConfig.schema.linkOuts = patchedSchema.linkOuts.map((lo: any) => { + const entry: any = { name: lo.name, url: lo.url }; + if (lo.maxNumberOfRecommendedEntries) entry.maxNumberOfRecommendedEntries = lo.maxNumberOfRecommendedEntries; + return entry; + }); + } + + 
organismConfig.schema.loadSequencesAutomatically = patchedSchema.loadSequencesAutomatically || false; + if (patchedSchema.richFastaHeaderFields) + organismConfig.schema.richFastaHeaderFields = patchedSchema.richFastaHeaderFields; + + organismConfig.schema.submissionDataTypes = submissionDataTypes(patchedSchema); + + if (patchedSchema.image) organismConfig.schema.image = patchedSchema.image; + if (patchedSchema.description) organismConfig.schema.description = patchedSchema.description; + + organismConfig.schema.primaryKey = 'accessionVersion'; + + const inputFields = generateInputFields(patchedSchema); + inputFields.push({ + name: 'versionComment', + displayName: 'Version comment', + definition: 'Reason for revising sequences or other general comments concerning a specific version', + example: + 'Fixed an issue in previous version where low-coverage nucleotides were erroneously filled with reference sequence', + desired: true, + }); + organismConfig.schema.inputFields = inputFields; + + if (patchedSchema.files) organismConfig.schema.files = patchedSchema.files; + + // Website metadata + file entries + const metadataEntries = [...websiteMetadata]; + if (patchedSchema.files) { + for (const file of patchedSchema.files) { + metadataEntries.push({ + name: file.name, + type: 'string', + header: 'Files', + noInput: true, + customDisplay: { type: 'fileList' }, + }); + } + } + organismConfig.schema.metadata = metadataEntries; + + if (patchedSchema.metadataTemplate) organismConfig.schema.metadataTemplate = patchedSchema.metadataTemplate; + + // Merge website-specific config from schema + if (patchedSchema.website) { + Object.assign(organismConfig.schema, patchedSchema.website); + } + + organismConfig.referenceGenomes = instance.referenceGenomes; + config.organisms[org.key] = organismConfig; + } + + return config; +} + +/** Generate the full backend config JSON */ +export function generateBackendConfig(values: LoculusValues): any { + const config: any = { + accessionPrefix: 
values.accessionPrefix, + zstdCompressionLevel: values.zstdCompressionLevel, + pipelineVersionUpgradeCheckIntervalSeconds: values.pipelineVersionUpgradeCheckIntervalSeconds, + name: values.name, + dataUseTerms: values.dataUseTerms, + }; + + if (values.fileSharing) config.fileSharing = values.fileSharing; + + config.websiteUrl = websiteUrl(values); + config.backendUrl = backendUrl(values); + + config.organisms = {}; + for (const org of getEnabledOrganisms(values)) { + const instance = org.contents; + const patchedSchema = patchMetadataSchema(instance.schema); + // Backend config uses ONLY organism-specific metadata (NOT common metadata fields) + const backendMeta = generateBackendMetadata(patchedSchema.metadata, instance.referenceGenomes); + const externalMeta = generateBackendExternalMetadata(patchedSchema.metadata, instance.referenceGenomes); + + const organismConfig: any = { + schema: { + organismName: patchedSchema.organismName, + submissionDataTypes: submissionDataTypes(patchedSchema), + metadata: backendMeta, + externalMetadata: externalMeta.length > 0 ? 
externalMeta : [], + earliestReleaseDate: patchedSchema.earliestReleaseDate || { enabled: false, externalFields: [] }, + }, + referenceGenome: mergeReferenceGenomes(instance.referenceGenomes), + }; + + if (patchedSchema.files) organismConfig.schema.files = patchedSchema.files; + + config.organisms[org.key] = organismConfig; + } + + return config; +} + +/** Generate the public runtime config section */ +export function generatePublicRuntimeConfig(values: LoculusValues): any { + return { + backendUrl: backendUrl(values), + lapisUrls: generateExternalLapisUrls(values), + keycloakUrl: keycloakUrl(values), + }; +} + +/** Generate runtime config for website */ +export function generateRuntimeConfig(values: LoculusValues): any { + const config: any = { + name: values.name, + insecureCookies: values.insecureCookies || false, + serverSide: {} as any, + public: generatePublicRuntimeConfig(values), + backendKeycloakClientSecret: '[[backendKeycloakClientSecret]]', + }; + + if (values.usePublicRuntimeConfigAsServerSide) { + config.serverSide = generatePublicRuntimeConfig(values); + } else { + let serverBackendUrl: string; + if (values.disableBackend) { + serverBackendUrl = `http://${values.localHost}:8079`; + } else { + serverBackendUrl = 'http://loculus-backend-service:8079'; + } + + let serverKeycloakUrl: string; + if (!values.disableWebsite) { + serverKeycloakUrl = 'http://loculus-keycloak-service:8083'; + } else { + serverKeycloakUrl = `http://${values.localHost}:8083`; + } + + config.serverSide = { + backendUrl: serverBackendUrl, + lapisUrls: generateInternalLapisUrls(values), + keycloakUrl: serverKeycloakUrl, + }; + } + + return config; +} + +/** Generate SILO database config for an organism */ +export function generateSiloDatabaseConfig( + schema: any, + commonMeta: MetadataField[], + referenceGenomes: ReferenceGenomeSegment[], +): any { + const segments = getNucleotideSegmentNames(referenceGenomes); + const segmented = isSegmented(referenceGenomes); + const allMetadata 
= [...commonMeta, ...schema.metadata]; + + const metadata: any[] = []; + for (const field of allMetadata) { + if (segmented && field.perSegment) { + for (const segment of segments) { + metadata.push(siloMetadataEntry(field, `${field.name}_${segment}`)); + } + } else { + metadata.push(siloMetadataEntry(field, field.name)); + } + } + + // Add file fields + if (schema.files) { + for (const file of schema.files) { + metadata.push({ type: 'string', name: file.name }); + } + } + + return { + schema: { + instanceName: schema.organismName, + opennessLevel: 'OPEN', + metadata, + primaryKey: 'accessionVersion', + features: [{ name: 'generalizedAdvancedQuery' }], + }, + }; +} + +function siloMetadataEntry(field: MetadataField, name: string): any { + const type = field.type || 'string'; + const siloType = type === 'timestamp' ? 'int' : type === 'authors' ? 'string' : type; + const entry: any = { type: siloType, name }; + if (field.generateIndex) entry.generateIndex = field.generateIndex; + if (field.lineageSystem) { + entry.generateIndex = true; + entry.generateLineageIndex = 'lineage_definitions'; + } + return entry; +} + +/** Generate preprocessing specs for an organism */ +export function generatePreprocessingSpecs( + metadata: MetadataField[], + referenceGenomes: ReferenceGenomeSegment[], +): Record { + const segments = getNucleotideSegmentNames(referenceGenomes); + const segmented = isSegmented(referenceGenomes); + const specs: Record = {}; + + for (const field of metadata) { + if (segmented && field.perSegment) { + for (const segment of segments) { + const key = `${field.name}_${segment}`; + specs[key] = sharedPreproSpec(field, segment); + } + } else { + specs[field.name] = sharedPreproSpec(field, ''); + } + } + + return specs; +} + +function sharedPreproSpec(field: MetadataField, segment: string): any { + const spec: any = {}; + + if (field.preprocessing) { + spec.function = field.preprocessing.function || 'identity'; + if (field.preprocessing.inputs) { + spec.inputs = 
{ ...field.preprocessing.inputs }; + } + const args: any = {}; + if (segment) args.segment = segment; + if (field.type) args.type = field.type; + if (field.options) { + args.options = field.options.map((o: any) => o.name); + } + if (field.preprocessing.args) { + Object.assign(args, field.preprocessing.args); + } + spec.args = Object.keys(args).length > 0 ? args : null; + } else { + spec.function = 'identity'; + spec.inputs = { + input: segment ? `${field.name}_${segment}` : field.name, + }; + const args: any = {}; + if (segment) args.segment = segment; + if (field.type) args.type = field.type; + spec.args = Object.keys(args).length > 0 ? args : null; + } + + if (field.required) spec.required = true; + return spec; +} + +/** Generate ENA submission config */ +export function generateENASubmissionConfig(values: LoculusValues): any { + const enaOrganisms: Record = {}; + + for (const org of getEnabledOrganisms(values)) { + const instance = org.contents; + if (!instance.enaDeposition) continue; + + for (const [suborganismName, configFileRaw] of Object.entries(instance.enaDeposition)) { + const configFile = configFileRaw as any; + const patchedSchema = patchMetadataSchema(instance.schema); + const segments = getNucleotideSegmentNames(instance.referenceGenomes); + const externalMeta = generateBackendExternalMetadata(patchedSchema.metadata, instance.referenceGenomes); + + const entry: any = {}; + if (suborganismName !== 'singleReference') { + entry.loculusOrganism = org.key; + } + if (configFile.configFile) { + Object.assign(entry, configFile.configFile); + } + if (configFile.referenceIdentifierField) { + entry.referenceIdentifierField = configFile.referenceIdentifierField; + } + entry.organismName = patchedSchema.organismName; + entry.segments = segments; + entry.externalMetadata = externalMeta.length > 0 ? externalMeta : []; + + const key = suborganismName === 'singleReference' ? 
org.key : suborganismName;
      enaOrganisms[key] = entry;
    }
  }

  return enaOrganisms;
}

/**
 * Generate the ingest rename mapping (INSDC source column -> Loculus field
 * name) from fields that declare an `ingest` source name.
 */
// FIX: bare `Record` (no type arguments) does not compile; the angle brackets
// were lost in transit. Both keys (`field.ingest`) and values (`field.name`)
// are strings — restored as Record<string, string>.
export function generateIngestRename(metadata: MetadataField[]): Record<string, string> {
  const rename: Record<string, string> = {};
  for (const field of metadata) {
    if (field.ingest) {
      rename[field.ingest] = field.name;
    }
  }
  return rename;
}
diff --git a/kubernetes/cdk8s/src/config-processor.ts b/kubernetes/cdk8s/src/config-processor.ts
new file mode 100644
index 0000000000..bba1981295
--- /dev/null
+++ b/kubernetes/cdk8s/src/config-processor.ts
@@ -0,0 +1,53 @@
/**
 * Generates the config-processor init container spec and associated volumes.
 * Replaces _config-processor.tpl and _configVolume.tpl from Helm.
 */

export function configProcessorContainer(name: string, dockerTag: string, imagePullPolicy: string): any {
  return {
    name: `config-processor-${name}`,
    image: `ghcr.io/loculus-project/config-processor:${dockerTag}`,
    imagePullPolicy,
    volumeMounts: [
      { name, mountPath: '/input' },
      { name: `${name}-processed`, mountPath: '/output' },
    ],
    command: ['python3'],
    args: ['/app/config-processor.py', '/input', '/output'],
    resources: {
      requests: { cpu: '50m', memory: '64Mi' },
      limits: { cpu: '500m', memory: '256Mi' },
    },
    env: [
      { name: 'LOCULUSSUB_smtpPassword', valueFrom: { secretKeyRef: { name: 'smtp-password', key: 'secretKey' } } },
      {
        name: 'LOCULUSSUB_insdcIngestUserPassword',
        valueFrom: { secretKeyRef: { name: 'service-accounts', key: 'insdcIngestUserPassword' } },
      },
      {
        name: 'LOCULUSSUB_preprocessingPipelinePassword',
        valueFrom: { secretKeyRef: { name: 'service-accounts', key: 'preprocessingPipelinePassword' } },
      },
      {
        name: 'LOCULUSSUB_externalMetadataUpdaterPassword',
        valueFrom: { secretKeyRef: { name: 'service-accounts', key: 'externalMetadataUpdaterPassword' } },
      },
      {
        name: 'LOCULUSSUB_backendUserPassword',
        valueFrom: { secretKeyRef: { name: 'service-accounts', key:
'backendUserPassword' } }, + }, + { + name: 'LOCULUSSUB_backendKeycloakClientSecret', + valueFrom: { secretKeyRef: { name: 'backend-keycloak-client-secret', key: 'backendKeycloakClientSecret' } }, + }, + { name: 'LOCULUSSUB_orcidSecret', valueFrom: { secretKeyRef: { name: 'orcid', key: 'orcidSecret' } } }, + ], + }; +} + +export function configVolumes(name: string, configMapName?: string): any[] { + return [ + { name, configMap: { name: configMapName || name } }, + { name: `${name}-processed`, emptyDir: {} }, + ]; +} diff --git a/kubernetes/cdk8s/src/constructs/backend.ts b/kubernetes/cdk8s/src/constructs/backend.ts new file mode 100644 index 0000000000..e7a4f38dec --- /dev/null +++ b/kubernetes/cdk8s/src/constructs/backend.ts @@ -0,0 +1,196 @@ +import { Construct } from 'constructs'; +import { ApiObject } from 'cdk8s'; +import { LoculusValues } from '../values'; +import { dockerTag } from '../docker-tag'; +import { configProcessorContainer, configVolumes } from '../config-processor'; +import { getResources, serviceType, priorityClassName } from '../resources'; +import { s3Url, s3UrlInternal } from '../urls'; +import { generateBackendConfig } from '../config-generation'; + +export class Backend extends Construct { + constructor(scope: Construct, id: string, values: LoculusValues) { + super(scope, id); + + if (values.disableBackend) return; + + const tag = dockerTag(values); + + this.createConfigMap(values); + this.createDeployment(values, tag); + this.createService(values); + } + + private createConfigMap(values: LoculusValues): void { + const config = generateBackendConfig(values); + new ApiObject(this, 'config', { + apiVersion: 'v1', + kind: 'ConfigMap', + metadata: { name: 'loculus-backend-config' }, + data: { + 'backend_config.json': JSON.stringify(config), + }, + }); + } + + private createDeployment(values: LoculusValues, tag: string): void { + const args: string[] = [`--loculus.enable-seqsets=${values.seqSets.enabled}`]; + + if (values.seqSets.crossRef) { + 
args.push(
        '--crossref.doi-prefix=$(CROSSREF_DOI_PREFIX)',
        '--crossref.endpoint=$(CROSSREF_ENDPOINT)',
        '--crossref.username=$(CROSSREF_USERNAME)',
        '--crossref.password=$(CROSSREF_PASSWORD)',
        '--crossref.database-name=$(CROSSREF_DATABASE_NAME)',
        '--crossref.email=$(CROSSREF_EMAIL)',
        '--crossref.organization=$(CROSSREF_ORGANIZATION)',
        '--crossref.host-url=$(CROSSREF_HOST_URL)',
      );
    }

    args.push(
      '--keycloak.password=$(BACKEND_KEYCLOAK_PASSWORD)',
      '--keycloak.realm=loculus',
      '--keycloak.client=backend-client',
      '--keycloak.url=http://loculus-keycloak-service:8083',
      '--keycloak.user=backend',
      '--spring.datasource.password=$(DB_PASSWORD)',
      '--spring.datasource.url=$(DB_URL)',
      '--spring.datasource.username=$(DB_USERNAME)',
      '--spring.security.oauth2.resourceserver.jwt.jwk-set-uri=http://loculus-keycloak-service:8083/realms/loculus/protocol/openid-connect/certs',
      `--loculus.cleanup.task.reset-stale-in-processing-after-seconds=${values.preprocessingTimeout || 120}`,
      `--loculus.pipeline-version-upgrade-check.interval-seconds=${values.pipelineVersionUpgradeCheckIntervalSeconds || 10}`,
      '--loculus.s3.enabled=$(S3_ENABLED)',
    );

    if (values.s3.enabled) {
      args.push(
        '--loculus.s3.bucket.endpoint=$(S3_BUCKET_ENDPOINT)',
        '--loculus.s3.bucket.internal-endpoint=$(S3_BUCKET_INTERNAL_ENDPOINT)',
        '--loculus.s3.bucket.region=$(S3_BUCKET_REGION)',
        '--loculus.s3.bucket.bucket=$(S3_BUCKET_BUCKET)',
        '--loculus.s3.bucket.access-key=$(S3_BUCKET_ACCESS_KEY)',
        '--loculus.s3.bucket.secret-key=$(S3_BUCKET_SECRET_KEY)',
      );
    }

    if (values.backendExtraArgs) {
      args.push(...values.backendExtraArgs);
    }

    const env: any[] = [
      {
        name: 'JVM_OPTS',
        value: '-XX:+UseContainerSupport -XX:+UseG1GC -XX:MaxHeapFreeRatio=5 -XX:MinHeapFreeRatio=2',
      },
    ];

    if (values.seqSets.crossRef) {
      env.push(
        { name: 'CROSSREF_USERNAME', valueFrom: { secretKeyRef: { name: 'crossref', key: 'username' } } },
        { name: 'CROSSREF_PASSWORD', valueFrom: { secretKeyRef: { name: 'crossref', key: 'password' } } },
        { name: 'CROSSREF_DOI_PREFIX', value: String(values.seqSets.crossRef.DOIPrefix) },
        { name: 'CROSSREF_ENDPOINT', value: String(values.seqSets.crossRef.endpoint) },
        // FIX: these four entries previously used `|| null`. Kubernetes
        // EnvVar.value must be a string — a null value is rejected by the API
        // server, so any organism leaving one of these optional CrossRef
        // fields unset would produce an invalid Deployment manifest.
        // Default to '' instead (same behavior for set values).
        { name: 'CROSSREF_DATABASE_NAME', value: values.seqSets.crossRef.databaseName ?? '' },
        { name: 'CROSSREF_EMAIL', value: values.seqSets.crossRef.email ?? '' },
        { name: 'CROSSREF_ORGANIZATION', value: values.seqSets.crossRef.organization ?? '' },
        { name: 'CROSSREF_HOST_URL', value: values.seqSets.crossRef.hostUrl ?? '' },
      );
    }

    env.push(
      {
        name: 'BACKEND_KEYCLOAK_PASSWORD',
        valueFrom: { secretKeyRef: { name: 'service-accounts', key: 'backendUserPassword' } },
      },
      { name: 'DB_URL', valueFrom: { secretKeyRef: { name: 'database', key: 'url' } } },
      { name: 'DB_USERNAME', valueFrom: { secretKeyRef: { name: 'database', key: 'username' } } },
      { name: 'DB_PASSWORD', valueFrom: { secretKeyRef: { name: 'database', key: 'password' } } },
      { name: 'S3_ENABLED', value: String(values.s3.enabled) },
    );

    if (values.s3.enabled) {
      env.push(
        { name: 'S3_BUCKET_ENDPOINT', value: s3Url(values) },
        { name: 'S3_BUCKET_INTERNAL_ENDPOINT', value: s3UrlInternal(values) },
        { name: 'S3_BUCKET_REGION', value: values.s3.bucket.region || '' },
        { name: 'S3_BUCKET_BUCKET', value: values.s3.bucket.bucket },
        { name: 'S3_BUCKET_ACCESS_KEY', valueFrom: { secretKeyRef: { name: 's3-bucket', key: 'accessKey' } } },
        { name: 'S3_BUCKET_SECRET_KEY', valueFrom: { secretKeyRef: { name: 's3-bucket', key: 'secretKey' } } },
      );
    }

    const containerSpec: any = {
      name: 'backend',
      image: `${values.images.backend.repository}:${values.images.backend.tag || tag}`,
      imagePullPolicy: values.images.backend.pullPolicy || values.imagePullPolicy,
      // Generous startup window (5s * 360 = 30 min) before liveness kicks in,
      // since the backend runs DB migrations on boot.
      startupProbe: {
        httpGet: { path: '/actuator/health/liveness', port: 8079 },
        periodSeconds: 5,
        failureThreshold: 360,
      },
      livenessProbe: {
        httpGet: { path:
'/actuator/health/liveness', port: 8079 }, + periodSeconds: 10, + }, + readinessProbe: { + httpGet: { path: '/actuator/health/readiness', port: 8079 }, + }, + ports: [{ containerPort: 8079 }], + args, + env, + volumeMounts: [{ name: 'loculus-backend-config-processed', mountPath: '/config' }], + }; + + const resources = getResources('backend', values); + if (resources) Object.assign(containerSpec, resources); + + new ApiObject(this, 'deployment', { + apiVersion: 'apps/v1', + kind: 'Deployment', + metadata: { + name: 'loculus-backend', + annotations: { 'argocd.argoproj.io/sync-options': 'Replace=true' }, + }, + spec: { + replicas: values.replicas.backend, + selector: { matchLabels: { app: 'loculus', component: 'backend' } }, + template: { + metadata: { + annotations: { timestamp: new Date().toISOString() }, + labels: { app: 'loculus', component: 'backend' }, + }, + spec: { + ...priorityClassName(values), + initContainers: [configProcessorContainer('loculus-backend-config', tag, values.imagePullPolicy)], + containers: [containerSpec], + volumes: configVolumes('loculus-backend-config'), + }, + }, + }, + }); + } + + private createService(values: LoculusValues): void { + const portSpec: any = { + port: 8079, + targetPort: 8079, + protocol: 'TCP', + name: 'http', + }; + if (values.environment !== 'server') { + portSpec.nodePort = 30082; + } + + new ApiObject(this, 'service', { + apiVersion: 'v1', + kind: 'Service', + metadata: { name: 'loculus-backend-service' }, + spec: { + type: serviceType(values), + selector: { app: 'loculus', component: 'backend' }, + ports: [portSpec], + }, + }); + } +} diff --git a/kubernetes/cdk8s/src/constructs/database.ts b/kubernetes/cdk8s/src/constructs/database.ts new file mode 100644 index 0000000000..95ef013f35 --- /dev/null +++ b/kubernetes/cdk8s/src/constructs/database.ts @@ -0,0 +1,88 @@ +import { Construct } from 'constructs'; +import { ApiObject } from 'cdk8s'; +import { LoculusValues } from '../values'; +import { dockerTag } from 
'../docker-tag'; +import { serviceType } from '../resources'; + +export class Database extends Construct { + constructor(scope: Construct, id: string, values: LoculusValues) { + super(scope, id); + + if (!values.runDevelopmentMainDatabase) return; + + const tag = dockerTag(values); + const env: any[] = [ + { name: 'POSTGRES_USER', value: 'postgres' }, + { name: 'POSTGRES_PASSWORD', value: 'unsecure' }, + { name: 'POSTGRES_DB', value: 'loculus' }, + { name: 'POSTGRES_HOST_AUTH_METHOD', value: 'trust' }, + ]; + if (!values.developmentDatabasePersistence) { + env.push({ name: 'LOCULUS_VERSION', value: tag }); + } + + new ApiObject(this, 'deployment', { + apiVersion: 'apps/v1', + kind: 'Deployment', + metadata: { + name: 'loculus-database', + annotations: { 'argocd.argoproj.io/sync-options': 'Replace=true' }, + }, + spec: { + replicas: 1, + selector: { matchLabels: { app: 'loculus', component: 'database' } }, + strategy: { type: 'Recreate' }, + template: { + metadata: { labels: { app: 'loculus', component: 'database' } }, + spec: { + containers: [ + { + name: 'database', + image: 'postgres:15.12', + args: ['-c', 'shared_preload_libraries=pg_stat_statements', '-c', 'pg_stat_statements.track=all'], + resources: { + requests: { memory: '200Mi', cpu: '250m' }, + limits: { memory: '2Gi' }, + }, + ports: [{ containerPort: 5432 }], + env, + volumeMounts: [{ name: 'init-scripts', mountPath: '/docker-entrypoint-initdb.d' }], + }, + ], + volumes: [{ name: 'init-scripts', configMap: { name: 'loculus-database-init' } }], + }, + }, + }, + }); + + new ApiObject(this, 'init-configmap', { + apiVersion: 'v1', + kind: 'ConfigMap', + metadata: { name: 'loculus-database-init' }, + data: { + 'init-pg-stat.sql': 'CREATE EXTENSION IF NOT EXISTS pg_stat_statements;\n', + }, + }); + + const portSpec: any = { + port: 5432, + targetPort: 5432, + protocol: 'TCP', + name: 'http', + }; + if (values.environment !== 'server') { + portSpec.nodePort = 30432; + } + + new ApiObject(this, 'service', { + 
apiVersion: 'v1', + kind: 'Service', + metadata: { name: 'loculus-database-service' }, + spec: { + type: serviceType(values), + selector: { app: 'loculus', component: 'database' }, + ports: [portSpec], + }, + }); + } +} diff --git a/kubernetes/cdk8s/src/constructs/docs.ts b/kubernetes/cdk8s/src/constructs/docs.ts new file mode 100644 index 0000000000..4f2689837a --- /dev/null +++ b/kubernetes/cdk8s/src/constructs/docs.ts @@ -0,0 +1,79 @@ +import { Construct } from 'constructs'; +import { ApiObject } from 'cdk8s'; +import { LoculusValues } from '../values'; +import { dockerTag } from '../docker-tag'; +import { getResources } from '../resources'; + +export class Docs extends Construct { + constructor(scope: Construct, id: string, values: LoculusValues) { + super(scope, id); + + if (!values.previewDocs) return; + + const tag = dockerTag(values); + const docsHost = `docs${values.subdomainSeparator || '.'}${values.host}`; + + const containerSpec: any = { + name: 'docs', + image: `ghcr.io/loculus-project/docs:${tag}`, + imagePullPolicy: values.imagePullPolicy, + ports: [{ containerPort: 8080 }], + }; + + const resources = getResources('docs', values); + if (resources) Object.assign(containerSpec, resources); + + new ApiObject(this, 'deployment', { + apiVersion: 'apps/v1', + kind: 'Deployment', + metadata: { + name: 'loculus-docs', + annotations: { 'argocd.argoproj.io/sync-options': 'Replace=true' }, + }, + spec: { + replicas: 1, + selector: { matchLabels: { app: 'loculus', component: 'docs' } }, + template: { + metadata: { + annotations: { timestamp: new Date().toISOString() }, + labels: { app: 'loculus', component: 'docs' }, + }, + spec: { containers: [containerSpec] }, + }, + }, + }); + + new ApiObject(this, 'service', { + apiVersion: 'v1', + kind: 'Service', + metadata: { name: 'loculus-docs' }, + spec: { + selector: { app: 'loculus', component: 'docs' }, + ports: [{ protocol: 'TCP', port: 80, targetPort: 8080 }], + }, + }); + + new ApiObject(this, 'ingress', { + 
apiVersion: 'networking.k8s.io/v1', + kind: 'Ingress', + metadata: { name: 'loculus-docs-ingress' }, + spec: { + rules: [ + { + host: docsHost, + http: { + paths: [ + { + path: '/', + pathType: 'Prefix', + backend: { service: { name: 'loculus-docs', port: { number: 80 } } }, + }, + ], + }, + }, + ], + tls: [{ hosts: [docsHost] }], + }, + }); + } +} diff --git a/kubernetes/cdk8s/src/constructs/ena-submission.ts b/kubernetes/cdk8s/src/constructs/ena-submission.ts new file mode 100644 index 0000000000..e54e0dc539 --- /dev/null +++ b/kubernetes/cdk8s/src/constructs/ena-submission.ts @@ -0,0 +1,221 @@ +import { Construct } from 'constructs'; +import { ApiObject } from 'cdk8s'; +import { LoculusValues } from '../values'; +import { dockerTag } from '../docker-tag'; +import { getResources, serviceType, priorityClassName } from '../resources'; +import { generateENASubmissionConfig } from '../config-generation'; +import * as yaml from 'js-yaml'; + +export class EnaSubmission extends Construct { + constructor(scope: Construct, id: string, values: LoculusValues) { + super(scope, id); + + if (values.disableEnaSubmission) return; + + const tag = dockerTag(values); + + this.createConfigMap(values, tag); + this.createDeployment(values, tag); + this.createCronJob(values, tag); + this.createService(values); + } + + private createConfigMap(values: LoculusValues, tag: string): void { + const testconfig = values.testconfig || false; + const enaDepositionHost = testconfig ? '127.0.0.1' : '0.0.0.0'; + const backendHost = + values.environment === 'server' + ? `https://backend${values.subdomainSeparator || '.'}${values.host}` + : testconfig + ? `http://${values.localHost}:8079` + : 'http://loculus-backend-service:8079'; + const keycloakHost = testconfig ? 
`http://${values.localHost}:8083` : 'http://loculus-keycloak-service:8083'; + + const enaOrganisms = generateENASubmissionConfig(values); + + const config: any = { + submit_to_ena_prod: values.enaDeposition?.submitToEnaProduction || false, + db_name: values.enaDeposition?.enaDbName || false, + is_broker: values.enaDeposition?.enaIsBroker || false, + unique_project_suffix: values.enaDeposition?.enaUniqueSuffix || false, + backend_url: backendHost, + ena_deposition_host: enaDepositionHost, + keycloak_token_url: `${keycloakHost}/realms/loculus/protocol/openid-connect/token`, + approved_list_test_url: values.enaDeposition?.enaApprovedListTestUrl || '', + suppressed_list_test_url: values.enaDeposition?.enaSuppressedListTestUrl || '', + enaOrganisms, + }; + + new ApiObject(this, 'config', { + apiVersion: 'v1', + kind: 'ConfigMap', + metadata: { name: 'loculus-ena-submission-config' }, + data: { + 'config.yaml': yaml.dump(config), + }, + }); + } + + private createDeployment(values: LoculusValues, tag: string): void { + const containerSpec: any = { + name: 'ena-submission', + image: `ghcr.io/loculus-project/ena-submission:${tag}`, + imagePullPolicy: values.imagePullPolicy, + env: [ + { + name: 'EXTERNAL_METADATA_UPDATER_PASSWORD', + valueFrom: { secretKeyRef: { name: 'service-accounts', key: 'externalMetadataUpdaterPassword' } }, + }, + { name: 'DB_URL', valueFrom: { secretKeyRef: { name: 'database', key: 'url' } } }, + { name: 'DB_USERNAME', valueFrom: { secretKeyRef: { name: 'database', key: 'username' } } }, + { name: 'DB_PASSWORD', valueFrom: { secretKeyRef: { name: 'database', key: 'password' } } }, + { name: 'SLACK_HOOK', valueFrom: { secretKeyRef: { name: 'slack-notifications', key: 'slack-hook' } } }, + { name: 'SLACK_TOKEN', valueFrom: { secretKeyRef: { name: 'slack-notifications', key: 'slack-token' } } }, + { + name: 'SLACK_CHANNEL_ID', + valueFrom: { secretKeyRef: { name: 'slack-notifications', key: 'slack-channel-id' } }, + }, + { name: 'ENA_USERNAME', 
valueFrom: { secretKeyRef: { name: 'ena-submission', key: 'username' } } }, + { name: 'ENA_PASSWORD', valueFrom: { secretKeyRef: { name: 'ena-submission', key: 'password' } } }, + ], + args: ['ena_deposition', '--config-file=/config/config.yaml'], + volumeMounts: [ + { name: 'loculus-ena-submission-config-volume', mountPath: '/config/config.yaml', subPath: 'config.yaml' }, + ], + }; + + const resources = getResources('ena-submission', values); + if (resources) Object.assign(containerSpec, resources); + + new ApiObject(this, 'deployment', { + apiVersion: 'apps/v1', + kind: 'Deployment', + metadata: { + name: 'loculus-ena-submission', + annotations: { 'argocd.argoproj.io/sync-options': 'Replace=true' }, + }, + spec: { + replicas: 1, + selector: { matchLabels: { app: 'loculus', component: 'loculus-ena-submission' } }, + template: { + metadata: { + annotations: { timestamp: new Date().toISOString() }, + labels: { app: 'loculus', component: 'loculus-ena-submission' }, + }, + spec: { + ...priorityClassName(values), + initContainers: [ + { + name: 'ena-submission-flyway', + image: `ghcr.io/loculus-project/ena-submission-flyway:${tag}`, + resources: { + requests: { cpu: '100m', memory: '128Mi' }, + limits: { cpu: '500m', memory: '256Mi' }, + }, + command: ['flyway', 'migrate'], + env: [ + { name: 'FLYWAY_URL', valueFrom: { secretKeyRef: { name: 'database', key: 'url' } } }, + { name: 'FLYWAY_USER', valueFrom: { secretKeyRef: { name: 'database', key: 'username' } } }, + { name: 'FLYWAY_PASSWORD', valueFrom: { secretKeyRef: { name: 'database', key: 'password' } } }, + ], + }, + ], + containers: [containerSpec], + volumes: [ + { + name: 'loculus-ena-submission-config-volume', + configMap: { name: 'loculus-ena-submission-config' }, + }, + ], + }, + }, + }, + }); + } + + private createCronJob(values: LoculusValues, tag: string): void { + const containerSpec: any = { + name: 'ena-submission', + image: `ghcr.io/loculus-project/ena-submission:${tag}`, + imagePullPolicy: 
values.imagePullPolicy, + env: [ + { + name: 'EXTERNAL_METADATA_UPDATER_PASSWORD', + valueFrom: { secretKeyRef: { name: 'service-accounts', key: 'externalMetadataUpdaterPassword' } }, + }, + { name: 'DB_URL', valueFrom: { secretKeyRef: { name: 'database', key: 'url' } } }, + { name: 'DB_USERNAME', valueFrom: { secretKeyRef: { name: 'database', key: 'username' } } }, + { name: 'DB_PASSWORD', valueFrom: { secretKeyRef: { name: 'database', key: 'password' } } }, + { name: 'SLACK_HOOK', valueFrom: { secretKeyRef: { name: 'slack-notifications', key: 'slack-hook' } } }, + { name: 'SLACK_TOKEN', valueFrom: { secretKeyRef: { name: 'slack-notifications', key: 'slack-token' } } }, + { + name: 'SLACK_CHANNEL_ID', + valueFrom: { secretKeyRef: { name: 'slack-notifications', key: 'slack-channel-id' } }, + }, + ], + args: ['python', 'scripts/get_ena_submission_list.py', '--config-file=/config/config.yaml'], + volumeMounts: [ + { name: 'loculus-ena-submission-config-volume', mountPath: '/config/config.yaml', subPath: 'config.yaml' }, + ], + }; + + const cronResources = getResources('ena-submission-list-cronjob', values); + if (cronResources) Object.assign(containerSpec, cronResources); + + new ApiObject(this, 'cronjob', { + apiVersion: 'batch/v1', + kind: 'CronJob', + metadata: { name: 'loculus-get-ena-submission-list-cronjob' }, + spec: { + schedule: '0 1,13 * * *', + startingDeadlineSeconds: 60, + concurrencyPolicy: 'Forbid', + jobTemplate: { + spec: { + activeDeadlineSeconds: values.getSubmissionListLimitSeconds, + template: { + metadata: { + labels: { app: 'loculus', component: 'loculus-get-ena-submission-list-cronjob' }, + annotations: { 'argocd.argoproj.io/sync-options': 'Replace=true' }, + }, + spec: { + ...priorityClassName(values), + restartPolicy: 'Never', + containers: [containerSpec], + volumes: [ + { + name: 'loculus-ena-submission-config-volume', + configMap: { name: 'loculus-ena-submission-config' }, + }, + ], + }, + }, + }, + }, + }, + }); + } + + private 
createService(values: LoculusValues): void { + const portSpec: any = { + port: 5000, + targetPort: 5000, + protocol: 'TCP', + name: 'http', + }; + if (values.environment !== 'server') { + portSpec.nodePort = 30050; + } + + new ApiObject(this, 'service', { + apiVersion: 'v1', + kind: 'Service', + metadata: { name: 'loculus-ena-submission-service' }, + spec: { + type: serviceType(values), + selector: { app: 'loculus', component: 'loculus-ena-submission' }, + ports: [portSpec], + }, + }); + } +} diff --git a/kubernetes/cdk8s/src/constructs/ingest.ts b/kubernetes/cdk8s/src/constructs/ingest.ts new file mode 100644 index 0000000000..42f7025abd --- /dev/null +++ b/kubernetes/cdk8s/src/constructs/ingest.ts @@ -0,0 +1,251 @@ +import { Construct } from 'constructs'; +import { ApiObject } from 'cdk8s'; +import { LoculusValues, EnabledOrganism } from '../values'; +import { dockerTag } from '../docker-tag'; +import { getResources, priorityClassName } from '../resources'; +import { patchMetadataSchema, getNucleotideSegmentNames } from '../organisms'; +import { generateIngestRename } from '../config-generation'; +import * as yaml from 'js-yaml'; + +export class Ingest extends Construct { + constructor(scope: Construct, id: string, values: LoculusValues, organism: EnabledOrganism) { + super(scope, id); + + if (!organism.contents.ingest) return; + + const tag = dockerTag(values); + const key = organism.key; + const organismContent = organism.contents; + const testconfig = values.testconfig || false; + + const backendHost = + values.environment === 'server' + ? `https://backend${values.subdomainSeparator || '.'}${values.host}` + : testconfig + ? `http://${values.localHost}:8079` + : 'http://loculus-backend-service:8079'; + + const enaDepositionHost = testconfig + ? `http://${values.localHost}:5000` + : 'http://loculus-ena-submission-service:5000'; + + const keycloakHost = testconfig ? 
`http://${values.localHost}:8083` : 'http://loculus-keycloak-service:8083'; + + // ConfigMap is always created when organism has ingest config + if (organismContent.ingest!.configFile) { + this.createConfigMap(values, tag, key, organismContent, backendHost, keycloakHost, enaDepositionHost); + } + + // Deployments and CronJobs are gated by disableIngest + if (values.disableIngest) return; + + // Deployment + this.createDeployment(values, tag, key, organismContent); + + // CronJob + this.createCronJob(values, tag, key, organismContent); + } + + private createConfigMap( + values: LoculusValues, + tag: string, + key: string, + organismContent: any, + backendHost: string, + keycloakHost: string, + enaDepositionHost: string, + ): void { + const patchedSchema = patchMetadataSchema(organismContent.schema); + const metadata = patchedSchema.metadata; + const segments = getNucleotideSegmentNames(organismContent.referenceGenomes); + + const config: any = { + ...organismContent.ingest!.configFile, + nucleotide_sequences: segments, + verify_loculus_version_is: tag, + check_ena_deposition: !values.disableEnaSubmission, + }; + + if (!values.disableEnaSubmission) { + config.ena_deposition_url = enaDepositionHost; + } + + config.organism = key; + config.backend_url = backendHost; + config.keycloak_token_url = `${keycloakHost}/realms/loculus/protocol/openid-connect/token`; + + if (values.ingest?.ncbiGatewayUrl) config.ncbi_gateway_url = values.ingest.ncbiGatewayUrl; + if (values.ingest?.mirrorBucket) config.mirror_bucket = values.ingest.mirrorBucket; + + const rename = generateIngestRename(metadata); + config.rename = rename; + + // INSDC segment-specific fields + const insdcSegmentFields: string[] = []; + for (const field of metadata) { + if (field.header === 'INSDC' && field.perSegment) { + insdcSegmentFields.push(field.name); + } + } + config.insdc_segment_specific_fields = insdcSegmentFields; + + new ApiObject(this, 'config', { + apiVersion: 'v1', + kind: 'ConfigMap', + metadata: 
{ name: `loculus-ingest-config-${key}` }, + data: { + 'config.yaml': yaml.dump(config), + }, + }); + } + + private createDeployment(values: LoculusValues, tag: string, key: string, organismContent: any): void { + const containerSpec: any = { + name: `ingest-${key}`, + image: `${organismContent.ingest!.image}:${tag}`, + imagePullPolicy: values.imagePullPolicy, + env: [ + { + name: 'KEYCLOAK_INGEST_PASSWORD', + valueFrom: { secretKeyRef: { name: 'service-accounts', key: 'insdcIngestUserPassword' } }, + }, + { name: 'SLACK_HOOK', valueFrom: { secretKeyRef: { name: 'slack-notifications', key: 'slack-hook' } } }, + { name: 'NCBI_API_KEY', valueFrom: { secretKeyRef: { name: 'ingest-ncbi', key: 'api-key' } } }, + ], + args: ['snakemake', 'results/submitted', 'results/revised', 'results/approved', '--all-temp'], + }; + + const resources = getResources('ingest', values); + if (resources) Object.assign(containerSpec, resources); + + const podSpec: any = { + ...priorityClassName(values), + initContainers: [ + { + name: 'version-check', + image: 'busybox', + command: [ + 'sh', + '-c', + `CONFIG_VERSION=$(grep "verify_loculus_version_is:" /package/config/config.yaml | sed "s/verify_loculus_version_is: //;");\nDOCKER_TAG="${tag}";\necho "Config version: $CONFIG_VERSION";\necho "Docker tag: $DOCKER_TAG";\nif [ "$CONFIG_VERSION" != "$DOCKER_TAG" ]; then\n echo "Version mismatch: ConfigMap version $CONFIG_VERSION does not match docker tag $DOCKER_TAG";\n exit 1;\nelse\n echo "Version match confirmed";\nfi`, + ], + volumeMounts: [ + { + name: `loculus-ingest-config-volume-${key}`, + mountPath: '/package/config/config.yaml', + subPath: 'config.yaml', + }, + ], + }, + ], + containers: [containerSpec], + }; + + const initResources = getResources('ingest-init', values); + if (initResources) Object.assign(podSpec.initContainers[0], initResources); + + if (organismContent.ingest!.configFile) { + containerSpec.volumeMounts = [ + { + name: `loculus-ingest-config-volume-${key}`, + mountPath: 
'/package/config/config.yaml', + subPath: 'config.yaml', + }, + ]; + podSpec.volumes = [ + { + name: `loculus-ingest-config-volume-${key}`, + configMap: { name: `loculus-ingest-config-${key}` }, + }, + ]; + } + + new ApiObject(this, 'deployment', { + apiVersion: 'apps/v1', + kind: 'Deployment', + metadata: { + name: `loculus-ingest-deployment-${key}`, + annotations: { 'argocd.argoproj.io/sync-options': 'Force=true,Replace=true' }, + }, + spec: { + replicas: 1, + strategy: { type: 'Recreate' }, + selector: { matchLabels: { app: 'loculus', component: `loculus-ingest-deployment-${key}` } }, + template: { + metadata: { + annotations: { timestamp: new Date().toISOString() }, + labels: { app: 'loculus', component: `loculus-ingest-deployment-${key}` }, + }, + spec: podSpec, + }, + }, + }); + } + + private createCronJob(values: LoculusValues, tag: string, key: string, organismContent: any): void { + const containerSpec: any = { + name: `ingest-${key}`, + image: `${organismContent.ingest!.image}:${tag}`, + imagePullPolicy: values.imagePullPolicy, + resources: { + requests: { memory: '1Gi', cpu: '200m' }, + limits: { cpu: '200m', memory: '10Gi' }, + }, + env: [ + { + name: 'KEYCLOAK_INGEST_PASSWORD', + valueFrom: { secretKeyRef: { name: 'service-accounts', key: 'insdcIngestUserPassword' } }, + }, + ], + args: ['snakemake', 'results/submitted', 'results/revised', 'results/revoked', 'results/approved', '--all-temp'], + }; + + const podSpec: any = { + ...priorityClassName(values), + restartPolicy: 'Never', + containers: [containerSpec], + }; + + if (organismContent.ingest!.configFile) { + containerSpec.volumeMounts = [ + { + name: `loculus-ingest-config-volume-${key}`, + mountPath: '/package/config/config.yaml', + subPath: 'config.yaml', + }, + ]; + podSpec.volumes = [ + { + name: `loculus-ingest-config-volume-${key}`, + configMap: { name: `loculus-ingest-config-${key}` }, + }, + ]; + } + + new ApiObject(this, 'cronjob', { + apiVersion: 'batch/v1', + kind: 'CronJob', + 
metadata: { + name: `loculus-revoke-and-regroup-cronjob-${key}`, + annotations: { 'argocd.argoproj.io/sync-options': 'Replace=true' }, + }, + spec: { + schedule: '0 0 31 2 *', + suspend: true, + startingDeadlineSeconds: 60, + concurrencyPolicy: 'Forbid', + jobTemplate: { + spec: { + activeDeadlineSeconds: values.ingestLimitSeconds, + template: { + metadata: { + labels: { app: 'loculus', component: `loculus-ingest-cronjob-${key}` }, + }, + spec: podSpec, + }, + }, + }, + }, + }); + } +} diff --git a/kubernetes/cdk8s/src/constructs/ingress.ts b/kubernetes/cdk8s/src/constructs/ingress.ts new file mode 100644 index 0000000000..1081b77b81 --- /dev/null +++ b/kubernetes/cdk8s/src/constructs/ingress.ts @@ -0,0 +1,203 @@ +import { Construct } from 'constructs'; +import { ApiObject } from 'cdk8s'; +import { LoculusValues } from '../values'; + +export class MainIngress extends Construct { + constructor(scope: Construct, id: string, values: LoculusValues) { + super(scope, id); + + // Middleware CRDs are always created (even in local environment) + new ApiObject(this, 'compression', { + apiVersion: 'traefik.containo.us/v1alpha1', + kind: 'Middleware', + metadata: { name: 'compression-middleware' }, + spec: { compress: {} }, + }); + + new ApiObject(this, 'redirect', { + apiVersion: 'traefik.containo.us/v1alpha1', + kind: 'Middleware', + metadata: { name: 'redirect-middleware' }, + spec: { redirectScheme: { scheme: 'https', permanent: true } }, + }); + + if (values.secrets?.basicauth) { + new ApiObject(this, 'basic-auth', { + apiVersion: 'traefik.containo.us/v1alpha1', + kind: 'Middleware', + metadata: { name: 'basic-auth' }, + spec: { basicAuth: { secret: 'basicauth' } }, + }); + } + + if (values.robotsNoindexHeader) { + new ApiObject(this, 'noindex', { + apiVersion: 'traefik.containo.us/v1alpha1', + kind: 'Middleware', + metadata: { name: 'noindex-robots-header' }, + spec: { headers: { customResponseHeaders: { 'X-Robots-Tag': 'noindex, nofollow' } } }, + }); + } + + new 
ApiObject(this, 'redirect-www', { + apiVersion: 'traefik.containo.us/v1alpha1', + kind: 'Middleware', + metadata: { name: 'redirect-www-middleware' }, + spec: { + redirectRegex: { + regex: '^https://www\\.(.*)', + replacement: 'https://$1', + permanent: true, + }, + }, + }); + + // Ingress resources are only created in server mode + if (values.environment !== 'server') return; + + const ns = 'default'; + const backendHost = `backend${values.subdomainSeparator || '.'}${values.host}`; + const keycloakHost = `authentication${values.subdomainSeparator || '.'}${values.host}`; + const minioHost = `s3${values.subdomainSeparator || '.'}${values.host}`; + + // Build middleware lists + const middlewareList = [`${ns}-compression-middleware@kubernetescrd`]; + if (values.enforceHTTPS) { + middlewareList.push(`${ns}-redirect-middleware@kubernetescrd`); + } + if (values.robotsNoindexHeader) { + middlewareList.push(`${ns}-noindex-robots-header@kubernetescrd`); + } + + const websiteMiddlewareList = [...middlewareList]; + const keycloakMiddlewareList = [...middlewareList]; + + if (values.secrets?.basicauth) { + websiteMiddlewareList.push(`${ns}-basic-auth@kubernetescrd`); + keycloakMiddlewareList.push(`${ns}-basic-auth@kubernetescrd`); + } + websiteMiddlewareList.push(`${ns}-redirect-www-middleware@kubernetescrd`); + + // Website ingress + new ApiObject(this, 'website-ingress', { + apiVersion: 'networking.k8s.io/v1', + kind: 'Ingress', + metadata: { + name: 'loculus-website-ingress', + annotations: { 'traefik.ingress.kubernetes.io/router.middlewares': websiteMiddlewareList.join(',') }, + }, + spec: { + rules: [ + { + host: values.host!, + http: { + paths: [ + { + path: '/', + pathType: 'Prefix', + backend: { service: { name: 'loculus-website-service', port: { number: 3000 } } }, + }, + ], + }, + }, + { + host: `www.${values.host}`, + http: { + paths: [ + { + path: '/', + pathType: 'Prefix', + backend: { service: { name: 'loculus-website-service', port: { number: 3000 } } }, + }, + 
], + }, + }, + ], + tls: [{ hosts: [values.host!, `www.${values.host}`] }], + }, + }); + + // Backend ingress + new ApiObject(this, 'backend-ingress', { + apiVersion: 'networking.k8s.io/v1', + kind: 'Ingress', + metadata: { + name: 'loculus-backend-ingress', + annotations: { 'traefik.ingress.kubernetes.io/router.middlewares': middlewareList.join(',') }, + }, + spec: { + rules: [ + { + host: backendHost, + http: { + paths: [ + { + path: '/', + pathType: 'Prefix', + backend: { service: { name: 'loculus-backend-service', port: { number: 8079 } } }, + }, + ], + }, + }, + ], + tls: [{ hosts: [backendHost] }], + }, + }); + + // Keycloak ingress + new ApiObject(this, 'keycloak-ingress', { + apiVersion: 'networking.k8s.io/v1', + kind: 'Ingress', + metadata: { + name: 'loculus-keycloak-ingress', + annotations: { 'traefik.ingress.kubernetes.io/router.middlewares': keycloakMiddlewareList.join(',') }, + }, + spec: { + rules: [ + { + host: keycloakHost, + http: { + paths: [ + { + path: '/{+}', + pathType: 'Prefix', + backend: { service: { name: 'loculus-keycloak-service', port: { number: 8083 } } }, + }, + ], + }, + }, + ], + tls: [{ hosts: [keycloakHost] }], + }, + }); + + // MinIO ingress (conditional) + if (values.s3.enabled && values.runDevelopmentS3) { + new ApiObject(this, 'minio-ingress', { + apiVersion: 'networking.k8s.io/v1', + kind: 'Ingress', + metadata: { + name: 'minio-ingress', + annotations: { 'traefik.ingress.kubernetes.io/router.middlewares': middlewareList.join(',') }, + }, + spec: { + rules: [ + { + host: minioHost, + http: { + paths: [ + { + path: '/', + pathType: 'Prefix', + backend: { service: { name: 'loculus-minio-service', port: { number: 8084 } } }, + }, + ], + }, + }, + ], + tls: [{ hosts: [minioHost] }], + }, + }); + } + } +} diff --git a/kubernetes/cdk8s/src/constructs/keycloak.ts b/kubernetes/cdk8s/src/constructs/keycloak.ts new file mode 100644 index 0000000000..824b6c3c11 --- /dev/null +++ b/kubernetes/cdk8s/src/constructs/keycloak.ts @@ -0,0 
+1,493 @@ +import { Construct } from 'constructs'; +import { ApiObject } from 'cdk8s'; +import { LoculusValues } from '../values'; +import { dockerTag } from '../docker-tag'; +import { configProcessorContainer, configVolumes } from '../config-processor'; +import { getResources, serviceType, priorityClassName } from '../resources'; +import { keycloakUrl } from '../urls'; + +export class Keycloak extends Construct { + constructor(scope: Construct, id: string, values: LoculusValues) { + super(scope, id); + + const tag = dockerTag(values); + + // ConfigMap + this.createConfigMap(values, tag); + + // Deployment + this.createDeployment(values, tag); + + // Service + this.createService(values); + + // Database (if dev) + if (values.runDevelopmentKeycloakDatabase) { + this.createDatabase(values, tag); + this.createDatabaseService(values); + } + } + + private createConfigMap(values: LoculusValues, tag: string): void { + const kcUrl = keycloakUrl(values); + const config = this.generateKeycloakRealmConfig(values, kcUrl); + + new ApiObject(this, 'config', { + apiVersion: 'v1', + kind: 'ConfigMap', + metadata: { name: 'keycloak-config' }, + data: { + 'keycloak-config.json': JSON.stringify(config, null, 2), + }, + }); + } + + private generateKeycloakRealmConfig(values: LoculusValues, kcUrl: string): any { + const config: any = { + realm: 'loculus', + enabled: true, + verifyEmail: values.auth.verifyEmail, + resetPasswordAllowed: values.auth.resetPasswordAllowed, + }; + + if (values.auth.verifyEmail && values.auth.smtp) { + config.smtpServer = { + host: values.auth.smtp.host, + port: values.auth.smtp.port, + from: values.auth.smtp.from, + fromDisplayName: values.name, + replyTo: values.auth.smtp.replyTo, + replyToDisplayName: values.name, + envelopeFrom: values.auth.smtp.envelopeFrom, + ssl: 'false', + starttls: 'true', + auth: 'true', + user: values.auth.smtp.user, + password: '[[smtpPassword]]', + }; + } + + config.registrationAllowed = values.auth.registrationAllowed; + 
config.accessTokenLifespan = 36000; + config.ssoSessionIdleTimeout = 36000; + config.actionTokenGeneratedByUserLifespan = 1800; + + // Users + const users: any[] = []; + + if (values.createTestAccounts) { + const browsers = ['firefox', 'webkit', 'chromium']; + for (const browser of browsers) { + for (let i = 0; i < 20; i++) { + users.push({ + username: `testuser_${i}_${browser}`, + enabled: true, + email: `testuser_${i}_${browser}@void.o`, + emailVerified: true, + firstName: `${i}_${browser}`, + lastName: 'TestUser', + credentials: [{ type: 'password', value: `testuser_${i}_${browser}` }], + realmRoles: ['user', 'offline_access'], + attributes: { university: 'University of Test' }, + clientRoles: { account: ['manage-account'] }, + }); + } + } + users.push( + { + username: 'testuser', + enabled: true, + email: 'testuser@void.o', + emailVerified: true, + firstName: 'Test', + lastName: 'User', + credentials: [{ type: 'password', value: 'testuser' }], + realmRoles: ['user', 'offline_access'], + attributes: { university: 'University of Test' }, + clientRoles: { account: ['manage-account'] }, + }, + { + username: 'superuser', + enabled: true, + email: 'superuser@void.o', + emailVerified: true, + firstName: 'Dummy', + lastName: 'SuperUser', + credentials: [{ type: 'password', value: 'superuser' }], + realmRoles: ['super_user', 'offline_access'], + attributes: { university: 'University of Test' }, + clientRoles: { account: ['manage-account'] }, + }, + ); + } + + // System users (always present) + users.push( + { + username: 'insdc_ingest_user', + enabled: true, + email: 'insdc_ingest_user@void.o', + emailVerified: true, + firstName: 'INSDC Ingest', + lastName: 'User', + credentials: [{ type: 'password', value: '[[insdcIngestUserPassword]]' }], + realmRoles: ['user', 'offline_access'], + attributes: { university: 'University of Test' }, + clientRoles: { account: ['manage-account'] }, + }, + { + username: 'preprocessing_pipeline', + enabled: true, + email: 
'preprocessing_pipeline@void.o', + emailVerified: true, + firstName: 'Dummy', + lastName: 'Preprocessing', + credentials: [{ type: 'password', value: '[[preprocessingPipelinePassword]]' }], + realmRoles: ['preprocessing_pipeline', 'offline_access'], + attributes: { university: 'University of Test' }, + clientRoles: { account: ['manage-account'] }, + }, + { + username: 'external_metadata_updater', + enabled: true, + email: 'external_metadata_updater@void.o', + emailVerified: true, + firstName: 'Dummy', + lastName: 'INSDC', + credentials: [{ type: 'password', value: '[[externalMetadataUpdaterPassword]]' }], + realmRoles: ['external_metadata_updater', 'get_released_data', 'offline_access'], + attributes: { university: 'University of Test' }, + clientRoles: { account: ['manage-account'] }, + }, + { + username: 'backend', + enabled: true, + email: 'nothing@void.o', + emailVerified: true, + firstName: 'Backend', + lastName: 'Technical-User', + attributes: { university: 'University of Test' }, + credentials: [{ type: 'password', value: '[[backendUserPassword]]' }], + clientRoles: { + 'realm-management': ['view-users'], + account: ['manage-account'], + }, + }, + ); + + config.users = users; + + config.roles = { + realm: [ + { name: 'user', description: 'User privileges' }, + { name: 'admin', description: 'Administrator privileges' }, + { name: 'preprocessing_pipeline', description: 'Preprocessing pipeline privileges' }, + { name: 'external_metadata_updater', description: 'External Submitter privileges' }, + { name: 'get_released_data', description: 'Privileges for getting released data' }, + { name: 'super_user', description: 'Privileges for curators to modify sequence entries of any user' }, + ], + }; + + config.clients = [ + { + clientId: 'backend-client', + enabled: true, + publicClient: true, + directAccessGrantsEnabled: true, + redirectUris: [`https://${values.host || ''}/*`, `http://${values.host || ''}/*`, 'http://localhost:3000/*'], + }, + { + clientId: 
'account-console2', + name: '${client_account-console}', + description: '', + rootUrl: '${authBaseUrl}', + adminUrl: '', + baseUrl: '/realms/loculus/account/', + surrogateAuthRequired: false, + enabled: true, + alwaysDisplayInConsole: false, + clientAuthenticatorType: 'client-secret', + redirectUris: ['/realms/loculus/account/*'], + webOrigins: ['+'], + notBefore: 0, + bearerOnly: false, + consentRequired: false, + standardFlowEnabled: true, + implicitFlowEnabled: false, + directAccessGrantsEnabled: false, + serviceAccountsEnabled: false, + publicClient: true, + frontchannelLogout: false, + protocol: 'openid-connect', + attributes: { + 'oidc.ciba.grant.enabled': 'false', + 'backchannel.logout.session.required': 'true', + 'post.logout.redirect.uris': '+', + 'oauth2.device.authorization.grant.enabled': 'false', + 'display.on.consent.screen': 'false', + 'pkce.code.challenge.method': 'S256', + 'backchannel.logout.revoke.offline.tokens': 'false', + }, + authenticationFlowBindingOverrides: {}, + fullScopeAllowed: false, + nodeReRegistrationTimeout: 0, + protocolMappers: [ + { + name: 'audience resolve', + protocol: 'openid-connect', + protocolMapper: 'oidc-audience-resolve-mapper', + consentRequired: false, + config: {}, + }, + ], + defaultClientScopes: ['web-origins', 'acr', 'profile', 'roles', 'email'], + optionalClientScopes: ['address', 'phone', 'offline_access', 'microprofile-jwt'], + }, + ]; + + config.attributes = { + frontendUrl: kcUrl, + userProfileEnabled: 'true', + }; + + config.components = { + 'org.keycloak.userprofile.UserProfileProvider': [ + { + providerId: 'declarative-user-profile', + subComponents: {}, + config: { + 'kc.user.profile.config': [ + 
'{"attributes":[{"name":"username","displayName":"${username}","validations":{"length":{"min":3,"max":255},"username-prohibited-characters":{},"up-username-not-idn-homograph":{}},"permissions":{"view":["admin","user"],"edit":["admin","user"]}},{"name":"email","displayName":"${email}","validations":{"email":{},"length":{"max":255}},"required":{"roles":["user"]},"permissions":{"view":["admin","user"],"edit":["admin","user"]}},{"name":"firstName","displayName":"${firstName}","validations":{"length":{"max":255},"person-name-prohibited-characters":{}},"required":{"roles":["user"]},"permissions":{"view":["admin","user"],"edit":["admin","user"]}},{"name":"lastName","displayName":"${lastName}","validations":{"length":{"max":255},"person-name-prohibited-characters":{}},"required":{"roles":["user"]},"permissions":{"view":["admin","user"],"edit":["admin","user"]}},{"name":"university","displayName":"University / Organisation","validations":{},"annotations":{},"required":{"roles":["admin","user"]},"permissions":{"view":[],"edit":["admin","user"]}},{"name":"orcid","displayName":"","permissions":{"edit":["admin"],"view":["admin","user"]},"annotations":{},"validations":{}}],"groups":[]}', + ], + }, + }, + ], + }; + + config.loginTheme = 'loculus'; + config.emailTheme = 'loculus'; + + // Identity providers + const identityProviders: any[] = []; + const identityProviderMappers: any[] = []; + if (values.auth.identityProviders) { + for (const [key, value] of Object.entries(values.auth.identityProviders)) { + if (key === 'orcid') { + identityProviders.push({ + alias: 'orcid', + providerId: 'orcid', + enabled: true, + updateProfileFirstLoginMode: 'on', + trustEmail: false, + storeToken: false, + addReadTokenRoleOnCreate: false, + authenticateByDefault: false, + linkOnly: false, + firstBrokerLoginFlowAlias: 'first broker login', + config: { + clientSecret: '[[orcidSecret]]', + clientId: (value as any).clientId, + }, + }); + identityProviderMappers.push( + { + name: 'username mapper', + 
identityProviderAlias: 'orcid', + identityProviderMapper: 'hardcoded-attribute-idp-mapper', + config: { syncMode: 'IMPORT', attribute: 'username' }, + }, + { + name: 'orcid', + identityProviderAlias: 'orcid', + identityProviderMapper: 'orcid-user-attribute-mapper', + config: { syncMode: 'INHERIT', jsonField: 'orcid-identifier', userAttribute: 'orcid.path' }, + }, + ); + } + } + } + config.identityProviders = identityProviders; + config.identityProviderMappers = identityProviderMappers; + + return config; + } + + private createDeployment(values: LoculusValues, tag: string): void { + const kcUrl = keycloakUrl(values); + const env: any[] = [ + { name: 'REGISTRATION_TERMS_MESSAGE', value: (values.registrationTermsMessage || '').trimEnd() }, + { name: 'PROJECT_NAME', value: values.name }, + { name: 'KC_DB', value: 'postgres' }, + { name: 'KC_DB_URL_HOST', valueFrom: { secretKeyRef: { name: 'keycloak-database', key: 'addr' } } }, + { name: 'KC_DB_URL_PORT', valueFrom: { secretKeyRef: { name: 'keycloak-database', key: 'port' } } }, + { name: 'KC_DB_URL_DATABASE', valueFrom: { secretKeyRef: { name: 'keycloak-database', key: 'database' } } }, + { name: 'KC_DB_USERNAME', valueFrom: { secretKeyRef: { name: 'keycloak-database', key: 'username' } } }, + { name: 'KC_DB_PASSWORD', valueFrom: { secretKeyRef: { name: 'keycloak-database', key: 'password' } } }, + { name: 'KC_BOOTSTRAP_ADMIN_USERNAME', value: 'admin' }, + { + name: 'KC_BOOTSTRAP_ADMIN_PASSWORD', + valueFrom: { secretKeyRef: { name: 'keycloak-admin', key: 'initialAdminPassword' } }, + }, + { name: 'KEYCLOAK_ADMIN', value: 'admin' }, + { + name: 'KEYCLOAK_ADMIN_PASSWORD', + valueFrom: { secretKeyRef: { name: 'keycloak-admin', key: 'initialAdminPassword' } }, + }, + { name: 'KC_PROXY', value: 'edge' }, + { name: 'PROXY_ADDRESS_FORWARDING', value: 'true' }, + { name: 'KC_HEALTH_ENABLED', value: 'true' }, + { name: 'KC_HOSTNAME_URL', value: kcUrl }, + { name: 'KC_HOSTNAME_ADMIN_URL', value: kcUrl }, + { name: 
'KC_FEATURES', value: 'declarative-user-profile' }, + { name: 'KC_RUN_IN_CONTAINER', value: 'true' }, + ]; + + if (values.runDevelopmentKeycloakDatabase) { + env.push({ name: 'LOCULUS_VERSION', value: tag }); + } + + const containerSpec: any = { + name: 'keycloak', + image: 'quay.io/keycloak/keycloak:23.0', + env, + args: ['start', '--import-realm', '--cache=local'], + ports: [{ containerPort: 8080 }], + volumeMounts: [ + { name: 'keycloak-config-processed', mountPath: '/opt/keycloak/data/import/' }, + { name: 'theme-volume', mountPath: '/opt/keycloak/providers/' }, + ], + startupProbe: { + httpGet: { path: '/health/ready', port: 8080 }, + timeoutSeconds: 3, + failureThreshold: 150, + periodSeconds: 5, + }, + livenessProbe: { + httpGet: { path: '/health/ready', port: 8080 }, + timeoutSeconds: 3, + periodSeconds: 10, + failureThreshold: 2, + }, + }; + + const resources = getResources('keycloak', values); + if (resources) Object.assign(containerSpec, resources); + + const spec: any = { + ...priorityClassName(values), + initContainers: [ + configProcessorContainer('keycloak-config', tag, values.imagePullPolicy), + { + name: 'keycloak-theme-prep', + resources: { + requests: { cpu: '100m', memory: '128Mi' }, + limits: { cpu: '500m', memory: '256Mi' }, + }, + image: `ghcr.io/loculus-project/keycloakify:${tag}`, + volumeMounts: [{ name: 'theme-volume', mountPath: '/destination' }], + }, + ], + containers: [containerSpec], + volumes: [...configVolumes('keycloak-config'), { name: 'theme-volume', emptyDir: {} }], + }; + + new ApiObject(this, 'deployment', { + apiVersion: 'apps/v1', + kind: 'Deployment', + metadata: { + name: 'loculus-keycloak', + annotations: { 'argocd.argoproj.io/sync-options': 'Replace=true' }, + }, + spec: { + replicas: 1, + selector: { matchLabels: { app: 'loculus', component: 'keycloak' } }, + template: { + metadata: { labels: { app: 'loculus', component: 'keycloak' } }, + spec, + }, + }, + }); + } + + private createService(values: LoculusValues): void 
{ + const portSpec: any = { + port: 8083, + targetPort: 8080, + protocol: 'TCP', + name: 'http', + }; + if (values.environment !== 'server') { + portSpec.nodePort = 30083; + } + + new ApiObject(this, 'service', { + apiVersion: 'v1', + kind: 'Service', + metadata: { name: 'loculus-keycloak-service' }, + spec: { + type: serviceType(values), + selector: { app: 'loculus', component: 'keycloak' }, + ports: [portSpec], + }, + }); + } + + private createDatabase(values: LoculusValues, tag: string): void { + const env: any[] = [ + { name: 'POSTGRES_USER', value: 'postgres' }, + { name: 'POSTGRES_PASSWORD', value: 'unsecure' }, + { name: 'POSTGRES_DB', value: 'keycloak' }, + { name: 'POSTGRES_HOST_AUTH_METHOD', value: 'trust' }, + ]; + if (!values.developmentDatabasePersistence) { + env.push({ name: 'LOCULUS_VERSION', value: tag }); + } + + new ApiObject(this, 'db-deployment', { + apiVersion: 'apps/v1', + kind: 'Deployment', + metadata: { + name: 'loculus-keycloak-database', + annotations: { 'argocd.argoproj.io/sync-options': 'Replace=true' }, + }, + spec: { + replicas: 1, + selector: { matchLabels: { app: 'loculus', component: 'keycloak-database' } }, + strategy: { type: 'Recreate' }, + template: { + metadata: { + annotations: { timestamp: new Date().toISOString() }, + labels: { app: 'loculus', component: 'keycloak-database' }, + }, + spec: { + containers: [ + { + name: 'loculus-keycloak-database', + image: 'postgres:15.12', + resources: { + requests: { memory: '30Mi', cpu: '10m' }, + limits: { memory: '100Mi' }, + }, + ports: [{ containerPort: 5432 }], + env, + }, + ], + }, + }, + }, + }); + } + + private createDatabaseService(_values: LoculusValues): void { + new ApiObject(this, 'db-service', { + apiVersion: 'v1', + kind: 'Service', + metadata: { name: 'loculus-keycloak-database-service' }, + spec: { + type: 'ClusterIP', + selector: { app: 'loculus', component: 'keycloak-database' }, + ports: [{ port: 5432 }], + }, + }); + } +} diff --git 
a/kubernetes/cdk8s/src/constructs/lapis.ts b/kubernetes/cdk8s/src/constructs/lapis.ts new file mode 100644 index 0000000000..74531344d3 --- /dev/null +++ b/kubernetes/cdk8s/src/constructs/lapis.ts @@ -0,0 +1,228 @@ +import { Construct } from 'constructs'; +import { ApiObject } from 'cdk8s'; +import { LoculusValues, EnabledOrganism } from '../values'; +import { dockerTag } from '../docker-tag'; +import { configProcessorContainer, configVolumes } from '../config-processor'; +import { getResources, priorityClassName } from '../resources'; +import { getEnabledOrganisms } from '../organisms'; + +export class Lapis extends Construct { + constructor(scope: Construct, id: string, values: LoculusValues, organism: EnabledOrganism) { + super(scope, id); + + const tag = dockerTag(values); + const key = organism.key; + + this.createDeployment(values, tag, key); + this.createService(key); + } + + private createDeployment(values: LoculusValues, tag: string, key: string): void { + const containerSpec: any = { + name: 'lapis', + image: `${values.images.lapis.repository}:${values.images.lapis.tag}`, + imagePullPolicy: values.images.lapis.pullPolicy || values.imagePullPolicy, + ports: [{ containerPort: 8080 }], + args: [`--silo.url=http://loculus-silo-service-${key}:8081`], + env: [ + { + name: 'JVM_OPTS', + value: + '-XX:+UseContainerSupport -XX:MaxRAMPercentage=75.0 -XX:+UseG1GC -XX:MaxHeapFreeRatio=5 -XX:MinHeapFreeRatio=2 -XX:MaxGCPauseMillis=100', + }, + ], + volumeMounts: [ + { + name: 'lapis-silo-database-config-processed', + mountPath: '/workspace/database_config.yaml', + subPath: 'database_config.yaml', + }, + { + name: 'lapis-silo-database-config-processed', + mountPath: '/workspace/reference_genomes.json', + subPath: 'reference_genomes.json', + }, + ], + startupProbe: { + httpGet: { path: '/actuator/health', port: 8080 }, + periodSeconds: 5, + failureThreshold: 36, + }, + readinessProbe: { + httpGet: { path: '/sample/info', port: 8080 }, + periodSeconds: 10, + 
failureThreshold: 3, + timeoutSeconds: 5, + }, + livenessProbe: { + httpGet: { path: '/actuator/health', port: 8080 }, + periodSeconds: 10, + failureThreshold: 3, + timeoutSeconds: 5, + }, + }; + + const resources = getResources('lapis', values, key); + if (resources) Object.assign(containerSpec, resources); + + new ApiObject(this, 'deployment', { + apiVersion: 'apps/v1', + kind: 'Deployment', + metadata: { + name: `loculus-lapis-${key}`, + annotations: {}, + }, + spec: { + replicas: values.replicas.lapis || 1, + selector: { matchLabels: { app: 'loculus', component: `lapis-${key}` } }, + template: { + metadata: { + annotations: { timestamp: new Date().toISOString() }, + labels: { app: 'loculus', component: `lapis-${key}` }, + }, + spec: { + ...priorityClassName(values), + initContainers: [configProcessorContainer('lapis-silo-database-config', tag, values.imagePullPolicy)], + containers: [containerSpec], + volumes: configVolumes('lapis-silo-database-config', `lapis-silo-database-config-${key}`), + }, + }, + }, + }); + } + + private createService(key: string): void { + new ApiObject(this, 'service', { + apiVersion: 'v1', + kind: 'Service', + metadata: { name: `loculus-lapis-service-${key}` }, + spec: { + type: 'ClusterIP', + selector: { app: 'loculus', component: `lapis-${key}` }, + ports: [{ port: 8080, targetPort: 8080, protocol: 'TCP', name: 'http' }], + }, + }); + } +} + +/** Creates the shared LAPIS ingress resources (one set for all organisms) */ +export class LapisIngress extends Construct { + constructor(scope: Construct, id: string, values: LoculusValues) { + super(scope, id); + + const organisms = getEnabledOrganisms(values); + const organismKeys = organisms.map((o) => o.key); + const ns = 'default'; // Release namespace equivalent + + // CORS middleware + new ApiObject(this, 'cors', { + apiVersion: 'traefik.containo.us/v1alpha1', + kind: 'Middleware', + metadata: { name: 'cors-all-origins' }, + spec: { + headers: { + accessControlAllowMethods: ['GET', 
'OPTIONS', 'POST', 'HEAD'], + accessControlAllowOriginList: ['*'], + accessControlMaxAge: 100, + accessControlAllowHeaders: ['*'], + }, + }, + }); + + // Strip prefix middlewares per organism + for (const key of organismKeys) { + new ApiObject(this, `strip-${key}`, { + apiVersion: 'traefik.containo.us/v1alpha1', + kind: 'Middleware', + metadata: { name: `strip-${key}-prefix` }, + spec: { + stripPrefix: { prefixes: [`/${key}/`] }, + }, + }); + } + + // Main LAPIS ingress + const stripMiddlewares = organismKeys.map((k) => `${ns}-strip-${k}-prefix@kubernetescrd`).join(','); + const mainMiddlewares = `${ns}-cors-all-origins@kubernetescrd,${stripMiddlewares}`; + + const paths = organismKeys.map((key) => ({ + path: `/${key}/`, + pathType: values.environment === 'server' ? 'ImplementationSpecific' : 'Prefix', + backend: { + service: { name: `loculus-lapis-service-${key}`, port: { number: 8080 } }, + }, + })); + + const lapisHost = + values.environment === 'server' ? `lapis${values.subdomainSeparator || '.'}${values.host}` : undefined; + + const mainIngressSpec: any = { + rules: [ + { + ...(lapisHost ? 
{ host: lapisHost } : {}), + http: { paths }, + }, + ], + }; + if (values.environment === 'server' && lapisHost) { + mainIngressSpec.tls = [{ hosts: [lapisHost] }]; + } + + new ApiObject(this, 'ingress', { + apiVersion: 'networking.k8s.io/v1', + kind: 'Ingress', + metadata: { + name: 'lapis-ingress', + annotations: { + 'traefik.ingress.kubernetes.io/router.middlewares': mainMiddlewares, + }, + }, + spec: mainIngressSpec, + }); + + // Redirect slash middleware + new ApiObject(this, 'redirect-slash', { + apiVersion: 'traefik.containo.us/v1alpha1', + kind: 'Middleware', + metadata: { name: 'redirect-slash' }, + spec: { + redirectRegex: { regex: '.*', replacement: '$0/', permanent: true }, + }, + }); + + // Redirect ingress for trailing slash + const redirectMiddlewareList: string[] = []; + if (values.enforceHTTPS) { + redirectMiddlewareList.push(`${ns}-redirect-middleware@kubernetescrd`); + } + redirectMiddlewareList.push(`${ns}-redirect-slash@kubernetescrd`); + + const redirectPaths = organismKeys.map((key) => ({ + path: `/${key}`, + pathType: 'Exact', + backend: { + service: { name: `loculus-lapis-service-${organismKeys[0]}`, port: { number: 8080 } }, + }, + })); + + new ApiObject(this, 'redirect-ingress', { + apiVersion: 'networking.k8s.io/v1', + kind: 'Ingress', + metadata: { + name: 'lapis-redirect-ingress', + annotations: { + 'traefik.ingress.kubernetes.io/router.middlewares': redirectMiddlewareList.join(','), + 'traefik.ingress.kubernetes.io/router.priority': '500', + }, + }, + spec: { + rules: [ + { + ...(lapisHost ? 
{ host: lapisHost } : {}), + http: { paths: redirectPaths }, + }, + ], + }, + }); + } +} diff --git a/kubernetes/cdk8s/src/constructs/minio.ts b/kubernetes/cdk8s/src/constructs/minio.ts new file mode 100644 index 0000000000..3c9d93f007 --- /dev/null +++ b/kubernetes/cdk8s/src/constructs/minio.ts @@ -0,0 +1,116 @@ +import { Construct } from 'constructs'; +import { ApiObject } from 'cdk8s'; +import { LoculusValues } from '../values'; +import { dockerTag } from '../docker-tag'; +import { getResources, serviceType } from '../resources'; + +export class Minio extends Construct { + constructor(scope: Construct, id: string, values: LoculusValues) { + super(scope, id); + + if (!values.s3.enabled || !values.runDevelopmentS3) return; + + const tag = dockerTag(values); + const bucketName = values.s3.bucket.bucket; + + // Policy ConfigMap + new ApiObject(this, 'policies', { + apiVersion: 'v1', + kind: 'ConfigMap', + metadata: { name: 'minio-policies' }, + data: { + 'policy.json': JSON.stringify( + { + Version: '2012-10-17', + Statement: [ + { + Effect: 'Allow', + Action: 's3:GetObject', + Resource: `arn:aws:s3:::${bucketName}/*`, + Principal: '*', + Condition: { + StringEquals: { 's3:ExistingObjectTag/public': 'true' }, + }, + }, + ], + }, + null, + 2, + ), + }, + }); + + const env: any[] = [ + { name: 'MINIO_ROOT_USER', valueFrom: { secretKeyRef: { name: 's3-bucket', key: 'accessKey' } } }, + { name: 'MINIO_ROOT_PASSWORD', valueFrom: { secretKeyRef: { name: 's3-bucket', key: 'secretKey' } } }, + ]; + if (!values.developmentDatabasePersistence) { + env.push({ name: 'LOCULUS_VERSION', value: tag }); + } + + const containerSpec: any = { + name: 'minio', + image: 'minio/minio:latest', + args: ['server', '/data'], + ports: [{ containerPort: 9000 }], + env, + lifecycle: { + postStart: { + exec: { + command: [ + '/bin/sh', + '-c', + `(\n sleep 10\n mc alias set local http://localhost:9000 "$MINIO_ROOT_USER" "$MINIO_ROOT_PASSWORD"\n mc mb -p local/${bucketName}\n echo "Bucket 
${bucketName} ensured."\n mc anonymous set-json /policy/policy.json local/${bucketName}\n) &\n`, + ], + }, + }, + }, + volumeMounts: [{ name: 'policy-volume', mountPath: '/policy' }], + }; + + const resources = getResources('minio', values); + if (resources) Object.assign(containerSpec, resources); + + new ApiObject(this, 'deployment', { + apiVersion: 'apps/v1', + kind: 'Deployment', + metadata: { + name: 'minio', + annotations: { 'argocd.argoproj.io/sync-options': 'Replace=true' }, + }, + spec: { + replicas: 1, + selector: { matchLabels: { app: 'loculus', component: 'minio' } }, + template: { + metadata: { labels: { app: 'loculus', component: 'minio' } }, + spec: { + volumes: [{ name: 'policy-volume', configMap: { name: 'minio-policies' } }], + containers: [containerSpec], + restartPolicy: 'Always', + }, + }, + }, + }); + + const portSpec: any = { + port: 8084, + targetPort: 9000, + protocol: 'TCP', + name: 'http', + }; + if (values.environment !== 'server') { + portSpec.nodePort = 30084; + } + + new ApiObject(this, 'service', { + apiVersion: 'v1', + kind: 'Service', + metadata: { name: 'loculus-minio-service' }, + spec: { + type: serviceType(values), + selector: { app: 'loculus', component: 'minio' }, + ports: [portSpec], + }, + }); + } +} diff --git a/kubernetes/cdk8s/src/constructs/preprocessing.ts b/kubernetes/cdk8s/src/constructs/preprocessing.ts new file mode 100644 index 0000000000..938d5e942a --- /dev/null +++ b/kubernetes/cdk8s/src/constructs/preprocessing.ts @@ -0,0 +1,128 @@ +import { Construct } from 'constructs'; +import { ApiObject } from 'cdk8s'; +import { LoculusValues, EnabledOrganism } from '../values'; +import { dockerTag } from '../docker-tag'; +import { getResources, priorityClassName } from '../resources'; +import { patchMetadataSchema, flattenPreprocessingVersions, mergeReferenceGenomes } from '../organisms'; +import { generatePreprocessingSpecs } from '../config-generation'; +import * as yaml from 'js-yaml'; + +export class Preprocessing 
extends Construct { + constructor(scope: Construct, id: string, values: LoculusValues, organism: EnabledOrganism) { + super(scope, id); + + if (values.disablePreprocessing) return; + + const tag = dockerTag(values); + const orgKey = organism.key; + const organismContent = organism.contents; + const testconfig = values.testconfig || false; + + const backendHost = values.disableBackend ? 'http://host.k3d.internal:8079' : 'http://loculus-backend-service:8079'; + + const keycloakHost = testconfig ? `http://${values.localHost}:8083` : 'http://loculus-keycloak-service:8083'; + + const patchedSchema = patchMetadataSchema(organismContent.schema); + const metadata = patchedSchema.metadata; + const flattened = flattenPreprocessingVersions(organismContent.preprocessing); + + for (let processingIndex = 0; processingIndex < flattened.length; processingIndex++) { + const pc = flattened[processingIndex]; + const thisDockerTag = pc.dockerTag || tag; + const replicas = pc.replicas || 1; + const deploymentName = `loculus-preprocessing-${orgKey}-v${pc.version}-${processingIndex}`; + + // ConfigMap (only if configFile exists) + if (pc.configFile) { + const preprocessingSpecs = generatePreprocessingSpecs(metadata, organismContent.referenceGenomes); + const configData: any = { + organism: orgKey, + ...pc.configFile, + processing_spec: { + ...preprocessingSpecs, + versionComment: { + function: 'identity', + inputs: { input: 'versionComment' }, + args: null, + }, + }, + }; + + new ApiObject(this, `config-${processingIndex}`, { + apiVersion: 'v1', + kind: 'ConfigMap', + metadata: { name: `loculus-preprocessing-config-${orgKey}-v${pc.version}-${processingIndex}` }, + data: { + 'preprocessing-config.yaml': yaml.dump(configData), + }, + }); + } + + // Deployment + const containerArgs: string[] = [...(pc.args || [])]; + containerArgs.push( + `--backend-host=${backendHost}/${orgKey}`, + `--keycloak-host=${keycloakHost}`, + `--pipeline-version=${pc.version}`, + 
'--keycloak-password=$(KEYCLOAK_PASSWORD)', + ); + + const containerSpec: any = { + name: `preprocessing-${orgKey}`, + image: `${pc.image}:${thisDockerTag}`, + imagePullPolicy: values.imagePullPolicy, + env: [ + { + name: 'KEYCLOAK_PASSWORD', + valueFrom: { secretKeyRef: { name: 'service-accounts', key: 'preprocessingPipelinePassword' } }, + }, + ], + args: containerArgs, + }; + + const resources = getResources('preprocessing', values); + if (resources) Object.assign(containerSpec, resources); + + const podSpec: any = { + ...priorityClassName(values), + containers: [containerSpec], + }; + + if (pc.configFile) { + containerSpec.args.push('--config=/etc/config/preprocessing-config.yaml'); + containerSpec.volumeMounts = [ + { + name: `preprocessing-config-volume-${orgKey}-v${pc.version}-${processingIndex}`, + mountPath: '/etc/config', + }, + ]; + podSpec.volumes = [ + { + name: `preprocessing-config-volume-${orgKey}-v${pc.version}-${processingIndex}`, + configMap: { name: `loculus-preprocessing-config-${orgKey}-v${pc.version}-${processingIndex}` }, + }, + ]; + } + + new ApiObject(this, `deployment-${processingIndex}`, { + apiVersion: 'apps/v1', + kind: 'Deployment', + metadata: { + name: deploymentName, + annotations: { 'argocd.argoproj.io/sync-options': 'Replace=true' }, + }, + spec: { + replicas, + selector: { matchLabels: { app: 'loculus', component: deploymentName } }, + template: { + metadata: { + annotations: { timestamp: new Date().toISOString() }, + labels: { app: 'loculus', component: deploymentName }, + }, + spec: podSpec, + }, + }, + }); + } + } +} diff --git a/kubernetes/cdk8s/src/constructs/secrets.ts b/kubernetes/cdk8s/src/constructs/secrets.ts new file mode 100644 index 0000000000..e8e3b272e8 --- /dev/null +++ b/kubernetes/cdk8s/src/constructs/secrets.ts @@ -0,0 +1,80 @@ +import { Construct } from 'constructs'; +import { ApiObject } from 'cdk8s'; +import { LoculusValues } from '../values'; + +export class Secrets extends Construct { + constructor(scope: 
Construct, id: string, values: LoculusValues) { + super(scope, id); + + const secrets = values.secrets || {}; + for (const [name, secret] of Object.entries(secrets)) { + if (secret.type === 'sealedsecret') { + new ApiObject(this, `sealed-${name}`, { + apiVersion: 'bitnami.com/v1alpha1', + kind: 'SealedSecret', + metadata: { + name, + annotations: { + 'sealedsecrets.bitnami.com/cluster-wide': String(secret.clusterWide || false), + }, + }, + spec: { + encryptedData: secret.encryptedData, + ...(secret.rawType ? { template: { type: secret.rawType } } : {}), + }, + }); + } else if (secret.type === 'autogen') { + new ApiObject(this, `autogen-${name}`, { + apiVersion: 'secretgenerator.mittwald.de/v1alpha1', + kind: 'StringSecret', + metadata: { name }, + spec: { + fields: Object.keys(secret.data).map((key) => ({ + fieldName: key, + encoding: 'hex', + length: '18', + })), + }, + }); + } else if (secret.type === 'rawhtpasswd') { + // htpasswd needs special handling - encode as base64 + // For now, we'll use a placeholder that matches Helm's htpasswd function + const htpasswdValue = generateHtpasswd(secret.data.username, secret.data.password); + new ApiObject(this, `htpasswd-${name}`, { + apiVersion: 'v1', + kind: 'Secret', + metadata: { name }, + data: { + users: Buffer.from(htpasswdValue).toString('base64'), + }, + }); + } else { + // Raw secret + const data: Record = {}; + for (const [key, value] of Object.entries(secret.data || {})) { + data[key] = Buffer.from(String(value)).toString('base64'); + } + new ApiObject(this, `secret-${name}`, { + apiVersion: 'v1', + kind: 'Secret', + metadata: { name }, + data, + }); + } + } + } +} + +function generateHtpasswd(username: string, password: string): string { + // Apache htpasswd format using apr1 (MD5). + // We use a simplified approach - the actual bcrypt hash will differ from Helm's + // but functionally work the same for basic auth. 
+  try {
+    const apacheMd5 = require('apache-md5');
+    return `${username}:${apacheMd5(password)}`;
+  } catch {
+    // Fallback: keep the htpasswd entry syntactically valid for Traefik.
+    // NOTE: this placeholder hash cannot match any password, so auth fails until apache-md5 is installed.
+    return `${username}:{SSHA}placeholder`;
+  }
+}
diff --git a/kubernetes/cdk8s/src/constructs/silo.ts b/kubernetes/cdk8s/src/constructs/silo.ts
new file mode 100644
index 0000000000..47d797d2d4
--- /dev/null
+++ b/kubernetes/cdk8s/src/constructs/silo.ts
@@ -0,0 +1,360 @@
+import { Construct } from 'constructs';
+import { ApiObject } from 'cdk8s';
+import { LoculusValues, EnabledOrganism, MetadataField } from '../values';
+import { dockerTag } from '../docker-tag';
+import { configProcessorContainer, configVolumes } from '../config-processor';
+import { getResources, priorityClassName } from '../resources';
+import { patchMetadataSchema, lineageSystemForOrganism, mergeReferenceGenomes } from '../organisms';
+import { generateSiloDatabaseConfig } from '../config-generation';
+
+function commonMetadataFields(values: LoculusValues): MetadataField[] {
+  // Mirrors the common metadata generation in config-generation.ts.
+  // Importing it from there would create a circular dependency, so the minimal set is duplicated here.
+  const fields: MetadataField[] = [
+    {
+      name: 'accessionVersion',
+      type: 'string',
+      notSearchable: true,
+      hideOnSequenceDetailsPage: true,
+      includeInDownloadsByDefault: true,
+    },
+    { name: 'accession', type: 'string', notSearchable: true, hideOnSequenceDetailsPage: true },
+    { name: 'version', type: 'int', hideOnSequenceDetailsPage: true },
+    {
+      name: 'submissionId',
+      displayName: 'Submission ID',
+      type: 'string',
+      header: 'Submission details',
+      orderOnDetailsPage: 5000,
+      enableSubstringSearch: true,
+      includeInDownloadsByDefault: true,
+    },
+    {
+      name: 'isRevocation',
+      displayName: 'Is revocation',
+      type: 'boolean',
+      autocomplete: true,
+      hideOnSequenceDetailsPage: true,
+    },
+    {
+      name: 'submitter',
+      type: 'string',
+      generateIndex: true,
+ autocomplete: true, + hideOnSequenceDetailsPage: true, + header: 'Submission details', + orderOnDetailsPage: 5010, + }, + { + name: 'groupName', + type: 'string', + generateIndex: true, + autocomplete: true, + header: 'Submission details', + displayName: 'Submitting group', + includeInDownloadsByDefault: true, + orderOnDetailsPage: 5020, + customDisplay: { type: 'submittingGroup', displayGroup: 'group' }, + }, + { + name: 'groupId', + type: 'int', + autocomplete: true, + header: 'Submission details', + displayName: 'Submitting group (numeric ID)', + orderOnDetailsPage: 5030, + customDisplay: { type: 'submittingGroup', displayGroup: 'group' }, + }, + { + name: 'submittedAtTimestamp', + type: 'timestamp', + displayName: 'Date submitted', + header: 'Submission details', + orderOnDetailsPage: 5040, + }, + { + name: 'submittedDate', + type: 'string', + hideOnSequenceDetailsPage: true, + generateIndex: true, + autocomplete: true, + displayName: 'Date submitted (exact)', + orderOnDetailsPage: 5050, + }, + { + name: 'releasedAtTimestamp', + type: 'timestamp', + displayName: 'Date released', + header: 'Submission details', + columnWidth: 100, + orderOnDetailsPage: 5060, + }, + { + name: 'releasedDate', + type: 'string', + hideOnSequenceDetailsPage: true, + generateIndex: true, + autocomplete: true, + displayName: 'Date released (exact)', + columnWidth: 100, + orderOnDetailsPage: 5070, + }, + ]; + + if (values.dataUseTerms.enabled) { + fields.push( + { + name: 'dataUseTerms', + type: 'string', + generateIndex: true, + autocomplete: true, + displayName: 'Data use terms', + initiallyVisible: true, + includeInDownloadsByDefault: true, + customDisplay: { type: 'dataUseTerms' }, + header: 'Data use terms', + orderOnDetailsPage: 610, + }, + { + name: 'dataUseTermsRestrictedUntil', + type: 'date', + displayName: 'Data use terms restricted until', + hideOnSequenceDetailsPage: true, + header: 'Data use terms', + orderOnDetailsPage: 620, + }, + { + name: 'dataBecameOpenAt', + type: 
'date', + displayName: 'Date data became open', + hideOnSequenceDetailsPage: true, + header: 'Data use terms', + orderOnDetailsPage: 625, + }, + ); + if (values.dataUseTerms.urls) { + fields.push({ + name: 'dataUseTermsUrl', + displayName: 'Data use terms URL', + type: 'string', + notSearchable: true, + header: 'Data use terms', + includeInDownloadsByDefault: true, + customDisplay: { type: 'link', url: '__value__' }, + orderOnDetailsPage: 630, + }); + } + } + + fields.push( + { + name: 'versionStatus', + displayName: 'Version status', + type: 'string', + autocomplete: true, + hideOnSequenceDetailsPage: true, + }, + { + name: 'versionComment', + type: 'string', + displayName: 'Version comment', + header: 'Submission details', + orderOnDetailsPage: 5000, + }, + { name: 'pipelineVersion', type: 'int', notSearchable: true, hideOnSequenceDetailsPage: true }, + ); + + return fields; +} + +export class Silo extends Construct { + constructor(scope: Construct, id: string, values: LoculusValues, organism: EnabledOrganism) { + super(scope, id); + + const tag = dockerTag(values); + const key = organism.key; + const organismContent = organism.contents; + const lineageSystem = lineageSystemForOrganism(organismContent); + + this.createConfigMap(values, key, organismContent, lineageSystem); + this.createDeployment(values, tag, key, organismContent, lineageSystem); + this.createService(key); + } + + private createConfigMap( + values: LoculusValues, + key: string, + organismContent: any, + lineageSystem: string | undefined, + ): void { + const patchedSchema = patchMetadataSchema(organismContent.schema); + const common = commonMetadataFields(values); + const dbConfig = generateSiloDatabaseConfig(patchedSchema, common, organismContent.referenceGenomes); + const merged = mergeReferenceGenomes(organismContent.referenceGenomes); + + const yaml = require('js-yaml'); + const preprocessingConfig: any = { + inputDirectory: '/preprocessing/input', + outputDirectory: '/preprocessing/output', + 
ndjsonInputFilename: 'data.ndjson.zst', + referenceGenomeFilename: 'reference_genomes.json', + }; + if (lineageSystem) { + preprocessingConfig.lineageDefinitionFilenames = ['lineage_definitions.yaml']; + } + + new ApiObject(this, 'config', { + apiVersion: 'v1', + kind: 'ConfigMap', + metadata: { name: `lapis-silo-database-config-${key}` }, + data: { + 'database_config.yaml': yaml.dump(dbConfig), + 'preprocessing_config.yaml': yaml.dump(preprocessingConfig), + 'reference_genomes.json': JSON.stringify(merged), + }, + }); + } + + private createDeployment( + values: LoculusValues, + tag: string, + key: string, + organismContent: any, + lineageSystem: string | undefined, + ): void { + const siloContainer: any = { + name: 'silo', + image: `${values.images.loculusSilo.repository}:${values.images.loculusSilo.tag || tag}`, + command: ['/usr/local/bin/silo'], + imagePullPolicy: values.imagePullPolicy, + env: [ + { name: 'SPDLOG_LEVEL', value: 'debug' }, + { name: 'SILO_DATA_DIRECTORY', value: '/data/' }, + ], + ports: [{ containerPort: 8081 }], + args: [ + 'api', + '--api-threads-for-http-connections', + String(values.silo?.apiThreadsForHttpConnections || 16), + '--api-max-queued-http-connections', + '1000', + '--query-materialization-cutoff', + '3276', + ], + volumeMounts: [{ name: 'lapis-silo-shared-data', mountPath: '/data' }], + readinessProbe: { + httpGet: { path: '/info', port: 8081 }, + initialDelaySeconds: 30, + periodSeconds: 10, + failureThreshold: 3, + timeoutSeconds: 5, + }, + livenessProbe: { + httpGet: { path: '/health', port: 8081 }, + initialDelaySeconds: 30, + periodSeconds: 10, + failureThreshold: 3, + timeoutSeconds: 5, + }, + }; + + const siloResources = getResources('silo', values, key); + if (siloResources) Object.assign(siloContainer, siloResources); + + const importerEnv: any[] = []; + if (values.disableBackend) { + importerEnv.push({ name: 'BACKEND_BASE_URL', value: `http://host.k3d.internal:8079/${key}` }); + } else { + importerEnv.push({ name: 
'BACKEND_BASE_URL', value: `http://loculus-backend-service:8079/${key}` }); + } + + if (lineageSystem) { + const lineageDefs = values.lineageSystemDefinitions?.[lineageSystem]; + if (lineageDefs) { + importerEnv.push({ name: 'LINEAGE_DEFINITIONS', value: JSON.stringify(lineageDefs) }); + } + } + + importerEnv.push( + { name: 'SILO_RUN_TIMEOUT_SECONDS', value: String(values.siloImport.siloTimeoutSeconds) }, + { name: 'HARD_REFRESH_INTERVAL', value: String(values.siloImport.hardRefreshIntervalSeconds) }, + { name: 'SILO_IMPORT_POLL_INTERVAL_SECONDS', value: String(values.siloImport.pollIntervalSeconds) }, + { name: 'PATH_TO_SILO_BINARY', value: '/usr/local/bin/silo' }, + { name: 'PREPROCESSING_CONFIG', value: '/app/preprocessing_config.yaml' }, + ); + + const importerContainer: any = { + name: 'silo-importer', + image: `${values.images.loculusSilo.repository}:${values.images.loculusSilo.tag || tag}`, + imagePullPolicy: values.images.loculusSilo.pullPolicy, + env: importerEnv, + volumeMounts: [ + { + name: 'lapis-silo-database-config-processed', + mountPath: '/preprocessing/input/reference_genomes.json', + subPath: 'reference_genomes.json', + }, + { + name: 'lapis-silo-database-config-processed', + mountPath: '/preprocessing/input/database_config.yaml', + subPath: 'database_config.yaml', + }, + { + name: 'lapis-silo-database-config-processed', + mountPath: '/app/preprocessing_config.yaml', + subPath: 'preprocessing_config.yaml', + }, + { name: 'lapis-silo-shared-data', mountPath: '/preprocessing/output' }, + { name: 'lapis-silo-input-data-cache', mountPath: '/preprocessing/input' }, + ], + }; + + const importerResources = getResources('silo-importer', values); + if (importerResources) Object.assign(importerContainer, importerResources); + + const forceReplace = !values.developmentDatabasePersistence && values.runDevelopmentMainDatabase; + const syncOptions = forceReplace ? 
'Replace=true,Force=true' : 'Replace=true'; + + new ApiObject(this, 'deployment', { + apiVersion: 'apps/v1', + kind: 'Deployment', + metadata: { + name: `loculus-silo-${key}`, + annotations: { 'argocd.argoproj.io/sync-options': syncOptions }, + }, + spec: { + replicas: 1, + selector: { matchLabels: { app: 'loculus', component: `silo-${key}` } }, + template: { + metadata: { + annotations: { timestamp: new Date().toISOString() }, + labels: { app: 'loculus', component: `silo-${key}` }, + }, + spec: { + ...priorityClassName(values), + initContainers: [configProcessorContainer('lapis-silo-database-config', tag, values.imagePullPolicy)], + containers: [siloContainer, importerContainer], + volumes: [ + ...configVolumes('lapis-silo-database-config', `lapis-silo-database-config-${key}`), + { name: 'lapis-silo-shared-data', emptyDir: {} }, + { name: 'lapis-silo-input-data-cache', emptyDir: {} }, + ], + }, + }, + }, + }); + } + + private createService(key: string): void { + new ApiObject(this, 'service', { + apiVersion: 'v1', + kind: 'Service', + metadata: { name: `loculus-silo-service-${key}` }, + spec: { + type: 'ClusterIP', + selector: { app: 'loculus', component: `silo-${key}` }, + ports: [{ port: 8081, targetPort: 8081, protocol: 'TCP', name: 'http' }], + }, + }); + } +} diff --git a/kubernetes/cdk8s/src/constructs/website.ts b/kubernetes/cdk8s/src/constructs/website.ts new file mode 100644 index 0000000000..a32081ba9f --- /dev/null +++ b/kubernetes/cdk8s/src/constructs/website.ts @@ -0,0 +1,108 @@ +import { Construct } from 'constructs'; +import { ApiObject } from 'cdk8s'; +import { LoculusValues } from '../values'; +import { dockerTag } from '../docker-tag'; +import { configProcessorContainer, configVolumes } from '../config-processor'; +import { getResources, serviceType, priorityClassName } from '../resources'; +import { generateWebsiteConfig, generateRuntimeConfig } from '../config-generation'; + +export class Website extends Construct { + constructor(scope: 
Construct, id: string, values: LoculusValues) { + super(scope, id); + + if (values.disableWebsite) return; + + const tag = dockerTag(values); + + this.createConfigMap(values); + this.createDeployment(values, tag); + this.createService(values); + } + + private createConfigMap(values: LoculusValues): void { + const websiteConfig = generateWebsiteConfig(values); + const runtimeConfig = generateRuntimeConfig(values); + + new ApiObject(this, 'config', { + apiVersion: 'v1', + kind: 'ConfigMap', + metadata: { name: 'loculus-website-config' }, + data: { + 'website_config.json': JSON.stringify(websiteConfig), + 'runtime_config.json': JSON.stringify(runtimeConfig), + }, + }); + } + + private createDeployment(values: LoculusValues, tag: string): void { + const containerSpec: any = { + name: 'website', + image: `${values.images.website.repository}:${values.images.website.tag || tag}`, + imagePullPolicy: values.images.website.pullPolicy || values.imagePullPolicy, + ports: [{ containerPort: 3000 }], + volumeMounts: [{ name: 'loculus-website-config-processed', mountPath: '/config' }], + livenessProbe: { + httpGet: { path: '/', port: 3000 }, + initialDelaySeconds: 30, + periodSeconds: 10, + }, + readinessProbe: { + httpGet: { path: '/', port: 3000 }, + initialDelaySeconds: 5, + periodSeconds: 5, + }, + }; + + const resources = getResources('website', values); + if (resources) Object.assign(containerSpec, resources); + + new ApiObject(this, 'deployment', { + apiVersion: 'apps/v1', + kind: 'Deployment', + metadata: { + name: 'loculus-website', + annotations: { 'argocd.argoproj.io/sync-options': 'Replace=true' }, + }, + spec: { + replicas: values.replicas.website, + selector: { matchLabels: { app: 'loculus', component: 'website' } }, + template: { + metadata: { + annotations: { timestamp: new Date().toISOString() }, + labels: { app: 'loculus', component: 'website' }, + }, + spec: { + ...priorityClassName(values), + initContainers: [configProcessorContainer('loculus-website-config', 
tag, values.imagePullPolicy)], + containers: [containerSpec], + imagePullSecrets: [{ name: 'custom-website-sealed-secret' }], + volumes: configVolumes('loculus-website-config'), + }, + }, + }, + }); + } + + private createService(values: LoculusValues): void { + const portSpec: any = { + port: 3000, + targetPort: 3000, + protocol: 'TCP', + name: 'http', + }; + if (values.environment !== 'server') { + portSpec.nodePort = 30081; + } + + new ApiObject(this, 'service', { + apiVersion: 'v1', + kind: 'Service', + metadata: { name: 'loculus-website-service' }, + spec: { + type: serviceType(values), + selector: { app: 'loculus', component: 'website' }, + ports: [portSpec], + }, + }); + } +} diff --git a/kubernetes/cdk8s/src/docker-tag.ts b/kubernetes/cdk8s/src/docker-tag.ts new file mode 100644 index 0000000000..c6d3d16d7a --- /dev/null +++ b/kubernetes/cdk8s/src/docker-tag.ts @@ -0,0 +1,12 @@ +import { LoculusValues } from './values'; + +export function dockerTag(values: LoculusValues): string { + if (values.sha) { + return `commit-${values.sha.substring(0, 7)}`; + } + const branch = values.branch || 'latest'; + if (branch === 'main') { + return 'latest'; + } + return `branch-${branch.replace(/\//g, '-')}`; +} diff --git a/kubernetes/cdk8s/src/main.ts b/kubernetes/cdk8s/src/main.ts new file mode 100644 index 0000000000..07326fb43d --- /dev/null +++ b/kubernetes/cdk8s/src/main.ts @@ -0,0 +1,42 @@ +import { App } from 'cdk8s'; +import { LoculusChart } from './chart'; +import { loadValues } from './values'; + +// Parse CLI args: --values file1.yaml --values file2.yaml --set key=value +function parseArgs(argv: string[]): { valuesFiles: string[]; sets: Record; baseDir?: string } { + const valuesFiles: string[] = []; + const sets: Record = {}; + let baseDir: string | undefined; + + for (let i = 0; i < argv.length; i++) { + if ((argv[i] === '--values' || argv[i] === '-f') && i + 1 < argv.length) { + valuesFiles.push(argv[++i]); + } else if (argv[i] === '--set' && i + 1 < 
argv.length) { + const [key, ...valueParts] = argv[++i].split('='); + sets[key] = valueParts.join('='); + } else if (argv[i] === '--set-json' && i + 1 < argv.length) { + const arg = argv[++i]; + const eqIdx = arg.indexOf('='); + if (eqIdx !== -1) { + const key = arg.substring(0, eqIdx); + const jsonStr = arg.substring(eqIdx + 1); + try { + sets[key] = JSON.parse(jsonStr); + } catch { + sets[key] = jsonStr; + } + } + } else if (argv[i] === '--base-dir' && i + 1 < argv.length) { + baseDir = argv[++i]; + } + } + + return { valuesFiles, sets, baseDir }; +} + +const args = parseArgs(process.argv.slice(2)); +const values = loadValues(args); + +const app = new App(); +new LoculusChart(app, 'loculus', values); +app.synth(); diff --git a/kubernetes/cdk8s/src/organisms.ts b/kubernetes/cdk8s/src/organisms.ts new file mode 100644 index 0000000000..a6efae9d9e --- /dev/null +++ b/kubernetes/cdk8s/src/organisms.ts @@ -0,0 +1,135 @@ +import { + LoculusValues, + EnabledOrganism, + OrganismConfig, + MetadataField, + ReferenceGenomeSegment, + PreprocessingConfig, +} from './values'; + +export function getEnabledOrganisms(values: LoculusValues): EnabledOrganism[] { + const allOrganisms = values.organisms || values.defaultOrganisms || {}; + const enabled: EnabledOrganism[] = []; + const sortedKeys = Object.keys(allOrganisms).sort(); + for (const key of sortedKeys) { + const organism = allOrganisms[key]; + if (organism.enabled !== false) { + enabled.push({ key, contents: organism }); + } + } + return enabled; +} + +/** + * Patch schema by merging metadataAdd into metadata (overwriting by name). 
+ */ +export function patchMetadataSchema(schema: any): any { + const metadata: MetadataField[] = schema.metadata || []; + const toAdd: MetadataField[] = schema.metadataAdd || []; + + const metadataMap = new Map(); + for (const field of metadata) { + metadataMap.set(field.name, field); + } + for (const field of toAdd) { + metadataMap.set(field.name, field); + } + + return { + ...schema, + metadata: Array.from(metadataMap.values()), + }; +} + +/** + * Get nucleotide segment names from reference genomes, sorted alphabetically. + */ +export function getNucleotideSegmentNames(referenceGenomes: ReferenceGenomeSegment[]): string[] { + return referenceGenomes.map((s) => s.name).sort(); +} + +/** + * Check if organism has multiple segments. + */ +export function isSegmented(referenceGenomes: ReferenceGenomeSegment[]): boolean { + return referenceGenomes.length > 1; +} + +/** + * Get the lineage system for an organism (from its patched schema). + */ +export function lineageSystemForOrganism(organism: OrganismConfig): string | undefined { + const schema = patchMetadataSchema(organism.schema); + const lineageSystems: string[] = []; + for (const field of schema.metadata) { + if (field.lineageSystem) { + lineageSystems.push(field.lineageSystem); + } + } + const unique = [...new Set(lineageSystems)]; + if (unique.length > 1) { + throw new Error(`Multiple lineage systems found: ${unique.join(', ')}`); + } + return unique[0]; +} + +export interface FlattenedPreprocessingConfig extends PreprocessingConfig { + version: number; +} + +/** + * Flatten preprocessing versions (a version field can be an array). + */ +export function flattenPreprocessingVersions(preprocessing: PreprocessingConfig[]): FlattenedPreprocessingConfig[] { + const flattened: FlattenedPreprocessingConfig[] = []; + const seen = new Set(); + for (const pc of preprocessing) { + const versions = Array.isArray(pc.version) ? 
pc.version : [pc.version]; + for (const v of versions) { + if (seen.has(v)) { + throw new Error(`Duplicate preprocessing pipeline version ${v} found in organism configuration`); + } + seen.add(v); + flattened.push({ ...pc, version: v }); + } + } + return flattened; +} + +/** + * Merge reference genomes into LAPIS format. + */ +export function mergeReferenceGenomes(referenceGenomes: ReferenceGenomeSegment[]): { + nucleotideSequences: Array<{ name: string; sequence: string }>; + genes: Array<{ name: string; sequence: string }>; +} { + const nucleotideSequences: Array<{ name: string; sequence: string }> = []; + const genes: Array<{ name: string; sequence: string }> = []; + const singleSegment = referenceGenomes.length === 1; + + for (const segment of referenceGenomes) { + const segmentName = segment.name; + const singleReference = segment.references.length === 1; + + for (const reference of segment.references) { + if (singleReference) { + nucleotideSequences.push({ name: segmentName, sequence: reference.sequence }); + } else { + const name = singleSegment ? 
reference.name : `${segmentName}-${reference.name}`; + nucleotideSequences.push({ name, sequence: reference.sequence }); + } + + if (reference.genes) { + for (const gene of reference.genes) { + if (singleReference) { + genes.push({ name: gene.name, sequence: gene.sequence }); + } else { + genes.push({ name: `${gene.name}-${reference.name}`, sequence: gene.sequence }); + } + } + } + } + } + + return { nucleotideSequences, genes }; +} diff --git a/kubernetes/cdk8s/src/resources.ts b/kubernetes/cdk8s/src/resources.ts new file mode 100644 index 0000000000..a9bea64096 --- /dev/null +++ b/kubernetes/cdk8s/src/resources.ts @@ -0,0 +1,25 @@ +import { LoculusValues } from './values'; + +export function getResources(containerName: string, values: LoculusValues, organism?: string): any | undefined { + if (organism && values.resources?.organismSpecific?.[organism]?.[containerName]) { + return { resources: values.resources.organismSpecific[organism][containerName] }; + } + if (values.resources?.[containerName]) { + return { resources: values.resources[containerName] }; + } + if (values.defaultResources) { + return { resources: values.defaultResources }; + } + return undefined; +} + +export function serviceType(values: LoculusValues): string { + return values.environment === 'server' ? 
'ClusterIP' : 'NodePort'; +} + +export function priorityClassName(values: LoculusValues): any { + if (values.podPriorityClassName) { + return { priorityClassName: values.podPriorityClassName }; + } + return {}; +} diff --git a/kubernetes/cdk8s/src/urls.ts b/kubernetes/cdk8s/src/urls.ts new file mode 100644 index 0000000000..33ca1ac003 --- /dev/null +++ b/kubernetes/cdk8s/src/urls.ts @@ -0,0 +1,75 @@ +import { LoculusValues, EnabledOrganism } from './values'; +import { getEnabledOrganisms } from './organisms'; + +export function backendUrl(values: LoculusValues): string { + if (values.public?.backendUrl) return values.public.backendUrl; + if (values.environment === 'server') { + return `https://backend${values.subdomainSeparator || '.'}${values.host}`; + } + return `http://${values.localHost}:8079`; +} + +export function websiteUrl(values: LoculusValues): string { + if (values.public?.websiteUrl) return values.public.websiteUrl; + if (values.environment === 'server') { + return `https://${values.host}`; + } + return `http://${values.localHost}:3000`; +} + +export function s3Url(values: LoculusValues): string { + if (values.runDevelopmentS3) { + if (values.environment === 'server') { + return `https://s3${values.subdomainSeparator || '.'}${values.host}`; + } + return `http://${values.localHost}:8084`; + } + return values.s3.bucket.endpoint || ''; +} + +export function s3UrlInternal(values: LoculusValues): string { + if (values.runDevelopmentS3) { + return 'http://loculus-minio-service:8084'; + } + return values.s3.bucket.endpoint || ''; +} + +export function keycloakUrl(values: LoculusValues): string { + if (values.public?.keycloakUrl) return values.public.keycloakUrl; + if (values.environment === 'server') { + return `https://authentication${values.subdomainSeparator || '.'}${values.host}`; + } + return `http://${values.localHost}:8083`; +} + +export function lapisUrlTemplate(values: LoculusValues): string { + if (values.public?.lapisUrlTemplate) return 
values.public.lapisUrlTemplate; + if (values.environment === 'server') { + return `https://lapis${values.subdomainSeparator || '.'}${values.host}/%organism%`; + } + return `http://${values.localHost}:8080/%organism%`; +} + +export function generateInternalLapisUrls(values: LoculusValues): Record { + const result: Record = {}; + for (const org of getEnabledOrganisms(values)) { + if (!values.disableWebsite) { + result[org.key] = `http://loculus-lapis-service-${org.key}:8080`; + } else { + result[org.key] = `http://${values.localHost}:8080/${org.key}`; + } + } + return result; +} + +export function generateExternalLapisUrls(values: LoculusValues): Record { + const template = lapisUrlTemplate(values); + const result: Record = {}; + const organisms = values.organisms || values.defaultOrganisms || {}; + for (const [key, organism] of Object.entries(organisms)) { + if (organism.enabled !== false) { + result[key] = template.replace('%organism%', key); + } + } + return result; +} diff --git a/kubernetes/cdk8s/src/values.ts b/kubernetes/cdk8s/src/values.ts new file mode 100644 index 0000000000..a17351a740 --- /dev/null +++ b/kubernetes/cdk8s/src/values.ts @@ -0,0 +1,355 @@ +import * as fs from 'fs'; +import * as yaml from 'js-yaml'; +import * as path from 'path'; + +// TypeScript interfaces matching values.yaml structure +// These are intentionally loose (using `any` for deeply nested configs) +// to avoid maintaining a 1785-line JSON Schema in TypeScript + +export interface MetadataField { + name: string; + displayName?: string; + type?: string; + header?: string; + required?: boolean; + noInput?: boolean; + noEdit?: boolean; + desired?: boolean; + definition?: string; + guidance?: string; + example?: string; + ontology_id?: string; + options?: Array<{ name: string; [key: string]: any }>; + ingest?: string; + preprocessing?: { + function?: string; + inputs?: Record; + args?: Record; + }; + generateIndex?: boolean; + autocomplete?: boolean; + notSearchable?: boolean; + 
initiallyVisible?: boolean; + hideInSearchResultsTable?: boolean; + hideOnSequenceDetailsPage?: boolean; + includeInDownloadsByDefault?: boolean; + rangeSearch?: boolean; + rangeOverlapSearch?: any; + lineageSystem?: string; + perSegment?: boolean; + oneHeader?: boolean; + columnWidth?: number; + order?: number; + orderOnDetailsPage?: number; + onlyForReference?: string; + enableSubstringSearch?: boolean; + customDisplay?: { + type: string; + url?: string; + linkMenuItems?: Array<{ name: string; url: string }>; + displayGroup?: string; + label?: string; + html?: string; + }; + [key: string]: any; +} + +export interface ReferenceGenomeSegment { + name: string; + references: Array<{ + name: string; + sequence: string; + genes?: Array<{ name: string; sequence: string }>; + }>; +} + +export interface OrganismSchema { + organismName: string; + submissionDataTypes?: { + consensusSequences?: boolean; + maxSequencesPerEntry?: number; + files?: any; + }; + loadSequencesAutomatically?: boolean; + earliestReleaseDate?: { + enabled: boolean; + externalFields?: string[]; + }; + richFastaHeaderFields?: string[]; + files?: Array<{ name: string; [key: string]: any }>; + metadata: MetadataField[]; + metadataAdd?: MetadataField[]; + metadataTemplate?: any; + linkOuts?: Array<{ name: string; url: string; maxNumberOfRecommendedEntries?: number }>; + image?: string; + description?: string; + website?: any; + extraInputFields?: Array; + [key: string]: any; +} + +export interface PreprocessingConfig { + version: number | number[]; + image: string; + args?: string[]; + configFile?: any; + dockerTag?: string; + replicas?: number; + [key: string]: any; +} + +export interface IngestConfig { + image: string; + configFile?: any; + [key: string]: any; +} + +export interface OrganismConfig { + enabled?: boolean; + schema: OrganismSchema; + referenceGenomes: ReferenceGenomeSegment[]; + preprocessing: PreprocessingConfig[]; + ingest?: IngestConfig; + enaDeposition?: any; + [key: string]: any; +} + 
+export interface S3Config { + enabled: boolean; + bucket: { + endpoint?: string; + region?: string; + bucket: string; + accessKey?: string; + secretKey?: string; + }; +} + +export interface ImageConfig { + repository: string; + tag?: string; + pullPolicy?: string; +} + +export interface LoculusValues { + environment: string; + imagePullPolicy: string; + localHost: string; + host?: string; + subdomainSeparator?: string; + robotsNoindexHeader?: boolean; + enforceHTTPS?: boolean; + insecureCookies?: boolean; + + seqSets: { + enabled: boolean; + crossRef?: any; + fieldsToDisplay?: any[]; + }; + + disableWebsite: boolean; + disableBackend: boolean; + disablePreprocessing: boolean; + disableIngest: boolean; + disableEnaSubmission: boolean; + + website: { + websiteConfig: { + enableLoginNavigationItem: boolean; + enableSubmissionNavigationItem: boolean; + enableSubmissionPages: boolean; + }; + }; + + siloImport: { + siloTimeoutSeconds: number; + hardRefreshIntervalSeconds: number; + pollIntervalSeconds: number; + }; + + ingestLimitSeconds: number; + getSubmissionListLimitSeconds: number; + preprocessingTimeout: number; + accessionPrefix: string; + zstdCompressionLevel: number; + pipelineVersionUpgradeCheckIntervalSeconds: number; + + dataUseTerms: { + enabled: boolean; + urls?: { + open?: string; + restricted?: string; + }; + }; + + fileSharing?: any; + s3: S3Config; + + name: string; + logo: { url: string; width: number; height: number }; + lineageSystemDefinitions?: Record>; + + organisms?: Record; + defaultOrganisms?: Record; + + runDevelopmentMainDatabase?: boolean; + runDevelopmentKeycloakDatabase?: boolean; + runDevelopmentS3?: boolean; + developmentDatabasePersistence?: boolean; + + auth: { + verifyEmail: boolean; + resetPasswordAllowed: boolean; + registrationAllowed: boolean; + smtp?: { + host: string; + port: string; + from: string; + replyTo: string; + envelopeFrom: string; + user: string; + }; + identityProviders?: Record; + }; + + createTestAccounts?: 
boolean; + registrationTermsMessage?: string; + + images: { + backend: ImageConfig; + website: ImageConfig; + loculusSilo: ImageConfig & { pullPolicy: string }; + lapis: ImageConfig & { tag: string }; + [key: string]: any; + }; + + replicas: { + backend: number; + website: number; + lapis?: number; + [key: string]: any; + }; + + resources?: Record; + defaultResources?: any; + podPriorityClassName?: string; + + secrets?: Record; + + silo?: { + apiThreadsForHttpConnections?: number; + }; + + backendExtraArgs?: string[]; + + branch?: string; + sha?: string; + testconfig?: boolean; + usePublicRuntimeConfigAsServerSide?: boolean; + + previewDocs?: boolean; + docsImage?: string; + + gitHubMainUrl?: string; + bannerMessageURL?: string; + bannerMessage?: string; + submissionBannerMessageURL?: string; + submissionBannerMessage?: string; + gitHubEditLink?: string; + welcomeMessageHTML?: string; + additionalHeadHTML?: string; + sequenceFlagging?: any; + + enaDeposition?: { + submitToEnaProduction?: boolean; + enaDbName?: string; + enaUniqueSuffix?: string; + enaIsBroker?: boolean; + enaApprovedListTestUrl?: string; + enaSuppressedListTestUrl?: string; + }; + + ingest?: { + ncbiGatewayUrl?: string; + mirrorBucket?: string; + }; + + [key: string]: any; +} + +export interface EnabledOrganism { + key: string; + contents: OrganismConfig; +} + +/** + * Load and merge values from multiple YAML files plus --set overrides. + */ +export function loadValues(args: { + valuesFiles: string[]; + sets: Record; + baseDir?: string; +}): LoculusValues { + const baseDir = args.baseDir || path.resolve(__dirname, '../../loculus'); + + // Start with defaults from values.yaml + const defaultValuesPath = path.join(baseDir, 'values.yaml'); + let merged: any = {}; + if (fs.existsSync(defaultValuesPath)) { + merged = yaml.load(fs.readFileSync(defaultValuesPath, 'utf8')) as any; + } + + // Merge additional values files + for (const f of args.valuesFiles) { + const filePath = path.isAbsolute(f) ? 
f : path.resolve(process.cwd(), f); + const extra = yaml.load(fs.readFileSync(filePath, 'utf8')) as any; + merged = deepMerge(merged, extra); + } + + // Apply --set overrides (supports dot-notation paths) + for (const [key, value] of Object.entries(args.sets)) { + setNestedValue(merged, key, parseSetValue(value)); + } + + return merged as LoculusValues; +} + +function parseSetValue(value: string): any { + if (value === 'true') return true; + if (value === 'false') return false; + const num = Number(value); + if (!isNaN(num) && value !== '') return num; + return value; +} + +function setNestedValue(obj: any, path: string, value: any): void { + const parts = path.split('.'); + let current = obj; + for (let i = 0; i < parts.length - 1; i++) { + if (!(parts[i] in current) || typeof current[parts[i]] !== 'object') { + current[parts[i]] = {}; + } + current = current[parts[i]]; + } + current[parts[parts.length - 1]] = value; +} + +export function deepMerge(target: any, source: any): any { + if (source === undefined || source === null) return target; + if (typeof source !== 'object' || Array.isArray(source)) return source; + const result = { ...target }; + for (const key of Object.keys(source)) { + if ( + key in result && + typeof result[key] === 'object' && + !Array.isArray(result[key]) && + typeof source[key] === 'object' && + !Array.isArray(source[key]) + ) { + result[key] = deepMerge(result[key], source[key]); + } else { + result[key] = source[key]; + } + } + return result; +} diff --git a/kubernetes/cdk8s/tsconfig.json b/kubernetes/cdk8s/tsconfig.json new file mode 100644 index 0000000000..6cee70357b --- /dev/null +++ b/kubernetes/cdk8s/tsconfig.json @@ -0,0 +1,19 @@ +{ + "compilerOptions": { + "target": "ES2020", + "module": "commonjs", + "lib": ["ES2020"], + "outDir": "dist", + "rootDir": "src", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "resolveJsonModule": true, + "declaration": true, + 
"declarationMap": true, + "sourceMap": true + }, + "include": ["src/**/*"], + "exclude": ["node_modules", "dist"] +} From 2d28b4ef3f5787e3b84284ea114d65cb299fd81f Mon Sep 17 00:00:00 2001 From: Theo Date: Tue, 17 Feb 2026 17:16:03 +0000 Subject: [PATCH 3/6] fix(deployment): install CDK8s deps in website dev test workflow The dev server test workflow runs deploy.py config which now uses CDK8s instead of Helm for manifest generation. Add Node.js setup and CDK8s npm ci before config generation. Co-Authored-By: Claude Opus 4.6 --- .github/workflows/website-playwright-dev-test.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/website-playwright-dev-test.yml b/.github/workflows/website-playwright-dev-test.yml index ad7c87cc9e..daff960152 100644 --- a/.github/workflows/website-playwright-dev-test.yml +++ b/.github/workflows/website-playwright-dev-test.yml @@ -3,6 +3,7 @@ on: pull_request: paths: - "website/**" + - "kubernetes/cdk8s/**" - "deploy.py" - generate_local_test_config.sh - .github/workflows/website-playwright-dev-test.yml @@ -36,6 +37,11 @@ jobs: uses: astral-sh/setup-uv@v6 - name: Checkout repository uses: actions/checkout@v6 + - uses: actions/setup-node@v6 + with: + node-version: '22' + - name: Install CDK8s dependencies + run: cd kubernetes/cdk8s && npm ci - name: Install yaml package run: uv pip install --system PyYAML requests - name: Generate local test config From 9218a4466efb8faf8e215d6b3a545a9442642636 Mon Sep 17 00:00:00 2001 From: Theo Date: Tue, 17 Feb 2026 17:22:52 +0000 Subject: [PATCH 4/6] refactor(deployment): remove Helm chart templates and references - Delete all 46 Helm template files from kubernetes/loculus/templates/ - Delete Chart.yaml, Chart.lock, CONTRIBUTING.md - Delete helm-schema-lint.yaml workflow - Remove helm lint from pre-commit config - Remove setup-helm from website dev test workflow (not needed for config gen) - Rename deploy.py subcommand: helm -> deploy - Rename HELM_CHART_DIR -> VALUES_DIR in 
deploy.py - Update all references in workflows, scripts, READMEs, and AGENTS.md Values files (values.yaml, values_e2e_and_dev.yaml, values_preview_server.yaml, values.schema.json) are retained as they are consumed by CDK8s. Helm itself is still installed in CI for the secret-generator dependency. Co-Authored-By: Claude Opus 4.6 --- .github/workflows/helm-schema-lint.yaml | 29 - .github/workflows/integration-tests.yml | 2 +- .../workflows/website-playwright-dev-test.yml | 3 - .pre-commit-config.yaml | 13 - deploy.py | 52 +- ena-submission/README.md | 2 +- integration-tests/AGENTS.md | 4 +- integration-tests/start-server.sh | 2 +- kubernetes/AGENTS.md | 12 +- kubernetes/README.md | 34 +- kubernetes/loculus/CONTRIBUTING.md | 21 - kubernetes/loculus/Chart.lock | 0 kubernetes/loculus/Chart.yaml | 5 - .../loculus/templates/_common-metadata.tpl | 564 ------------------ .../loculus/templates/_config-processor.tpl | 64 -- .../loculus/templates/_enabledOrganisms.tpl | 17 - .../_flattenPreprocessingVersions.tpl | 18 - .../templates/_ingestRenameFromValues.tpl | 11 - .../templates/_inputFieldsFromValues.tpl | 48 -- .../_lineage-system-for-organism.tpl | 19 - .../loculus/templates/_loculus-docker-tag.tpl | 8 - .../templates/_merged-reference-genomes.tpl | 63 -- .../templates/_possiblePriorityClassName.tpl | 5 - .../templates/_preprocessingFromValues.tpl | 78 --- kubernetes/loculus/templates/_resources.tpl | 23 - .../loculus/templates/_service-type.tpl | 9 - .../loculus/templates/_siloDatabaseConfig.tpl | 45 -- .../templates/_submission-data-types.tpl | 16 - kubernetes/loculus/templates/_urls.tpl | 75 --- .../loculus/templates/docs-preview.yaml | 68 --- .../templates/ena-submission-config.yaml | 29 - .../templates/ena-submission-deployment.yaml | 190 ------ .../templates/ena-submission-service.yaml | 19 - .../loculus/templates/ingest-config.yaml | 43 -- .../loculus/templates/ingest-deployment.yaml | 147 ----- .../loculus/templates/ingressroute.yaml | 170 ------ 
.../templates/keycloak-config-map.yaml | 377 ------------ .../templates/keycloak-database-service.yaml | 13 - .../templates/keycloak-database-standin.yaml | 49 -- .../templates/keycloak-deployment.yaml | 134 ----- .../loculus/templates/keycloak-service.yaml | 17 - .../loculus/templates/lapis-deployment.yaml | 70 --- .../loculus/templates/lapis-ingress.yaml | 104 ---- .../loculus/templates/lapis-service.yaml | 18 - .../templates/lapis-silo-database-config.yaml | 34 -- .../templates/loculus-backend-config.yaml | 8 - .../templates/loculus-backend-service.yaml | 19 - .../loculus/templates/loculus-backend.yaml | 158 ----- .../templates/loculus-database-service.yaml | 20 - .../templates/loculus-database-standin.yaml | 67 --- .../loculus-preprocessing-config.yaml | 28 - .../loculus-preprocessing-deployment.yaml | 68 --- .../templates/loculus-website-config.yaml | 30 - .../loculus/templates/loculus-website.yaml | 53 -- .../loculus/templates/minio-deployment.yaml | 88 --- .../loculus/templates/minio-service.yaml | 19 - kubernetes/loculus/templates/secrets.yaml | 50 -- .../loculus/templates/silo-deployment.yaml | 118 ---- .../loculus/templates/silo-service.yaml | 18 - .../loculus/templates/website-service.yaml | 19 - 60 files changed, 60 insertions(+), 3427 deletions(-) delete mode 100644 .github/workflows/helm-schema-lint.yaml delete mode 100644 kubernetes/loculus/CONTRIBUTING.md delete mode 100644 kubernetes/loculus/Chart.lock delete mode 100644 kubernetes/loculus/Chart.yaml delete mode 100644 kubernetes/loculus/templates/_common-metadata.tpl delete mode 100644 kubernetes/loculus/templates/_config-processor.tpl delete mode 100644 kubernetes/loculus/templates/_enabledOrganisms.tpl delete mode 100644 kubernetes/loculus/templates/_flattenPreprocessingVersions.tpl delete mode 100644 kubernetes/loculus/templates/_ingestRenameFromValues.tpl delete mode 100644 kubernetes/loculus/templates/_inputFieldsFromValues.tpl delete mode 100644 
kubernetes/loculus/templates/_lineage-system-for-organism.tpl delete mode 100644 kubernetes/loculus/templates/_loculus-docker-tag.tpl delete mode 100644 kubernetes/loculus/templates/_merged-reference-genomes.tpl delete mode 100644 kubernetes/loculus/templates/_possiblePriorityClassName.tpl delete mode 100644 kubernetes/loculus/templates/_preprocessingFromValues.tpl delete mode 100644 kubernetes/loculus/templates/_resources.tpl delete mode 100644 kubernetes/loculus/templates/_service-type.tpl delete mode 100644 kubernetes/loculus/templates/_siloDatabaseConfig.tpl delete mode 100644 kubernetes/loculus/templates/_submission-data-types.tpl delete mode 100644 kubernetes/loculus/templates/_urls.tpl delete mode 100644 kubernetes/loculus/templates/docs-preview.yaml delete mode 100644 kubernetes/loculus/templates/ena-submission-config.yaml delete mode 100644 kubernetes/loculus/templates/ena-submission-deployment.yaml delete mode 100644 kubernetes/loculus/templates/ena-submission-service.yaml delete mode 100644 kubernetes/loculus/templates/ingest-config.yaml delete mode 100644 kubernetes/loculus/templates/ingest-deployment.yaml delete mode 100644 kubernetes/loculus/templates/ingressroute.yaml delete mode 100644 kubernetes/loculus/templates/keycloak-config-map.yaml delete mode 100644 kubernetes/loculus/templates/keycloak-database-service.yaml delete mode 100644 kubernetes/loculus/templates/keycloak-database-standin.yaml delete mode 100644 kubernetes/loculus/templates/keycloak-deployment.yaml delete mode 100644 kubernetes/loculus/templates/keycloak-service.yaml delete mode 100644 kubernetes/loculus/templates/lapis-deployment.yaml delete mode 100644 kubernetes/loculus/templates/lapis-ingress.yaml delete mode 100644 kubernetes/loculus/templates/lapis-service.yaml delete mode 100644 kubernetes/loculus/templates/lapis-silo-database-config.yaml delete mode 100644 kubernetes/loculus/templates/loculus-backend-config.yaml delete mode 100644 
kubernetes/loculus/templates/loculus-backend-service.yaml delete mode 100644 kubernetes/loculus/templates/loculus-backend.yaml delete mode 100644 kubernetes/loculus/templates/loculus-database-service.yaml delete mode 100644 kubernetes/loculus/templates/loculus-database-standin.yaml delete mode 100644 kubernetes/loculus/templates/loculus-preprocessing-config.yaml delete mode 100644 kubernetes/loculus/templates/loculus-preprocessing-deployment.yaml delete mode 100644 kubernetes/loculus/templates/loculus-website-config.yaml delete mode 100644 kubernetes/loculus/templates/loculus-website.yaml delete mode 100644 kubernetes/loculus/templates/minio-deployment.yaml delete mode 100644 kubernetes/loculus/templates/minio-service.yaml delete mode 100644 kubernetes/loculus/templates/secrets.yaml delete mode 100644 kubernetes/loculus/templates/silo-deployment.yaml delete mode 100644 kubernetes/loculus/templates/silo-service.yaml delete mode 100644 kubernetes/loculus/templates/website-service.yaml diff --git a/.github/workflows/helm-schema-lint.yaml b/.github/workflows/helm-schema-lint.yaml deleted file mode 100644 index b3aeb47e2e..0000000000 --- a/.github/workflows/helm-schema-lint.yaml +++ /dev/null @@ -1,29 +0,0 @@ -name: helm-schema-lint - -on: - pull_request: - paths: - - "kubernetes/**" - - ".github/workflows/helm-schema-lint.yaml" - push: - branches: - - main - workflow_dispatch: - -jobs: - lint: - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@v6 - - - name: Install Helm - uses: azure/setup-helm@v4 - with: - version: v3.18.3 - - - name: Run Helm lint on values files - run: | - helm lint kubernetes/loculus -f kubernetes/loculus/values.yaml - helm lint kubernetes/loculus -f kubernetes/loculus/values.yaml -f kubernetes/loculus/values_e2e_and_dev.yaml - helm lint kubernetes/loculus -f kubernetes/loculus/values.yaml -f kubernetes/loculus/values_preview_server.yaml diff --git a/.github/workflows/integration-tests.yml 
b/.github/workflows/integration-tests.yml index a106afb666..8173a2605f 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -89,7 +89,7 @@ jobs: ./deploy.py --verbose cluster --bind-all - name: Deploy with CDK8s run: | - ./deploy.py --verbose helm \ + ./deploy.py --verbose deploy \ --branch ${{ github.ref_name }} \ --sha ${{ env.sha }} \ --for-e2e \ diff --git a/.github/workflows/website-playwright-dev-test.yml b/.github/workflows/website-playwright-dev-test.yml index daff960152..8f11119210 100644 --- a/.github/workflows/website-playwright-dev-test.yml +++ b/.github/workflows/website-playwright-dev-test.yml @@ -27,9 +27,6 @@ jobs: exclude: - os: ${{ github.ref != 'refs/heads/main' && 'macos-latest' }} steps: - - uses: azure/setup-helm@v4 - with: - version: v3.18.3 - uses: actions/setup-python@v6 with: python-version: '3.12' diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index df4e073db1..40afea958b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -21,16 +21,3 @@ repos: entry: npx prettier@3.6.2 --write language: system files: ^kubernetes/loculus/values\.schema\.json$ - - id: helm-lint - name: helm lint - entry: bash - args: - - -c - - | - set -euo pipefail - helm lint kubernetes/loculus -f kubernetes/loculus/values.yaml - helm lint kubernetes/loculus -f kubernetes/loculus/values.yaml -f kubernetes/loculus/values_e2e_and_dev.yaml - helm lint kubernetes/loculus -f kubernetes/loculus/values.yaml -f kubernetes/loculus/values_preview_server.yaml - language: system - pass_filenames: false - files: ^kubernetes/loculus/ diff --git a/deploy.py b/deploy.py index ab6e5cd09e..a4bc2f9ebe 100755 --- a/deploy.py +++ b/deploy.py @@ -31,7 +31,7 @@ CLUSTER_NAME = "testCluster" CDK8S_DIR = ROOT_DIR / "kubernetes" / "cdk8s" -HELM_CHART_DIR = ROOT_DIR / "kubernetes" / "loculus" # Still used for values files and secret generator +VALUES_DIR = ROOT_DIR / "kubernetes" / "loculus" WEBSITE_PORT_MAPPING = 
"-p 127.0.0.1:3000:30081@agent:0" BACKEND_PORT_MAPPING = "-p 127.0.0.1:8079:30082@agent:0" @@ -49,7 +49,7 @@ S3_PORT_MAPPING, ] -parser = argparse.ArgumentParser(description="Manage k3d cluster and helm installations.") +parser = argparse.ArgumentParser(description="Manage k3d cluster and CDK8s deployments.") subparsers = parser.add_subparsers(dest="subcommand", required=True, help="Subcommands") parser.add_argument( "--dry-run", action="store_true", help="Print commands instead of executing them" @@ -69,44 +69,44 @@ cluster_parser.add_argument("--delete", action="store_true", help="Delete the cluster") cluster_parser.add_argument("--bind-all", action="store_true", help="Bind to all interfaces") -helm_parser = subparsers.add_parser("helm", help="Install the Helm chart to the k3d cluster") -helm_parser.add_argument( +deploy_parser = subparsers.add_parser("deploy", help="Deploy to the k3d cluster using CDK8s") +deploy_parser.add_argument( "--dev", action="store_true", help="Set up a development environment for running the website and the backend locally", ) -helm_parser.add_argument("--branch", help="Set the branch to deploy with the Helm chart") -helm_parser.add_argument("--sha", help="Set the commit sha to deploy with the Helm chart") -helm_parser.add_argument("--uninstall", action="store_true", help="Uninstall installation") -helm_parser.add_argument( +deploy_parser.add_argument("--branch", help="Set the branch to deploy") +deploy_parser.add_argument("--sha", help="Set the commit sha to deploy") +deploy_parser.add_argument("--uninstall", action="store_true", help="Uninstall installation") +deploy_parser.add_argument( "--enablePreprocessing", action="store_true", help="Include deployment of preprocessing pipelines", ) -helm_parser.add_argument( +deploy_parser.add_argument( "--enableIngest", action="store_true", help="Include deployment of ingest pipelines" ) -helm_parser.add_argument( +deploy_parser.add_argument( "--values", action="append", - help="Values file for 
helm chart (can be specified multiple times)", + help="Values file (can be specified multiple times)", ) -helm_parser.add_argument( +deploy_parser.add_argument( "--template", help="Just template and print out the YAML produced", action="store_true", ) -helm_parser.add_argument( - "--for-e2e", action="store_true", help="Use the E2E values file, skip schema validation" +deploy_parser.add_argument( + "--for-e2e", action="store_true", help="Use the E2E values file" ) -helm_parser.add_argument( +deploy_parser.add_argument( "--use-localhost-ip", action="store_true", help="Use the local IP address instead of 'localhost' in the config files", ) -upgrade_parser = subparsers.add_parser("upgrade", help="Upgrade helm installation") +upgrade_parser = subparsers.add_parser("upgrade", help="Re-synth and re-apply deployment") config_parser = subparsers.add_parser("config", help="Generate config files") @@ -126,7 +126,7 @@ config_parser.add_argument( "--values", action="append", - help="Values file for helm chart (can be specified multiple times)", + help="Values file (can be specified multiple times)", ) @@ -151,10 +151,10 @@ def run_command(command: list[str], **kwargs): def main(): if args.subcommand == "cluster": handle_cluster() - elif args.subcommand == "helm": - handle_helm() + elif args.subcommand == "deploy": + handle_deploy() elif args.subcommand == "upgrade": - handle_helm_upgrade() + handle_upgrade() elif args.subcommand == "config": generate_configs( args.from_live, @@ -214,7 +214,7 @@ def cluster_exists(cluster_name): return cluster_name in result.stdout -def handle_helm(): # noqa: C901 +def handle_deploy(): # noqa: C901 if args.uninstall: # Delete all resources created by cdk8s output_file = CDK8S_DIR / "dist" / "loculus.k8s.yaml" @@ -236,7 +236,7 @@ def handle_helm(): # noqa: C901 cdk8s_args += ["--set", "environment=local", "--set", f"branch={branch}"] if args.for_e2e or args.dev: - cdk8s_args += ["--values", str(HELM_CHART_DIR / "values_e2e_and_dev.yaml")] + 
cdk8s_args += ["--values", str(VALUES_DIR / "values_e2e_and_dev.yaml")] if args.sha: cdk8s_args += ["--set", f"sha={args.sha[:7]}"] @@ -264,7 +264,7 @@ def handle_helm(): # noqa: C901 } cdk8s_args += ["--set-json", f"public={json.dumps(public_runtime_config)}"] - cdk8s_args += ["--base-dir", str(HELM_CHART_DIR)] + cdk8s_args += ["--base-dir", str(VALUES_DIR)] # Synth YAML using cdk8s synth_command = [ @@ -284,11 +284,11 @@ def handle_helm(): # noqa: C901 run_command(["kubectl", "apply", "-f", str(output_file), "--server-side", "--force-conflicts"]) -def handle_helm_upgrade(): +def handle_upgrade(): # Re-synth and re-apply (cdk8s is declarative, so upgrade = synth + apply) synth_command = [ "npx", "ts-node", "src/main.ts", - "--base-dir", str(HELM_CHART_DIR), + "--base-dir", str(VALUES_DIR), ] run_command(synth_command, cwd=str(CDK8S_DIR)) @@ -383,7 +383,7 @@ def generate_configs(from_live, live_host, enable_ena, values_files=None): } cdk8s_args += ["--set-json", f"public={json.dumps(public_runtime_config)}"] - cdk8s_args += ["--base-dir", str(HELM_CHART_DIR)] + cdk8s_args += ["--base-dir", str(VALUES_DIR)] # Synth all resources synth_command = ["npx", "ts-node", "src/main.ts"] + cdk8s_args diff --git a/ena-submission/README.md b/ena-submission/README.md index dca44af568..98294a6718 100644 --- a/ena-submission/README.md +++ b/ena-submission/README.md @@ -235,7 +235,7 @@ The tests perform the same tests as described in the section on testing submissi ```sh ../deploy.py cluster --dev -../deploy.py helm --dev --enablePreprocessing +../deploy.py deploy --dev --enablePreprocessing ../generate_local_test_config.sh cd ../backend ./start_dev.sh & diff --git a/integration-tests/AGENTS.md b/integration-tests/AGENTS.md index 45d4d13502..4e28bb08f3 100644 --- a/integration-tests/AGENTS.md +++ b/integration-tests/AGENTS.md @@ -45,9 +45,9 @@ npm --version # Create values file to use host.k3d.internal echo 'localHost: host.k3d.internal' > /tmp/k3d-values.yaml -# Deploy with Helm 
+# Deploy with CDK8s SHA=$(git rev-parse HEAD | cut -c1-7) -./deploy.py --verbose helm --branch main --sha $SHA --for-e2e --enablePreprocessing --values /tmp/k3d-values.yaml +./deploy.py --verbose deploy --branch main --sha $SHA --for-e2e --enablePreprocessing --values /tmp/k3d-values.yaml # Add host entry so the browser can resolve host.k3d.internal # (requires sudo - add this line to /etc/hosts if not already present) diff --git a/integration-tests/start-server.sh b/integration-tests/start-server.sh index 331e445219..013c404337 100755 --- a/integration-tests/start-server.sh +++ b/integration-tests/start-server.sh @@ -3,5 +3,5 @@ set -euxo pipefail ../deploy.py cluster --delete ../deploy.py cluster -../deploy.py helm --for-e2e --enablePreprocessing +../deploy.py deploy --for-e2e --enablePreprocessing python ../.github/scripts/wait_for_pods_to_be_ready.py diff --git a/kubernetes/AGENTS.md b/kubernetes/AGENTS.md index 15c9569f10..429080a2d1 100644 --- a/kubernetes/AGENTS.md +++ b/kubernetes/AGENTS.md @@ -8,10 +8,16 @@ After editing `kubernetes/loculus/values.schema.json`, run prettier to format it npx prettier@3.6.2 --write kubernetes/loculus/values.schema.json ``` -## Testing schema and values changes +## CDK8s -After changing values.schema.json or values.yaml, run helm lint to validate: +The Kubernetes manifests are generated by CDK8s (TypeScript) in `kubernetes/cdk8s/src/`. To format: ```bash -helm lint kubernetes/loculus -f kubernetes/loculus/values.yaml +cd kubernetes/cdk8s && npm run format +``` + +To check formatting: + +```bash +cd kubernetes/cdk8s && npm run check-format ``` diff --git a/kubernetes/README.md b/kubernetes/README.md index 716eb9fb21..a320b8687b 100644 --- a/kubernetes/README.md +++ b/kubernetes/README.md @@ -1,7 +1,7 @@ # Kubernetes setup -This directory contains a Helm chart to deploy Loculus instances for several purposes. 
-The Helm variable `environment` reflects those purposes: +This directory contains the CDK8s (TypeScript) configuration to deploy Loculus instances for several purposes. +The `environment` variable reflects those purposes: - `local`: Running locally with ports - `server`: Running on a server with domain name @@ -12,7 +12,7 @@ _For development, follow the k3d instructions lower down the page._ ### Prerequisites -Install [helm](https://helm.sh/) and use [traefik](https://traefik.io/traefik/) for ingress. +Install [helm](https://helm.sh/) (needed for the secret generator dependency), [Node.js](https://nodejs.org/) (for CDK8s), and use [traefik](https://traefik.io/traefik/) for ingress. Create a long-lived managed database: [to be documented as part of: https://github.com/loculus-project/loculus/issues/793] @@ -20,19 +20,25 @@ Create your own configuration, by copying the `loculus/values.yaml` file and edi ### Deployment -Install the Helm chart: +Deploy using the deploy script: ```shell -helm install loculus kubernetes/loculus -f my-values.yaml +./deploy.py deploy --values my-values.yaml ``` ## Local development/testing with k3d ### Prerequisites -Install [k3d](https://k3d.io/v5.6.0/) and [helm](https://helm.sh/). +Install [k3d](https://k3d.io/v5.6.0/), [helm](https://helm.sh/) (for the secret generator), and [Node.js](https://nodejs.org/). We also recommend installing [k9s](https://k9scli.io/) to inspect cluster resources. +Install the CDK8s dependencies: + +```shell +cd cdk8s && npm ci && cd .. +``` + We deploy to kubernetes via the `../deploy.py` script. It requires you to have python 3.9 or higher and the packages `pyyaml` and `requests` installed. 
To create a virtual environment with the required dependencies run: ```shell @@ -49,14 +55,14 @@ NOTE: On MacOS, make sure that you have configured enough RAM in Docker, we reco ```shell ../deploy.py cluster --dev -../deploy.py helm --dev +../deploy.py deploy --dev ``` Start the [backend](/backend/README.md) and the [website](/website/README.md) locally. Note that by default the deploy script will also start a Loculus deployment without preprocessing and ingest, to add preprocessing and ingest add the `--enablePreprocessing` and `--enableIngest` flags. To run either of these deployments locally you will need to use the generated configs. ##### The `deploy.py` script -The `deploy.py` script wraps the most important `k3d` and `helm` commands. +The `deploy.py` script wraps the most important `k3d` and CDK8s commands. Check the help for more information: ```shell @@ -74,10 +80,10 @@ Create a cluster that doesn't expose the ports of the backend and the website: ../deploy.py cluster --dev ``` -Install the chart with some port forwarding disabled to link to local manual runs of the backend and website: +Deploy the chart with some port forwarding disabled to link to local manual runs of the backend and website: ```shell -../deploy.py helm --dev +../deploy.py deploy --dev ``` Start the website and the backend locally. @@ -97,7 +103,7 @@ kubectl get events might help to see the reason. -Redeploy after changing the Helm chart: +Redeploy after changing the CDK8s code: ```shell ../deploy.py upgrade @@ -109,7 +115,7 @@ You can also delete the cluster with: ../deploy.py cluster --delete ``` -With helm based commands you can customise the values yaml file with `--values [file.yaml]`. +You can customise the values yaml file with `--values [file.yaml]`. 
## Full deployment for E2E testing @@ -122,10 +128,10 @@ Create a cluster with ports for all services exposed: ../deploy.py cluster ``` -Install the chart to deploy the services: +Deploy the services: ```shell -../deploy.py helm --branch [your_branch] +../deploy.py deploy --branch [your_branch] ``` ## Argo CD diff --git a/kubernetes/loculus/CONTRIBUTING.md b/kubernetes/loculus/CONTRIBUTING.md deleted file mode 100644 index d5702651e2..0000000000 --- a/kubernetes/loculus/CONTRIBUTING.md +++ /dev/null @@ -1,21 +0,0 @@ -# Developer guide - -## How to debug helm templates with `yq` - -To get a particular helm template from the output of `helm template` command, you can use `yq`. - -For example, to get the `website_config.json` from the `loculus-website-config` ConfigMap, you can use the following command: - -```bash -helm template loculus kubernetes/loculus \ -| yq e 'select(.metadata.name == "loculus-website-config") | .data."website_config.json"' -``` - -## Diffing produced Kubernetes manifests - -To diff produced manifests, you can use the `diff` command and to specifically compare metadata fields you can use the `kubernetes/loculus/utils/yamldiff_script.py` script. - -1. Install yamldiff: `go install github.com/sahilm/yamldiff@latest` -2. Create the manifests to diff: `helm template loculus kubernetes/loculus > /tmp/new.yaml` -3. Create manifests to diff against, e.g. from main and put into `/tmp/old.yaml` -4. 
Run script to diff: `python kubernetes/loculus/utils/yamldiff_script.py /tmp/old.yaml /tmp/new.yaml` diff --git a/kubernetes/loculus/Chart.lock b/kubernetes/loculus/Chart.lock deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/kubernetes/loculus/Chart.yaml b/kubernetes/loculus/Chart.yaml deleted file mode 100644 index 8a104bdacd..0000000000 --- a/kubernetes/loculus/Chart.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v2 -name: loculus -description: A Helm chart for loculus -version: 0.1.0 -icon: https://loculus.org/favicon.svg diff --git a/kubernetes/loculus/templates/_common-metadata.tpl b/kubernetes/loculus/templates/_common-metadata.tpl deleted file mode 100644 index df44719198..0000000000 --- a/kubernetes/loculus/templates/_common-metadata.tpl +++ /dev/null @@ -1,564 +0,0 @@ -{{/* Common metadata fields are metadata fields that are required for all organisms -for Loculus to work. -In almost all cases (except for versionComment), these fields are not supposed to -be input by the user but are instead generated by the system. -They are defined here so that they can't be misconfigured in values.yaml. -Beware: if a field here is supposed to be user submitted and preprocessed, you need -it's not enough to add the field here, you need to add it in other places. 
-See https://github.com/loculus-project/loculus/pull/3141 for an example */}} -{{- define "loculus.commonMetadata" }} -fields: - - name: accessionVersion - type: string - notSearchable: true - hideOnSequenceDetailsPage: true - includeInDownloadsByDefault: true - - name: accession - type: string - notSearchable: true - hideOnSequenceDetailsPage: true - - name: version - type: int - hideOnSequenceDetailsPage: true - - name: submissionId - displayName: Submission ID - type: string - header: Submission details - orderOnDetailsPage: 5000 - enableSubstringSearch: true - includeInDownloadsByDefault: true - - name: isRevocation - displayName: Is revocation - type: boolean - autocomplete: true - hideOnSequenceDetailsPage: true - - name: submitter - type: string - generateIndex: true - autocomplete: true - hideOnSequenceDetailsPage: true - header: Submission details - orderOnDetailsPage: 5010 - - name: groupName - type: string - generateIndex: true - autocomplete: true - header: Submission details - displayName: Submitting group - includeInDownloadsByDefault: true - orderOnDetailsPage: 5020 - customDisplay: - type: submittingGroup - displayGroup: group - - name: groupId - displayName: Group ID - type: int - autocomplete: true - header: Submission details - displayName: Submitting group (numeric ID) - orderOnDetailsPage: 5030 - customDisplay: - type: submittingGroup - displayGroup: group - - name: submittedAtTimestamp - type: timestamp - displayName: Date submitted - header: Submission details - orderOnDetailsPage: 5040 - - name: submittedDate - type: string - hideOnSequenceDetailsPage: true - generateIndex: true - autocomplete: true - displayName: Date submitted (exact) - orderOnDetailsPage: 5050 - - name: releasedAtTimestamp - type: timestamp - displayName: Date released - header: Submission details - columnWidth: 100 - orderOnDetailsPage: 5060 - - name: releasedDate - type: string - hideOnSequenceDetailsPage: true - generateIndex: true - autocomplete: true - displayName: 
Date released (exact) - columnWidth: 100 - orderOnDetailsPage: 5070 - {{- if $.Values.dataUseTerms.enabled }} - - name: dataUseTerms - type: string - generateIndex: true - autocomplete: true - displayName: Data use terms - initiallyVisible: true - includeInDownloadsByDefault: true - customDisplay: - type: dataUseTerms - header: Data use terms - orderOnDetailsPage: 610 - - name: dataUseTermsRestrictedUntil - type: date - displayName: Data use terms restricted until - hideOnSequenceDetailsPage: true - header: Data use terms - orderOnDetailsPage: 620 - - name: dataBecameOpenAt - type: date - displayName: Date data became open - hideOnSequenceDetailsPage: true - header: Data use terms - orderOnDetailsPage: 625 - {{- if $.Values.dataUseTerms.urls }} - - name: dataUseTermsUrl - displayName: Data use terms URL - type: string - notSearchable: true - header: Data use terms - includeInDownloadsByDefault: true - customDisplay: - type: link - url: "__value__" - orderOnDetailsPage: 630 - {{- end}} - {{- end}} - - name: versionStatus - displayName: Version status - type: string - autocomplete: true - hideOnSequenceDetailsPage: true - - name: versionComment - type: string - displayName: Version comment - header: Submission details - orderOnDetailsPage: 5000 - - name: pipelineVersion - type: int - notSearchable: true - hideOnSequenceDetailsPage: true -{{- end}} - -{{/* Patches schema by adding to it and overwriting overlapping fields by the value in metadataAdd*/}} -{{- define "loculus.patchMetadataSchema" -}} -{{- $patchedSchema := deepCopy . }} -{{- $metadata := .metadata | default (list) }} -{{- $toAdd := . | dig "metadataAdd" list | default (list) }} -{{- $metadataMap := dict -}} -{{- range $metadata }} - {{- $key := .name }} - {{- $metadataMap = merge $metadataMap (dict $key .) -}} -{{- end -}} -{{- range $toAdd }} - {{- $key := .name }} - {{- $metadataMap = set $metadataMap $key . 
-}} -{{- end -}} -{{- $patchedMetadata := list -}} -{{- range $key, $value := $metadataMap }} - {{- $patchedMetadata = append $patchedMetadata $value -}} -{{- end -}} -{{- set $patchedSchema "metadata" $patchedMetadata | toYaml -}} -{{- end -}} - -{{/* Generate website config from passed config object */}} -{{- define "loculus.generateWebsiteConfig" }} -name: {{ quote $.Values.name }} -logo: {{ $.Values.logo | toYaml | nindent 6 }} -{{ if $.Values.sequenceFlagging }} -sequenceFlagging: {{ $.Values.sequenceFlagging | toYaml | nindent 6 }} -{{ end }} -{{ if $.Values.gitHubMainUrl }} -gitHubMainUrl: {{ quote $.Values.gitHubMainUrl }} -{{ end }} -{{ if $.Values.bannerMessageURL }} -bannerMessageURL: {{ quote $.Values.bannerMessageURL }} -{{ end }} -{{ if $.Values.bannerMessage }} -bannerMessage: {{ quote $.Values.bannerMessage }} -{{ else if or $.Values.runDevelopmentMainDatabase $.Values.runDevelopmentKeycloakDatabase }} -bannerMessage: "Warning: Development or Keycloak main database is enabled. Development environment only." 
-{{ end }} -{{ if $.Values.submissionBannerMessageURL }} -submissionBannerMessageURL: {{ quote $.Values.submissionBannerMessageURL }} -{{ end }} -{{ if $.Values.submissionBannerMessage }} -submissionBannerMessage: {{ quote $.Values.submissionBannerMessage }} -{{ end }} -{{ if $.Values.gitHubEditLink }} -gitHubEditLink: {{ quote $.Values.gitHubEditLink }} -{{ end }} -{{ if $.Values.welcomeMessageHTML }} -welcomeMessageHTML: {{ quote $.Values.welcomeMessageHTML }} -{{end}} -{{ if $.Values.additionalHeadHTML }} -additionalHeadHTML: {{ quote $.Values.additionalHeadHTML }} -{{end}} - -enableLoginNavigationItem: {{ $.Values.website.websiteConfig.enableLoginNavigationItem }} -enableSubmissionNavigationItem: {{ $.Values.website.websiteConfig.enableSubmissionNavigationItem }} -enableSubmissionPages: {{ $.Values.website.websiteConfig.enableSubmissionPages }} -enableSeqSets: {{ $.Values.seqSets.enabled }} -{{- if $.Values.seqSets.fieldsToDisplay }} -seqSetsFieldsToDisplay: {{ $.Values.seqSets.fieldsToDisplay | toJson }} -{{- end }} -enableDataUseTerms: {{ $.Values.dataUseTerms.enabled }} -accessionPrefix: {{ quote $.Values.accessionPrefix }} -{{- $commonMetadata := (include "loculus.commonMetadata" . | fromYaml).fields }} -organisms: - {{- range $_, $item := (include "loculus.enabledOrganisms" . 
| fromJson).organisms }} -{{- $key := $item.key }} -{{- $instance := $item.contents }} - {{ $key }}: - schema: - {{- with ($instance.schema | include "loculus.patchMetadataSchema" | fromYaml) }} - organismName: {{ quote .organismName }} - {{ if .linkOuts }} - linkOuts: - {{- range $linkOut := .linkOuts }} - - name: {{ quote $linkOut.name }} - url: {{ quote $linkOut.url }} - {{- if $linkOut.maxNumberOfRecommendedEntries }} - maxNumberOfRecommendedEntries: {{ $linkOut.maxNumberOfRecommendedEntries }} - {{- end }} - {{- end }} - {{- end }} - loadSequencesAutomatically: {{ .loadSequencesAutomatically | default false }} - {{ if .richFastaHeaderFields}} - richFastaHeaderFields: {{ toJson .richFastaHeaderFields }} - {{ end }} - {{- include "loculus.submissionDataTypes" . | nindent 6 }} - {{- $nucleotideSequences := .nucleotideSequences | default (list "main")}} - {{ if .image }} - image: {{ .image }} - {{ end }} - {{ if .description }} - description: {{ quote .description }} - {{ end }} - primaryKey: accessionVersion - inputFields: {{- include "loculus.inputFields" . 
| nindent 8 }} - - name: versionComment - displayName: Version comment - definition: "Reason for revising sequences or other general comments concerning a specific version" - example: "Fixed an issue in previous version where low-coverage nucleotides were erroneously filled with reference sequence" - desired: true - {{ if .files }} - files: {{ .files | toYaml | nindent 8 }} - {{ end }} - metadata: - {{- $args := dict "metadata" (concat $commonMetadata .metadata) "referenceGenomes" $instance.referenceGenomes}} - {{ $metadata := include "loculus.generateWebsiteMetadata" $args | fromYaml }} - {{ $metadata.fields | toYaml | nindent 8 }} - {{ if .files }} - {{- range .files }} - - name: {{ .name }} - type: string - header: "Files" - noInput: true - customDisplay: - type: fileList - {{- end }} - {{ end }} - {{ if .metadataTemplate }} - metadataTemplate: - {{ .metadataTemplate | toYaml | nindent 8}} - {{ end }} - {{ .website | toYaml | nindent 6 }} - {{- end }} - referenceGenomes: - {{ $instance.referenceGenomes | toYaml | nindent 6 }} - {{- end }} -{{- end }} - -{{- define "loculus.standardWebsiteMetadata" }} -- type: {{ .type | default "string" | quote }} - {{- if .autocomplete }} - autocomplete: {{ .autocomplete }} - {{- end }} - {{- if .enableSubstringSearch }} - substringSearch: {{ .enableSubstringSearch }} - {{- end}} - {{- if .notSearchable }} - notSearchable: {{ .notSearchable }} - {{- end }} - {{- if .initiallyVisible }} - initiallyVisible: {{ .initiallyVisible }} - {{- end }} - {{- if .hideInSearchResultsTable }} - hideInSearchResultsTable: {{ .hideInSearchResultsTable }} - {{- end }} - {{- if or (or (eq .type "timestamp") (eq .type "date")) .rangeSearch }} - rangeSearch: true - {{- end }} - {{- if .rangeOverlapSearch }} - rangeOverlapSearch: {{ .rangeOverlapSearch | toJson }} - {{- end}} - {{- if .lineageSystem }} - lineageSearch: true - {{- end }} - {{- if .hideOnSequenceDetailsPage }} - hideOnSequenceDetailsPage: {{ .hideOnSequenceDetailsPage }} - {{- end }} 
- {{- if .columnWidth }} - columnWidth: {{ .columnWidth }} - {{- end }} - {{- if .order }} - order: {{ .order }} - {{- end }} - {{- if .orderOnDetailsPage }} - orderOnDetailsPage: {{ .orderOnDetailsPage }} - {{- end }} - {{- if .includeInDownloadsByDefault }} - includeInDownloadsByDefault: {{ .includeInDownloadsByDefault }} - {{- end }} - {{- if .onlyForReference }} - onlyForReference: {{ .onlyForReference }} - {{- end }} - {{- if .customDisplay }} - customDisplay: - type: {{ quote .customDisplay.type }} - {{- if .customDisplay.url }} - url: {{ .customDisplay.url }} - {{- end }} - {{- if .customDisplay.linkMenuItems }} - linkMenuItems: - {{- range .customDisplay.linkMenuItems }} - - name: {{ quote .name }} - url: {{ quote .url }} - {{- end }} - {{- end }} - {{- if .customDisplay.displayGroup }} - displayGroup: {{ quote .customDisplay.displayGroup }} - {{- end }} - {{- if .customDisplay.label }} - label: {{ quote .customDisplay.label }} - {{- end }} - {{- if .customDisplay.html }} - html: {{ .customDisplay.html }} - {{- end }} - {{- end }} -{{- end }} - -{{/* Generate website metadata from passed metadata array */}} -{{- define "loculus.generateWebsiteMetadata" }} -{{- $rawUniqueSegments := (include "loculus.getNucleotideSegmentNames" .referenceGenomes | fromYaml).segments }} -{{- $isSegmented := gt (len $rawUniqueSegments) 1 }} -{{- $metadataList := .metadata }} -fields: -{{- range $metadataList }} -{{- if and $isSegmented .perSegment }} -{{- $currentItem := . }} -{{- range $segment := $rawUniqueSegments }} -{{- with $currentItem }} -{{ include "loculus.standardWebsiteMetadata" . 
}} - name: {{ printf "%s_%s" .name $segment | quote }} - {{- if .displayName }} - displayName: {{ printf "%s %s" .displayName $segment | quote }} - {{- end }} - {{- if (default false .oneHeader)}} - header: {{ (default "Other" .header) | quote }} - {{- else }} - header: {{ printf "%s %s" (default "Other" .header) $segment | quote }} - {{- end }} - {{- if and .customDisplay .customDisplay.displayGroup }} - customDisplay: - type: {{ quote .customDisplay.type }} - displayGroup: {{ printf "%s_%s" .customDisplay.displayGroup $segment | quote }} - {{- if .customDisplay.label }} - label: {{ printf "%s %s" .customDisplay.label $segment | quote }} - {{- end }} - {{- end }} -{{- end }} -{{- end }} -{{- else }} -{{ include "loculus.standardWebsiteMetadata" . }} - name: {{ quote .name }} - {{- if .displayName }} - displayName: {{ quote .displayName }} - {{- end }} - header: {{ default "Other" .header }} -{{- end}} -{{- end}} -{{- end}} - -{{/* Generate backend config from passed config object */}} -{{- define "loculus.generateBackendConfig" }} -accessionPrefix: {{ quote $.Values.accessionPrefix }} -zstdCompressionLevel: {{ $.Values.zstdCompressionLevel }} -pipelineVersionUpgradeCheckIntervalSeconds: {{ $.Values.pipelineVersionUpgradeCheckIntervalSeconds }} -name: {{ quote $.Values.name }} -dataUseTerms: - {{$.Values.dataUseTerms | toYaml | nindent 2}} -{{- if .Values.fileSharing }} -fileSharing: - {{ .Values.fileSharing | toYaml | nindent 2 }} -{{- end }} -websiteUrl: {{ include "loculus.websiteUrl" . }} -backendUrl: {{ include "loculus.backendUrl" . }} -organisms: - {{- range $_, $item := (include "loculus.enabledOrganisms" . | fromJson).organisms }} -{{- $key := $item.key }} -{{- $instance := $item.contents }} - {{ $key }}: - schema: - {{- with $instance.schema }} - organismName: {{ quote .organismName }} - {{- include "loculus.submissionDataTypes" . 
| nindent 6 }} - {{- if .files }} - files: - {{ .files | toYaml | nindent 8 }} - {{- end }} - metadata: - {{- $args := dict "metadata" (include "loculus.patchMetadataSchema" . | fromYaml).metadata "referenceGenomes" $instance.referenceGenomes }} - {{- $metadata := include "loculus.generateBackendMetadata" $args | fromYaml }} - {{ $metadata.fields | toYaml | nindent 8 }} - externalMetadata: - {{- $args := dict "metadata" (include "loculus.patchMetadataSchema" . | fromYaml).metadata "referenceGenomes" $instance.referenceGenomes }} - {{- $metadata := include "loculus.generateBackendExternalMetadata" $args | fromYaml }} - {{ $metadata.fields | default list | toYaml | nindent 8 }} - earliestReleaseDate: - {{- if .earliestReleaseDate }} - {{ .earliestReleaseDate | toYaml | nindent 8 }} - {{- else }} - enabled: false - externalFields: [] - {{- end }} - {{- end }} - referenceGenome: - {{- $referenceGenome := include "loculus.mergeReferenceGenomes" $instance.referenceGenomes | fromYaml }} - {{ $referenceGenome | toYaml | nindent 10 }} - {{- end }} -{{- end }} - -{{- define "loculus.generateReferenceGenome" }} -{{ if .nucleotideSequences }} -nucleotideSequences: - {{ $nucleotideSequences := include "loculus.generateSequences" .nucleotideSequences | fromYaml }} - {{ $nucleotideSequences.fields | toYaml | nindent 8 }} -{{ else }} -nucleotideSequences: [] -{{ end }} -{{ if .genes }} -genes: - {{ $genes := include "loculus.generateSequences" .genes | fromYaml }} - {{ $genes.fields | toYaml | nindent 8 }} -{{ else }} -genes: [] -{{ end }} -{{- end }} - -{{- define "loculus.generateSequences" }} -{{- $sequences := . 
}} -fields: - {{- range $sequence := $sequences }} - - name: {{ printf "%s" $sequence.name | quote}} - sequence: {{ printf "%s" $sequence.sequence | quote }} - {{- end }} -{{- end }} - -{{/* Generate backend metadata from passed metadata array */}} -{{- define "loculus.generateBackendMetadata" }} -{{- $rawUniqueSegments := (include "loculus.getNucleotideSegmentNames" .referenceGenomes | fromYaml).segments }} -{{- $isSegmented := gt (len $rawUniqueSegments) 1 }} -{{- $metadataList := .metadata }} -fields: -{{- range $metadataList }} -{{- $currentItem := . }} -{{- if and $isSegmented .perSegment }} -{{- range $segment := $rawUniqueSegments }} -{{- with $currentItem }} - - name: {{ printf "%s_%s" .name $segment | quote }} - type: {{ .type | default "string" | quote }} -{{- end }} -{{- end}} -{{- else }} - - name: {{ quote .name }} - type: {{ .type | default "string" | quote }} -{{- end}} -{{- end}} - - name: versionComment - type: "string" -{{- end}} - -{{/* Generate backend metadata from passed metadata array */}} -{{- define "loculus.generateBackendExternalMetadata" }} -{{- $rawUniqueSegments := (include "loculus.getNucleotideSegmentNames" .referenceGenomes | fromYaml).segments }} -{{- $isSegmented := gt (len $rawUniqueSegments) 1 }} -{{- $metadataList := .metadata }} -fields: -{{- range $metadataList }} -{{- $currentItem := . 
}} -{{- if eq .header "INSDC" }} -{{- if and $isSegmented .perSegment }} -{{- range $segment := $rawUniqueSegments }} -{{- with $currentItem }} - - name: {{ printf "%s_%s" .name $segment | quote }} - type: {{ .type | default "string" | quote }} - {{- if .required }} - required: {{ .required }} - {{- end }} - externalMetadataUpdater: "ena" -{{- end }} -{{- end}} -{{- else }} - - name: {{ quote .name }} - type: {{ .type | default "string" | quote }} - {{- if .required }} - required: {{ .required }} - {{- end }} - externalMetadataUpdater: "ena" -{{- end}} -{{- end}} -{{- end}} -{{- end}} - -{{- define "loculus.publicRuntimeConfig" }} -{{- $publicRuntimeConfig := $.Values.public }} -{{- $lapisUrlTemplate := "" }} -{{- if $publicRuntimeConfig.lapisUrlTemplate }} - {{- $lapisUrlTemplate = $publicRuntimeConfig.lapisUrlTemplate }} -{{- else if eq $.Values.environment "server" }} - {{- $lapisUrlTemplate = printf "https://lapis%s%s/%s" $.Values.subdomainSeparator $.Values.host "%organism%" }} -{{- else }} - {{- $lapisUrlTemplate = printf "http://%s:8080/%%organism%%" $.Values.localHost }} -{{- end }} -{{- $externalLapisUrlConfig := dict "lapisUrlTemplate" $lapisUrlTemplate "config" $.Values }} - "backendUrl": "{{ include "loculus.backendUrl" . }}", - "lapisUrls": {{- include "loculus.generateExternalLapisUrls" $externalLapisUrlConfig | fromYaml | toJson }}, - "keycloakUrl": "{{ include "loculus.keycloakUrl" . }}" -{{- end }} - - -{{/* Generate ENA submission config from passed config object */}} -{{- define "loculus.generateENASubmissionConfig" }} -enaOrganisms: - {{- range $_, $item := (include "loculus.enabledOrganisms" . 
| fromJson).organisms }} -{{- $key := $item.key }} -{{- $instance := $item.contents }} - {{- if $instance.enaDeposition }} - {{- range $suborganismName, $configFile := $instance.enaDeposition -}} - {{- if eq $suborganismName "singleReference" }} - {{ $key }}: - {{- else }} - {{ $suborganismName }}: - loculusOrganism: {{ quote $key }} - {{- end }} - {{- with $instance.schema }} - {{ $configFile.configFile | toYaml | nindent 4 }} - {{- if $configFile.referenceIdentifierField }} - referenceIdentifierField: {{ quote $configFile.referenceIdentifierField }} - {{- end }} - organismName: {{ quote .organismName }} - {{- $rawUniqueSegments := (include "loculus.getNucleotideSegmentNames" $instance.referenceGenomes | fromYaml).segments }} - segments: {{ $rawUniqueSegments | toYaml | nindent 6 }} - externalMetadata: - {{- $args := dict - "metadata" (include "loculus.patchMetadataSchema" . | fromYaml).metadata - "referenceGenomes" $instance.referenceGenomes - }} - {{- $metadata := include "loculus.generateBackendExternalMetadata" $args | fromYaml }} - {{- $metadata.fields | default list | toYaml | nindent 6 }} - {{- end }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} diff --git a/kubernetes/loculus/templates/_config-processor.tpl b/kubernetes/loculus/templates/_config-processor.tpl deleted file mode 100644 index cfea6861d9..0000000000 --- a/kubernetes/loculus/templates/_config-processor.tpl +++ /dev/null @@ -1,64 +0,0 @@ -{{- define "loculus.configProcessor" -}} -- name: config-processor-{{ .name }} - image: ghcr.io/loculus-project/config-processor:{{ .dockerTag }} - imagePullPolicy: {{ $.imagePullPolicy }} - volumeMounts: - - name: {{ .name }} - mountPath: /input - - name: {{ .name }}-processed - mountPath: /output - command: ["python3"] - args: ["/app/config-processor.py", "/input", "/output"] - resources: - requests: - cpu: 50m - memory: 64Mi - limits: - cpu: 500m - memory: 256Mi - env: - - name: LOCULUSSUB_smtpPassword - valueFrom: - secretKeyRef: - name: smtp-password 
- key: secretKey - - name: LOCULUSSUB_insdcIngestUserPassword - valueFrom: - secretKeyRef: - name: service-accounts - key: insdcIngestUserPassword - - name: LOCULUSSUB_preprocessingPipelinePassword - valueFrom: - secretKeyRef: - name: service-accounts - key: preprocessingPipelinePassword - - name: LOCULUSSUB_externalMetadataUpdaterPassword - valueFrom: - secretKeyRef: - name: service-accounts - key: externalMetadataUpdaterPassword - - name: LOCULUSSUB_backendUserPassword - valueFrom: - secretKeyRef: - name: service-accounts - key: backendUserPassword - - name: LOCULUSSUB_backendKeycloakClientSecret - valueFrom: - secretKeyRef: - name: backend-keycloak-client-secret - key: backendKeycloakClientSecret - - name: LOCULUSSUB_orcidSecret - valueFrom: - secretKeyRef: - name: orcid - key: orcidSecret -{{- end }} - - -{{- define "loculus.configVolume" -}} -- name: {{ .name }} - configMap: - name: {{ if .configmap }}{{ .configmap }}{{ else }}{{ .name }}{{ end }} -- name: {{ .name }}-processed - emptyDir: {} -{{- end }} diff --git a/kubernetes/loculus/templates/_enabledOrganisms.tpl b/kubernetes/loculus/templates/_enabledOrganisms.tpl deleted file mode 100644 index 65f5bcd0a6..0000000000 --- a/kubernetes/loculus/templates/_enabledOrganisms.tpl +++ /dev/null @@ -1,17 +0,0 @@ -{{- define "loculus.enabledOrganisms" -}} -{{- $allOrganisms := (.Values.organisms | default .Values.defaultOrganisms) -}} -{{- $enabledList := list -}} -{{- range $key := (keys $allOrganisms | sortAlpha) -}} - {{- $organism := get $allOrganisms $key -}} - {{- if ne $organism.enabled false -}} -{{- $enabledList = append $enabledList (dict "key" $key "contents" $organism) -}} - {{- end -}} -{{- end -}} -{{- /* - Helm's `fromJson` function (used in consuming templates) expects a single top-level object - when parsing the JSON output. 
Wrapping the list of enabled organisms in a dictionary - under the key "organisms" ensures `fromJson` can parse it correctly, which then allows - consuming templates to access the list via `.organisms`. -*/ -}} -{{- dict "organisms" $enabledList | toJson -}} -{{- end -}} diff --git a/kubernetes/loculus/templates/_flattenPreprocessingVersions.tpl b/kubernetes/loculus/templates/_flattenPreprocessingVersions.tpl deleted file mode 100644 index 65c1c0b8cf..0000000000 --- a/kubernetes/loculus/templates/_flattenPreprocessingVersions.tpl +++ /dev/null @@ -1,18 +0,0 @@ -{{- define "loculus.flattenPreprocessingVersions" -}} -{{- $preprocessing := . -}} -{{- $flattened := list -}} -{{- $seen := dict -}} -{{- range $pc := $preprocessing -}} - {{- $versions := (kindIs "slice" $pc.version | ternary $pc.version (list $pc.version)) -}} - {{- range $v := $versions -}} - {{- if hasKey $seen (toString $v) -}} - {{- fail (printf "Duplicate preprocessing pipeline version %v found in organism configuration" $v) -}} - {{- end -}} - {{- $_ := set $seen (toString $v) true -}} - {{- $copy := deepCopy $pc -}} - {{- $_ := set $copy "version" $v -}} - {{- $flattened = append $flattened $copy -}} - {{- end -}} -{{- end -}} -{{- dict "items" $flattened | toJson -}} -{{- end -}} diff --git a/kubernetes/loculus/templates/_ingestRenameFromValues.tpl b/kubernetes/loculus/templates/_ingestRenameFromValues.tpl deleted file mode 100644 index 08c8be45bb..0000000000 --- a/kubernetes/loculus/templates/_ingestRenameFromValues.tpl +++ /dev/null @@ -1,11 +0,0 @@ -{{- define "loculus.ingestRename" -}} -{{- $metadata := . 
}} -{{- $ingestRename := dict }} -{{- range $field := $metadata }} - {{- if hasKey $field "ingest" }} - {{- $_ := set $ingestRename (index $field "ingest") (index $field "name") }} - {{- end }} -{{- end }} -{{- $output := dict "rename" $ingestRename }} -{{- toYaml $output }} -{{- end -}} \ No newline at end of file diff --git a/kubernetes/loculus/templates/_inputFieldsFromValues.tpl b/kubernetes/loculus/templates/_inputFieldsFromValues.tpl deleted file mode 100644 index 92a504aead..0000000000 --- a/kubernetes/loculus/templates/_inputFieldsFromValues.tpl +++ /dev/null @@ -1,48 +0,0 @@ -{{- define "loculus.inputFields" -}} -{{- $data := . }} -{{- $metadata := $data.metadata }} -{{- $extraFields := $data.extraInputFields }} -{{- $TO_KEEP := list "name" "displayName" "definition" "guidance" "example" "required" "noEdit" "desired" "options"}} - - -{{- $fieldsDict := dict }} -{{- $index := 0 }} - -{{- /* Add fields with position "first" to the dict */}} -{{- range $field := $extraFields }} - {{- if eq $field.position "first" }} - {{- $_ := set $fieldsDict (printf "%03d" $index) $field }} - {{- $index = add $index 1 }} - {{- end }} -{{- end }} - -{{- /* Add filtered metadata fields to the dict */}} -{{- range $field := $metadata }} - {{- if not (hasKey $field "noInput") }} - {{- $_ := set $fieldsDict (printf "%03d" $index) $field }} - {{- $index = add $index 1 }} - {{- end }} -{{- end }} - -{{- /* Add fields with position "last" to the dict */}} -{{- range $field := $extraFields }} - {{- if eq $field.position "last" }} - {{- $_ := set $fieldsDict (printf "%03d" $index) $field }} - {{- $index = add $index 1 }} - {{- end }} -{{- end }} - -{{- /* Iterate over sorted index to get list of values (sorted by key) */}} -{{- $inputFields := list }} -{{- range $k:= keys $fieldsDict | sortAlpha }} - {{- $toAdd := dict }} - {{- range $k, $v := (index $fieldsDict $k) }} - {{- if has $k $TO_KEEP }} - {{- $_ := set $toAdd $k $v }} - {{- end }} - {{- end }} - {{- $inputFields = append 
$inputFields $toAdd }} -{{- end }} - -{{- toYaml $inputFields }} -{{- end -}} \ No newline at end of file diff --git a/kubernetes/loculus/templates/_lineage-system-for-organism.tpl b/kubernetes/loculus/templates/_lineage-system-for-organism.tpl deleted file mode 100644 index 33af07ad24..0000000000 --- a/kubernetes/loculus/templates/_lineage-system-for-organism.tpl +++ /dev/null @@ -1,19 +0,0 @@ -{{- define "loculus.lineageSystemForOrganism" -}} -{{- $organism := . -}} -{{- $schema := $organism.schema | include "loculus.patchMetadataSchema" | fromYaml }} -{{- $lineageSystems := list }} -{{- range $entry := $schema.metadata }} - {{- if hasKey $entry "lineageSystem" }} - {{- $lineageSystems = append $lineageSystems $entry.lineageSystem }} - {{- end }} -{{- end }} - -{{- $uniqueLineageSystems := $lineageSystems | uniq }} -{{- if gt (len $uniqueLineageSystems) 1 }} - {{- fail (printf "Multiple lineage systems found: %v" $uniqueLineageSystems) }} -{{- else if eq (len $uniqueLineageSystems) 0 }} - {{- /*no op*/ -}} -{{- else }} - {{- index $uniqueLineageSystems 0 -}} -{{- end }} -{{- end }} diff --git a/kubernetes/loculus/templates/_loculus-docker-tag.tpl b/kubernetes/loculus/templates/_loculus-docker-tag.tpl deleted file mode 100644 index 9b52769034..0000000000 --- a/kubernetes/loculus/templates/_loculus-docker-tag.tpl +++ /dev/null @@ -1,8 +0,0 @@ -{{- define "loculus.dockerTag" }} -{{- if .sha }} -{{- printf "commit-%v" .sha }} -{{- else }} -{{- $dockerTag := (eq (.branch | default "main") "main") | ternary "latest" .branch -}} -{{- regexReplaceAll "/" $dockerTag "-" }} -{{- end }} -{{- end }} diff --git a/kubernetes/loculus/templates/_merged-reference-genomes.tpl b/kubernetes/loculus/templates/_merged-reference-genomes.tpl deleted file mode 100644 index c00f17ff22..0000000000 --- a/kubernetes/loculus/templates/_merged-reference-genomes.tpl +++ /dev/null @@ -1,63 +0,0 @@ -{{- define "loculus.mergeReferenceGenomes" -}} -{{- $segmentWithReferencesList := . 
-}} -{{- $lapisNucleotideSequences := list -}} -{{- $lapisGenes := list -}} - -{{- $singleSegment := eq (len $segmentWithReferencesList) 1 -}} - -{{- range $segment := $segmentWithReferencesList -}} - {{- $segmentName := $segment.name -}} - {{- $singleReference := eq (len $segment.references) 1 -}} - {{- range $reference := $segment.references -}} - {{- $referenceName := $reference.name -}} - {{- if $singleReference -}} - {{/* Single reference mode - no suffix */}} - {{- $lapisNucleotideSequences = append $lapisNucleotideSequences (dict - "name" $segmentName - "sequence" $reference.sequence - ) -}} - {{- else -}} - {{- $name := printf "%s%s" (ternary "" (printf "%s-" $segmentName) $singleSegment) $referenceName -}} - {{- $lapisNucleotideSequences = append $lapisNucleotideSequences (dict - "name" $name - "sequence" $reference.sequence - ) -}} - {{- end -}} - - {{/* Add genes if present */}} - {{- if $reference.genes -}} - {{- range $gene := $reference.genes -}} - {{- if $singleReference -}} - {{- $lapisGenes = append $lapisGenes (dict - "name" $gene.name - "sequence" $gene.sequence - ) -}} - {{- else -}} - {{- $geneName := printf "%s-%s" $gene.name $referenceName -}} - {{- $lapisGenes = append $lapisGenes (dict - "name" $geneName - "sequence" $gene.sequence - ) -}} - {{- end -}} - {{- end -}} - {{- end -}} - {{- end -}} -{{- end -}} - -{{- $result := dict "nucleotideSequences" $lapisNucleotideSequences "genes" $lapisGenes -}} -{{- $result | toYaml -}} -{{- end -}} - - -{{- define "loculus.getNucleotideSegmentNames" -}} -{{- $segmentWithReferencesList := . 
-}} - -{{/* Extract segment names directly from .name */}} -{{- $segmentNames := list -}} -{{- range $segment := $segmentWithReferencesList -}} - {{- $segmentNames = append $segmentNames $segment.name -}} -{{- end -}} - -segments: -{{- $segmentNames | sortAlpha | toYaml | nindent 2 -}} -{{- end -}} diff --git a/kubernetes/loculus/templates/_possiblePriorityClassName.tpl b/kubernetes/loculus/templates/_possiblePriorityClassName.tpl deleted file mode 100644 index 3b8b29e30d..0000000000 --- a/kubernetes/loculus/templates/_possiblePriorityClassName.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{- define "possiblePriorityClassName" -}} -{{- if .Values.podPriorityClassName }} -priorityClassName: {{ .Values.podPriorityClassName }} -{{- end -}} -{{- end -}} diff --git a/kubernetes/loculus/templates/_preprocessingFromValues.tpl b/kubernetes/loculus/templates/_preprocessingFromValues.tpl deleted file mode 100644 index 62ae8a56a2..0000000000 --- a/kubernetes/loculus/templates/_preprocessingFromValues.tpl +++ /dev/null @@ -1,78 +0,0 @@ -{{- define "loculus.sharedPreproSpecs" }} -{{ .key }}: {{/* 'key' is either just 'name' of a metadata field, or 'name_segmentName' for segmented fields.*/}} - {{- if .preprocessing }} - {{- if hasKey .preprocessing "function" }} - function: {{ index .preprocessing "function" }} - {{- else }} - function: identity - {{- end }} - {{- if hasKey .preprocessing "inputs" }} - inputs: - {{- with index .preprocessing "inputs" }} - {{- . | toYaml | nindent 4 }} - {{- end }} - {{- end }} - args: - {{- if .segment }} - segment: {{ .segment }} - {{- end }} - {{- if .type }} - type: {{ .type }} - {{- end }} - {{- if .options }} - {{- $names := list }} - {{- range .options }} - {{- $names = append $names .name }} - {{- end }} - options: {{ toYaml $names | nindent 4 }} - {{- end }} - {{- with (get .preprocessing "args") }} - {{ toYaml . 
| nindent 4 }} - {{- end }} - {{- else }} - function: identity - inputs: - {{- if .segment }} - input: {{ printf "%s_%s" .name .segment }} - {{- else }} - input: {{ .name }} - {{- end }} - args: - {{- if .segment }} - segment: {{ .segment }} - {{- end }} - {{- if .type }} - type: {{ .type }} - {{- end }} - {{- end }} - {{- if .required}} - required: true - {{- end }} -{{- end }} - -{{/* Expects an object { metadata: [...], referenceGenomes: {...} } - .metadata is an array of metadata fields. Each has name, type, displayName, header, required etc. - .referenceGenomes is a map of reference genome definitions directly taken from the instance config of an organism. -*/}} -{{- define "loculus.preprocessingSpecs" -}} -{{- $metadata := .metadata }} -{{- $referenceGenomes := .referenceGenomes}} - -{{- $rawUniqueSegments := (include "loculus.getNucleotideSegmentNames" $referenceGenomes | fromYaml).segments }} -{{- $isSegmented := gt (len $rawUniqueSegments) 1 }} - -{{- range $metadata }} - {{- $currentItem := . }} - {{- if and $isSegmented .perSegment }} - {{- range $segment := $rawUniqueSegments }} - {{- with $currentItem }} - {{- $args := deepCopy . | merge (dict "segment" $segment "key" (printf "%s_%s" .name $segment)) }} - {{- include "loculus.sharedPreproSpecs" $args }} - {{- end }} - {{- end }} - {{- else }} - {{- $args := deepCopy . | merge (dict "segment" "" "key" .name) }} - {{- include "loculus.sharedPreproSpecs" $args }} - {{- end }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/kubernetes/loculus/templates/_resources.tpl b/kubernetes/loculus/templates/_resources.tpl deleted file mode 100644 index ecb01b5353..0000000000 --- a/kubernetes/loculus/templates/_resources.tpl +++ /dev/null @@ -1,23 +0,0 @@ -{{- define "loculus.resources" -}} -{{- $args := . 
-}} -{{- $containerName := index $args 0 -}} -{{- $values := index $args 1 -}} -{{- $organism := "" -}} -{{- if gt (len $args) 2 -}} - {{- $organism = index $args 2 -}} -{{- end -}} - -{{- if and $organism - $values.resources.organismSpecific - (index $values.resources.organismSpecific $organism) - (index (index $values.resources.organismSpecific $organism) $containerName) }} -resources: -{{ toYaml (index (index $values.resources.organismSpecific $organism) $containerName) | indent 2 }} -{{- else if and $values.resources (index $values.resources $containerName) }} -resources: -{{ toYaml (index $values.resources $containerName) | indent 2 }} -{{- else if $values.defaultResources }} -resources: -{{ toYaml $values.defaultResources | indent 2 }} -{{- end }} -{{- end }} diff --git a/kubernetes/loculus/templates/_service-type.tpl b/kubernetes/loculus/templates/_service-type.tpl deleted file mode 100644 index cd56950d6a..0000000000 --- a/kubernetes/loculus/templates/_service-type.tpl +++ /dev/null @@ -1,9 +0,0 @@ -{{/* on cd we expose the services via ingress */}} -{{- define "loculus.serviceType"}} - - {{- if eq $.Values.environment "server" }} - type: ClusterIP - {{- else }} - type: NodePort - {{- end }} -{{- end }} diff --git a/kubernetes/loculus/templates/_siloDatabaseConfig.tpl b/kubernetes/loculus/templates/_siloDatabaseConfig.tpl deleted file mode 100644 index ff8fbf6889..0000000000 --- a/kubernetes/loculus/templates/_siloDatabaseConfig.tpl +++ /dev/null @@ -1,45 +0,0 @@ -{{- define "loculus.siloDatabaseShared" -}} -{{- $type := default "string" .type -}} -- type: {{ ($type | eq "timestamp") | ternary "int" (($type | eq "authors") | ternary "string" $type) }} - {{- if .generateIndex }} - generateIndex: {{ .generateIndex }} - {{- end }} - {{- if .lineageSystem }} - generateIndex: true - generateLineageIndex: lineage_definitions {{- /* must match the file name in the lineageDefinitionFilenames */}} - {{- end }} -{{- end }} - - -{{- define "loculus.siloDatabaseConfig" 
}} -{{- $schema := .schema }} -{{- $rawUniqueSegments := (include "loculus.getNucleotideSegmentNames" .referenceGenomes | fromYaml).segments }} -{{- $isSegmented := gt (len $rawUniqueSegments) 1 }} -schema: - instanceName: {{ $schema.organismName }} - opennessLevel: OPEN - metadata: - {{- range (concat .commonMetadata $schema.metadata) }} - {{- $currentItem := . }} - {{- if and $isSegmented .perSegment }} - {{- range $segment := $rawUniqueSegments }} - {{- with $currentItem }} - {{- include "loculus.siloDatabaseShared" . | nindent 4 }} - name: {{ printf "%s_%s" .name $segment | quote}} - {{- end }} - {{- end }} - {{- else }} - {{- include "loculus.siloDatabaseShared" . | nindent 4 }} - name: {{ .name }} - {{- end }} - {{- end }} - {{- if $schema.files }} - {{- range $schema.files }} - - type: string - name: {{ .name }} - {{- end }} - {{- end }} - primaryKey: accessionVersion - features: - - name: generalizedAdvancedQuery -{{- end }} diff --git a/kubernetes/loculus/templates/_submission-data-types.tpl b/kubernetes/loculus/templates/_submission-data-types.tpl deleted file mode 100644 index a2e9592e49..0000000000 --- a/kubernetes/loculus/templates/_submission-data-types.tpl +++ /dev/null @@ -1,16 +0,0 @@ -{{- define "loculus.submissionDataTypes" -}} -submissionDataTypes: - {{- if (hasKey . "submissionDataTypes") }} - {{- with .submissionDataTypes }} - consensusSequences: {{ (hasKey . "consensusSequences") | ternary .consensusSequences "true" }} - {{- if (hasKey . "maxSequencesPerEntry") }} - maxSequencesPerEntry: {{ .maxSequencesPerEntry }} - {{- end }} - {{- if (hasKey . 
"files") }} - files: {{ .files | toJson }} - {{- end }} - {{- end }} - {{- else }} - consensusSequences: true - {{- end }} -{{- end }} diff --git a/kubernetes/loculus/templates/_urls.tpl b/kubernetes/loculus/templates/_urls.tpl deleted file mode 100644 index e1b609cfdd..0000000000 --- a/kubernetes/loculus/templates/_urls.tpl +++ /dev/null @@ -1,75 +0,0 @@ -{{- define "loculus.backendUrl" -}} -{{- $publicRuntimeConfig := $.Values.public }} - {{- if $publicRuntimeConfig.backendUrl }} - {{- $publicRuntimeConfig.backendUrl -}} - {{- else if eq $.Values.environment "server" -}} - {{- (printf "https://backend%s%s" $.Values.subdomainSeparator $.Values.host) -}} - {{- else -}} - {{- printf "http://%s:8079" $.Values.localHost -}} - {{- end -}} -{{- end -}} - -{{- define "loculus.websiteUrl" -}} -{{- $publicRuntimeConfig := $.Values.public }} - {{- if $publicRuntimeConfig.websiteUrl }} - {{- $publicRuntimeConfig.websiteUrl -}} - {{- else if eq $.Values.environment "server" -}} - {{- (printf "https://%s" $.Values.host) -}} - {{- else -}} - {{- printf "http://%s:3000" $.Values.localHost -}} - {{- end -}} -{{- end -}} - -{{- define "loculus.s3Url" -}} - {{- if $.Values.runDevelopmentS3 }} - {{- if eq $.Values.environment "server" -}} - {{- (printf "https://s3%s%s" $.Values.subdomainSeparator $.Values.host) -}} - {{- else -}} - {{- printf "http://%s:8084" $.Values.localHost -}} - {{- end -}} - {{- else -}} - {{- $.Values.s3.bucket.endpoint }} - {{- end -}} -{{- end -}} - -{{- define "loculus.s3UrlInternal" -}} - {{- if $.Values.runDevelopmentS3 }} - {{- "http://loculus-minio-service:8084" -}} - {{- else -}} - {{- $.Values.s3.bucket.endpoint }} - {{- end -}} -{{- end -}} - -{{- define "loculus.keycloakUrl" -}} -{{- $publicRuntimeConfig := $.Values.public }} - {{- if $publicRuntimeConfig.keycloakUrl }} - {{- $publicRuntimeConfig.keycloakUrl -}} - {{- else if eq $.Values.environment "server" -}} - {{- (printf "https://authentication%s%s" $.Values.subdomainSeparator $.Values.host) 
-}} - {{- else -}} - {{- printf "http://%s:8083" $.Values.localHost -}} - {{- end -}} -{{- end -}} - -{{/* generates internal LAPIS urls from given config object */}} -{{ define "loculus.generateInternalLapisUrls" }} - {{ range $_, $item := (include "loculus.enabledOrganisms" . | fromJson).organisms }} -{{- $key := $item.key }} - "{{ $key }}": "{{ if not $.Values.disableWebsite }}http://{{ template "loculus.lapisServiceName" $key }}:8080{{ else -}}http://{{ $.Values.localHost }}:8080/{{ $key }}{{ end }}" - {{ end }} -{{ end }} - -{{/* generates external LAPIS urls from { config, host } */}} -{{ define "loculus.generateExternalLapisUrls"}} -{{ $lapisUrlTemplate := .lapisUrlTemplate }} -{{ range $key, $organism := (.config.organisms | default .config.defaultOrganisms) }} -{{- if ne $organism.enabled false }} -"{{ $key -}}": "{{ $lapisUrlTemplate | replace "%organism%" $key }}" -{{- end }} -{{ end }} -{{ end }} - -{{/* generates the LAPIS service name for a given organism key */}} -{{- define "loculus.lapisServiceName"}} -{{- printf "loculus-lapis-service-%s" . 
}} -{{- end }} diff --git a/kubernetes/loculus/templates/docs-preview.yaml b/kubernetes/loculus/templates/docs-preview.yaml deleted file mode 100644 index d111aac825..0000000000 --- a/kubernetes/loculus/templates/docs-preview.yaml +++ /dev/null @@ -1,68 +0,0 @@ -{{- $dockerTag := include "loculus.dockerTag" .Values }} -{{- $docsHost := printf "docs%s%s" .Values.subdomainSeparator .Values.host }} -{{- if .Values.previewDocs }} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: loculus-docs - annotations: - argocd.argoproj.io/sync-options: Replace=true -spec: - replicas: 1 - selector: - matchLabels: - app: loculus - component: docs - template: - metadata: - annotations: - timestamp: {{ now | quote }} - labels: - app: loculus - component: docs - spec: - containers: - - name: docs - image: "ghcr.io/loculus-project/docs:{{ $dockerTag }}" - imagePullPolicy: {{ .Values.imagePullPolicy }} - {{- include "loculus.resources" (list "docs" .Values) | nindent 10 }} - ports: - - containerPort: 8080 - ---- -apiVersion: v1 -kind: Service -metadata: - name: loculus-docs -spec: - selector: - app: loculus - component: docs - ports: - - protocol: TCP - port: 80 - targetPort: 8080 - ---- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: loculus-docs-ingress -spec: - rules: - - host: "{{ $docsHost }}" - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: loculus-docs - port: - number: 80 - tls: - - hosts: - - "{{ $docsHost }}" ---- -{{- end }} \ No newline at end of file diff --git a/kubernetes/loculus/templates/ena-submission-config.yaml b/kubernetes/loculus/templates/ena-submission-config.yaml deleted file mode 100644 index c1fe71a408..0000000000 --- a/kubernetes/loculus/templates/ena-submission-config.yaml +++ /dev/null @@ -1,29 +0,0 @@ -{{- if not .Values.disableEnaSubmission }} -{{- $testconfig := .Values.testconfig | default false }} -{{- $enaDepositionHost := $testconfig | ternary "127.0.0.1" "0.0.0.0" }} -{{- $backendHost := 
.Values.environment | eq "server" | ternary (printf "https://backend%s%s" .Values.subdomainSeparator $.Values.host) ($testconfig | ternary (printf "http://%s:8079" $.Values.localHost) "http://loculus-backend-service:8079") }} -{{- $keycloakHost := $testconfig | ternary (printf "http://%s:8083" $.Values.localHost) "http://loculus-keycloak-service:8083" }} -{{- $submitToEnaProduction := .Values.enaDeposition.submitToEnaProduction | default false }} -{{- $enaDbName := .Values.enaDeposition.enaDbName | default false }} -{{- $enaUniqueSuffix := .Values.enaDeposition.enaUniqueSuffix | default false }} -{{- $enaIsBroker := .Values.enaDeposition.enaIsBroker | default false }} -{{- $enaApprovedListTestUrl := .Values.enaDeposition.enaApprovedListTestUrl | default "" }} -{{- $enaSuppressedListTestUrl := .Values.enaDeposition.enaSuppressedListTestUrl | default "" }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: loculus-ena-submission-config -data: - config.yaml: | - submit_to_ena_prod: {{ $submitToEnaProduction }} - db_name: {{ $enaDbName }} - is_broker: {{ $enaIsBroker }} - unique_project_suffix: {{ $enaUniqueSuffix }} - backend_url: {{ $backendHost }} - ena_deposition_host: {{ $enaDepositionHost }} - keycloak_token_url: {{ $keycloakHost -}}/realms/loculus/protocol/openid-connect/token - approved_list_test_url: {{ $enaApprovedListTestUrl }} - suppressed_list_test_url: {{ $enaSuppressedListTestUrl }} - {{- include "loculus.generateENASubmissionConfig" . 
| nindent 4 }} -{{- end }} diff --git a/kubernetes/loculus/templates/ena-submission-deployment.yaml b/kubernetes/loculus/templates/ena-submission-deployment.yaml deleted file mode 100644 index 1ad1c4d2fb..0000000000 --- a/kubernetes/loculus/templates/ena-submission-deployment.yaml +++ /dev/null @@ -1,190 +0,0 @@ -{{- $dockerTag := include "loculus.dockerTag" .Values }} -{{- if not .Values.disableEnaSubmission }} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: loculus-ena-submission - annotations: - argocd.argoproj.io/sync-options: Replace=true -spec: - replicas: 1 - selector: - matchLabels: - app: loculus - component: loculus-ena-submission - template: - metadata: - annotations: - timestamp: {{ now | quote }} - labels: - app: loculus - component: loculus-ena-submission - spec: - {{- include "possiblePriorityClassName" . | nindent 6 }} - initContainers: - - name: ena-submission-flyway - image: "ghcr.io/loculus-project/ena-submission-flyway:{{ $dockerTag }}" - resources: - requests: - cpu: 100m - memory: 128Mi - limits: - cpu: 500m - memory: 256Mi - command: ['flyway', 'migrate'] - env: - - name: FLYWAY_URL - valueFrom: - secretKeyRef: - name: database - key: url - - name: FLYWAY_USER - valueFrom: - secretKeyRef: - name: database - key: username - - name: FLYWAY_PASSWORD - valueFrom: - secretKeyRef: - name: database - key: password - containers: - - name: ena-submission - image: "ghcr.io/loculus-project/ena-submission:{{ $dockerTag }}" - imagePullPolicy: {{ $.Values.imagePullPolicy }} - {{- include "loculus.resources" (list "ena-submission" $.Values) | nindent 10 }} - env: - - name: EXTERNAL_METADATA_UPDATER_PASSWORD - valueFrom: - secretKeyRef: - name: service-accounts - key: externalMetadataUpdaterPassword - - name: DB_URL - valueFrom: - secretKeyRef: - name: database - key: url - - name: DB_USERNAME - valueFrom: - secretKeyRef: - name: database - key: username - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - name: database - key: password - - 
name: SLACK_HOOK - valueFrom: - secretKeyRef: - name: slack-notifications - key: slack-hook - - name: SLACK_TOKEN - valueFrom: - secretKeyRef: - name: slack-notifications - key: slack-token - - name: SLACK_CHANNEL_ID - valueFrom: - secretKeyRef: - name: slack-notifications - key: slack-channel-id - - name: ENA_USERNAME - valueFrom: - secretKeyRef: - name: ena-submission - key: username - - name: ENA_PASSWORD - valueFrom: - secretKeyRef: - name: ena-submission - key: password - args: - - ena_deposition - - "--config-file=/config/config.yaml" - volumeMounts: - - name: loculus-ena-submission-config-volume - mountPath: /config/config.yaml - subPath: config.yaml - volumes: - - name: loculus-ena-submission-config-volume - configMap: - name: loculus-ena-submission-config ---- -apiVersion: batch/v1 -kind: CronJob -metadata: - name: loculus-get-ena-submission-list-cronjob -spec: - # run twice a day to ensure at least once no overlap with argo cd refresh - schedule: "0 1,13 * * *" - startingDeadlineSeconds: 60 - concurrencyPolicy: Forbid - jobTemplate: - spec: - activeDeadlineSeconds: {{ $.Values.getSubmissionListLimitSeconds }} - template: - metadata: - labels: - app: loculus - component: loculus-get-ena-submission-list-cronjob - annotations: - argocd.argoproj.io/sync-options: Replace=true - spec: - {{- include "possiblePriorityClassName" . 
| nindent 10 }} - restartPolicy: Never - containers: - - name: ena-submission - image: "ghcr.io/loculus-project/ena-submission:{{ $dockerTag }}" - imagePullPolicy: {{ $.Values.imagePullPolicy }} - {{- include "loculus.resources" (list "ena-submission-list-cronjob" $.Values) | nindent 14 }} - env: - - name: EXTERNAL_METADATA_UPDATER_PASSWORD - valueFrom: - secretKeyRef: - name: service-accounts - key: externalMetadataUpdaterPassword - - name: DB_URL - valueFrom: - secretKeyRef: - name: database - key: url - - name: DB_USERNAME - valueFrom: - secretKeyRef: - name: database - key: username - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - name: database - key: password - - name: SLACK_HOOK - valueFrom: - secretKeyRef: - name: slack-notifications - key: slack-hook - - name: SLACK_TOKEN - valueFrom: - secretKeyRef: - name: slack-notifications - key: slack-token - - name: SLACK_CHANNEL_ID - valueFrom: - secretKeyRef: - name: slack-notifications - key: slack-channel-id - args: - - python - - "scripts/get_ena_submission_list.py" - - "--config-file=/config/config.yaml" - volumeMounts: - - name: loculus-ena-submission-config-volume - mountPath: /config/config.yaml - subPath: config.yaml - volumes: - - name: loculus-ena-submission-config-volume - configMap: - name: loculus-ena-submission-config -{{- end }} diff --git a/kubernetes/loculus/templates/ena-submission-service.yaml b/kubernetes/loculus/templates/ena-submission-service.yaml deleted file mode 100644 index 2811911aee..0000000000 --- a/kubernetes/loculus/templates/ena-submission-service.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if not .Values.disableEnaSubmission }} -apiVersion: v1 -kind: Service -metadata: - name: loculus-ena-submission-service -spec: - {{- template "loculus.serviceType" . 
}} - selector: - app: loculus - component: loculus-ena-submission - ports: - - port: 5000 - targetPort: 5000 - {{- if ne $.Values.environment "server" }} - nodePort: 30050 - {{- end }} - protocol: TCP - name: http -{{- end }} \ No newline at end of file diff --git a/kubernetes/loculus/templates/ingest-config.yaml b/kubernetes/loculus/templates/ingest-config.yaml deleted file mode 100644 index e8ff5dd135..0000000000 --- a/kubernetes/loculus/templates/ingest-config.yaml +++ /dev/null @@ -1,43 +0,0 @@ -{{- $dockerTag := include "loculus.dockerTag" .Values }} -{{- $testconfig := .Values.testconfig | default false }} -{{- $backendHost := .Values.environment | eq "server" | ternary (printf "https://backend%s%s" .Values.subdomainSeparator $.Values.host) ($testconfig | ternary (printf "http://%s:8079" $.Values.localHost) "http://loculus-backend-service:8079") }} -{{- $enaDepositionHost := $testconfig | ternary (printf "http://%s:5000" $.Values.localHost) "http://loculus-ena-submission-service:5000" }} -{{- $keycloakHost := $testconfig | ternary (printf "http://%s:8083" $.Values.localHost) "http://loculus-keycloak-service:8083" }} -{{- range $_, $item := (include "loculus.enabledOrganisms" . 
| fromJson).organisms }} -{{- $key := $item.key }} -{{- $values := $item.contents }} -{{- if $values.ingest }} -{{- $metadata := (include "loculus.patchMetadataSchema" $values.schema | fromYaml).metadata }} -{{- $rawUniqueSegments := (include "loculus.getNucleotideSegmentNames" $values.referenceGenomes | fromYaml).segments }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: loculus-ingest-config-{{ $key }} -data: - config.yaml: | - {{- $values.ingest.configFile | toYaml | nindent 4 }} - nucleotide_sequences: {{- $rawUniqueSegments | toYaml | nindent 4 }} - verify_loculus_version_is: {{$dockerTag}} - check_ena_deposition: {{ not $.Values.disableEnaSubmission }} - {{- if not $.Values.disableEnaSubmission }} - ena_deposition_url: {{ $enaDepositionHost }} - {{- end }} - organism: {{ $key }} - backend_url: {{ $backendHost }} - keycloak_token_url: {{ $keycloakHost -}}/realms/loculus/protocol/openid-connect/token - {{- if $.Values.ingest.ncbiGatewayUrl }} - ncbi_gateway_url: {{ $.Values.ingest.ncbiGatewayUrl }} - {{- end }} - {{- if $.Values.ingest.mirrorBucket }} - mirror_bucket: {{ $.Values.ingest.mirrorBucket }} - {{- end }} - {{- include "loculus.ingestRename" $metadata | nindent 4 }} - insdc_segment_specific_fields: - {{- range $metadata }} - {{- if and (eq .header "INSDC") .perSegment }} - - {{ .name }} - {{- end -}} - {{- end }} -{{- end }} -{{- end }} diff --git a/kubernetes/loculus/templates/ingest-deployment.yaml b/kubernetes/loculus/templates/ingest-deployment.yaml deleted file mode 100644 index e7da99c71c..0000000000 --- a/kubernetes/loculus/templates/ingest-deployment.yaml +++ /dev/null @@ -1,147 +0,0 @@ -{{- $dockerTag := include "loculus.dockerTag" .Values }} -{{- if not .Values.disableIngest }} -{{- range $_, $item := (include "loculus.enabledOrganisms" . 
| fromJson).organisms }} -{{- $key := $item.key }} -{{- $organismContent := $item.contents }} -{{- if $organismContent.ingest }} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: loculus-ingest-deployment-{{ $key }} - annotations: - argocd.argoproj.io/sync-options: Force=true,Replace=true -spec: - replicas: 1 - strategy: - type: Recreate - selector: - matchLabels: - app: loculus - component: loculus-ingest-deployment-{{ $key }} - template: - metadata: - annotations: - timestamp: {{ now | quote }} - labels: - app: loculus - component: loculus-ingest-deployment-{{ $key }} - spec: - {{- include "possiblePriorityClassName" $ | nindent 6 }} - initContainers: - - name: version-check - image: busybox - {{- include "loculus.resources" (list "ingest-init" $.Values) | nindent 10 }} - command: ['sh', '-c', ' - CONFIG_VERSION=$(grep "verify_loculus_version_is:" /package/config/config.yaml | sed "s/verify_loculus_version_is: //;"); - DOCKER_TAG="{{ $dockerTag }}"; - echo "Config version: $CONFIG_VERSION"; - echo "Docker tag: $DOCKER_TAG"; - if [ "$CONFIG_VERSION" != "$DOCKER_TAG" ]; then - echo "Version mismatch: ConfigMap version $CONFIG_VERSION does not match docker tag $DOCKER_TAG"; - exit 1; - else - echo "Version match confirmed"; - fi - '] - volumeMounts: - - name: loculus-ingest-config-volume-{{ $key }} - mountPath: /package/config/config.yaml - subPath: config.yaml - containers: - - name: ingest-{{ $key }} - image: {{ $organismContent.ingest.image}}:{{ $dockerTag }} - imagePullPolicy: {{ $.Values.imagePullPolicy }} - {{- include "loculus.resources" (list "ingest" $.Values) | nindent 10 }} - env: - - name: KEYCLOAK_INGEST_PASSWORD - valueFrom: - secretKeyRef: - name: service-accounts - key: insdcIngestUserPassword - - name: SLACK_HOOK - valueFrom: - secretKeyRef: - name: slack-notifications - key: slack-hook - - name: NCBI_API_KEY - valueFrom: - secretKeyRef: - name: ingest-ncbi - key: api-key - args: - - snakemake - - results/submitted - - results/revised - 
- results/approved - - --all-temp # Reduce disk usage by not keeping files around - {{- if $organismContent.ingest.configFile }} - volumeMounts: - - name: loculus-ingest-config-volume-{{ $key }} - mountPath: /package/config/config.yaml - subPath: config.yaml - volumes: - - name: loculus-ingest-config-volume-{{ $key }} - configMap: - name: loculus-ingest-config-{{ $key }} - {{- end }} ---- -apiVersion: batch/v1 -kind: CronJob -metadata: - name: loculus-revoke-and-regroup-cronjob-{{ $key }} - annotations: - argocd.argoproj.io/sync-options: Replace=true -spec: - schedule: "0 0 31 2 *" # Never runs without manual trigger - suspend: true - startingDeadlineSeconds: 60 - concurrencyPolicy: Forbid - jobTemplate: - spec: - activeDeadlineSeconds: {{ $.Values.ingestLimitSeconds }} - template: - metadata: - labels: - app: loculus - component: loculus-ingest-cronjob-{{ $key }} - spec: - {{- include "possiblePriorityClassName" $ | nindent 10 }} - restartPolicy: Never - containers: - - name: ingest-{{ $key }} - image: {{ $organismContent.ingest.image}}:{{ $dockerTag }} - imagePullPolicy: {{ $.Values.imagePullPolicy }} - resources: - requests: - memory: "1Gi" - cpu: "200m" - limits: - cpu: "200m" - memory: "10Gi" - env: - - name: KEYCLOAK_INGEST_PASSWORD - valueFrom: - secretKeyRef: - name: service-accounts - key: insdcIngestUserPassword - args: - - snakemake - - results/submitted - - results/revised - - results/revoked - - results/approved - - --all-temp # Reduce disk usage by not keeping files around - {{- if $organismContent.ingest.configFile }} - volumeMounts: - - name: loculus-ingest-config-volume-{{ $key }} - mountPath: /package/config/config.yaml - subPath: config.yaml - volumes: - - name: loculus-ingest-config-volume-{{ $key }} - configMap: - name: loculus-ingest-config-{{ $key }} - {{- end }} -{{- end }} -{{- end }} -{{- end }} diff --git a/kubernetes/loculus/templates/ingressroute.yaml b/kubernetes/loculus/templates/ingressroute.yaml deleted file mode 100644 index 
334b7e205c..0000000000 --- a/kubernetes/loculus/templates/ingressroute.yaml +++ /dev/null @@ -1,170 +0,0 @@ -apiVersion: traefik.containo.us/v1alpha1 -kind: Middleware -metadata: - name: compression-middleware -spec: - compress: {} ---- -apiVersion: traefik.containo.us/v1alpha1 -kind: Middleware -metadata: - name: redirect-middleware -spec: - redirectScheme: - scheme: https - permanent: true -{{ if $.Values.secrets.basicauth }} ---- -apiVersion: traefik.containo.us/v1alpha1 -kind: Middleware -metadata: - name: basic-auth -spec: - basicAuth: - secret: basicauth -{{ end }} -{{ if $.Values.robotsNoindexHeader }} ---- -apiVersion: traefik.containo.us/v1alpha1 -kind: Middleware -metadata: - name: noindex-robots-header -spec: - headers: - customResponseHeaders: - X-Robots-Tag: "noindex, nofollow" -{{ end }} ---- -apiVersion: traefik.containo.us/v1alpha1 -kind: Middleware -metadata: - name: redirect-www-middleware -spec: - redirectRegex: - regex: "^https://www\\.(.*)" - replacement: "https://$1" - permanent: true ---- -{{- if eq $.Values.environment "server" }} -{{- $backendHost := printf "backend%s%s" .Values.subdomainSeparator .Values.host }} -{{- $keycloakHost := (printf "authentication%s%s" $.Values.subdomainSeparator $.Values.host) }} -{{- $minioHost := (printf "s3%s%s" $.Values.subdomainSeparator $.Values.host) }} -{{- $middlewareList := list (printf "%s-compression-middleware@kubernetescrd" $.Release.Namespace) }} -{{- if $.Values.enforceHTTPS }} -{{- $middlewareList = append $middlewareList (printf "%s-redirect-middleware@kubernetescrd" $.Release.Namespace) }} -{{- end }} -{{ if $.Values.robotsNoindexHeader }} -{{ $middlewareList = append $middlewareList (printf "%s-noindex-robots-header@kubernetescrd" $.Release.Namespace) }} -{{ end }} - -{{ $middlewareListForWebsite := $middlewareList }} -{{ $middlewareListForKeycloak := $middlewareList }} - -{{ if $.Values.secrets.basicauth }} -{{ $middlewareListForWebsite = append $middlewareListForWebsite (printf 
"%s-basic-auth@kubernetescrd" $.Release.Namespace) }} -{{ $middlewareListForKeycloak = append $middlewareListForKeycloak (printf "%s-basic-auth@kubernetescrd" $.Release.Namespace) }} -{{ end }} - -{{ $middlewareListForWebsite = append $middlewareListForWebsite (printf "%s-redirect-www-middleware@kubernetescrd" $.Release.Namespace) }} - -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: loculus-website-ingress - annotations: - traefik.ingress.kubernetes.io/router.middlewares: "{{ join "," $middlewareListForWebsite }}" -spec: - rules: - - host: "{{ .Values.host }}" - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: loculus-website-service - port: - number: 3000 - - host: "www.{{ .Values.host }}" - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: loculus-website-service - port: - number: 3000 - tls: - - hosts: - - "{{ .Values.host }}" - - "www.{{ .Values.host }}" ---- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: loculus-backend-ingress - annotations: - traefik.ingress.kubernetes.io/router.middlewares: "{{ join "," $middlewareList }}" -spec: - rules: - - host: "{{ $backendHost }}" - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: loculus-backend-service - port: - number: 8079 - tls: - - hosts: - - "{{ $backendHost }}" ---- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: loculus-keycloak-ingress - annotations: - traefik.ingress.kubernetes.io/router.middlewares: "{{ join "," $middlewareListForKeycloak }}" -spec: - rules: - - host: "{{ $keycloakHost }}" - http: - paths: - - path: /{+} - pathType: Prefix - backend: - service: - name: loculus-keycloak-service - port: - number: 8083 - tls: - - hosts: - - "{{ $keycloakHost }}" ---- -{{- if and .Values.s3.enabled .Values.runDevelopmentS3 }} -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: minio-ingress - annotations: - traefik.ingress.kubernetes.io/router.middlewares: 
"{{ join "," $middlewareList }}" -spec: - rules: - - host: "{{ $minioHost }}" - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: loculus-minio-service - port: - number: 8084 - tls: - - hosts: - - "{{ $minioHost }}" -{{- end }} -{{- end }} diff --git a/kubernetes/loculus/templates/keycloak-config-map.yaml b/kubernetes/loculus/templates/keycloak-config-map.yaml deleted file mode 100644 index fc7c6bf14b..0000000000 --- a/kubernetes/loculus/templates/keycloak-config-map.yaml +++ /dev/null @@ -1,377 +0,0 @@ -{{- $keycloakHost := (printf "authentication%s%s" $.Values.subdomainSeparator $.Values.host) }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: keycloak-config -data: - keycloak-config.json: | - { - "realm": "loculus", - "enabled": true, - "verifyEmail": {{$.Values.auth.verifyEmail}}, - "resetPasswordAllowed": {{$.Values.auth.resetPasswordAllowed}}, - {{- if $.Values.auth.verifyEmail }} - "smtpServer": { - "host": "{{$.Values.auth.smtp.host}}", - "port": "{{$.Values.auth.smtp.port}}", - "from": "{{$.Values.auth.smtp.from}}", - "fromDisplayName": "{{$.Values.name}}", - "replyTo": "{{$.Values.auth.smtp.replyTo}}", - "replyToDisplayName": "{{$.Values.name}}", - "envelopeFrom": "{{$.Values.auth.smtp.envelopeFrom}}", - "ssl": "false", - "starttls": "true", - "auth": "true", - "user": "{{$.Values.auth.smtp.user}}", - "password": "[[smtpPassword]]" - }, - {{- end }} - "registrationAllowed": {{ $.Values.auth.registrationAllowed }}, - "accessTokenLifespan": 36000, - "ssoSessionIdleTimeout": 36000, - "actionTokenGeneratedByUserLifespan": 1800, - "users": [ - {{ if $.Values.createTestAccounts }} - {{- $browsers := list "firefox" "webkit" "chromium"}} - {{- range $_, $browser := $browsers }} - {{- range $index, $_ := until 20}} - { - "username": "testuser_{{$index}}_{{$browser}}", - "enabled": true, - "email": "testuser_{{$index}}_{{$browser}}@void.o", - "emailVerified": true, - "firstName": "{{$index}}_{{$browser}}", - "lastName": 
"TestUser", - "credentials": [ - { - "type": "password", - "value": "testuser_{{$index}}_{{$browser}}" - } - ], - "realmRoles": [ - "user", - "offline_access" - ], - "attributes": { - "university": "University of Test" - }, - "clientRoles": { - "account": [ - "manage-account" - ] - } - }, - {{ end }} - {{ end }} - { - "username": "testuser", - "enabled": true, - "email": "testuser@void.o", - "emailVerified" : true, - "firstName": "Test", - "lastName": "User", - "credentials": [ - { - "type": "password", - "value": "testuser" - } - ], - "realmRoles": [ - "user", - "offline_access" - ], - "attributes": { - "university": "University of Test" - }, - "clientRoles": { - "account": [ - "manage-account" - ] - } - }, - { - "username": "superuser", - "enabled": true, - "email": "superuser@void.o", - "emailVerified" : true, - "firstName": "Dummy", - "lastName": "SuperUser", - "credentials": [ - { - "type": "password", - "value": "superuser" - } - ], - "realmRoles": [ - "super_user", - "offline_access" - ], - "attributes": { - "university": "University of Test" - }, - "clientRoles": { - "account": [ - "manage-account" - ] - } - }, - {{ end }} - { - "username": "insdc_ingest_user", - "enabled": true, - "email": "insdc_ingest_user@void.o", - "emailVerified" : true, - "firstName": "INSDC Ingest", - "lastName": "User", - "credentials": [ - { - "type": "password", - "value": "[[insdcIngestUserPassword]]" - } - ], - "realmRoles": [ - "user", - "offline_access" - ], - "attributes": { - "university": "University of Test" - }, - "clientRoles": { - "account": [ - "manage-account" - ] - } - }, - { - "username": "preprocessing_pipeline", - "enabled": true, - "email": "preprocessing_pipeline@void.o", - "emailVerified" : true, - "firstName": "Dummy", - "lastName": "Preprocessing", - "credentials": [ - { - "type": "password", - "value": "[[preprocessingPipelinePassword]]" - } - ], - "realmRoles": [ - "preprocessing_pipeline", - "offline_access" - ], - "attributes": { - "university": 
"University of Test" - }, - "clientRoles": { - "account": [ - "manage-account" - ] - } - }, - { - "username": "external_metadata_updater", - "enabled": true, - "email": "external_metadata_updater@void.o", - "emailVerified" : true, - "firstName": "Dummy", - "lastName": "INSDC", - "credentials": [ - { - "type": "password", - "value": "[[externalMetadataUpdaterPassword]]" - } - ], - "realmRoles": [ - "external_metadata_updater", - "get_released_data", - "offline_access" - ], - "attributes": { - "university": "University of Test" - }, - "clientRoles": { - "account": [ - "manage-account" - ] - } - }, - { - "username": "backend", - "enabled": true, - "email": "nothing@void.o", - "emailVerified": true, - "firstName": "Backend", - "lastName": "Technical-User", - "attributes": { - "university": "University of Test" - }, - "credentials": [ - { - "type": "password", - "value": "[[backendUserPassword]]" - } - ], - "clientRoles": { - "realm-management": [ - "view-users" - ], - "account": [ - "manage-account" - ] - } - } - ], - "roles": { - "realm": [ - { - "name": "user", - "description": "User privileges" - }, - { - "name": "admin", - "description": "Administrator privileges" - }, - { - "name": "preprocessing_pipeline", - "description": "Preprocessing pipeline privileges" - }, - { - "name": "external_metadata_updater", - "description": "External Submitter privileges" - }, - { - "name": "get_released_data", - "description": "Privileges for getting released data" - }, - { - "name": "super_user", - "description": "Privileges for curators to modify sequence entries of any user" - } - ] - }, - "clients": [ - { - "clientId": "backend-client", - "enabled": true, - "publicClient": true, - "directAccessGrantsEnabled": true, - "redirectUris": [ - "https://{{$.Values.host}}/*", - "http://{{$.Values.host}}/*", - "http://localhost:3000/*" - ] - }, - { - "clientId" : "account-console2", - "name" : "${client_account-console}", - "description" : "", - "rootUrl" : "${authBaseUrl}", - 
"adminUrl" : "", - "baseUrl" : "/realms/loculus/account/", - "surrogateAuthRequired" : false, - "enabled" : true, - "alwaysDisplayInConsole" : false, - "clientAuthenticatorType" : "client-secret", - "redirectUris" : [ "/realms/loculus/account/*" ], - "webOrigins" : [ "+" ], - "notBefore" : 0, - "bearerOnly" : false, - "consentRequired" : false, - "standardFlowEnabled" : true, - "implicitFlowEnabled" : false, - "directAccessGrantsEnabled" : false, - "serviceAccountsEnabled" : false, - "publicClient" : true, - "frontchannelLogout" : false, - "protocol" : "openid-connect", - "attributes" : { - "oidc.ciba.grant.enabled" : "false", - "backchannel.logout.session.required" : "true", - "post.logout.redirect.uris" : "+", - "oauth2.device.authorization.grant.enabled" : "false", - "display.on.consent.screen" : "false", - "pkce.code.challenge.method" : "S256", - "backchannel.logout.revoke.offline.tokens" : "false" - }, - "authenticationFlowBindingOverrides" : { }, - "fullScopeAllowed" : false, - "nodeReRegistrationTimeout" : 0, - "protocolMappers" : [ - { - "name" : "audience resolve", - "protocol" : "openid-connect", - "protocolMapper" : "oidc-audience-resolve-mapper", - "consentRequired" : false, - "config" : { } - } - ], - "defaultClientScopes" : [ "web-origins", "acr", "profile", "roles", "email" ], - "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] - } - ], - "attributes": { - "frontendUrl": "{{ include "loculus.keycloakUrl" . 
}}", - "userProfileEnabled" : "true" - }, - "components": { - "org.keycloak.userprofile.UserProfileProvider" : [ - { - "providerId" : "declarative-user-profile", - "subComponents" : { }, - "config" : { - "kc.user.profile.config" : [ "{\"attributes\":[{\"name\":\"username\",\"displayName\":\"${username}\",\"validations\":{\"length\":{\"min\":3,\"max\":255},\"username-prohibited-characters\":{},\"up-username-not-idn-homograph\":{}},\"permissions\":{\"view\":[\"admin\",\"user\"],\"edit\":[\"admin\",\"user\"]}},{\"name\":\"email\",\"displayName\":\"${email}\",\"validations\":{\"email\":{},\"length\":{\"max\":255}},\"required\":{\"roles\":[\"user\"]},\"permissions\":{\"view\":[\"admin\",\"user\"],\"edit\":[\"admin\",\"user\"]}},{\"name\":\"firstName\",\"displayName\":\"${firstName}\",\"validations\":{\"length\":{\"max\":255},\"person-name-prohibited-characters\":{}},\"required\":{\"roles\":[\"user\"]},\"permissions\":{\"view\":[\"admin\",\"user\"],\"edit\":[\"admin\",\"user\"]}},{\"name\":\"lastName\",\"displayName\":\"${lastName}\",\"validations\":{\"length\":{\"max\":255},\"person-name-prohibited-characters\":{}},\"required\":{\"roles\":[\"user\"]},\"permissions\":{\"view\":[\"admin\",\"user\"],\"edit\":[\"admin\",\"user\"]}},{\"name\":\"university\",\"displayName\":\"University / Organisation\",\"validations\":{},\"annotations\":{},\"required\":{\"roles\":[\"admin\",\"user\"]},\"permissions\":{\"view\":[],\"edit\":[\"admin\",\"user\"]}},{\"name\":\"orcid\",\"displayName\":\"\",\"permissions\":{\"edit\":[\"admin\"],\"view\":[\"admin\",\"user\"]},\"annotations\":{},\"validations\":{}}],\"groups\":[]}" ] - } - } - ] - }, - "loginTheme": "loculus", - "emailTheme": "loculus", - "identityProviders" : [ - {{- range $key, $value := .Values.auth.identityProviders }} - {{- if eq $key "orcid" }} - { - "alias" : "orcid", - "providerId" : "orcid", - "enabled" : true, - "updateProfileFirstLoginMode" : "on", - "trustEmail" : false, - "storeToken" : false, - 
"addReadTokenRoleOnCreate" : false, - "authenticateByDefault" : false, - "linkOnly" : false, - "firstBrokerLoginFlowAlias" : "first broker login", - "config" : { - "clientSecret" : "[[orcidSecret]]", - "clientId" : "{{ $value.clientId }}" - } - } - {{- end }} - {{- end }} - ], - "identityProviderMappers" : [ - {{- range $key, $_ := .Values.auth.identityProviders }} - {{- if eq $key "orcid" }} - { - "name" : "username mapper", - "identityProviderAlias" : "orcid", - "identityProviderMapper" : "hardcoded-attribute-idp-mapper", - "config" : { - "syncMode" : "IMPORT", - "attribute" : "username" - } - }, - { - "name" : "orcid", - "identityProviderAlias" : "orcid", - "identityProviderMapper" : "orcid-user-attribute-mapper", - "config" : { - "syncMode" : "INHERIT", - "jsonField" : "orcid-identifier", - "userAttribute" : "orcid.path" - } - } - {{- end }} - {{- end }} - ] - } diff --git a/kubernetes/loculus/templates/keycloak-database-service.yaml b/kubernetes/loculus/templates/keycloak-database-service.yaml deleted file mode 100644 index 41d33b631a..0000000000 --- a/kubernetes/loculus/templates/keycloak-database-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -{{- if .Values.runDevelopmentKeycloakDatabase }} -apiVersion: v1 -kind: Service -metadata: - name: loculus-keycloak-database-service -spec: - type: ClusterIP - selector: - app: loculus - component: keycloak-database - ports: - - port: 5432 -{{- end }} diff --git a/kubernetes/loculus/templates/keycloak-database-standin.yaml b/kubernetes/loculus/templates/keycloak-database-standin.yaml deleted file mode 100644 index 0856b47227..0000000000 --- a/kubernetes/loculus/templates/keycloak-database-standin.yaml +++ /dev/null @@ -1,49 +0,0 @@ -{{- $dockerTag := include "loculus.dockerTag" .Values }} -{{- if .Values.runDevelopmentKeycloakDatabase }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: loculus-keycloak-database - annotations: - argocd.argoproj.io/sync-options: Replace=true -spec: - replicas: 1 - selector: - 
matchLabels: - app: loculus - component: keycloak-database - strategy: - type: Recreate - template: - metadata: - annotations: - timestamp: {{ now | quote }} - labels: - app: loculus - component: keycloak-database - spec: - containers: - - name: loculus-keycloak-database - image: postgres:15.12 - resources: - requests: - memory: "30Mi" - cpu: 10m - limits: - memory: "100Mi" - ports: - - containerPort: 5432 - env: - - name: POSTGRES_USER - value: "postgres" - - name: POSTGRES_PASSWORD - value: "unsecure" - - name: POSTGRES_DB - value: "keycloak" - - name: POSTGRES_HOST_AUTH_METHOD - value: "trust" - {{ if not .Values.developmentDatabasePersistence }} - - name: LOCULUS_VERSION - value: {{ $dockerTag }} - {{- end }} -{{- end }} diff --git a/kubernetes/loculus/templates/keycloak-deployment.yaml b/kubernetes/loculus/templates/keycloak-deployment.yaml deleted file mode 100644 index 78bd59233f..0000000000 --- a/kubernetes/loculus/templates/keycloak-deployment.yaml +++ /dev/null @@ -1,134 +0,0 @@ ---- -{{- $dockerTag := include "loculus.dockerTag" .Values }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: loculus-keycloak - annotations: - argocd.argoproj.io/sync-options: Replace=true -spec: - replicas: 1 - selector: - matchLabels: - app: loculus - component: keycloak - template: - metadata: - labels: - app: loculus - component: keycloak - spec: - {{- include "possiblePriorityClassName" . 
| nindent 6 }} - initContainers: -{{- include "loculus.configProcessor" (dict "name" "keycloak-config" "dockerTag" $dockerTag "imagePullPolicy" .Values.imagePullPolicy) | nindent 8 }} - - name: keycloak-theme-prep - resources: - requests: - cpu: 100m - memory: 128Mi - limits: - cpu: 500m - memory: 256Mi - image: "ghcr.io/loculus-project/keycloakify:{{ $dockerTag }}" - volumeMounts: - - name: theme-volume - mountPath: /destination - containers: - - name: keycloak - # TODO #1221 - image: quay.io/keycloak/keycloak:23.0 - {{- include "loculus.resources" (list "keycloak" $.Values) | nindent 10 }} - env: - - name: REGISTRATION_TERMS_MESSAGE - value: {{ $.Values.registrationTermsMessage }} - - name: PROJECT_NAME - value: {{ $.Values.name }} - - name: KC_DB - value: postgres - - name: KC_DB_URL_HOST - valueFrom: - secretKeyRef: - name: keycloak-database - key: addr - - name: KC_DB_URL_PORT - valueFrom: - secretKeyRef: - name: keycloak-database - key: port - - name: KC_DB_URL_DATABASE - valueFrom: - secretKeyRef: - name: keycloak-database - key: database - - name: KC_DB_USERNAME - valueFrom: - secretKeyRef: - name: keycloak-database - key: username - - name: KC_DB_PASSWORD - valueFrom: - secretKeyRef: - name: keycloak-database - key: password - - name: KC_BOOTSTRAP_ADMIN_USERNAME # TODO: delete after upgrading keycloak (#3736 ) - value: "admin" - - name: KC_BOOTSTRAP_ADMIN_PASSWORD # TODO: delete after upgrading keycloak (#3736 ) - valueFrom: - secretKeyRef: - name: keycloak-admin - key: initialAdminPassword - - name: KEYCLOAK_ADMIN - value: "admin" - - name: KEYCLOAK_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: keycloak-admin - key: initialAdminPassword - - name: KC_PROXY - value: "edge" - - name: PROXY_ADDRESS_FORWARDING - value: "true" - - name: KC_HEALTH_ENABLED - value: "true" - - name: KC_HOSTNAME_URL - value: "{{ include "loculus.keycloakUrl" . }}" - - name: KC_HOSTNAME_ADMIN_URL - value: "{{ include "loculus.keycloakUrl" . 
}}" - - name: KC_FEATURES - value: "declarative-user-profile" - # see https://github.com/keycloak/keycloak/blob/77b58275ca06d1cbe430c51db74479a7e1b409b5/quarkus/dist/src/main/content/bin/kc.sh#L95-L150 - - name: KC_RUN_IN_CONTAINER - value: "true" - {{- if .Values.runDevelopmentKeycloakDatabase }} - - name: LOCULUS_VERSION - value: {{ $dockerTag }} - {{- end }} - args: - - "start" - - "--import-realm" - - "--cache=local" - ports: - - containerPort: 8080 - volumeMounts: - - name: keycloak-config-processed - mountPath: /opt/keycloak/data/import/ - - name: theme-volume - mountPath: /opt/keycloak/providers/ - startupProbe: - httpGet: - path: /health/ready - port: 8080 - timeoutSeconds: 3 - failureThreshold: 150 - periodSeconds: 5 - livenessProbe: - httpGet: - path: /health/ready - port: 8080 - timeoutSeconds: 3 - periodSeconds: 10 - failureThreshold: 2 - volumes: -{{ include "loculus.configVolume" (dict "name" "keycloak-config") | nindent 8 }} - - name: theme-volume - emptyDir: {} diff --git a/kubernetes/loculus/templates/keycloak-service.yaml b/kubernetes/loculus/templates/keycloak-service.yaml deleted file mode 100644 index a0a4077a16..0000000000 --- a/kubernetes/loculus/templates/keycloak-service.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: loculus-keycloak-service -spec: - {{- template "loculus.serviceType" . }} - selector: - app: loculus - component: keycloak - ports: - - port: 8083 - targetPort: 8080 - {{- if ne $.Values.environment "server" }} - nodePort: 30083 - {{- end }} - protocol: TCP - name: http diff --git a/kubernetes/loculus/templates/lapis-deployment.yaml b/kubernetes/loculus/templates/lapis-deployment.yaml deleted file mode 100644 index 3592480e9f..0000000000 --- a/kubernetes/loculus/templates/lapis-deployment.yaml +++ /dev/null @@ -1,70 +0,0 @@ -{{- $dockerTag := include "loculus.dockerTag" .Values }} - -{{- range $_, $item := (include "loculus.enabledOrganisms" . 
| fromJson).organisms }} -{{- $key := $item.key }} -{{- $organismContent := $item.contents }} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: loculus-lapis-{{ $key }} - annotations: -spec: - replicas: {{ $.Values.replicas.lapis | default 1 }} - selector: - matchLabels: - app: loculus - component: lapis-{{ $key }} - template: - metadata: - annotations: - timestamp: {{ now | quote }} - labels: - app: loculus - component: lapis-{{ $key }} - spec: - {{- include "possiblePriorityClassName" $ | nindent 6 }} - initContainers: - {{- include "loculus.configProcessor" (dict "name" "lapis-silo-database-config" "dockerTag" $dockerTag "imagePullPolicy" $.Values.imagePullPolicy) | nindent 8 }} - containers: - - name: lapis - image: "{{ $.Values.images.lapis.repository }}:{{ $.Values.images.lapis.tag }}" - imagePullPolicy: "{{ $.Values.images.lapis.pullPolicy | default $.Values.imagePullPolicy }}" - {{- include "loculus.resources" (list "lapis" $.Values $key) | nindent 10 }} - ports: - - containerPort: 8080 - args: - - "--silo.url=http://loculus-silo-service-{{ $key }}:8081" - env: - - name: JVM_OPTS - value: -XX:+UseContainerSupport -XX:MaxRAMPercentage=75.0 -XX:+UseG1GC -XX:MaxHeapFreeRatio=5 -XX:MinHeapFreeRatio=2 -XX:MaxGCPauseMillis=100 - volumeMounts: - - name: lapis-silo-database-config-processed - mountPath: /workspace/database_config.yaml - subPath: database_config.yaml - - name: lapis-silo-database-config-processed - mountPath: /workspace/reference_genomes.json - subPath: reference_genomes.json - startupProbe: - httpGet: - path: /actuator/health - port: 8080 - periodSeconds: 5 - failureThreshold: 36 # 3 minutes to start - readinessProbe: - httpGet: - path: /sample/info - port: 8080 - periodSeconds: 10 - failureThreshold: 3 - timeoutSeconds: 5 - livenessProbe: - httpGet: - path: /actuator/health - port: 8080 - periodSeconds: 10 - failureThreshold: 3 - timeoutSeconds: 5 - volumes: - {{- include "loculus.configVolume" (dict "name" 
"lapis-silo-database-config" "configmap" (printf "lapis-silo-database-config-%s" $key)) | nindent 8 }} -{{- end }} diff --git a/kubernetes/loculus/templates/lapis-ingress.yaml b/kubernetes/loculus/templates/lapis-ingress.yaml deleted file mode 100644 index 2400ea8dfb..0000000000 --- a/kubernetes/loculus/templates/lapis-ingress.yaml +++ /dev/null @@ -1,104 +0,0 @@ -{{- $lapisHost := printf "lapis%s%s" .Values.subdomainSeparator .Values.host }} -{{- $enabledOrganismsResult := (include "loculus.enabledOrganisms" . | fromJson) }} -{{- $enabledOrganismsList := $enabledOrganismsResult.organisms }} -{{- $organismKeys := list -}} -{{- range $_, $item := $enabledOrganismsList -}} - {{- $organismKeys = append $organismKeys $item.key -}} -{{- end -}} -apiVersion: traefik.containo.us/v1alpha1 -kind: Middleware -metadata: - name: cors-all-origins -spec: - headers: - accessControlAllowMethods: - - "GET" - - "OPTIONS" - - "POST" - - "HEAD" - accessControlAllowOriginList: - - "*" - accessControlMaxAge: 100 - accessControlAllowHeaders: - - "*" ---- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: lapis-ingress - annotations: - traefik.ingress.kubernetes.io/router.middlewares: "{{ $.Release.Namespace }}-cors-all-origins@kubernetescrd,{{- $first := true }}{{- range $key := $organismKeys }}{{ if $first }}{{ $first = false }}{{ else }},{{ end }}{{ $.Release.Namespace }}-strip-{{ $key }}-prefix@kubernetescrd{{- end }}" -spec: - rules: - - host: {{ if eq $.Values.environment "server" }}{{ $lapisHost }}{{ end }} - http: - paths: - {{- range $key := $organismKeys }} - - path: /{{ $key }}/ - pathType: {{ ternary "ImplementationSpecific" "Prefix" (eq $.Values.environment "server") }} - backend: - service: - name: {{ template "loculus.lapisServiceName" $key }} - port: - number: 8080 - {{- end }} - {{- if eq $.Values.environment "server" }} - tls: - - hosts: - - {{ $lapisHost }} - {{- end }} - -{{- range $key := $organismKeys }} ---- -apiVersion: traefik.containo.us/v1alpha1 
-kind: Middleware -metadata: - name: strip-{{ $key }}-prefix -spec: - stripPrefix: - prefixes: - - /{{ $key }}/ -{{- end }} ---- -apiVersion: traefik.containo.us/v1alpha1 -kind: Middleware -metadata: - name: redirect-slash -spec: - redirectRegex: - regex: ".*" - replacement: "$0/" - permanent: true ---- -# This ingress is only used to redirect from $LAPIS_URL/$ORGANISM_NAME to $LAPIS_URL/$ORGANISM_NAME/ -# We need to do this so we can strip the /$ORGANISM_NAME/ prefix in the main lapis ingress (see #3360) -{{- $redirectMiddlewareList := list -}} -{{- if $.Values.enforceHTTPS -}} - {{- $redirectMiddlewareList = append $redirectMiddlewareList (printf "%s-redirect-middleware@kubernetescrd" $.Release.Namespace) }} -{{- end }} -{{- $redirectMiddlewareList = append $redirectMiddlewareList (printf "%s-redirect-slash@kubernetescrd" $.Release.Namespace) }} -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: lapis-redirect-ingress - annotations: - traefik.ingress.kubernetes.io/router.middlewares: "{{ join "," $redirectMiddlewareList }}" - # High priority to ensure this rule is evaluated first - traefik.ingress.kubernetes.io/router.priority: "500" -spec: - rules: - - host: {{ if eq $.Values.environment "server" }}{{ $lapisHost }}{{ end }} - http: - paths: - {{- range $key := $organismKeys }} - - path: /{{ $key }} - pathType: Exact - backend: - # An Ingress path requires a backend. Since the redirect happens before - # the request is forwarded, we can point all paths to the first - # organism's service as a placeholder. - service: - name: {{ template "loculus.lapisServiceName" (first $organismKeys) }} - port: - number: 8080 - {{- end }} diff --git a/kubernetes/loculus/templates/lapis-service.yaml b/kubernetes/loculus/templates/lapis-service.yaml deleted file mode 100644 index 83c5ba2779..0000000000 --- a/kubernetes/loculus/templates/lapis-service.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{{- range $_, $item := (include "loculus.enabledOrganisms" . 
| fromJson).organisms }} -{{- $key := $item.key }} ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ template "loculus.lapisServiceName" $key }} -spec: - type: ClusterIP - selector: - app: loculus - component: lapis-{{ $key }} - ports: - - port: 8080 - targetPort: 8080 - protocol: TCP - name: http -{{- end }} diff --git a/kubernetes/loculus/templates/lapis-silo-database-config.yaml b/kubernetes/loculus/templates/lapis-silo-database-config.yaml deleted file mode 100644 index 91764ea025..0000000000 --- a/kubernetes/loculus/templates/lapis-silo-database-config.yaml +++ /dev/null @@ -1,34 +0,0 @@ -{{- $commonMetadata := (include "loculus.commonMetadata" . | fromYaml).fields }} - -{{- range $_, $item := (include "loculus.enabledOrganisms" . | fromJson).organisms }} -{{- $key := $item.key }} -{{- $organismContent := $item.contents }} - -{{- $lineageSystem := $organismContent | include "loculus.lineageSystemForOrganism" }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: lapis-silo-database-config-{{ $key }} -data: - {{- $args := dict - "schema" ($organismContent.schema | include "loculus.patchMetadataSchema" | fromYaml) - "commonMetadata" $commonMetadata - "referenceGenomes" $organismContent.referenceGenomes - }} - database_config.yaml: | - {{ include "loculus.siloDatabaseConfig" $args | nindent 4 }} - - preprocessing_config.yaml: | - inputDirectory: /preprocessing/input - outputDirectory: /preprocessing/output - ndjsonInputFilename: data.ndjson.zst - referenceGenomeFilename: reference_genomes.json - {{- if $lineageSystem }} - lineageDefinitionFilenames: - - lineage_definitions.yaml - {{- end }} - - reference_genomes.json: | - {{ include "loculus.mergeReferenceGenomes" $organismContent.referenceGenomes | fromYaml | toJson }} -{{- end }} diff --git a/kubernetes/loculus/templates/loculus-backend-config.yaml b/kubernetes/loculus/templates/loculus-backend-config.yaml deleted file mode 100644 index a024a4c209..0000000000 --- 
a/kubernetes/loculus/templates/loculus-backend-config.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: loculus-backend-config -data: - backend_config.json: | - {{ include "loculus.generateBackendConfig" . | fromYaml | toJson }} diff --git a/kubernetes/loculus/templates/loculus-backend-service.yaml b/kubernetes/loculus/templates/loculus-backend-service.yaml deleted file mode 100644 index e50729bcd2..0000000000 --- a/kubernetes/loculus/templates/loculus-backend-service.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if not .Values.disableBackend }} -apiVersion: v1 -kind: Service -metadata: - name: loculus-backend-service -spec: - {{- template "loculus.serviceType" . }} - selector: - app: loculus - component: backend - ports: - - port: 8079 - targetPort: 8079 - {{- if ne $.Values.environment "server" }} - nodePort: 30082 - {{- end }} - protocol: TCP - name: http -{{- end }} diff --git a/kubernetes/loculus/templates/loculus-backend.yaml b/kubernetes/loculus/templates/loculus-backend.yaml deleted file mode 100644 index 39312643b2..0000000000 --- a/kubernetes/loculus/templates/loculus-backend.yaml +++ /dev/null @@ -1,158 +0,0 @@ -{{- $dockerTag := include "loculus.dockerTag" .Values }} -{{- if not .Values.disableBackend }} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: loculus-backend - annotations: - argocd.argoproj.io/sync-options: Replace=true -spec: - replicas: {{$.Values.replicas.backend}} - selector: - matchLabels: - app: loculus - component: backend - template: - metadata: - annotations: - timestamp: {{ now | quote }} - labels: - app: loculus - component: backend - spec: - {{- include "possiblePriorityClassName" . 
| nindent 6 }} - initContainers: -{{- include "loculus.configProcessor" (dict "name" "loculus-backend-config" "dockerTag" $dockerTag "imagePullPolicy" $.Values.imagePullPolicy) | nindent 8 }} - containers: - - name: backend - image: "{{ $.Values.images.backend.repository }}:{{ $.Values.images.backend.tag | default $dockerTag }}" - imagePullPolicy: "{{ $.Values.images.backend.pullPolicy | default $.Values.imagePullPolicy }}" - {{- include "loculus.resources" (list "backend" $.Values) | nindent 10 }} - startupProbe: - httpGet: - path: "/actuator/health/liveness" - port: 8079 - periodSeconds: 5 - failureThreshold: 360 # 30 minutes to start - livenessProbe: - httpGet: - path: "/actuator/health/liveness" - port: 8079 - periodSeconds: 10 - readinessProbe: - httpGet: - path: "/actuator/health/readiness" - port: 8079 - ports: - - containerPort: 8079 - args: - - "--loculus.enable-seqsets={{ $.Values.seqSets.enabled }}" - {{- if $.Values.seqSets.crossRef }} - - "--crossref.doi-prefix=$(CROSSREF_DOI_PREFIX)" - - "--crossref.endpoint=$(CROSSREF_ENDPOINT)" - - "--crossref.username=$(CROSSREF_USERNAME)" - - "--crossref.password=$(CROSSREF_PASSWORD)" - - "--crossref.database-name=$(CROSSREF_DATABASE_NAME)" - - "--crossref.email=$(CROSSREF_EMAIL)" - - "--crossref.organization=$(CROSSREF_ORGANIZATION)" - - "--crossref.host-url=$(CROSSREF_HOST_URL)" - {{- end }} - - "--keycloak.password=$(BACKEND_KEYCLOAK_PASSWORD)" - - "--keycloak.realm=loculus" - - "--keycloak.client=backend-client" - - "--keycloak.url=http://loculus-keycloak-service:8083" - - "--keycloak.user=backend" - - "--spring.datasource.password=$(DB_PASSWORD)" - - "--spring.datasource.url=$(DB_URL)" - - "--spring.datasource.username=$(DB_USERNAME)" - - "--spring.security.oauth2.resourceserver.jwt.jwk-set-uri=http://loculus-keycloak-service:8083/realms/loculus/protocol/openid-connect/certs" - - "--loculus.cleanup.task.reset-stale-in-processing-after-seconds={{- .Values.preprocessingTimeout | default 120 }}" - - 
"--loculus.pipeline-version-upgrade-check.interval-seconds={{- .Values.pipelineVersionUpgradeCheckIntervalSeconds | default 10 }}" - - "--loculus.s3.enabled=$(S3_ENABLED)" - {{- if $.Values.s3.enabled }} - - "--loculus.s3.bucket.endpoint=$(S3_BUCKET_ENDPOINT)" - - "--loculus.s3.bucket.internal-endpoint=$(S3_BUCKET_INTERNAL_ENDPOINT)" - - "--loculus.s3.bucket.region=$(S3_BUCKET_REGION)" - - "--loculus.s3.bucket.bucket=$(S3_BUCKET_BUCKET)" - - "--loculus.s3.bucket.access-key=$(S3_BUCKET_ACCESS_KEY)" - - "--loculus.s3.bucket.secret-key=$(S3_BUCKET_SECRET_KEY)" - {{- end }} - {{- if .Values.backendExtraArgs }} - {{- .Values.backendExtraArgs | toYaml | nindent 12 }} - {{- end }} - env: - - name: JVM_OPTS - value: -XX:+UseContainerSupport -XX:+UseG1GC -XX:MaxHeapFreeRatio=5 -XX:MinHeapFreeRatio=2 - {{- if $.Values.seqSets.crossRef }} - - name: CROSSREF_USERNAME - valueFrom: - secretKeyRef: - name: crossref - key: username - - name: CROSSREF_PASSWORD - valueFrom: - secretKeyRef: - name: crossref - key: password - - name: CROSSREF_DOI_PREFIX - value: {{$.Values.seqSets.crossRef.DOIPrefix | quote }} - - name: CROSSREF_ENDPOINT - value: {{$.Values.seqSets.crossRef.endpoint | quote }} - - name: CROSSREF_DATABASE_NAME - value: {{$.Values.seqSets.crossRef.databaseName | quote }} - - name: CROSSREF_EMAIL - value: {{$.Values.seqSets.crossRef.email | quote }} - - name: CROSSREF_ORGANIZATION - value: {{$.Values.seqSets.crossRef.organization | quote }} - - name: CROSSREF_HOST_URL - value: {{$.Values.seqSets.crossRef.hostUrl | quote }} - {{- end }} - - name: BACKEND_KEYCLOAK_PASSWORD - valueFrom: - secretKeyRef: - name: service-accounts - key: backendUserPassword - - name: DB_URL - valueFrom: - secretKeyRef: - name: database - key: url - - name: DB_USERNAME - valueFrom: - secretKeyRef: - name: database - key: username - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - name: database - key: password - - name: S3_ENABLED - value: {{$.Values.s3.enabled | quote }} - {{- if 
$.Values.s3.enabled }} - - name: S3_BUCKET_ENDPOINT - value: {{ include "loculus.s3Url" . | quote }} - - name: S3_BUCKET_INTERNAL_ENDPOINT - value: {{ include "loculus.s3UrlInternal" . | quote }} - - name: S3_BUCKET_REGION - value: {{$.Values.s3.bucket.region | quote }} - - name: S3_BUCKET_BUCKET - value: {{$.Values.s3.bucket.bucket | quote }} - - name: S3_BUCKET_ACCESS_KEY - valueFrom: - secretKeyRef: - name: s3-bucket - key: accessKey - - name: S3_BUCKET_SECRET_KEY - valueFrom: - secretKeyRef: - name: s3-bucket - key: secretKey - {{- end }} - volumeMounts: - - name: loculus-backend-config-processed - mountPath: /config - volumes: -{{ include "loculus.configVolume" (dict "name" "loculus-backend-config") | nindent 8 }} -{{- end }} \ No newline at end of file diff --git a/kubernetes/loculus/templates/loculus-database-service.yaml b/kubernetes/loculus/templates/loculus-database-service.yaml deleted file mode 100644 index 2f3e8fb832..0000000000 --- a/kubernetes/loculus/templates/loculus-database-service.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if .Values.runDevelopmentMainDatabase }} -apiVersion: v1 -kind: Service -metadata: - name: loculus-database-service -spec: - - {{- template "loculus.serviceType" .}} - selector: - app: loculus - component: database - ports: - - port: 5432 - targetPort: 5432 - {{- if ne $.Values.environment "server" }} - nodePort: 30432 - {{- end }} - protocol: TCP - name: http -{{- end }} diff --git a/kubernetes/loculus/templates/loculus-database-standin.yaml b/kubernetes/loculus/templates/loculus-database-standin.yaml deleted file mode 100644 index 1324d2827a..0000000000 --- a/kubernetes/loculus/templates/loculus-database-standin.yaml +++ /dev/null @@ -1,67 +0,0 @@ -{{- $dockerTag := include "loculus.dockerTag" .Values }} -{{- if .Values.runDevelopmentMainDatabase }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: loculus-database - annotations: - argocd.argoproj.io/sync-options: Replace=true -spec: - replicas: 1 - selector: - 
matchLabels: - app: loculus - component: database - strategy: - type: Recreate - template: - metadata: - labels: - app: loculus - component: database - spec: - containers: - - name: database - image: postgres:15.12 - args: - - "-c" - - "shared_preload_libraries=pg_stat_statements" - - "-c" - - "pg_stat_statements.track=all" - resources: - requests: - memory: "200Mi" - cpu: "250m" - limits: - memory: "2Gi" - ports: - - containerPort: 5432 - env: - - name: POSTGRES_USER - value: "postgres" - - name: POSTGRES_PASSWORD - value: "unsecure" - - name: POSTGRES_DB - value: "loculus" - - name: POSTGRES_HOST_AUTH_METHOD - value: "trust" - {{ if not .Values.developmentDatabasePersistence }} - - name: LOCULUS_VERSION - value: {{ $dockerTag }} - {{- end }} - volumeMounts: - - name: init-scripts - mountPath: /docker-entrypoint-initdb.d - volumes: - - name: init-scripts - configMap: - name: loculus-database-init ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: loculus-database-init -data: - init-pg-stat.sql: | - CREATE EXTENSION IF NOT EXISTS pg_stat_statements; -{{- end }} diff --git a/kubernetes/loculus/templates/loculus-preprocessing-config.yaml b/kubernetes/loculus/templates/loculus-preprocessing-config.yaml deleted file mode 100644 index b5eb9b4f21..0000000000 --- a/kubernetes/loculus/templates/loculus-preprocessing-config.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- range $_, $item := (include "loculus.enabledOrganisms" . 
| fromJson).organisms }} -{{- $organism := $item.key }} -{{- $organismConfig := $item.contents }} -{{- $metadata := ($organismConfig.schema | include "loculus.patchMetadataSchema" | fromYaml).metadata }} -{{- $referenceGenomes:= include "loculus.mergeReferenceGenomes" $organismConfig.referenceGenomes | fromYaml }} -{{- $flattened := include "loculus.flattenPreprocessingVersions" $organismConfig.preprocessing | fromJson }} -{{- range $processingIndex, $processingConfig := $flattened.items }} -{{- if $processingConfig.configFile }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: loculus-preprocessing-config-{{ $organism }}-v{{ $processingConfig.version }}-{{ $processingIndex }} -data: - preprocessing-config.yaml: | - organism: {{ $organism }} - {{- $processingConfig.configFile | toYaml | nindent 4 }} - processing_spec: - {{- $args := dict "metadata" $metadata "referenceGenomes" $organismConfig.referenceGenomes }} - {{- include "loculus.preprocessingSpecs" $args | nindent 6 }} - versionComment: - function: identity - inputs: - input: versionComment - args: -{{- end }} -{{- end }} -{{- end }} diff --git a/kubernetes/loculus/templates/loculus-preprocessing-deployment.yaml b/kubernetes/loculus/templates/loculus-preprocessing-deployment.yaml deleted file mode 100644 index fd728ba58e..0000000000 --- a/kubernetes/loculus/templates/loculus-preprocessing-deployment.yaml +++ /dev/null @@ -1,68 +0,0 @@ -{{- $dockerTag := include "loculus.dockerTag" .Values }} -{{ $backendHost := .Values.disableBackend | ternary - "http://host.k3d.internal:8079" - "http://loculus-backend-service:8079" -}} -{{- $testconfig := .Values.testconfig | default false }} -{{- $keycloakHost := $testconfig | ternary (printf "http://%s:8083" $.Values.localHost) "http://loculus-keycloak-service:8083" }} -{{- if not .Values.disablePreprocessing }} -{{- range $_, $item := (include "loculus.enabledOrganisms" . 
| fromJson).organisms }} -{{- $organism := $item.key }} -{{- $flattened := include "loculus.flattenPreprocessingVersions" $item.contents.preprocessing | fromJson }} -{{- range $processingIndex, $processingConfig := $flattened.items }} -{{- $thisDockerTag := $processingConfig.dockerTag | default $dockerTag }} -{{- $replicas := $processingConfig.replicas | default 1 }} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: loculus-preprocessing-{{ $organism }}-v{{ $processingConfig.version }}-{{ $processingIndex }} - annotations: - argocd.argoproj.io/sync-options: Replace=true -spec: - replicas: {{ $replicas }} - selector: - matchLabels: - app: loculus - component: loculus-preprocessing-{{ $organism }}-v{{ $processingConfig.version }}-{{ $processingIndex }} - template: - metadata: - annotations: - timestamp: {{ now | quote }} - labels: - app: loculus - component: loculus-preprocessing-{{ $organism }}-v{{ $processingConfig.version }}-{{ $processingIndex }} - spec: - {{- include "possiblePriorityClassName" $ | nindent 6 }} - containers: - - name: preprocessing-{{ $organism }} - image: {{ $processingConfig.image}}:{{ $thisDockerTag }} - imagePullPolicy: {{ $.Values.imagePullPolicy }} - {{- include "loculus.resources" (list "preprocessing" $.Values) | nindent 10 }} - env: - - name: KEYCLOAK_PASSWORD - valueFrom: - secretKeyRef: - name: service-accounts - key: preprocessingPipelinePassword - args: - {{- range $arg := $processingConfig.args }} - - "{{ $arg }}" - {{- end }} - - "--backend-host={{ $backendHost }}/{{ $organism }}" - - "--keycloak-host={{ $keycloakHost }}" - - "--pipeline-version={{ $processingConfig.version }}" - - "--keycloak-password=$(KEYCLOAK_PASSWORD)" - {{- if $processingConfig.configFile }} - - "--config=/etc/config/preprocessing-config.yaml" - volumeMounts: - - name: preprocessing-config-volume-{{ $organism }}-v{{ $processingConfig.version }}-{{ $processingIndex }} - mountPath: /etc/config - volumes: - - name: preprocessing-config-volume-{{ 
$organism }}-v{{ $processingConfig.version }}-{{ $processingIndex }} - configMap: - name: loculus-preprocessing-config-{{ $organism }}-v{{ $processingConfig.version }}-{{ $processingIndex }} - {{- end }} -{{- end }} -{{- end }} -{{- end }} diff --git a/kubernetes/loculus/templates/loculus-website-config.yaml b/kubernetes/loculus/templates/loculus-website-config.yaml deleted file mode 100644 index 3c5df57465..0000000000 --- a/kubernetes/loculus/templates/loculus-website-config.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: loculus-website-config -data: - website_config.json: | - {{ include "loculus.generateWebsiteConfig" . | fromYaml | toJson }} - runtime_config.json: | - { - "name" : "{{ $.Values.name }}", - "insecureCookies": {{ $.Values.insecureCookies }}, - "serverSide": { - {{- if .Values.usePublicRuntimeConfigAsServerSide }} - {{- template "loculus.publicRuntimeConfig" . -}} - {{- else }} - {{ if $.Values.disableBackend -}} - "backendUrl": "http://{{ $.Values.localHost }}:8079", - {{- else -}} - "backendUrl": "http://loculus-backend-service:8079", - {{- end }} - "lapisUrls": {{- include "loculus.generateInternalLapisUrls" . | fromYaml | toJson }}, - "keycloakUrl": "{{ if not .Values.disableWebsite -}}http://loculus-keycloak-service:8083{{ else -}}http://{{ $.Values.localHost }}:8083{{ end }}" - {{- end }} - }, - "public": { - {{- template "loculus.publicRuntimeConfig" . 
-}} - }, - "backendKeycloakClientSecret" : "[[backendKeycloakClientSecret]]" - } diff --git a/kubernetes/loculus/templates/loculus-website.yaml b/kubernetes/loculus/templates/loculus-website.yaml deleted file mode 100644 index a7e692aabb..0000000000 --- a/kubernetes/loculus/templates/loculus-website.yaml +++ /dev/null @@ -1,53 +0,0 @@ -{{- $dockerTag := include "loculus.dockerTag" .Values }} -{{- if not .Values.disableWebsite }} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: loculus-website - annotations: - argocd.argoproj.io/sync-options: Replace=true -spec: - replicas: {{$.Values.replicas.website}} - selector: - matchLabels: - app: loculus - component: website - template: - metadata: - annotations: - timestamp: {{ now | quote }} - labels: - app: loculus - component: website - spec: - {{- include "possiblePriorityClassName" . | nindent 6 }} - initContainers: -{{- include "loculus.configProcessor" (dict "name" "loculus-website-config" "dockerTag" $dockerTag "imagePullPolicy" $.Values.imagePullPolicy) | nindent 8 }} - containers: - - name: website - image: "{{ $.Values.images.website.repository }}:{{ $.Values.images.website.tag | default $dockerTag }}" - imagePullPolicy: "{{ $.Values.images.website.pullPolicy | default $.Values.imagePullPolicy }}" - {{- include "loculus.resources" (list "website" .Values) | nindent 10 }} - ports: - - containerPort: 3000 - volumeMounts: - - name: loculus-website-config-processed - mountPath: /config - livenessProbe: - httpGet: - path: / - port: 3000 - initialDelaySeconds: 30 - periodSeconds: 10 - readinessProbe: - httpGet: - path: / - port: 3000 - initialDelaySeconds: 5 - periodSeconds: 5 - imagePullSecrets: - - name: custom-website-sealed-secret - volumes: -{{ include "loculus.configVolume" (dict "name" "loculus-website-config") | nindent 8 }} -{{- end }} \ No newline at end of file diff --git a/kubernetes/loculus/templates/minio-deployment.yaml b/kubernetes/loculus/templates/minio-deployment.yaml deleted file mode 
100644 index e18364dd85..0000000000 --- a/kubernetes/loculus/templates/minio-deployment.yaml +++ /dev/null @@ -1,88 +0,0 @@ -{{- $dockerTag := include "loculus.dockerTag" .Values }} -{{- if and .Values.s3.enabled .Values.runDevelopmentS3 }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: minio-policies -data: - policy.json: | - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::{{ .Values.s3.bucket.bucket }}/*", - "Principal": "*", - "Condition": { - "StringEquals": { - "s3:ExistingObjectTag/public": "true" - } - } - } - ] - } ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: minio - annotations: - argocd.argoproj.io/sync-options: Replace=true -spec: - replicas: 1 - selector: - matchLabels: - app: loculus - component: minio - template: - metadata: - labels: - app: loculus - component: minio - spec: - volumes: - - name: policy-volume - configMap: - name: minio-policies - containers: - - name: minio - image: minio/minio:latest - {{- include "loculus.resources" (list "minio" $.Values) | nindent 10 }} - args: ["server", "/data"] - ports: - - containerPort: 9000 - env: - - name: MINIO_ROOT_USER - valueFrom: - secretKeyRef: - name: s3-bucket - key: accessKey - - name: MINIO_ROOT_PASSWORD - valueFrom: - secretKeyRef: - name: s3-bucket - key: secretKey - {{ if not .Values.developmentDatabasePersistence }} - - name: LOCULUS_VERSION - value: {{ $dockerTag }} - {{- end }} - lifecycle: - postStart: - exec: - command: - - /bin/sh - - -c - - | - ( - sleep 10 - mc alias set local http://localhost:9000 "$MINIO_ROOT_USER" "$MINIO_ROOT_PASSWORD" - mc mb -p local/{{ .Values.s3.bucket.bucket }} - echo "Bucket {{ .Values.s3.bucket.bucket }} ensured." 
- mc anonymous set-json /policy/policy.json local/{{ .Values.s3.bucket.bucket }} - ) & - volumeMounts: - - name: policy-volume - mountPath: /policy - restartPolicy: Always -{{- end }} diff --git a/kubernetes/loculus/templates/minio-service.yaml b/kubernetes/loculus/templates/minio-service.yaml deleted file mode 100644 index 0a68990191..0000000000 --- a/kubernetes/loculus/templates/minio-service.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if and .Values.s3.enabled .Values.runDevelopmentS3 }} -apiVersion: v1 -kind: Service -metadata: - name: loculus-minio-service -spec: - {{- template "loculus.serviceType" . }} - selector: - app: loculus - component: minio - ports: - - port: 8084 - targetPort: 9000 - {{- if ne $.Values.environment "server" }} - nodePort: 30084 - {{- end }} - protocol: TCP - name: http -{{- end }} diff --git a/kubernetes/loculus/templates/secrets.yaml b/kubernetes/loculus/templates/secrets.yaml deleted file mode 100644 index 5d1d19acaa..0000000000 --- a/kubernetes/loculus/templates/secrets.yaml +++ /dev/null @@ -1,50 +0,0 @@ -{{- range $name, $secret := .Values.secrets }} ---- -{{- if eq $secret.type "sealedsecret" }} -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret - -metadata: - name: {{ $name }} - annotations: - sealedsecrets.bitnami.com/cluster-wide: {{ $secret.clusterWide | quote | default "false" }} -spec: - encryptedData: - {{- range $key, $value := .encryptedData }} - {{ $key }}: {{ $value }} - {{- end }} -{{ if $secret.rawType }} - template: - type: {{ $secret.rawType }} -{{ end }} - -{{- else if eq $secret.type "autogen" }} -apiVersion: "secretgenerator.mittwald.de/v1alpha1" -kind: "StringSecret" -metadata: - name: {{ $name }} -spec: - fields: - {{- range $key, $value := $secret.data }} - - fieldName: {{ $key }} - encoding: "hex" - length: "18" - {{- end }} -{{- else if eq $secret.type "rawhtpasswd" }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ $name }} -data: - users: {{ htpasswd $secret.data.username $secret.data.password | 
b64enc }} -{{- else }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ $name }} -data: - {{- range $key, $value := $secret.data }} - {{ $key }}: {{ $value | b64enc }} - {{- end }} -{{- end }} -{{- end }} diff --git a/kubernetes/loculus/templates/silo-deployment.yaml b/kubernetes/loculus/templates/silo-deployment.yaml deleted file mode 100644 index 42acb971b5..0000000000 --- a/kubernetes/loculus/templates/silo-deployment.yaml +++ /dev/null @@ -1,118 +0,0 @@ -{{- $dockerTag := include "loculus.dockerTag" .Values }} -{{- $keycloakTokenUrl := "http://loculus-keycloak-service:8083/realms/loculus/protocol/openid-connect/token" }} - -{{- range $_, $item := (include "loculus.enabledOrganisms" . | fromJson).organisms }} -{{- $key := $item.key }} -{{- $organismContent := $item.contents }} -{{- $lineageSystem := $organismContent | include "loculus.lineageSystemForOrganism" }} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: loculus-silo-{{ $key }} - annotations: - # Force replace when run a) with dev db and b) without persistence to ensure silo prepro fails loudly - argocd.argoproj.io/sync-options: Replace=true{{ if (and (not $.Values.developmentDatabasePersistence) $.Values.runDevelopmentMainDatabase) }},Force=true{{ end }} -spec: - replicas: 1 - selector: - matchLabels: - app: loculus - component: silo-{{ $key }} - template: - metadata: - annotations: - timestamp: {{ now | quote }} - labels: - app: loculus - component: silo-{{ $key }} - spec: - {{- include "possiblePriorityClassName" $ | nindent 6 }} - initContainers: - {{- include "loculus.configProcessor" (dict "name" "lapis-silo-database-config" "dockerTag" $dockerTag "imagePullPolicy" $.Values.imagePullPolicy) | nindent 8 }} - containers: - - name: silo - image: "{{ $.Values.images.loculusSilo.repository }}:{{ $.Values.images.loculusSilo.tag | default $dockerTag }}" - command: ["/usr/local/bin/silo"] - imagePullPolicy: {{ $.Values.imagePullPolicy }} - {{- include "loculus.resources" (list "silo" 
$.Values $key) | nindent 10 }} - env: - - name: SPDLOG_LEVEL - value: "debug" - - name: SILO_DATA_DIRECTORY - value: "/data/" - ports: - - containerPort: 8081 - args: - - "api" - - "--api-threads-for-http-connections" - - {{ default 16 (($.Values.silo).apiThreadsForHttpConnections) | quote }} - - "--api-max-queued-http-connections" - - "1000" - - "--query-materialization-cutoff" - - "3276" - volumeMounts: - - name: lapis-silo-shared-data - mountPath: /data - readinessProbe: - httpGet: - path: /info - port: 8081 - initialDelaySeconds: 30 - periodSeconds: 10 - failureThreshold: 3 - timeoutSeconds: 5 - livenessProbe: - httpGet: - path: /health - port: 8081 - initialDelaySeconds: 30 - periodSeconds: 10 - failureThreshold: 3 - timeoutSeconds: 5 - - name: silo-importer - image: "{{ $.Values.images.loculusSilo.repository }}:{{ $.Values.images.loculusSilo.tag | default $dockerTag }}" - imagePullPolicy: "{{ $.Values.images.loculusSilo.pullPolicy }}" - {{- include "loculus.resources" (list "silo-importer" $.Values) | nindent 10 }} - env: - - name: BACKEND_BASE_URL - {{- if $.Values.disableBackend }} - value: "http://host.k3d.internal:8079/{{ $key }}" - {{- else }} - value: "http://loculus-backend-service:8079/{{ $key }}" - {{- end }} - {{- if $lineageSystem }} - - name: LINEAGE_DEFINITIONS - value: {{ index $.Values.lineageSystemDefinitions $lineageSystem | toJson | quote }} - {{- end }} - - name: SILO_RUN_TIMEOUT_SECONDS - value: {{ $.Values.siloImport.siloTimeoutSeconds | quote }} - - name: HARD_REFRESH_INTERVAL - value: {{ $.Values.siloImport.hardRefreshIntervalSeconds | quote }} - - name: SILO_IMPORT_POLL_INTERVAL_SECONDS - value: {{ $.Values.siloImport.pollIntervalSeconds | quote }} - - name: PATH_TO_SILO_BINARY - value: "/usr/local/bin/silo" - - name: PREPROCESSING_CONFIG - value: "/app/preprocessing_config.yaml" - volumeMounts: - - name: lapis-silo-database-config-processed - mountPath: /preprocessing/input/reference_genomes.json - subPath: reference_genomes.json - - 
name: lapis-silo-database-config-processed - mountPath: /preprocessing/input/database_config.yaml - subPath: database_config.yaml - - name: lapis-silo-database-config-processed - mountPath: /app/preprocessing_config.yaml - subPath: preprocessing_config.yaml - - name: lapis-silo-shared-data - mountPath: /preprocessing/output - - name: lapis-silo-input-data-cache - mountPath: /preprocessing/input - volumes: - {{- include "loculus.configVolume" (dict "name" "lapis-silo-database-config" "configmap" (printf "lapis-silo-database-config-%s" $key)) | nindent 8 }} - - name: lapis-silo-shared-data - emptyDir: {} - - name: lapis-silo-input-data-cache - emptyDir: {} -{{- end }} diff --git a/kubernetes/loculus/templates/silo-service.yaml b/kubernetes/loculus/templates/silo-service.yaml deleted file mode 100644 index 0bfd0d4cc6..0000000000 --- a/kubernetes/loculus/templates/silo-service.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{{- range $_, $item := (include "loculus.enabledOrganisms" . | fromJson).organisms }} -{{- $key := $item.key }} ---- -apiVersion: v1 -kind: Service -metadata: - name: loculus-silo-service-{{ $key }} -spec: - type: ClusterIP - selector: - app: loculus - component: silo-{{ $key }} - ports: - - port: 8081 - targetPort: 8081 - protocol: TCP - name: http -{{- end }} \ No newline at end of file diff --git a/kubernetes/loculus/templates/website-service.yaml b/kubernetes/loculus/templates/website-service.yaml deleted file mode 100644 index 6034501e41..0000000000 --- a/kubernetes/loculus/templates/website-service.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if not .Values.disableWebsite }} -apiVersion: v1 -kind: Service -metadata: - name: loculus-website-service -spec: - {{- template "loculus.serviceType" .}} - selector: - app: loculus - component: website - ports: - - port: 3000 - targetPort: 3000 - {{- if ne $.Values.environment "server" }} - nodePort: 30081 - {{- end }} - protocol: TCP - name: http -{{- end }} From 4f957e8225a87411dba6d4c569b57d43b8e9b95a Mon Sep 17 
00:00:00 2001 From: Theo Date: Tue, 17 Feb 2026 17:30:59 +0000 Subject: [PATCH 5/6] fix(deployment): make namespace configurable instead of hardcoding 'default' Replace hardcoded `const ns = 'default'` in ingress.ts and lapis.ts with the actual deployment namespace. Add --namespace/-n CLI flag to main.ts and thread it through LoculusChart -> values.releaseNamespace. This matches Helm's $.Release.Namespace behavior so deployments to non-default namespaces generate correct Traefik middleware references. Co-Authored-By: Claude Opus 4.6 --- kubernetes/cdk8s/src/chart.ts | 5 +++-- kubernetes/cdk8s/src/constructs/ingress.ts | 2 +- kubernetes/cdk8s/src/constructs/lapis.ts | 2 +- kubernetes/cdk8s/src/main.ts | 14 +++++++++++--- kubernetes/cdk8s/src/values.ts | 1 + 5 files changed, 17 insertions(+), 7 deletions(-) diff --git a/kubernetes/cdk8s/src/chart.ts b/kubernetes/cdk8s/src/chart.ts index f631498878..ee70dacd0e 100644 --- a/kubernetes/cdk8s/src/chart.ts +++ b/kubernetes/cdk8s/src/chart.ts @@ -18,8 +18,9 @@ import { Docs } from './constructs/docs'; import { MainIngress } from './constructs/ingress'; export class LoculusChart extends Chart { - constructor(scope: Construct, id: string, values: LoculusValues) { - super(scope, id); + constructor(scope: Construct, id: string, values: LoculusValues, namespace: string) { + super(scope, id, { namespace }); + values.releaseNamespace = namespace; // Secrets new Secrets(this, 'secrets', values); diff --git a/kubernetes/cdk8s/src/constructs/ingress.ts b/kubernetes/cdk8s/src/constructs/ingress.ts index 1081b77b81..3f84d4b915 100644 --- a/kubernetes/cdk8s/src/constructs/ingress.ts +++ b/kubernetes/cdk8s/src/constructs/ingress.ts @@ -55,7 +55,7 @@ export class MainIngress extends Construct { // Ingress resources are only created in server mode if (values.environment !== 'server') return; - const ns = 'default'; + const ns = values.releaseNamespace; const backendHost = `backend${values.subdomainSeparator || '.'}${values.host}`; const 
keycloakHost = `authentication${values.subdomainSeparator || '.'}${values.host}`; const minioHost = `s3${values.subdomainSeparator || '.'}${values.host}`; diff --git a/kubernetes/cdk8s/src/constructs/lapis.ts b/kubernetes/cdk8s/src/constructs/lapis.ts index 74531344d3..3bcaa8d482 100644 --- a/kubernetes/cdk8s/src/constructs/lapis.ts +++ b/kubernetes/cdk8s/src/constructs/lapis.ts @@ -112,7 +112,7 @@ export class LapisIngress extends Construct { const organisms = getEnabledOrganisms(values); const organismKeys = organisms.map((o) => o.key); - const ns = 'default'; // Release namespace equivalent + const ns = values.releaseNamespace; // CORS middleware new ApiObject(this, 'cors', { diff --git a/kubernetes/cdk8s/src/main.ts b/kubernetes/cdk8s/src/main.ts index 07326fb43d..da598ed390 100644 --- a/kubernetes/cdk8s/src/main.ts +++ b/kubernetes/cdk8s/src/main.ts @@ -3,10 +3,16 @@ import { LoculusChart } from './chart'; import { loadValues } from './values'; // Parse CLI args: --values file1.yaml --values file2.yaml --set key=value -function parseArgs(argv: string[]): { valuesFiles: string[]; sets: Record<string, string>; baseDir?: string } { +function parseArgs(argv: string[]): { + valuesFiles: string[]; + sets: Record<string, string>; + baseDir?: string; + namespace: string; +} { const valuesFiles: string[] = []; const sets: Record<string, string> = {}; let baseDir: string | undefined; + let namespace = 'default'; for (let i = 0; i < argv.length; i++) { if ((argv[i] === '--values' || argv[i] === '-f') && i + 1 < argv.length) { @@ -28,15 +34,17 @@ function parseArgs(argv: string[]): { valuesFiles: string[]; sets: Record<string, string> Date: Tue, 17 Feb 2026 17:37:47 +0000 Subject: [PATCH 6/6] refactor(deployment): deduplicate commonMetadataFields in CDK8s silo construct Export commonMetadataFields() from config-generation.ts and import it in silo.ts instead of duplicating 158 lines. No circular dependency exists despite the previous comment. Reduces silo.ts from 360 to 200 lines. 
Co-Authored-By: Claude Opus 4.6 --- kubernetes/cdk8s/src/config-generation.ts | 2 +- kubernetes/cdk8s/src/constructs/silo.ts | 162 +--------------------- 2 files changed, 2 insertions(+), 162 deletions(-) diff --git a/kubernetes/cdk8s/src/config-generation.ts b/kubernetes/cdk8s/src/config-generation.ts index 578ade644f..6cf78161e0 100644 --- a/kubernetes/cdk8s/src/config-generation.ts +++ b/kubernetes/cdk8s/src/config-generation.ts @@ -24,7 +24,7 @@ import { } from './urls'; /** Common metadata fields required for all organisms */ -function commonMetadataFields(values: LoculusValues): MetadataField[] { +export function commonMetadataFields(values: LoculusValues): MetadataField[] { const fields: MetadataField[] = [ { name: 'accessionVersion', diff --git a/kubernetes/cdk8s/src/constructs/silo.ts b/kubernetes/cdk8s/src/constructs/silo.ts index 47d797d2d4..718cfe9dab 100644 --- a/kubernetes/cdk8s/src/constructs/silo.ts +++ b/kubernetes/cdk8s/src/constructs/silo.ts @@ -5,167 +5,7 @@ import { dockerTag } from '../docker-tag'; import { configProcessorContainer, configVolumes } from '../config-processor'; import { getResources, priorityClassName } from '../resources'; import { patchMetadataSchema, lineageSystemForOrganism, mergeReferenceGenomes } from '../organisms'; -import { generateSiloDatabaseConfig } from '../config-generation'; - -function commonMetadataFields(values: LoculusValues): MetadataField[] { - // Reuse the same common metadata generation as config-generation.ts - // Import won't work circularly, so we duplicate the minimal set needed - const fields: MetadataField[] = [ - { - name: 'accessionVersion', - type: 'string', - notSearchable: true, - hideOnSequenceDetailsPage: true, - includeInDownloadsByDefault: true, - }, - { name: 'accession', type: 'string', notSearchable: true, hideOnSequenceDetailsPage: true }, - { name: 'version', type: 'int', hideOnSequenceDetailsPage: true }, - { - name: 'submissionId', - displayName: 'Submission ID', - type: 'string', - 
header: 'Submission details', - orderOnDetailsPage: 5000, - enableSubstringSearch: true, - includeInDownloadsByDefault: true, - }, - { - name: 'isRevocation', - displayName: 'Is revocation', - type: 'boolean', - autocomplete: true, - hideOnSequenceDetailsPage: true, - }, - { - name: 'submitter', - type: 'string', - generateIndex: true, - autocomplete: true, - hideOnSequenceDetailsPage: true, - header: 'Submission details', - orderOnDetailsPage: 5010, - }, - { - name: 'groupName', - type: 'string', - generateIndex: true, - autocomplete: true, - header: 'Submission details', - displayName: 'Submitting group', - includeInDownloadsByDefault: true, - orderOnDetailsPage: 5020, - customDisplay: { type: 'submittingGroup', displayGroup: 'group' }, - }, - { - name: 'groupId', - type: 'int', - autocomplete: true, - header: 'Submission details', - displayName: 'Submitting group (numeric ID)', - orderOnDetailsPage: 5030, - customDisplay: { type: 'submittingGroup', displayGroup: 'group' }, - }, - { - name: 'submittedAtTimestamp', - type: 'timestamp', - displayName: 'Date submitted', - header: 'Submission details', - orderOnDetailsPage: 5040, - }, - { - name: 'submittedDate', - type: 'string', - hideOnSequenceDetailsPage: true, - generateIndex: true, - autocomplete: true, - displayName: 'Date submitted (exact)', - orderOnDetailsPage: 5050, - }, - { - name: 'releasedAtTimestamp', - type: 'timestamp', - displayName: 'Date released', - header: 'Submission details', - columnWidth: 100, - orderOnDetailsPage: 5060, - }, - { - name: 'releasedDate', - type: 'string', - hideOnSequenceDetailsPage: true, - generateIndex: true, - autocomplete: true, - displayName: 'Date released (exact)', - columnWidth: 100, - orderOnDetailsPage: 5070, - }, - ]; - - if (values.dataUseTerms.enabled) { - fields.push( - { - name: 'dataUseTerms', - type: 'string', - generateIndex: true, - autocomplete: true, - displayName: 'Data use terms', - initiallyVisible: true, - includeInDownloadsByDefault: true, - 
customDisplay: { type: 'dataUseTerms' }, - header: 'Data use terms', - orderOnDetailsPage: 610, - }, - { - name: 'dataUseTermsRestrictedUntil', - type: 'date', - displayName: 'Data use terms restricted until', - hideOnSequenceDetailsPage: true, - header: 'Data use terms', - orderOnDetailsPage: 620, - }, - { - name: 'dataBecameOpenAt', - type: 'date', - displayName: 'Date data became open', - hideOnSequenceDetailsPage: true, - header: 'Data use terms', - orderOnDetailsPage: 625, - }, - ); - if (values.dataUseTerms.urls) { - fields.push({ - name: 'dataUseTermsUrl', - displayName: 'Data use terms URL', - type: 'string', - notSearchable: true, - header: 'Data use terms', - includeInDownloadsByDefault: true, - customDisplay: { type: 'link', url: '__value__' }, - orderOnDetailsPage: 630, - }); - } - } - - fields.push( - { - name: 'versionStatus', - displayName: 'Version status', - type: 'string', - autocomplete: true, - hideOnSequenceDetailsPage: true, - }, - { - name: 'versionComment', - type: 'string', - displayName: 'Version comment', - header: 'Submission details', - orderOnDetailsPage: 5000, - }, - { name: 'pipelineVersion', type: 'int', notSearchable: true, hideOnSequenceDetailsPage: true }, - ); - - return fields; -} +import { generateSiloDatabaseConfig, commonMetadataFields } from '../config-generation'; export class Silo extends Construct { constructor(scope: Construct, id: string, values: LoculusValues, organism: EnabledOrganism) {