diff --git a/.github/workflows/branch-sync.yml b/.github/workflows/branch-sync.yml new file mode 100644 index 000000000..968307c20 --- /dev/null +++ b/.github/workflows/branch-sync.yml @@ -0,0 +1,102 @@ +name: Merge 6.x into 7.x + +on: + workflow_dispatch: {} + schedule: + - cron: '0 23 * * 0' # Run every Sunday at 11 PM UTC (Monday 9 AM AEST) + +permissions: + contents: write + pull-requests: write + issues: write + +jobs: + create-pull-request: + runs-on: ubuntu-latest + container: ghcr.io/dpc-sdp/bay/ci-builder:6.x + steps: + - name: Checkout repository + uses: actions/checkout@v5 + with: + ref: 6.x + + - name: Set ownership of the workspace + run: chown -R $(id -u):$(id -g) $PWD + + - name: Configure git + run: | + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + + - name: Create PR + id: create-pr + run: | + set +e + output=$(gh pr create --base 7.x --title "Automated PR - merge 6.x into 7.x" --body "This pull request syncs relevant changes from the 6.x branch into the 7.x branch. This PR was automatically generated by the CI workflow." --label "sync" --label "automated" 2>&1) + COMMAND_STATUS=$? 
+ + diff=$(echo $output | grep -q "No commits" && echo "false" || echo "true") + + echo "stdout<<EOF" >> "$GITHUB_OUTPUT" + echo "$output" >> "$GITHUB_OUTPUT" + echo "EOF" >> "$GITHUB_OUTPUT" + + if [ "$diff" = "false" ]; then + echo "No commits to merge from 6.x into 7.x." >> $GITHUB_STEP_SUMMARY + COMMAND_STATUS=0 + fi + + echo "diff=$diff" >> "$GITHUB_OUTPUT" + + exit $COMMAND_STATUS + shell: bash + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: If successful set PR number to env var + if: steps.create-pr.conclusion == 'success' && steps.create-pr.outputs.diff == 'true' + run: | + output="${{ steps.create-pr.outputs.stdout }}" + pr_url=$(echo "$output" | grep -o "^https.*[0-9]") + pr=$(echo "$pr_url" | cut -d "/" -f 7) + + echo "PR_URL=$pr_url" >> $GITHUB_ENV + echo "PR_NUMBER=$pr" >> $GITHUB_ENV + + - name: Successful, check for conflicts + if: env.PR_NUMBER + run: | + conflicts=$(gh pr view ${{ env.PR_NUMBER }} --json mergeStateStatus --jq 'if .mergeStateStatus == "DIRTY" then true else false end') + echo "CONFLICTS=$conflicts" >> $GITHUB_ENV + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Send Slack notification if pr created but conflicts exist + id: notify_slack_conflicts + uses: slackapi/slack-github-action@v2.1.1 + if: always() && env.CONFLICTS == 'true' + env: + LINK: ${{ github.event.repository.html_url }}/actions/runs/${{ github.run_id }} + with: + webhook: ${{ secrets.SLACK_WEBHOOK_URL }} + webhook-type: incoming-webhook + payload: | + text: "GitHub Action - ${{ github.workflow }} requires manual intervention. 
\n${{ env.LINK }}" + blocks: + - type: "section" + text: + type: "mrkdwn" + text: "${{ github.workflow }} needs conflicts resolved.\n${{ env.PR_URL }}" + + - name: Send Slack notification if PR creation failed for a reason other than "no commits" + id: notify_slack_failed + uses: slackapi/slack-github-action@v2.1.1 + if: always() && (steps.create-pr.conclusion == 'failure' && steps.create-pr.outputs.diff == 'true') + env: + LINK: ${{ github.event.repository.html_url }}/actions/runs/${{ github.run_id }} + with: + webhook: ${{ secrets.SLACK_WEBHOOK_URL }} + webhook-type: incoming-webhook + payload: | + channel: ${{ secrets.SLACK_CHANNEL_ID }} + text: "GitHub Action ${{ github.workflow }} failed. \n${{ env.LINK }}" \ No newline at end of file diff --git a/.github/workflows/build-deploy.yml b/.github/workflows/build-deploy.yml index 7dfe71133..109e51b0c 100644 --- a/.github/workflows/build-deploy.yml +++ b/.github/workflows/build-deploy.yml @@ -2,7 +2,7 @@ name: build-deploy-bay-images run-name: Build and deploy Bay images on: pull_request: - types: + types: - closed - opened - synchronize @@ -17,11 +17,11 @@ env: REGISTRY: ghcr.io jobs: buildx: - if: |- + if: | github.event.pull_request.merged == true || contains(fromJson('["schedule", "workflow_dispatch"]'), github.event_name) || - github.event_name == 'pull_request' && startsWith(github.head_ref,'build/') || - ( github.event.issue.pull_request && contains(github.event.comment.body, '/build') ) + (github.event_name == 'pull_request' && startsWith(github.head_ref,'build/')) || + (github.event.issue.pull_request && contains(github.event.comment.body, '/build')) runs-on: ubuntu-latest strategy: matrix: @@ -33,19 +33,19 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - - uses: actions/checkout@v3 - if: |- + - uses: actions/checkout@v4 + if: | github.event.pull_request.merged == true || contains(fromJson('["schedule", "workflow_dispatch"]'), github.event_name) || - github.event_name == 
'pull_request' && startsWith(github.head_ref,'build/') + (github.event_name == 'pull_request' && startsWith(github.head_ref,'build/')) - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 if: ( github.event.issue.pull_request && contains(github.event.comment.body, '/build') ) with: ref: refs/pull/${{ github.event.issue.number }}/head - name: Login to registry ${{ env.REGISTRY }} - uses: docker/login-action@v2.2.0 + uses: docker/login-action@v3 with: registry: ${{ env.REGISTRY }} username: ${{ secrets.REGISTRY_USER }} @@ -53,15 +53,25 @@ jobs: - name: Extract Docker metadata id: meta - uses: docker/metadata-action@v4.6.0 + uses: docker/metadata-action@v5 with: images: | ${{ env.REGISTRY }}/${{ github.repository }}/${{ matrix.images }} tags: | + # PR images (not merged): pr- type=ref,event=pr,enable=${{ github.event.pull_request.merged == false }} + # Tag with the PR base branch name on merge type=raw,value=${{ github.event.pull_request.base.ref }},enable=${{ github.event.pull_request.merged == true }} + # Manual/scheduled runs tag by branch ref (for whichever release branch the run targets) type=ref,event=branch,enable=${{ contains(fromJson('["schedule", "workflow_dispatch"]'), github.event_name) }} + # Comment-triggered builds: pr- type=raw,event=default,value=pr-${{ github.event.issue.number }},enable=${{ github.event.issue.pull_request != null }} + # Supported release branches + type=raw,value=5.x,enable=${{ github.ref == 'refs/heads/5.x' || github.event.pull_request.base.ref == '5.x' }} + type=raw,value=6.x,enable=${{ github.ref == 'refs/heads/6.x' || github.event.pull_request.base.ref == '6.x' }} + type=raw,value=7.x,enable=${{ github.ref == 'refs/heads/7.x' || github.event.pull_request.base.ref == '7.x' }} + # Always add an immutable sha tag + type=raw,value=sha-${{ github.sha }} labels: | maintainer=Digital Victoria repository=${{ github.repositoryUrl }} @@ -88,7 +98,7 @@ jobs: retention-days: 1 - name: Build and push the images - uses: 
docker/bake-action@v3.1.0 + uses: docker/bake-action@v5 with: push: true files: | diff --git a/.github/workflows/vulnerability-scan-schedule-7x.yml b/.github/workflows/vulnerability-scan-schedule-7x.yml new file mode 100644 index 000000000..bded75fb4 --- /dev/null +++ b/.github/workflows/vulnerability-scan-schedule-7x.yml @@ -0,0 +1,27 @@ +name: vulnerability-scan-schedule-7.x +run-name: Scheduled CVE vulnerability scan of 7.x published images. + +env: + REGISTRY: ghcr.io + +on: + schedule: + - cron: '12 22 * * 3' + workflow_dispatch: + inputs: + summary: + description: 'Summary of the scheduled scan.' + required: false + default: 'Trivy CVE scan of 7.x published images.' + tag: + description: 'Tag to scan.' + required: false + default: '7.x' + +jobs: + vulnerability-scan-schedule: + name: Scan for vulnerabilities on 7.x images + uses: "dpc-sdp/bay/.github/workflows/vulnerability-scan.yml@7.x" + with: + tag: "7.x" + summary: "Trivy CVE scan of 7.x published images." diff --git a/.github/workflows/vulnerability-scan.yml b/.github/workflows/vulnerability-scan.yml index 863b08b59..bd6eb6c11 100644 --- a/.github/workflows/vulnerability-scan.yml +++ b/.github/workflows/vulnerability-scan.yml @@ -39,6 +39,7 @@ jobs: exclude: - images: mailpit - images: php-fpm-exporter + - images: aws-es-proxy - if: inputs.tag != '5.x' uses: druzsan/setup-matrix@v2 with: diff --git a/README.md b/README.md index a370eb2e1..5e15826ed 100644 --- a/README.md +++ b/README.md @@ -26,22 +26,6 @@ Learn more from https://docs.lagoon.sh/ ## Bay Features -### Lock-down Ingress with Pre-Shared Key - -Using the nginx image, you can lock down access to your application with using a pre-shared key added at your CDN. - -Set these environment variables in your nginx deployment: - -- `BAY_INGRESS_HEADER` defines the header which has the pre-shared key. -- `BAY_INGRESS_PSK` is the token / PSK value. -- `BAY_INGRESS_ENABLED` is a toggle for this feature, must be set to `"true"`. 
- -In your CDN configuration, set the header defined in `BAY_INGRESS_HEADER` with the token defined in `BAY_INGRESS_PSK`. - -- [Cloudfront](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/add-origin-custom-headers.html) - -Once deployed, if the header is missing in the request nginx will return a `405 Not Allowed` HTTP response. - ### Multiple architecture support Bay images are available in both amd64 and arm64 architectures. diff --git a/gh-actions-bake.hcl b/gh-actions-bake.hcl index 291bd0cd4..3831f2d6a 100755 --- a/gh-actions-bake.hcl +++ b/gh-actions-bake.hcl @@ -11,9 +11,16 @@ target "ci-builder" { platforms = ["linux/amd64", "linux/arm64"] } +target "opensearch" { + inherits = ["docker-metadata-action"] + context = "${CONTEXT}/opensearch" + dockerfile = "Dockerfile" + + platforms = ["linux/amd64", "linux/arm64"] +} target "elasticsearch" { inherits = ["docker-metadata-action"] - context = "${CONTEXT}/elasticsearch" + context = "${CONTEXT}/opensearch" dockerfile = "Dockerfile" platforms = ["linux/amd64", "linux/arm64"] @@ -93,6 +100,17 @@ target "php-fpm-exporter" { "org.opencontainers.image.source" = "https://github.com/dpc-sdp/bay/blob/6.x/images/bay-php-exporter/Dockerfile" } } + +target "aws-es-proxy" { + inherits = ["docker-metadata-action"] + context = "${CONTEXT}/aws-es-proxy" + dockerfile = "Dockerfile" + + platforms = ["linux/amd64", "linux/arm64"] + labels = { + "org.opencontainers.image.source" = "https://github.com/dpc-sdp/bay/blob/6.x/images/aws-es-proxy/Dockerfile" + } +} target "ripple-static" { inherits = ["docker-metadata-action"] context = "${CONTEXT}/ripple-static" diff --git a/images/aws-es-proxy/Dockerfile b/images/aws-es-proxy/Dockerfile new file mode 100644 index 000000000..3bf8c4c36 --- /dev/null +++ b/images/aws-es-proxy/Dockerfile @@ -0,0 +1,26 @@ +FROM golang:alpine AS build + +RUN apk add --no-cache git +RUN git clone https://github.com/abutaha/aws-es-proxy.git /go/src/github.com/abutaha/aws-es-proxy +WORKDIR 
/go/src/github.com/abutaha/aws-es-proxy + +RUN go get -u && go mod tidy +RUN CGO_ENABLED=0 GOOS=linux go build -o aws-es-proxy + +FROM alpine:latest + +RUN apk --no-cache add ca-certificates +WORKDIR /home/ +COPY --from=build /go/src/github.com/abutaha/aws-es-proxy/aws-es-proxy /usr/local/bin/ +COPY entrypoint.sh /entrypoint.sh +RUN apk add --no-cache bash aws-cli + +ENV BAY_OPENSEARCH_ENDPOINT= +ENV BAY_OPENSEARCH_ROLE= +ENV BAY_OPENSEARCH_PROXY_TIMEOUT=60 +ENV BAY_OPENSEARCH_PROXY_VERBOSE= +ENV BAY_OPENSEARCH_PROXY_DEBUG= +ENV BAY_OPENSEARCH_PROXY_PORT=9200 +EXPOSE 9200 + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/images/aws-es-proxy/README.md b/images/aws-es-proxy/README.md new file mode 100644 index 000000000..f25f0b2e1 --- /dev/null +++ b/images/aws-es-proxy/README.md @@ -0,0 +1,63 @@ +# Container Image - aws-es-proxy + +This container provides a secure proxy for requests to an AWS OpenSearch endpoint using the [aws-es-proxy](https://github.com/abutaha/aws-es-proxy) +tool. It is designed for seamless integration and automated IAM authentication, with robust defaults and runtime +configuration via environment variables. + +## Features + +- Secure proxying to AWS OpenSearch services. +- Automatic validation of critical environment variables and AWS credentials. +- Configurable timeouts, port, and proxy flags. +- Flexible runtime configuration for debugging and verbosity. + +## Usage + +This image is typically intended for use as a proxy in your infrastructure. 
+You can use it in your Docker Compose stack with the following snippet: + +```yaml +services: + aws-es-proxy: + image: ghcr.io/dpc-sdp/bay/aws-es-proxy:6.x + environment: + BAY_OPENSEARCH_ENDPOINT: "https://your-opensearch-endpoint.amazonaws.com" + BAY_OPENSEARCH_ROLE: "arn:aws:iam::123456789012:role/your-role" + ports: + - "9200:9200" +``` + + +## Environment Variables + +| Name | Default Value | Description | +|---------------------------|---------------|-----------------------------------------------------------------| +| `BAY_OPENSEARCH_ENDPOINT` | _(required)_ | The AWS OpenSearch domain endpoint to proxy requests to. | +| `BAY_OPENSEARCH_ROLE` | _(required)_ | The AWS IAM role to assume for accessing the OpenSearch domain. | +| `BAY_OPENSEARCH_PROXY_PORT` | `9200` | The port that the proxy listens on, inside the container. | +| `BAY_OPENSEARCH_PROXY_TIMEOUT` | `60` | Timeout (in seconds) for incoming connections. | +| `BAY_OPENSEARCH_PROXY_VERBOSE` | `false` | enables verbose logging | +| `BAY_OPENSEARCH_PROXY_DEBUG` | `false` | enables debug logging | + +### Example: Enabling Debug and Verbose Logging + +``` +BAY_OPENSEARCH_PROXY_DEBUG=true +BAY_OPENSEARCH_PROXY_VERBOSE=true +``` + +## Ports + +- **9200** (default, can be customized with `BAY_OPENSEARCH_PROXY_PORT`) – Proxy HTTP port + +## Entrypoint + +The container runs an entrypoint script that: + +1. Verifies mandatory environment variables and AWS credentials. +2. Launches `aws-es-proxy` with your configuration. + +## AWS Credentials + +The container expects valid AWS credentials to be supplied via standard mechanisms (environment variables, mounted credentials files, or IAM roles if running in AWS ECS/EKS environments). + +For more advanced configuration, refer to the [aws-es-proxy documentation](https://github.com/abutaha/aws-es-proxy). 
\ No newline at end of file diff --git a/images/aws-es-proxy/entrypoint.sh b/images/aws-es-proxy/entrypoint.sh new file mode 100755 index 000000000..c17d73a76 --- /dev/null +++ b/images/aws-es-proxy/entrypoint.sh @@ -0,0 +1,57 @@ +#!/usr/bin/env bash +# +# This script acts as a wrapper to securely proxy requests to an OpenSearch endpoint +# using the aws-es-proxy tool. It performs the following steps: +# 1. Validates that required environment variables (BAY_OPENSEARCH_ENDPOINT and BAY_OPENSEARCH_ROLE) +# are set and not empty. +# 2. Verifies that valid AWS credentials are present. If credentials are invalid or missing, the +# script exits with an error. +# 3. Starts the aws-es-proxy service. +# +# The following environment variables can be used to configure the behavior of this script: +# BAY_OPENSEARCH_ENDPOINT: The AWS opensearch domain endpoint. +# BAY_OPENSEARCH_ROLE: The AWS IAM role that should be assumed to access the opensearch +# domain. +# BAY_OPENSEARCH_PROXY_PORT: Port that the aws-es-proxy should bind to. +# BAY_OPENSEARCH_PROXY_TIMEOUT: Timeout for incoming connections. 
+# BAY_OPENSEARCH_PROXY_VERBOSE: "true" to enable proxy verbose logs +# BAY_OPENSEARCH_PROXY_DEBUG: "true" to enable proxy debug logs + +set -euo pipefail + +# Check if BAY_OPENSEARCH_ENDPOINT is unset or empty +if [ -z "${BAY_OPENSEARCH_ENDPOINT:-}" ]; then + echo "Error: BAY_OPENSEARCH_ENDPOINT is not set or is empty" >&2 + exit 1 +fi + +# Check if BAY_OPENSEARCH_ROLE is unset or empty +if [ -z "${BAY_OPENSEARCH_ROLE:-}" ]; then + echo "Error: BAY_OPENSEARCH_ROLE= is not set or is empty" >&2 + exit 1 +fi + +# Configure debug and verbose flags +AWS_ES_PROXY_DEBUG_FLAG="" +AWS_ES_PROXY_VERBOSE_FLAG="" + +if [ "${BAY_OPENSEARCH_PROXY_DEBUG:-false}" = "true" ]; then + AWS_ES_PROXY_DEBUG_FLAG="-debug" +fi + +if [ "${BAY_OPENSEARCH_PROXY_VERBOSE:-false}" = "true" ]; then + AWS_ES_PROXY_VERBOSE_FLAG="-verbose" +fi + + +# Ensure AWS credentials exist and are valid +AWS_PAGER="" aws sts get-caller-identity || (echo "Error: AWS credentials invalid" && exit 1) + +# Rest of your script here +aws-es-proxy \ + ${AWS_ES_PROXY_DEBUG_FLAG} \ + ${AWS_ES_PROXY_VERBOSE_FLAG} \ + -listen "0.0.0.0:${BAY_OPENSEARCH_PROXY_PORT:-9200}" \ + -timeout "${BAY_OPENSEARCH_PROXY_TIMEOUT:-60}" \ + -assume "${BAY_OPENSEARCH_ROLE}" \ + -endpoint "${BAY_OPENSEARCH_ENDPOINT}" diff --git a/images/awx-ee/execution-environment.yml b/images/awx-ee/execution-environment.yml index 58f0c8ccd..b0f45a66f 100644 --- a/images/awx-ee/execution-environment.yml +++ b/images/awx-ee/execution-environment.yml @@ -23,17 +23,16 @@ additional_build_steps: - LABEL org.opencontainers.image.title="SDP AWX Execution Environment image." - LABEL org.opencontainers.image.description="Provides an AWX execution environment image optimised for use with SDP. Built with ansible-builder." 
- LABEL org.opencontainers.image.source="https://github.com/dpc-sdp/bay/blob/6.x/images/awx-ee/" - - ARG LAGOON_CLI_VERSION=v0.31.2 - - ARG NVM_INSTALL_VERSION=v0.39.7 - - ARG NODE_VERSION=v20.9.0 + - ARG LAGOON_CLI_VERSION=v0.32.0 + - ARG NVM_INSTALL_VERSION=v0.40.3 + - ARG NODE_VERSION=v20.19.3 - ARG NVM_DIR="/runner/.nvm" - ARG PHP_VERSION="8.3" - ARG COMPOSER_VERSION="2.7.7" - - ARG HUB_VERSION="2.14.2" - ARG GOJQ_VERSION="0.12.17" - - ARG HELM_VERSION="3.17.0" - - ARG YAMLFMT_VERSION="0.15.0" - - ARG KUBECTL_VERSION="1.32.0" + - ARG HELM_VERSION="3.18.3" + - ARG YAMLFMT_VERSION="0.17.2" + - ARG KUBECTL_VERSION="1.33.2" append_final: - | # Required dependencies. @@ -67,8 +66,6 @@ additional_build_steps: - RUN curl -L "https://github.com/uselagoon/lagoon-cli/releases/download/${LAGOON_CLI_VERSION}/lagoon-cli-${LAGOON_CLI_VERSION}-linux-amd64" -o /usr/local/bin/lagoon - RUN chmod +x /usr/local/bin/lagoon - - RUN curl -L "https://github.com/github/hub/releases/download/v${HUB_VERSION}/hub-linux-amd64-${HUB_VERSION}.tgz" -o /tmp/hub && tar -xvf /tmp/hub -C /tmp && mv /tmp/hub-linux-amd64-${HUB_VERSION}/bin/hub /usr/local/bin - - RUN chmod +x /usr/local/bin/hub - RUN lagoon config feature --enable-local-dir-check false --force - RUN curl -sS "https://getcomposer.org/download/${COMPOSER_VERSION}/composer.phar" --output composer.phar - RUN chmod +x composer.phar diff --git a/images/awx-ee/requirements.yml b/images/awx-ee/requirements.yml index 63aa9f315..1a87fa5c5 100644 --- a/images/awx-ee/requirements.yml +++ b/images/awx-ee/requirements.yml @@ -3,11 +3,12 @@ collections: - ansible.posix - ansible.utils - awx.awx - - community.general + - name: community.general + version: "11.1.2" - kubernetes.core - name: lagoon.api source: https://github.com/salsadigitalauorg/lagoon_ansible_collection.git - version: 2.1.0 + version: "2.2.4" type: git - name: section.api source: https://github.com/salsadigitalauorg/section_ansible_collection.git diff --git 
a/images/ci-builder/Dockerfile b/images/ci-builder/Dockerfile index e2bcda5f4..ee4cc3a6e 100644 --- a/images/ci-builder/Dockerfile +++ b/images/ci-builder/Dockerfile @@ -1,10 +1,9 @@ FROM hashicorp/terraform:latest AS terraform FROM ghcr.io/dpc-sdp/sumocli:v0.11.1 AS sumocli FROM php:8.3-cli-alpine -ARG AHOY_VERSION=2.2.0 +ARG AHOY_VERSION=2.4.0 ARG GOJQ_VERSION=0.12.17 -ARG HUB_VERSION=2.14.2 -ARG LAGOON_CLI_VERSION=0.31.2 +ARG LAGOON_CLI_VERSION=0.32.0 ARG SHIPSHAPE_VERSION=1.0.0-alpha.1.5.1 # Ensure temp files dont end up in image. @@ -26,6 +25,7 @@ RUN apk add --update --no-cache \ docker \ docker-compose \ git \ + github-cli \ libffi-dev \ musl-dev \ ncurses \ @@ -37,12 +37,6 @@ RUN apk add --update --no-cache \ python3 \ python3-dev -## Install GitHub CLI tool. -RUN curl -sL "https://github.com/mislav/hub/releases/download/v${HUB_VERSION}/hub-$(echo ${TARGETPLATFORM:-linux/amd64} | tr '/' '-')-${HUB_VERSION}.tgz" -o /tmp/hub.tgz && \ - tar -C /tmp -xzvf /tmp/hub.tgz && \ - mv /tmp/hub-$(echo ${TARGETPLATFORM:-linux/amd64} | tr '/' '-')-${HUB_VERSION}/bin/hub /usr/local/bin && \ - chmod 755 /usr/local/bin - ## Install required PHP extensions for Drupal and python packages. 
RUN apk add --no-cache \ py3-flake8 \ diff --git a/images/elasticsearch/Dockerfile b/images/elasticsearch/Dockerfile deleted file mode 100644 index b15b586e6..000000000 --- a/images/elasticsearch/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -FROM elasticsearch:8.16.1 - -ENV ES_PATH=/usr/share/elasticsearch - -RUN $ES_PATH/bin/elasticsearch-plugin install analysis-kuromoji \ - && $ES_PATH/bin/elasticsearch-plugin install analysis-icu diff --git a/images/mailpit/Dockerfile b/images/mailpit/Dockerfile index 6feaf6c89..85da1b961 100644 --- a/images/mailpit/Dockerfile +++ b/images/mailpit/Dockerfile @@ -3,7 +3,7 @@ # FROM alpine:latest -ARG MAILPIT_VERSION=1.21.8 +ARG MAILPIT_VERSION=1.26.2 # Install ca-certificates, required for the "release message" feature: RUN apk --no-cache add \ diff --git a/images/nginx/Dockerfile b/images/nginx/Dockerfile index bd52888ee..eb457a73a 100644 --- a/images/nginx/Dockerfile +++ b/images/nginx/Dockerfile @@ -2,19 +2,11 @@ FROM uselagoon/nginx-drupal:latest ENV WEBROOT=docroot ENV TZ=Australia/Melbourne -ENV BAY_INGRESS_ENABLED=false -ENV BAY_INGRESS_HEADER="" -ENV BAY_INGRESS_PSK="" + EXPOSE 8080 EXPOSE 50000 -# Add ingress protection environment variable supprot to nginx. -RUN sed -i '/env\ LAGOON_ENVIRONMENT_TYPE\;/a env BAY_INGRESS_ENABLED\;' /etc/nginx/nginx.conf \ - && sed -i '/env\ LAGOON_ENVIRONMENT_TYPE\;/a env BAY_INGRESS_HEADER\;' /etc/nginx/nginx.conf \ - && sed -i '/env\ LAGOON_ENVIRONMENT_TYPE\;/a env BAY_INGRESS_PSK\;' /etc/nginx/nginx.conf - COPY helpers/ /etc/nginx/helpers/ -COPY prepend/ /etc/nginx/conf.d/drupal/ COPY content /etc/nginx/conf.d/drupal/content # Add server append directives. 
diff --git a/images/nginx/README.md b/images/nginx/README.md index f4c79188f..8fecbc156 100644 --- a/images/nginx/README.md +++ b/images/nginx/README.md @@ -3,7 +3,6 @@ Provides a nginx image optimised for the Bay container platform with the following features - Drupal compatible server block -- Ingress protection with pre-shared keys - Optimised health checks for section.io ## Usage @@ -27,11 +26,7 @@ services: ## Environment Variables -| Name | Default Value | Description | -|------|---------------|-------------| -| `BAY_INGRESS_ENABLED` | `false` | Global toggle for ingress protection. Set to "true" to enable. | -| `BAY_INGRESS_HEADER` | `` | Name of header with PSK. | -| `BAY_INGRESS_PSK` | `` | Pre-shared key value | +None. ## Ports diff --git a/images/nginx/prepend/100-ingress-protection.conf b/images/nginx/prepend/100-ingress-protection.conf deleted file mode 100644 index 1c89a1d4f..000000000 --- a/images/nginx/prepend/100-ingress-protection.conf +++ /dev/null @@ -1,27 +0,0 @@ -# Bay Ingress Protection -# -# Allows nginx to reject requests if the corresponding PSK is not given by -# the requesting client. 
- -access_by_lua_block { - local ingress_protection_enabled = os.getenv('BAY_INGRESS_ENABLED') - local ingress_protection_psk = os.getenv('BAY_INGRESS_PSK') - local ingress_protection_key = os.getenv('BAY_INGRESS_HEADER') - - if (ingress_protection_enabled == nil) or (ingress_protection_psk == nil) or (ingress_protection_key == nil) then - return - end - - local sent_psk = ngx.req.get_headers()[ingress_protection_key] - - if (ingress_protection_enabled == "true") and (ingress_protection_psk ~= sent_psk) then - local path = "/etc/nginx/conf.d/drupal/content/404.html" - local file = io.open(path, 'r') - local content = file:read "*all" - file:close() - ngx.header['Content-Type'] = 'text/html' - ngx.status = ngx.HTTP_NOT_FOUND - ngx.say(content) - ngx.exit(ngx.OK) - end -} diff --git a/images/node/Dockerfile b/images/node/Dockerfile index 5a4d16139..c0c8c870b 100644 --- a/images/node/Dockerfile +++ b/images/node/Dockerfile @@ -1,5 +1,5 @@ FROM uselagoon/node-20:latest -ARG BAY_CLI_VERSION=v1.3.2 +ARG BAY_CLI_VERSION=v1.4.1 RUN apk --update add curl git findutils openssh-client && \ diff --git a/images/opensearch/Dockerfile b/images/opensearch/Dockerfile new file mode 100644 index 000000000..8406edc36 --- /dev/null +++ b/images/opensearch/Dockerfile @@ -0,0 +1,7 @@ +FROM uselagoon/opensearch-2:latest + +RUN for plugin in \ + analysis-kuromoji \ + analysis-icu; do \ + /usr/share/opensearch/bin/opensearch-plugin install $plugin; \ + done diff --git a/images/php/Dockerfile.cli b/images/php/Dockerfile.cli index e2f656158..f96d9ca3f 100644 --- a/images/php/Dockerfile.cli +++ b/images/php/Dockerfile.cli @@ -1,6 +1,6 @@ ARG PHP_VERSION=8.3 FROM php:${PHP_VERSION}-cli-alpine AS php-cli -FROM ghcr.io/skpr/mtk:v2.1.0 AS mtk +FROM ghcr.io/skpr/mtk:v2.1.1 AS mtk FROM uselagoon/php-${PHP_VERSION}-cli-drupal:latest # Remove unnecessary packages that increase our attack surface area. 
@@ -8,7 +8,7 @@ RUN apk del postgresql-client ARG GOJQ_VERSION=0.12.17 ARG DOCKERIZE_VERSION=v0.9.2 -ARG BAY_CLI_VERSION=v1.3.2 +ARG BAY_CLI_VERSION=v1.4.1 ARG SHIPSHAPE_VERSION=1.0.0-alpha.1.5.1 COPY --from=php-cli /usr/local/bin/phpdbg /usr/local/bin/ diff --git a/images/php/Dockerfile.fpm b/images/php/Dockerfile.fpm index ba07ff15f..845f29a8c 100644 --- a/images/php/Dockerfile.fpm +++ b/images/php/Dockerfile.fpm @@ -2,7 +2,7 @@ ARG PHP_VERSION=8.3 FROM ghcr.io/dpc-sdp/bay/php-fpm-exporter:6.x AS php-fpm-exporter FROM uselagoon/php-${PHP_VERSION}-fpm:latest -ARG BAY_CLI_VERSION=v1.3.2 +ARG BAY_CLI_VERSION=v1.4.1 RUN mkdir /bay COPY 01-bay.ini /usr/local/etc/php/conf.d/ diff --git a/images/php/mtk/drupal.conf b/images/php/mtk/drupal.conf index 937e906d8..3607994ca 100644 --- a/images/php/mtk/drupal.conf +++ b/images/php/mtk/drupal.conf @@ -63,3 +63,4 @@ nodata: - router - sessions - webform_* + - scheduled_transition* diff --git a/images/php/settings.php b/images/php/settings.php index a05344db6..e01c78335 100755 --- a/images/php/settings.php +++ b/images/php/settings.php @@ -314,37 +314,60 @@ $config['clamav.settings']['mode_daemon_tcpip']['hostname'] = $clamav_host; $config['clamav.settings']['mode_daemon_tcpip']['port'] = $clamav_port; -// Configure elasticsearch connections from environment variables. -if (getenv('SEARCH_HASH') && getenv('SEARCH_URL')) { - $config['elasticsearch_connector.cluster.elasticsearch_bay']['url'] = sprintf('http://%s.%s', getenv('SEARCH_HASH'), getenv('SEARCH_URL')); +$opensearch_profile = getenv('BAY_OPENSEARCH_PROFILE') ?: 'sdp-elastic'; +if ($opensearch_profile == 'sdp-elastic') { + // Configuration for legacy sdp-elastic integration. + // @todo remove this sdp-elastic block when all applications migrated to opensearch. 
+ if (getenv('SEARCH_HASH') && getenv('SEARCH_URL')) { + $config['elasticsearch_connector.cluster.elasticsearch_bay']['url'] = sprintf('http://%s.%s', getenv('SEARCH_HASH'), getenv('SEARCH_URL')); + } else { + $config['elasticsearch_connector.cluster.elasticsearch_bay']['url'] = "http://elasticsearch:9200"; + } + + if (getenv('SEARCH_INDEX')) { + $config['elasticsearch_connector.cluster.elasticsearch_bay']['options']['rewrite']['rewrite_index'] = 1; + $config['elasticsearch_connector.cluster.elasticsearch_bay']['options']['rewrite']['index'] = [ + 'prefix' => getenv('SEARCH_INDEX'), + 'suffix' => '', + ]; + } else { + $config['elasticsearch_connector.cluster.elasticsearch_bay']['options']['rewrite']['index'] = [ + 'prefix' => 'elasticsearch_index_default_', + 'suffix' => '', + ]; + } + + if (getenv('SEARCH_AUTH_USERNAME') && getenv('SEARCH_AUTH_PASSWORD')) { + $config['elasticsearch_connector.cluster.elasticsearch_bay']['options']['username'] = getenv('SEARCH_AUTH_USERNAME'); + $config['elasticsearch_connector.cluster.elasticsearch_bay']['options']['password'] = getenv('SEARCH_AUTH_PASSWORD'); + $config['elasticsearch_connector.cluster.elasticsearch_bay']['options']['use_authentication'] = 1; + $config['elasticsearch_connector.cluster.elasticsearch_bay']['options']['authentication_type'] = 'Basic'; + } else { + $config['elasticsearch_connector.cluster.elasticsearch_bay']['options']['use_authentication'] = 0; + } + // Override data_pipelines url. + $config['data_pipelines.dataset_destination.sdp_elasticsearch']['destinationSettings']['url'] = (getenv('SEARCH_HASH') && getenv('SEARCH_URL')) ? sprintf('http://%s.%s', getenv('SEARCH_HASH'), getenv('SEARCH_URL')) : "http://elasticsearch:9200"; } else { - $config['elasticsearch_connector.cluster.elasticsearch_bay']['url'] = "http://elasticsearch:9200"; -} + // Configuration for bay opensearch integration. + + // Connect to a proxy service that handles AWS IAM auth. 
+ $endpoint = "http://aws-es-proxy:9200"; -if (getenv('SEARCH_INDEX')) { + // Default index prefix looks like "${PROJECT}__${ENVIRONMENT}__". This can be overridden with BAY_OPENSEARCH_PREFIX. + $environment = getenv('LAGOON_ENVIRONMENT') ?: 'default'; + $index_prefix = getenv('BAY_OPENSEARCH_PREFIX') ?: sprintf('%s__%s', getenv('LAGOON_PROJECT'), $environment); + $config['elasticsearch_connector.cluster.elasticsearch_bay']['url'] = $endpoint; + $config['elasticsearch_connector.cluster.elasticsearch_bay']['options']['use_authentication'] = FALSE; $config['elasticsearch_connector.cluster.elasticsearch_bay']['options']['rewrite']['rewrite_index'] = 1; $config['elasticsearch_connector.cluster.elasticsearch_bay']['options']['rewrite']['index'] = [ - 'prefix' => getenv('SEARCH_INDEX'), - 'suffix' => '', - ]; -} else { - $config['elasticsearch_connector.cluster.elasticsearch_bay']['options']['rewrite']['index'] = [ - 'prefix' => 'elasticsearch_index_default_', + 'prefix' => sprintf('%s__%s_', $index_prefix, "sapi"), 'suffix' => '', ]; + $config['data_pipelines.dataset_destination.sdp_elasticsearch']['destinationSettings']['url'] = $endpoint; + $config['data_pipelines.dataset_destination.sdp_elasticsearch']['destinationSettings']['prefix'] = sprintf('%s__sdp_data_pipelines_', $index_prefix); } -if (getenv('SEARCH_AUTH_USERNAME') && getenv('SEARCH_AUTH_PASSWORD')) { - $config['elasticsearch_connector.cluster.elasticsearch_bay']['options']['username'] = getenv('SEARCH_AUTH_USERNAME'); - $config['elasticsearch_connector.cluster.elasticsearch_bay']['options']['password'] = getenv('SEARCH_AUTH_PASSWORD'); - $config['elasticsearch_connector.cluster.elasticsearch_bay']['options']['use_authentication'] = 1; - $config['elasticsearch_connector.cluster.elasticsearch_bay']['options']['authentication_type'] = 'Basic'; -} else { - $config['elasticsearch_connector.cluster.elasticsearch_bay']['options']['use_authentication'] = 0; -} -// Override data_pipelines url. 
-$config['data_pipelines.dataset_destination.sdp_elasticsearch']['destinationSettings']['url'] = (getenv('SEARCH_HASH') && getenv('SEARCH_URL')) ? sprintf('http://%s.%s', getenv('SEARCH_HASH'), getenv('SEARCH_URL')) : "http://elasticsearch:9200"; // Configure tide_logs. if (getenv('TIDE_LOGS_UDPLOG_HOST')) { @@ -411,7 +434,5 @@ $label = ($tag !== 'No tag found') ? "$msg - $authorName ($short_sha)" : $short_sha; $config['environment_indicator.indicator']['name'] = "Deployed: $label"; - $config['environment_indicator.indicator']['bg_color'] = '#fff176'; - $config['environment_indicator.indicator']['fg_color'] = '#000000'; } }