diff --git a/.azure-pipelines/bazel.yml b/.azure-pipelines/bazel.yml
deleted file mode 100644
index 8b170a05982ce..0000000000000
--- a/.azure-pipelines/bazel.yml
+++ /dev/null
@@ -1,250 +0,0 @@
-parameters:
-- name: ciTarget
- displayName: "CI target"
- type: string
- default: bazel.release
-- name: artifactSuffix
- displayName: "Suffix of artifact"
- type: string
- default: ""
-
-# caching
-- name: cacheKeyDocker
- type: string
- default: ".devcontainer/Dockerfile"
-- name: cacheKeyDockerVersion
- type: string
- default: $(cacheKeyBuildImage)
-- name: cacheKeyDockerName
- type: string
- default: envoy_build_image
-- name: cacheKeyDockerPath
- type: string
- default: /mnt/docker
-- name: cacheKeyDockerTmpDir
- type: string
- default: /mnt/docker_cache
-- name: cacheKeyDockerNoTmpfs
- type: string
- default: ''
-- name: cacheKey
- type: string
- default: $(cacheKeyBazelFiles)
-- name: cacheVersion
- type: string
- default: $(cacheKeyBazel)
-
-- name: rbe
- displayName: "Enable RBE"
- type: boolean
- default: true
-- name: managedAgent
- type: boolean
- default: true
-- name: bazelBuildExtraOptions
- type: string
- default: ""
-- name: envoyBuildFilterExample
- type: string
- default: ""
-- name: cacheTestResults
- displayName: "Cache test results"
- type: boolean
- default: true
-# Unfortunately, AZP is an unmittigated and undocumented disaster.
-# The definition of primitive types is close to pointless, as depending
-# on where things are set, azp just turns them into strings anyway.
-- name: repoFetchDepth
- type: string
- default: 1
-- name: repoFetchTags
- type: string
- default: false
-# Auth
-- name: authGithub
- type: string
- default: ""
-# Publishing
-- name: publishEnvoy
- type: string
- default: true
-- name: publishTestResults
- type: string
- default: true
-
-- name: stepsPre
- type: stepList
- default: []
-- name: stepsPost
- type: stepList
- default: []
-
-- name: env
- type: object
- default: {}
-
-steps:
-- checkout: self
- fetchDepth: ${{ parameters.repoFetchDepth }}
- fetchTags: ${{ parameters.repoFetchTags }}
-
-# Set up tmpfs directories for self-hosted agents which have a surplus of mem.
-#
-# NB: Do not add any directory that grow larger than spare memory capacity!
-- bash: |
- TMPDIRS=(
- # This is used as the final delivery directory for the binaries
- "$(Build.StagingDirectory)/envoy"
- # Bazel repository_cache which is cached by AZP (this speeds up cache load/save)
- "$(Build.StagingDirectory)/repository_cache"
- "$(Build.StagingDirectory)/bazel"
- "$(Build.StagingDirectory)/.cache"
- "$(Build.StagingDirectory)/bazel_root/install"
- "$(Build.StagingDirectory)/tmp"
- "$(Build.StagingDirectory)/bazel_root/base/external")
- if [[ "${{ parameters.artifactSuffix }}" == ".arm64" ]]; then
- TMPDIRS+=(
- "$(Build.StagingDirectory)/bazel_root/base/execroot/envoy/bazel-out/aarch64-fastbuild/testlogs"
- "$(Build.StagingDirectory)/bazel_root/base/execroot/envoy/bazel-out/aarch64-opt/testlogs")
- else
- TMPDIRS+=(
- "$(Build.StagingDirectory)/bazel_root/base/execroot/envoy/bazel-out/k8-fastbuild/testlogs"
- "$(Build.StagingDirectory)/bazel_root/base/execroot/envoy/bazel-out/k8-opt/testlogs")
- fi
- for tmpdir in "${TMPDIRS[@]}"; do
- echo "Mount(tmpfs): ${tmpdir}"
- sudo mkdir -p "$tmpdir"
- sudo mount -t tmpfs none "$tmpdir"
- sudo chown azure-pipelines "$tmpdir"
- done
- sudo chown -R azure-pipelines:azure-pipelines $(Build.StagingDirectory)/bazel_root/
- displayName: "Mount/tmpfs bazel directories"
- condition: and(succeeded(), eq('${{ parameters.managedAgent }}', false))
-
-- bash: |
- set -e
- CACHE_DIRS=(
- "$(Build.StagingDirectory)/.cache/"
- "$(Build.StagingDirectory)/bazel_root/install/"
- "$(Build.StagingDirectory)/repository_cache/"
- "$(Build.StagingDirectory)/bazel_root/base/external")
- sudo mkdir -p "${CACHE_DIRS[@]}"
- sudo chown -R vsts:vsts "${CACHE_DIRS[@]}" $(Build.StagingDirectory)/bazel_root/
- echo "Created bazel cache directories: "${CACHE_DIRS[*]}""
- displayName: "Create bazel directories"
- condition: and(succeeded(), eq('${{ parameters.managedAgent }}', true))
-
-# Caching
-- task: Cache@2
- inputs:
- key: '"${{ parameters.ciTarget }}" | "${{ parameters.cacheVersion }}" | "${{ parameters.artifactSuffix }}" | ${{ parameters.cacheKey }}'
- path: $(Build.StagingDirectory)/bazel
- cacheHitVar: BAZEL_CACHE_RESTORED
- continueOnError: true
-- script: |
- set -e
- sudo tar xf $(Build.StagingDirectory)/bazel/cache.tar.zst -C $(Build.StagingDirectory) --warning=no-timestamp
- sudo rm -rf $(Build.StagingDirectory)/bazel/*
- displayName: "Cache/restore (${{ parameters.ciTarget }})"
- condition: and(not(canceled()), eq(variables.BAZEL_CACHE_RESTORED, 'true'))
-- template: cached.yml
- parameters:
- key: "${{ parameters.cacheKeyDocker }}"
- version: "${{ parameters.cacheKeyDockerVersion }}"
- name: "${{ parameters.cacheKeyDockerName }}"
- path: "${{ parameters.cacheKeyDockerPath }}"
- tmpDirectory: "${{ parameters.cacheKeyDockerTmpDir }}"
- tmpNoTmpfs: "${{ parameters.cacheKeyDockerNoTmpfs }}"
- arch: "${{ parameters.artifactSuffix }}"
-
-- ${{ each step in parameters.stepsPre }}:
- - ${{ each pair in step }}:
- ${{ pair.key }}: ${{ pair.value }}
-
-- bash: |
- echo "disk space at beginning of build:"
- df -h
- displayName: "Check disk space at beginning"
-
-- bash: |
- sudo mkdir -p /etc/docker
- echo '{
- "ipv6": true,
- "fixed-cidr-v6": "2001:db8:1::/64"
- }' | sudo tee /etc/docker/daemon.json
- sudo service docker restart
- displayName: "Enable IPv6"
- condition: ${{ parameters.managedAgent }}
-
-- script: ci/run_envoy_docker.sh 'ci/do_ci.sh ${{ parameters.ciTarget }}'
- workingDirectory: $(Build.SourcesDirectory)
- env:
- ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)
- ENVOY_BUILD_FILTER_EXAMPLE: ${{ parameters.envoyBuildFilterExample }}
- GITHUB_TOKEN: "${{ parameters.authGithub }}"
- ${{ if ne(parameters['cacheTestResults'], true) }}:
- BAZEL_NO_CACHE_TEST_RESULTS: 1
- ${{ if eq(variables['Build.Reason'], 'PullRequest') }}:
- CI_TARGET_BRANCH: "origin/$(System.PullRequest.TargetBranch)"
- ${{ if ne(variables['Build.Reason'], 'PullRequest') }}:
- CI_TARGET_BRANCH: "origin/$(Build.SourceBranchName)"
- # Any PR or CI run in envoy-presubmit uses the fake SCM hash
- ${{ if or(eq(variables['Build.Reason'], 'PullRequest'), eq(variables['Build.DefinitionName'], 'envoy-presubmit')) }}:
- # sha1sum of `ENVOY_PULL_REQUEST`
- BAZEL_FAKE_SCM_REVISION: e3b4a6e9570da15ac1caffdded17a8bebdc7dfc9
- ${{ if parameters.rbe }}:
- GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey)
- ENVOY_RBE: "1"
- BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --jobs=$(RbeJobs) ${{ parameters.bazelBuildExtraOptions }}"
- BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com
- BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance
- ${{ if eq(parameters.rbe, false) }}:
- BAZEL_BUILD_EXTRA_OPTIONS: "--config=ci ${{ parameters.bazelBuildExtraOptions }}"
- BAZEL_REMOTE_CACHE: $(LocalBuildCache)
- ${{ each var in parameters.env }}:
- ${{ var.key }}: ${{ var.value }}
- displayName: "Run CI script ${{ parameters.ciTarget }}"
-
-- bash: |
- echo "disk space at end of build:"
- df -h
-
- echo
- du -ch "$(Build.StagingDirectory)" | grep -E "[0-9]{2,}M|[0-9]G"
-
- # Cleanup offending files with unicode names
- rm -rf $(Build.StagingDirectory)/bazel_root/base/external/go_sdk/test/fixedbugs
- displayName: "Check disk space at end"
- condition: not(canceled())
-
-- ${{ each step in parameters.stepsPost }}:
- - ${{ each pair in step }}:
- ${{ pair.key }}: ${{ pair.value }}
-
-- script: |
- set -e
- CACHE_DIRS=(
- ".cache"
- "bazel_root/install"
- "repository_cache/"
- "bazel_root/base/external")
- mkdir -p $(Build.StagingDirectory)/bazel/
- sudo tar cf - -C $(Build.StagingDirectory) "${CACHE_DIRS[@]}" \
- | zstd - -T0 -o $(Build.StagingDirectory)/bazel/cache.tar.zst
- echo "Created tarball ($(Build.StagingDirectory)/bazel/cache.tar.zst): ${CACHE_DIRS[@]}"
- displayName: "Cache/save (${{ parameters.ciTarget }})"
- condition: and(not(canceled()), ne(variables.BAZEL_CACHE_RESTORED, 'true'))
-
-- task: PublishTestResults@2
- inputs:
- testResultsFiles: "**/bazel-out/**/testlogs/**/test.xml"
- testRunTitle: "${{ parameters.ciTarget }}"
- searchFolder: $(Build.StagingDirectory)/bazel_root
- timeoutInMinutes: 10
- condition: eq(${{ parameters.publishTestResults }}, 'true')
-- task: PublishBuildArtifacts@1
- inputs:
- pathtoPublish: "$(Build.StagingDirectory)/envoy"
- artifactName: ${{ parameters.ciTarget }}
- timeoutInMinutes: 10
- condition: eq(${{ parameters.publishEnvoy }}, 'true')
diff --git a/.azure-pipelines/cached.yml b/.azure-pipelines/cached.yml
index d75ef8b5771e3..f284a1fc99756 100644
--- a/.azure-pipelines/cached.yml
+++ b/.azure-pipelines/cached.yml
@@ -1,26 +1,37 @@
parameters:
-- name: name
+- name: arch
type: string
- default: envoy_build_image
+ default: ""
- name: version
type: string
- default: ""
-- name: arch
+ default: $(cacheKeyVersion)
+- name: cacheName
type: string
- default: ""
-- name: key
+ default:
+
+- name: keyDocker
type: string
- default: ".devcontainer/Dockerfile"
-- name: tmpDirectory
+ default: $(cacheKeyDocker)
+- name: keyBazel
type: string
- default: /mnt/docker_cache
-- name: tmpNoTmpfs
+ default: $(cacheKeyBazel)
+
+- name: pathTemp
+ type: string
+ default: $(pathCacheTemp)
+
+- name: tmpfsDisabled
type: string
default:
-- name: path
+- name: tmpfsDockerDisabled
+ type: string
+ default:
+
+- name: pathDockerBind
type: string
- default: /mnt/docker
+ default: $(pathDockerBind)
+
- name: cacheTimeoutWorkaround
type: number
default: 5
@@ -30,24 +41,49 @@ parameters:
steps:
-- script: sudo .azure-pipelines/docker/prepare_cache.sh "${{ parameters.tmpDirectory }}" "${{ parameters.tmpNoTmpfs }}"
- displayName: "Cache/prepare (${{ parameters.name }})"
+- script: sudo .azure-pipelines/docker/prepare_cache.sh "${{ parameters.pathTemp }}" "${{ parameters.tmpfsDisabled }}"
+ displayName: "Cache/prepare"
+
- task: Cache@2
+ condition: and(not(canceled()), ne('${{ parameters.cacheName }}', ''))
env:
VSO_DEDUP_REDIRECT_TIMEOUT_IN_SEC: "${{ parameters.cacheTimeoutWorkaround }}"
- displayName: "Cache/fetch (${{ parameters.name }})"
+ displayName: "Cache (${{ parameters.cacheName }})"
inputs:
- key: '${{ parameters.name }} | "${{ parameters.version }}" | "${{ parameters.arch }}" | ${{ parameters.key }}'
- path: "${{ parameters.tmpDirectory }}"
+ key: '${{ parameters.cacheName }} | "${{ parameters.version }}" | "${{ parameters.arch }}" | ${{ parameters.keyDocker }} | ${{ parameters.keyBazel }}'
+ path: "${{ parameters.pathTemp }}/all"
cacheHitVar: CACHE_RESTORED
-# Prime the cache for all jobs
-- script: sudo .azure-pipelines/docker/prime_cache.sh "${{ parameters.tmpDirectory }}" "${{ parameters.arch }}"
- displayName: "Cache/prime (${{ parameters.name }})"
+- task: Cache@2
+ condition: and(not(canceled()), not(failed()), or(ne(variables.CACHE_RESTORED, 'true'), eq('${{ parameters.cacheName }}', '')))
+ env:
+ VSO_DEDUP_REDIRECT_TIMEOUT_IN_SEC: "${{ parameters.cacheTimeoutWorkaround }}"
+ displayName: "Cache (Docker)"
+ inputs:
+ key: '"${{ parameters.version }}" | "${{ parameters.arch }}" | ${{ parameters.keyDocker }} | docker'
+ path: "${{ parameters.pathTemp }}/docker"
+ cacheHitVar: DOCKER_CACHE_RESTORED
+
+- task: Cache@2
+ condition: and(not(canceled()), not(failed()), or(ne(variables.CACHE_RESTORED, 'true'), eq('${{ parameters.cacheName }}', '')))
+ env:
+ VSO_DEDUP_REDIRECT_TIMEOUT_IN_SEC: "${{ parameters.cacheTimeoutWorkaround }}"
+ displayName: "Cache (Bazel)"
+ inputs:
+ key: '"${{ parameters.version }}" | "${{ parameters.arch }}" | ${{ parameters.keyBazel }} | bazel'
+ path: "${{ parameters.pathTemp }}/bazel"
+ cacheHitVar: BAZEL_CACHE_RESTORED
+
+# Prime the caches for all jobs
+- script: .azure-pipelines/docker/prime_cache.sh "$(Build.StagingDirectory)" "${{ parameters.pathTemp }}" "${{ parameters.arch }}"
+ env:
+ DOCKER_RESTORED: $(DOCKER_CACHE_RESTORED)
+ BAZEL_RESTORED: $(BAZEL_CACHE_RESTORED)
+ displayName: "Cache/prime (Docker/Bazel)"
# TODO(phlax): figure if there is a way to test cache without downloading it
- condition: and(not(canceled()), eq(${{ parameters.prime }}, true), ne(variables.CACHE_RESTORED, 'true'))
+ condition: and(not(canceled()), eq(${{ parameters.prime }}, true), eq('${{ parameters.cacheName }}', ''), or(ne(variables.DOCKER_CACHE_RESTORED, 'true'), ne(variables.BAZEL_CACHE_RESTORED, 'true')))
-# Load the cache for a job
-- script: sudo .azure-pipelines/docker/load_cache.sh "${{ parameters.tmpDirectory }}" "${{ parameters.path }}"
- displayName: "Cache/restore (${{ parameters.name }})"
+# Load the caches for a job
+- script: sudo .azure-pipelines/docker/load_caches.sh "$(Build.StagingDirectory)" "${{ parameters.pathTemp }}" "${{ parameters.pathDockerBind }}" "${{ parameters.tmpfsDockerDisabled }}"
+ displayName: "Cache/restore"
condition: and(not(canceled()), eq(${{ parameters.prime }}, false))
diff --git a/.azure-pipelines/ci.yml b/.azure-pipelines/ci.yml
new file mode 100644
index 0000000000000..0fa528e8c8d2c
--- /dev/null
+++ b/.azure-pipelines/ci.yml
@@ -0,0 +1,350 @@
+parameters:
+- name: ciTarget
+ displayName: "CI target"
+ type: string
+ default: release
+- name: artifactName
+ displayName: "Artifact name"
+ type: string
+ default: ""
+- name: artifactSuffix
+ displayName: "Suffix of artifact"
+ type: string
+ default: ""
+
+# caching
+- name: cacheKeyDocker
+ type: string
+ default: ".bazelrc"
+- name: cacheKeyVersion
+ type: string
+ default: $(cacheKeyVersion)
+- name: pathCacheTemp
+ type: string
+ default: $(pathCacheTemp)
+- name: cacheName
+ type: string
+ default:
+
+- name: tmpfsCacheDisabled
+ type: string
+ default: ''
+- name: tmpfsDockerDisabled
+ type: string
+ default: ''
+
+- name: bazelConfigRBE
+ type: string
+ default: --config=remote-ci --config=rbe-google --jobs=$(RbeJobs)
+- name: cacheKeyBazel
+ type: string
+ default: $(cacheKeyBazel)
+- name: cacheVersion
+ type: string
+ default: $(cacheKeyVersion)
+
+- name: pathDockerBind
+ type: string
+ default: $(pathDockerBind)
+
+- name: rbe
+ displayName: "Enable RBE"
+ type: boolean
+ default: true
+- name: managedAgent
+ type: boolean
+ default: true
+- name: bazelBuildExtraOptions
+ type: string
+ default: ""
+- name: bazelStartupExtraOptions
+ type: string
+ default: ""
+- name: bazelUseBES
+ displayName: "Upload bazel run data to BES"
+ type: boolean
+ default: true
+- name: envoyBuildFilterExample
+ type: string
+ default: ""
+- name: cacheTestResults
+ displayName: "Cache test results"
+ type: boolean
+ default: true
+# Unfortunately, AZP is an unmitigated and undocumented disaster.
+# The definition of primitive types is close to pointless, as depending
+# on where things are set, azp just turns them into strings anyway.
+- name: repoFetchDepth
+ type: string
+ default: 1
+- name: repoFetchTags
+ type: string
+ default: false
+# Auth
+- name: authGithub
+ type: string
+ default: ""
+# Publishing
+- name: publishEnvoy
+ type: string
+ default: true
+- name: publishTestResults
+ type: string
+ default: true
+
+- name: diskspaceHack
+ type: boolean
+ default: false
+
+- name: stepsPre
+ type: stepList
+ default: []
+- name: stepsPost
+ type: stepList
+ default: []
+
+- name: env
+ type: object
+ default: {}
+
+steps:
+- checkout: self
+ fetchDepth: ${{ parameters.repoFetchDepth }}
+ fetchTags: ${{ parameters.repoFetchTags }}
+
+- bash: ./.azure-pipelines/cleanup.sh
+ displayName: "Free disk space"
+ condition: and(succeeded(), eq('${{ parameters.diskspaceHack }}', true))
+
+# Set up tmpfs directories for self-hosted agents which have a surplus of mem.
+#
+# NB: Do not add any directory that grows larger than spare memory capacity!
+- bash: |
+ TMPDIRS=(
+ # This is used as the final delivery directory for the binaries
+ "$(Build.StagingDirectory)/envoy"
+ # Bazel repository_cache which is cached by AZP (this speeds up cache load/save)
+ "$(Build.StagingDirectory)/repository_cache"
+ "$(Build.StagingDirectory)/bazel"
+ "$(Build.StagingDirectory)/.cache"
+ "$(Build.StagingDirectory)/bazel_root/install"
+ "$(Build.StagingDirectory)/tmp"
+ "$(Build.StagingDirectory)/bazel_root/base/external")
+ if [[ "${{ parameters.artifactSuffix }}" == ".arm64" ]]; then
+ TMPDIRS+=(
+ "$(Build.StagingDirectory)/bazel_root/base/execroot/envoy/bazel-out/aarch64-fastbuild/testlogs"
+ "$(Build.StagingDirectory)/bazel_root/base/execroot/envoy/bazel-out/aarch64-opt/testlogs")
+ else
+ TMPDIRS+=(
+ "$(Build.StagingDirectory)/bazel_root/base/execroot/envoy/bazel-out/k8-fastbuild/testlogs"
+ "$(Build.StagingDirectory)/bazel_root/base/execroot/envoy/bazel-out/k8-opt/testlogs")
+ fi
+ for tmpdir in "${TMPDIRS[@]}"; do
+ echo "Mount(tmpfs): ${tmpdir}"
+ sudo mkdir -p "$tmpdir"
+ sudo mount -t tmpfs none "$tmpdir"
+ sudo chown azure-pipelines "$tmpdir"
+ done
+ sudo chown -R azure-pipelines:azure-pipelines $(Build.StagingDirectory)/bazel_root/
+ displayName: "Mount/tmpfs bazel directories"
+ condition: and(succeeded(), eq('${{ parameters.managedAgent }}', false), ne('${{ parameters.tmpfsDockerDisabled }}', true))
+
+- bash: |
+ set -e
+ CACHE_DIRS=(
+ "$(Build.StagingDirectory)/envoy"
+ "$(Build.StagingDirectory)/.cache/"
+ "$(Build.StagingDirectory)/bazel_root/install/"
+ "$(Build.StagingDirectory)/repository_cache/"
+ "$(Build.StagingDirectory)/bazel_root/base/external")
+ sudo mkdir -p "${CACHE_DIRS[@]}"
+ if id -u vsts &> /dev/null; then
+ sudo chown -R vsts:vsts "${CACHE_DIRS[@]}" $(Build.StagingDirectory)/bazel_root/
+ else
+ sudo chown -R azure-pipelines:azure-pipelines "${CACHE_DIRS[@]}" $(Build.StagingDirectory)/bazel_root/
+ fi
+ echo "Created bazel directories: "${CACHE_DIRS[*]}""
+ displayName: "Create bazel directories"
+ condition: and(succeeded(), eq('${{ parameters.tmpfsDockerDisabled }}', true))
+
+# Caching
+- template: cached.yml
+ parameters:
+ cacheName: "${{ parameters.cacheName }}"
+ keyBazel: "${{ parameters.cacheKeyBazel }}"
+ keyDocker: "${{ parameters.cacheKeyDocker }}"
+ pathDockerBind: "${{ parameters.pathDockerBind }}"
+ arch: "${{ parameters.artifactSuffix }}"
+ pathTemp: "${{ parameters.pathCacheTemp }}"
+ tmpfsDisabled: "${{ parameters.tmpfsCacheDisabled }}"
+ tmpfsDockerDisabled: "${{ parameters.tmpfsDockerDisabled }}"
+
+- script: |
+ ENVOY_SHARED_TMP_DIR=/tmp/bazel-shared
+ mkdir -p "$ENVOY_SHARED_TMP_DIR"
+ BAZEL_BUILD_EXTRA_OPTIONS="${{ parameters.bazelBuildExtraOptions }}"
+ if [[ "${{ parameters.rbe }}" == "True" ]]; then
+ # mktemp will create a tempfile with u+rw permission minus umask, it will not be readable by all
+ # users by default.
+ GCP_SERVICE_ACCOUNT_KEY_PATH=$(mktemp -p "${ENVOY_SHARED_TMP_DIR}" -t gcp_service_account.XXXXXX.json)
+ bash -c 'echo "$(GcpServiceAccountKey)"' | base64 --decode > "${GCP_SERVICE_ACCOUNT_KEY_PATH}"
+ BAZEL_BUILD_EXTRA_OPTIONS+=" ${{ parameters.bazelConfigRBE }} --google_credentials=${GCP_SERVICE_ACCOUNT_KEY_PATH}"
+ ENVOY_RBE=1
+ if [[ "${{ parameters.bazelUseBES }}" == "True" && -n "${GOOGLE_BES_PROJECT_ID}" ]]; then
+ BAZEL_BUILD_EXTRA_OPTIONS+=" --config=rbe-google-bes --bes_instance_name=${GOOGLE_BES_PROJECT_ID}"
+ fi
+ else
+ echo "using local build cache."
+ # Normalize branches - `release/vX.xx`, `vX.xx`, `vX.xx.x` -> `vX.xx`
+ TARGET_BRANCH=$(echo "${CI_TARGET_BRANCH}" | cut -d/ -f2-)
+ BRANCH_NAME="$(echo "${TARGET_BRANCH}" | cut -d/ -f2 | cut -d. -f-2)"
+ if [[ "$BRANCH_NAME" == "merge" ]]; then
+ # Manually run PR commit - there is no easy way of telling which branch
+ # it is, so just set it to `main` - otherwise it tries to cache as `branch/merge`
+ BRANCH_NAME=main
+ fi
+ BAZEL_REMOTE_INSTANCE="branch/${BRANCH_NAME}"
+ echo "instance_name: ${BAZEL_REMOTE_INSTANCE}."
+ BAZEL_BUILD_EXTRA_OPTIONS+=" --config=ci --config=cache-local --remote_instance_name=${BAZEL_REMOTE_INSTANCE} --remote_timeout=600"
+ fi
+ if [[ "${{ parameters.cacheTestResults }}" != "True" ]]; then
+ VERSION_DEV="$(cut -d- -f2 "VERSION.txt")"
+ # Use uncached test results for non-release scheduled runs.
+ if [[ $VERSION_DEV == "dev" ]]; then
+ BAZEL_EXTRA_TEST_OPTIONS+=" --nocache_test_results"
+ fi
+ fi
+ # Any PR or CI run in envoy-presubmit uses the fake SCM hash
+ if [[ "${{ variables['Build.Reason'] }}" == "PullRequest" || "${{ variables['Build.DefinitionName'] }}" == 'envoy-presubmit' ]]; then
+ # sha1sum of `ENVOY_PULL_REQUEST`
+ BAZEL_FAKE_SCM_REVISION=e3b4a6e9570da15ac1caffdded17a8bebdc7dfc9
+ fi
+ echo "##vso[task.setvariable variable=BAZEL_BUILD_EXTRA_OPTIONS]${BAZEL_BUILD_EXTRA_OPTIONS}"
+ echo "##vso[task.setvariable variable=BAZEL_EXTRA_TEST_OPTIONS]${BAZEL_EXTRA_TEST_OPTIONS}"
+ echo "##vso[task.setvariable variable=BAZEL_FAKE_SCM_REVISION]${BAZEL_FAKE_SCM_REVISION}"
+ echo "##vso[task.setvariable variable=BAZEL_STARTUP_EXTRA_OPTIONS]${{ parameters.bazelStartupExtraOptions }}"
+ echo "##vso[task.setvariable variable=CI_TARGET_BRANCH]${CI_TARGET_BRANCH}"
+ echo "##vso[task.setvariable variable=ENVOY_BUILD_FILTER_EXAMPLE]${{ parameters.envoyBuildFilterExample }}"
+ echo "##vso[task.setvariable variable=ENVOY_DOCKER_BUILD_DIR]$(Build.StagingDirectory)"
+ echo "##vso[task.setvariable variable=ENVOY_RBE]${ENVOY_RBE}"
+ echo "##vso[task.setvariable variable=ENVOY_SHARED_TMP_DIR]${ENVOY_SHARED_TMP_DIR}"
+ echo "##vso[task.setvariable variable=GCP_SERVICE_ACCOUNT_KEY_PATH]${GCP_SERVICE_ACCOUNT_KEY_PATH}"
+ echo "##vso[task.setvariable variable=GITHUB_TOKEN]${{ parameters.authGithub }}"
+ workingDirectory: $(Build.SourcesDirectory)
+ env:
+ ${{ if eq(variables['Build.Reason'], 'PullRequest') }}:
+ CI_TARGET_BRANCH: "origin/$(System.PullRequest.TargetBranch)"
+ ${{ if ne(variables['Build.Reason'], 'PullRequest') }}:
+ CI_TARGET_BRANCH: "origin/$(Build.SourceBranchName)"
+ displayName: "CI env ${{ parameters.ciTarget }}"
+
+- script: ci/run_envoy_docker.sh 'ci/do_ci.sh fetch-${{ parameters.ciTarget }}'
+ condition: and(not(canceled()), not(failed()), ne('${{ parameters.cacheName }}', ''), ne(variables.CACHE_RESTORED, 'true'))
+ workingDirectory: $(Build.SourcesDirectory)
+ env:
+ ${{ each var in parameters.env }}:
+ ${{ var.key }}: ${{ var.value }}
+ displayName: "Fetch assets (${{ parameters.ciTarget }})"
+
+- ${{ each step in parameters.stepsPre }}:
+ - ${{ each pair in step }}:
+ ${{ pair.key }}: ${{ pair.value }}
+
+- bash: |
+ echo "disk space at beginning of build:"
+ df -h
+ if [[ -e "$(Build.StagingDirectory)/bazel_root/base/external" ]]; then
+ du -sh "$(Build.StagingDirectory)/bazel_root/base/external"
+ fi
+ if [[ -e "$(Build.StagingDirectory)/repository_cache" ]]; then
+ du -sh "$(Build.StagingDirectory)/repository_cache"
+ fi
+
+ displayName: "Check disk space at beginning"
+
+- bash: |
+ sudo mkdir -p /etc/docker
+ echo '{
+ "ipv6": true,
+ "fixed-cidr-v6": "2001:db8:1::/64"
+ }' | sudo tee /etc/docker/daemon.json
+ sudo service docker restart
+ displayName: "Enable IPv6"
+ condition: ${{ parameters.managedAgent }}
+
+- script: ci/run_envoy_docker.sh 'ci/do_ci.sh ${{ parameters.ciTarget }}'
+ workingDirectory: $(Build.SourcesDirectory)
+ env:
+ ENVOY_BUILD_FILTER_EXAMPLE: ${{ parameters.envoyBuildFilterExample }}
+ ${{ each var in parameters.env }}:
+ ${{ var.key }}: ${{ var.value }}
+ displayName: "Run CI script ${{ parameters.ciTarget }}"
+
+- bash: |
+ echo "disk space at end of build:"
+ df -h
+
+ for hprof in $(find "$(Build.StagingDirectory)" -name "*heapdump.hprof"); do
+ echo
+ mkdir -p $(Build.StagingDirectory)/envoy/hprof
+ echo "Copying ${hprof}"
+ cp -a $hprof $(Build.StagingDirectory)/envoy/hprof
+ done
+
+ du -sh "$(Build.StagingDirectory)"/bazel_root/base/external
+ du -sh "$(Build.StagingDirectory)"/repository_cache
+
+ cp -a "$(Build.StagingDirectory)/bazel_root/base/server/jvm.out" $(Build.StagingDirectory)/envoy
+
+ if [[ "${{ parameters.artifactSuffix }}" == ".arm64" ]]; then
+ # Dump bazel-remote logging (only required for arm/self-hosted).
+ sudo systemctl status --no-pager bazel-remote > $(Build.StagingDirectory)/envoy/br.status
+ sudo journalctl --no-pager -xu bazel-remote > $(Build.StagingDirectory)/envoy/br.journal
+ fi
+ echo
+ du -ch "$(Build.StagingDirectory)" | grep -E "[0-9]{2,}M|[0-9]G"
+
+ # Cleanup offending files with unicode names
+ rm -rf $(Build.StagingDirectory)/bazel_root/base/external/go_sdk/test/fixedbugs
+ displayName: "Check disk space at end"
+ condition: not(canceled())
+
+- ${{ each step in parameters.stepsPost }}:
+ - ${{ each pair in step }}:
+ ${{ pair.key }}: ${{ pair.value }}
+
+- bash: |
+ if [[ -n "$GCP_SERVICE_ACCOUNT_KEY_PATH" && -e "$GCP_SERVICE_ACCOUNT_KEY_PATH" ]]; then
+ echo "Removed key: ${GCP_SERVICE_ACCOUNT_KEY_PATH}"
+ rm -rf "$GCP_SERVICE_ACCOUNT_KEY_PATH"
+ fi
+ condition: not(canceled())
+
+- script: |
+ set -e
+ sudo .azure-pipelines/docker/save_cache.sh "$(Build.StagingDirectory)" /mnt/cache/all true true
+ if id -u vsts &> /dev/null; then
+ sudo chown -R vsts:vsts /mnt/cache/all
+ else
+ sudo chown -R azure-pipelines:azure-pipelines /mnt/cache/all
+ fi
+
+ displayName: "Cache/save (${{ parameters.cacheName}})"
+ condition: and(succeeded(), ne('${{ parameters.cacheName }}', ''), ne(variables.CACHE_RESTORED, 'true'))
+
+- task: PublishTestResults@2
+ inputs:
+ testResultsFiles: "**/bazel-out/**/testlogs/**/test.xml"
+ testRunTitle: "${{ parameters.ciTarget }}"
+ searchFolder: $(Build.StagingDirectory)/bazel_root
+ timeoutInMinutes: 10
+ condition: eq(${{ parameters.publishTestResults }}, 'true')
+- task: PublishBuildArtifacts@1
+ inputs:
+ pathtoPublish: "$(Build.StagingDirectory)/envoy"
+ ${{ if eq(parameters.artifactName, '') }}:
+ artifactName: ${{ parameters.ciTarget }}
+ ${{ if ne(parameters.artifactName, '') }}:
+ artifactName: ${{ parameters.artifactName }}
+ timeoutInMinutes: 10
+ condition: eq(${{ parameters.publishEnvoy }}, 'true')
diff --git a/.azure-pipelines/docker/clean_docker.sh b/.azure-pipelines/docker/clean_docker.sh
new file mode 100755
index 0000000000000..cbad33a4ad579
--- /dev/null
+++ b/.azure-pipelines/docker/clean_docker.sh
@@ -0,0 +1,11 @@
+#!/bin/bash -e
+
+set -o pipefail
+
+echo "Stopping Docker ..."
+systemctl stop docker
+
+echo "Restarting Docker with empty /var/lib/docker ..."
+mv /var/lib/docker/ /var/lib/docker.old
+mkdir /var/lib/docker
+systemctl start docker
diff --git a/.azure-pipelines/docker/create_cache.sh b/.azure-pipelines/docker/create_cache.sh
new file mode 100755
index 0000000000000..e9d9f55b071c7
--- /dev/null
+++ b/.azure-pipelines/docker/create_cache.sh
@@ -0,0 +1,29 @@
+#!/bin/bash -e
+
+set -o pipefail
+
+CACHE_TARBALL="${1}"
+ROOT_DIR="${2}"
+shift 2
+
+echo "Exporting ${*} -> ${CACHE_TARBALL}"
+
+CACHE_PATH="$(dirname "$CACHE_TARBALL")"
+mkdir -p "$CACHE_PATH"
+
+CACHE_ARGS=()
+for path in "$@"; do
+ if [[ "$ROOT_DIR" == "." ]]; then
+ total="$(du -sh "$path" | cut -f1)"
+ echo "Adding cache dir (${path}): ${total}"
+ CACHE_ARGS+=(-C "$path" .)
+ else
+ total="$(du -sh "${ROOT_DIR}/$path" | cut -f1)"
+ echo "Adding cache dir (${ROOT_DIR}/${path}): ${total}"
+ CACHE_ARGS+=(-C "$ROOT_DIR" "$path")
+ fi
+done
+
+tar cf - "${CACHE_ARGS[@]}" | zstd - -q -T0 -o "$CACHE_TARBALL"
+echo "Cache tarball created: ${CACHE_TARBALL}"
+ls -lh "$CACHE_TARBALL"
diff --git a/.azure-pipelines/docker/load_cache.sh b/.azure-pipelines/docker/load_cache.sh
deleted file mode 100755
index 78c6cd8e5d99b..0000000000000
--- a/.azure-pipelines/docker/load_cache.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/bin/bash -e
-
-DOCKER_CACHE_PATH="$1"
-DOCKER_BIND_PATH="$2"
-
-if [[ -z "$DOCKER_CACHE_PATH" ]]; then
- echo "load_docker_cache called without path arg" >&2
- exit 1
-fi
-
-
-DOCKER_CACHE_TARBALL="${DOCKER_CACHE_PATH}/docker.tar.zst"
-
-echo "Stopping Docker daemon ..."
-systemctl stop docker docker.socket
-
-mv /var/lib/docker/ /var/lib/docker.old
-mkdir -p /var/lib/docker
-
-if id -u vsts &> /dev/null && [[ -n "$DOCKER_BIND_PATH" ]]; then
- # use separate disk on windows hosted
- echo "Binding docker directory ${DOCKER_BIND_PATH} -> /var/lib/docker ..."
- mkdir -p "$DOCKER_BIND_PATH"
- mount -o bind "$DOCKER_BIND_PATH" /var/lib/docker
-elif ! id -u vsts &> /dev/null; then
- echo "Mounting tmpfs directory -> /var/lib/docker ..."
- # Use a ramdisk to load docker (avoids Docker slow start on big disk)
- mount -t tmpfs none /var/lib/docker
-else
- # If we are on a managed host but the bind path is not set then we need to remove
- # the old /var/lib/docker to free some space (maybe)
- DOCKER_REMOVE_EXISTING=1
-fi
-
-if [[ -e "${DOCKER_CACHE_TARBALL}" ]]; then
- echo "Extracting docker cache ${DOCKER_CACHE_TARBALL} -> /var/lib/docker ..."
- zstd --stdout -d "$DOCKER_CACHE_TARBALL" | tar -xf - -C /var/lib/docker
- touch /tmp/DOCKER_CACHE_RESTORED
-else
- echo "No cache to restore, starting Docker with no data"
-fi
-
-echo "Starting Docker daemon ..."
-systemctl start docker
-
-if mountpoint -q "${DOCKER_CACHE_PATH}"; then
- echo "Unmount cache tmp ${DOCKER_CACHE_PATH} ..."
- umount "${DOCKER_CACHE_PATH}"
-else
- echo "Remove cache tmp ${DOCKER_CACHE_PATH} ..."
- rm -rf "${DOCKER_CACHE_PATH}"
-fi
-docker images
-df -h
-
-# this takes time but may be desirable in some situations
-if [[ -n "$DOCKER_REMOVE_EXISTING" ]]; then
- rm -rf /var/lib/docker.old
-fi
diff --git a/.azure-pipelines/docker/load_caches.sh b/.azure-pipelines/docker/load_caches.sh
new file mode 100755
index 0000000000000..73c03425cfd53
--- /dev/null
+++ b/.azure-pipelines/docker/load_caches.sh
@@ -0,0 +1,100 @@
+#!/bin/bash -e
+
+ENVOY_DOCKER_BUILD_DIR="$1"
+CACHE_PATH="$2"
+DOCKER_BIND_PATH="$3"
+DOCKER_NO_TMPFS="$4"
+
+
+if [[ -z "$CACHE_PATH" ]]; then
+ echo "load_caches called without path arg" >&2
+ exit 1
+fi
+
+if [[ -e "${CACHE_PATH}/all" ]]; then
+ DOCKER_CACHE_PATH="${CACHE_PATH}/all"
+ BAZEL_CACHE_PATH="${CACHE_PATH}/all"
+else
+ DOCKER_CACHE_PATH="${CACHE_PATH}/docker"
+ BAZEL_CACHE_PATH="${CACHE_PATH}/bazel"
+fi
+
+DOCKER_CACHE_TARBALL="${DOCKER_CACHE_PATH}/docker.tar.zst"
+BAZEL_CACHE_TARBALL="${BAZEL_CACHE_PATH}/bazel.tar.zst"
+
+
+remount_docker () {
+ echo "Stopping Docker daemon ..."
+ systemctl stop docker docker.socket
+ mv /var/lib/docker/ /var/lib/docker.old
+ mkdir -p /var/lib/docker
+ if id -u vsts &> /dev/null && [[ -n "$DOCKER_BIND_PATH" ]]; then
+ # use separate disk on windows hosted
+ echo "Binding docker directory ${DOCKER_BIND_PATH} -> /var/lib/docker ..."
+ mkdir -p "$DOCKER_BIND_PATH"
+ mount -o bind "$DOCKER_BIND_PATH" /var/lib/docker
+ elif ! id -u vsts &> /dev/null && [[ -z "$DOCKER_NO_TMPFS" ]]; then
+ echo "Mounting tmpfs directory -> /var/lib/docker ..."
+ # Use a ramdisk to load docker (avoids Docker slow start on big disk)
+ mount -t tmpfs none /var/lib/docker
+ else
+ # If we are on a managed/resource-constrained host but the bind path is not set then we need to remove
+ # the old /var/lib/docker to free some space (maybe)
+ DOCKER_REMOVE_EXISTING=1
+ fi
+}
+
+extract_docker () {
+ if [[ -e "${DOCKER_CACHE_TARBALL}" ]]; then
+ echo "Extracting docker cache ${DOCKER_CACHE_TARBALL} -> /var/lib/docker ..."
+ zstd --stdout -d "$DOCKER_CACHE_TARBALL" | tar --warning=no-timestamp -xf - -C /var/lib/docker
+ else
+ echo "No Docker cache to restore, starting Docker with no data"
+ fi
+}
+
+extract_bazel () {
+ if [[ -e "${BAZEL_CACHE_TARBALL}" ]]; then
+ echo "Extracting bazel cache ${BAZEL_CACHE_TARBALL} -> ${ENVOY_DOCKER_BUILD_DIR} ..."
+ zstd --stdout -d "$BAZEL_CACHE_TARBALL" | tar --warning=no-timestamp -xf - -C "${ENVOY_DOCKER_BUILD_DIR}"
+ if id -u vsts &> /dev/null; then
+ sudo chown -R vsts:vsts "${ENVOY_DOCKER_BUILD_DIR}"
+ else
+ sudo chown -R azure-pipelines:azure-pipelines "${ENVOY_DOCKER_BUILD_DIR}"
+ fi
+ else
+ echo "No bazel cache to restore, starting bazel with no data"
+ fi
+}
+
+cleanup_cache () {
+ if mountpoint -q "${CACHE_PATH}"; then
+ echo "Unmount cache tmp ${CACHE_PATH} ..."
+ umount "${CACHE_PATH}"
+ else
+ echo "Remove cache tmp ${CACHE_PATH} ..."
+ rm -rf "${CACHE_PATH}"
+ fi
+
+ # this takes time but may be desirable in some situations
+ if [[ -n "$DOCKER_REMOVE_EXISTING" ]]; then
+ rm -rf /var/lib/docker.old
+ fi
+}
+
+restart_docker () {
+ echo "Starting Docker daemon ..."
+ systemctl start docker
+ docker images
+ mkdir -p "${ENVOY_DOCKER_BUILD_DIR}"
+}
+
+df -h
+
+remount_docker
+extract_bazel
+extract_docker
+restart_docker
+cleanup_cache
+
+df -h
diff --git a/.azure-pipelines/docker/prepare_cache.sh b/.azure-pipelines/docker/prepare_cache.sh
index fe417d5f5e419..ff3a07ffbc934 100755
--- a/.azure-pipelines/docker/prepare_cache.sh
+++ b/.azure-pipelines/docker/prepare_cache.sh
@@ -4,7 +4,6 @@ DOCKER_CACHE_PATH="$1"
NO_MOUNT_TMPFS="${2:-}"
DOCKER_CACHE_OWNERSHIP="vsts:vsts"
-
if [[ -z "$DOCKER_CACHE_PATH" ]]; then
echo "prepare_docker_cache called without path arg" >&2
exit 1
@@ -14,10 +13,20 @@ if ! id -u vsts &> /dev/null; then
DOCKER_CACHE_OWNERSHIP=azure-pipelines
fi
+tmpfs_size () {
+ # Make this 2/3 of total memory
+ total_mem="$(grep MemTotal /proc/meminfo | cut -d' ' -f2- | xargs | cut -d' ' -f1)"
+ bc <<< "$total_mem"*2/3*1024
+}
+
+TMPFS_SIZE="$(tmpfs_size)"
+
echo "Creating cache directory (${DOCKER_CACHE_PATH}) ..."
mkdir -p "${DOCKER_CACHE_PATH}"
if [[ -z "$NO_MOUNT_TMPFS" ]]; then
echo "Mount tmpfs directory: ${DOCKER_CACHE_PATH}"
- mount -t tmpfs none "$DOCKER_CACHE_PATH"
+ mount -o size="$TMPFS_SIZE" -t tmpfs none "$DOCKER_CACHE_PATH"
fi
+mkdir -p "${DOCKER_CACHE_PATH}/docker"
+mkdir -p "${DOCKER_CACHE_PATH}/bazel"
chown -R "$DOCKER_CACHE_OWNERSHIP" "${DOCKER_CACHE_PATH}"
diff --git a/.azure-pipelines/docker/prime_cache.sh b/.azure-pipelines/docker/prime_cache.sh
index d5bef3388a44c..368c9a8aa319d 100755
--- a/.azure-pipelines/docker/prime_cache.sh
+++ b/.azure-pipelines/docker/prime_cache.sh
@@ -1,40 +1,76 @@
#!/bin/bash -e
-DOCKER_CACHE_PATH="$1"
-DOCKER_CACHE_ARCH="$2"
+ENVOY_DOCKER_BUILD_DIR="$1"
+CACHE_PATH="$2"
+CACHE_ARCH="$3"
-if [[ -z "$DOCKER_CACHE_PATH" ]]; then
+echo "Docker restored: $DOCKER_RESTORED"
+echo "Bazel restored: $BAZEL_RESTORED"
+
+if [[ -z "$CACHE_PATH" ]]; then
echo "prime_docker_cache called without path arg" >&2
exit 1
fi
-if [[ "$DOCKER_CACHE_ARCH" == ".arm64" ]]; then
- DOCKER_CACHE_ARCH=linux/arm64
+if [[ "$CACHE_ARCH" == ".arm64" ]]; then
+ CACHE_ARCH=linux/arm64
else
- DOCKER_CACHE_ARCH=linux/amd64
+ CACHE_ARCH=linux/amd64
fi
-DOCKER_CACHE_TARBALL="${DOCKER_CACHE_PATH}/docker.tar.zst"
-
-echo "Stopping Docker ..."
-systemctl stop docker
+DOCKER_CACHE_TARBALL="${CACHE_PATH}/docker/docker.tar.zst"
+BAZEL_CACHE_TARBALL="${CACHE_PATH}/bazel/bazel.tar.zst"
+BAZEL_PATH=/tmp/envoy-docker-build
-echo "Restarting Docker with empty /var/lib/docker ..."
-mv /var/lib/docker/ /var/lib/docker.old
-mkdir /var/lib/docker
-systemctl start docker
+echo
+echo "================ Load caches ==================="
+if [[ "$DOCKER_RESTORED" == "true" ]] || [[ "$BAZEL_RESTORED" == "true" ]]; then
+ sudo ./.azure-pipelines/docker/load_caches.sh "$ENVOY_DOCKER_BUILD_DIR" "$CACHE_PATH" "" true
+else
+ sudo ./.azure-pipelines/docker/clean_docker.sh
+ echo "No caches to restore"
+fi
+echo "==================================================="
+echo
-BUILD_IMAGE=$(head -n1 .devcontainer/Dockerfile | cut -d: -f2)
+echo
+echo "================ Docker fetch ======================"
+if [[ "$DOCKER_RESTORED" != "true" ]]; then
+ echo "Fetching Docker"
+ ./ci/run_envoy_docker.sh uname -a
+ docker images
+else
+ echo "Not fetching Docker as it was restored"
+fi
+echo "==================================================="
+echo
-echo "Pulling build image for ${DOCKER_CACHE_ARCH} (${BUILD_IMAGE}) ..."
-docker pull -q --platform "${DOCKER_CACHE_ARCH}" "envoyproxy/envoy-build-ubuntu:${BUILD_IMAGE}"
+echo
+echo "================ Bazel fetch ======================"
+# Fetch bazel dependencies
+if [[ "$BAZEL_RESTORED" != "true" ]]; then
+ echo "Fetching bazel"
+ ./ci/run_envoy_docker.sh './ci/do_ci.sh fetch'
+else
+ echo "Not fetching bazel as it was restored"
+fi
+echo "==================================================="
+echo
-echo "Stopping docker"
-systemctl stop docker
+df -h
-echo "Exporting /var/lib/docker -> ${DOCKER_CACHE_PATH}"
-mkdir -p "$DOCKER_CACHE_PATH"
-tar cf - -C /var/lib/docker . | zstd - -T0 -o "$DOCKER_CACHE_TARBALL"
+echo
+echo "================ Save caches ======================"
+# Save the caches -> tarballs
+if [[ "$DOCKER_RESTORED" != "true" ]]; then
+ echo "Stopping docker"
+ sudo systemctl stop docker docker.socket
+ sudo ./.azure-pipelines/docker/create_cache.sh "${DOCKER_CACHE_TARBALL}" . /var/lib/docker
+fi
-echo "Docker cache tarball created: ${DOCKER_CACHE_TARBALL}"
-ls -lh "$DOCKER_CACHE_TARBALL"
+if [[ "$BAZEL_RESTORED" != "true" ]]; then
+ sudo ./.azure-pipelines/docker/create_cache.sh "${BAZEL_CACHE_TARBALL}" . "${BAZEL_PATH}"
+fi
+sudo chmod o+r -R "${CACHE_PATH}"
+echo "==================================================="
+echo
diff --git a/.azure-pipelines/docker/save_cache.sh b/.azure-pipelines/docker/save_cache.sh
index 85f912cbad2d6..f80f28d9f56be 100755
--- a/.azure-pipelines/docker/save_cache.sh
+++ b/.azure-pipelines/docker/save_cache.sh
@@ -1,35 +1,43 @@
#!/bin/bash -e
-DOCKER_CACHE_PATH="$1"
-NO_MOUNT_TMPFS="${2:-}"
+set -o pipefail
+ENVOY_DOCKER_BUILD_DIR="$1"
+CACHE_PATH="$2"
+NO_MOUNT_TMPFS="${3:-}"
+CACHE_BAZEL="${4:-}"
-if [[ -z "$DOCKER_CACHE_PATH" ]]; then
+if [[ -z "$CACHE_PATH" ]]; then
echo "prime_docker_cache called without path arg" >&2
exit 1
fi
-if [[ -e /tmp/DOCKER_CACHE_RESTORED ]]; then
- echo "Not saving cache as it was restored"
- exit 0
-fi
-
-DOCKER_CACHE_TARBALL="${DOCKER_CACHE_PATH}/docker.tar.zst"
+DOCKER_CACHE_TARBALL="${CACHE_PATH}/docker.tar.zst"
+BAZEL_CACHE_TARBALL="${CACHE_PATH}/bazel.tar.zst"
docker images
echo "Stopping Docker ..."
-systemctl stop docker
+systemctl stop docker docker.socket
-echo "Creating directory to save tarball: ${DOCKER_CACHE_PATH}"
-mkdir -p "$DOCKER_CACHE_PATH"
+echo "Creating directory to save tarball: ${CACHE_PATH}"
+mkdir -p "$CACHE_PATH"
if [[ -z "$NO_MOUNT_TMPFS" ]]; then
- echo "Mount tmpfs directory: ${DOCKER_CACHE_PATH}"
- mount -t tmpfs none "$DOCKER_CACHE_PATH"
+ echo "Mount tmpfs directory: ${CACHE_PATH}"
+ mount -t tmpfs none "$CACHE_PATH"
fi
-echo "Creating tarball: /var/lib/docker -> ${DOCKER_CACHE_TARBALL}"
-tar cf - -C /var/lib/docker . | zstd - -T0 -o "$DOCKER_CACHE_TARBALL"
-
-echo "Docker cache tarball created: ${DOCKER_CACHE_TARBALL}"
-ls -lh "$DOCKER_CACHE_TARBALL"
+./.azure-pipelines/docker/create_cache.sh \
+ "${DOCKER_CACHE_TARBALL}" \
+ . \
+ /var/lib/docker
+
+if [[ "$CACHE_BAZEL" == "true" ]]; then
+ ./.azure-pipelines/docker/create_cache.sh \
+ "${BAZEL_CACHE_TARBALL}" \
+ "${ENVOY_DOCKER_BUILD_DIR}" \
+ .cache \
+ bazel_root/install \
+ bazel_root/base/external \
+ repository_cache
+fi
diff --git a/.azure-pipelines/env.yml b/.azure-pipelines/env.yml
index ed70d498c8ccb..c511ebc67a7b1 100644
--- a/.azure-pipelines/env.yml
+++ b/.azure-pipelines/env.yml
@@ -42,19 +42,16 @@ jobs:
steps:
- template: cached.yml
parameters:
- version: "$(cacheKeyBuildImage)"
prime: true
- job: cache_arm
dependsOn: []
displayName: Cache (arm64)
- pool:
- vmImage: $(agentUbuntu)
+ pool: envoy-arm-small
steps:
- template: cached.yml
parameters:
prime: true
arch: .arm64
- version: "$(cacheKeyBuildImage)"
- job: repo
dependsOn: []
@@ -151,6 +148,8 @@ jobs:
RUN_CHECKS=true
RUN_DOCKER=true
RUN_PACKAGING=true
+ RUN_RELEASE_TESTS=true
+
if [[ "$(changed.mobileOnly)" == true || "$(changed.docsOnly)" == true ]]; then
RUN_BUILD=false
RUN_DOCKER=false
@@ -159,10 +158,15 @@ jobs:
RUN_CHECKS=false
RUN_PACKAGING=false
fi
+ if [[ "$ISSTABLEBRANCH" == True && -n "$POSTSUBMIT" && "$(state.isDev)" == false ]]; then
+ RUN_RELEASE_TESTS=false
+ fi
+
echo "##vso[task.setvariable variable=build;isoutput=true]${RUN_BUILD}"
echo "##vso[task.setvariable variable=checks;isoutput=true]${RUN_CHECKS}"
echo "##vso[task.setvariable variable=docker;isoutput=true]${RUN_DOCKER}"
echo "##vso[task.setvariable variable=packaging;isoutput=true]${RUN_PACKAGING}"
+ echo "##vso[task.setvariable variable=releaseTests;isoutput=true]${RUN_RELEASE_TESTS}"
displayName: "Decide what to run"
workingDirectory: $(Build.SourcesDirectory)
@@ -174,32 +178,18 @@ jobs:
PUBLISH_GITHUB_RELEASE=$(run.packaging)
PUBLISH_DOCKERHUB=false
- PUBLISH_DOCS=false
- PUBLISH_DOCS_LATEST=false
- PUBLISH_DOCS_RELEASE=false
if [[ "$ISSTABLEBRANCH" == True && -n "$POSTSUBMIT" && "$NOSYNC" != true ]]; then
- # Build docs for publishing either latest or a release build
- PUBLISH_DOCS=true
# main
if [[ "$ISMAIN" == True ]]; then
# Update the Dockerhub README
PUBLISH_DOCKERHUB=true
- if [[ "$(state.isDev)" == true ]]; then
- # Postsubmit on `main` trigger rebuild of latest docs
- PUBLISH_DOCS_LATEST=true
- fi
# Not main, and not -dev
elif [[ "$(state.isDev)" == false ]]; then
if [[ "$(state.versionPatch)" -eq 0 ]]; then
# A just-forked branch
PUBLISH_GITHUB_RELEASE=false
fi
- # A stable release, publish docs to the release
- PUBLISH_DOCS_RELEASE=true
- else
- # Postsubmit for non-main/release, skip publishing docs in this case
- PUBLISH_DOCS=false
fi
fi
@@ -210,9 +200,6 @@ jobs:
echo "##vso[task.setvariable variable=githubRelease;isoutput=true]${PUBLISH_GITHUB_RELEASE}"
echo "##vso[task.setvariable variable=dockerhub;isoutput=true]${PUBLISH_DOCKERHUB}"
- echo "##vso[task.setvariable variable=docs;isoutput=true]${PUBLISH_DOCS}"
- echo "##vso[task.setvariable variable=docsLatest;isoutput=true]${PUBLISH_DOCS_LATEST}"
- echo "##vso[task.setvariable variable=docsRelease;isoutput=true]${PUBLISH_DOCS_RELEASE}"
displayName: "Decide what to publish"
workingDirectory: $(Build.SourcesDirectory)
@@ -231,12 +218,10 @@ jobs:
echo "env.outputs['run.build']: $(run.build)"
echo "env.outputs['run.checks']: $(run.checks)"
echo "env.outputs['run.packaging']: $(run.packaging)"
+ echo "env.outputs['run.releaseTests']: $(run.releaseTests)"
echo
echo "env.outputs['publish.githubRelease']: $(publish.githubRelease)"
echo "env.outputs['publish.dockerhub]: $(publish.dockerhub)"
- echo "env.outputs['publish.docs]: $(publish.docs)"
- echo "env.outputs['publish.docsLatest]: $(publish.docsLatest)"
- echo "env.outputs['publish.docsRelease]: $(publish.docsRelease)"
displayName: "Print build environment"
diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml
index fdb87e4631f16..4a09a485ef71f 100644
--- a/.azure-pipelines/pipelines.yml
+++ b/.azure-pipelines/pipelines.yml
@@ -44,14 +44,17 @@ variables:
## Variable settings
# Caches (tip: append a version suffix while testing caches)
-- name: cacheKeyBuildImage
- value: v0
-- name: cacheKeyDockerBuild
- value: v0
+- name: cacheKeyVersion
+ value: v3
- name: cacheKeyBazel
- value: v0
-- name: cacheKeyBazelFiles
- value: './WORKSPACE | **/*.bzl, !mobile/**, !envoy-docs/**'
+ value: '.bazelversion | ./WORKSPACE | **/*.bzl, !mobile/**, !envoy-docs/**'
+- name: cacheKeyDocker
+ value: ".bazelrc"
+
+- name: pathCacheTemp
+ value: /mnt/cache
+- name: pathDockerBind
+ value: /mnt/docker
- name: authGithubSSHKeyPublic
value: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk="
@@ -62,6 +65,9 @@ stages:
# Presubmit/default
- ${{ if eq(variables.pipelineDefault, true) }}:
- template: stages.yml
+ parameters:
+ buildStageDeps:
+ - env
# Scheduled run anywhere
- ${{ if eq(variables.pipelineScheduled, true) }}:
@@ -77,11 +83,6 @@ stages:
- env
checkStageDeps:
- env
- concurrencyChecks: 10
- macBuildStageDeps:
- - env
- windowsBuildStageDeps:
- - env
# Postsubmit main/release branches
- ${{ if eq(variables.pipelinePostsubmit, true) }}:
@@ -94,8 +95,3 @@ stages:
- env
checkStageDeps:
- env
- concurrencyChecks: 10
- macBuildStageDeps:
- - env
- windowsBuildStageDeps:
- - env
diff --git a/.azure-pipelines/stage/checks.yml b/.azure-pipelines/stage/checks.yml
index 54cb0c899d33b..8c03249e227b3 100644
--- a/.azure-pipelines/stage/checks.yml
+++ b/.azure-pipelines/stage/checks.yml
@@ -42,100 +42,72 @@ jobs:
maxParallel: ${{ parameters.concurrencyChecks }}
matrix:
# These are ordered by most time-consuming first.
+ coverage:
+ CI_TARGET: "coverage"
+ fuzz_coverage:
+ CI_TARGET: "fuzz_coverage"
compile_time_options:
- CI_TARGET: "bazel.compile_time_options"
+ CI_TARGET: "compile_time_options"
ENVOY_FILTER_EXAMPLE: true
tsan:
- CI_TARGET: "bazel.tsan"
+ CI_TARGET: "tsan"
asan:
- CI_TARGET: "bazel.asan"
+ CI_TARGET: "asan"
ENVOY_FILTER_EXAMPLE: true
# Disabled due to https://github.com/envoyproxy/envoy/pull/18218
# api_compat:
- # CI_TARGET: "bazel.api_compat"
+ # CI_TARGET: "api_compat"
gcc:
- CI_TARGET: "bazel.gcc"
+ CI_TARGET: "gcc"
msan:
- CI_TARGET: "bazel.msan"
+ CI_TARGET: "msan"
ENVOY_FILTER_EXAMPLE: true
#
# Temporarily disabled to facilitate release CI, should be resolved
# as part of https://github.com/envoyproxy/envoy/issues/28566
#
# clang_tidy:
- # CI_TARGET: "bazel.clang_tidy"
+ # CI_TARGET: "clang_tidy"
# REPO_FETCH_DEPTH: 0
# REPO_FETCH_TAGS: true
# PUBLISH_TEST_RESULTS: false
# PUBLISH_ENVOY: false
api:
- CI_TARGET: "bazel.api"
+ CI_TARGET: "api"
timeoutInMinutes: 180
- pool:
- vmImage: $(agentUbuntu)
+ pool: envoy-x64-small
steps:
- - template: ../bazel.yml
+ - template: ../ci.yml
parameters:
ciTarget: $(CI_TARGET)
+ cacheName: $(CI_TARGET)
envoyBuildFilterExample: $(ENVOY_FILTER_EXAMPLE)
cacheTestResults: ${{ parameters.cacheTestResults }}
+ managedAgent: false
repoFetchDepth: $(REPO_FETCH_DEPTH)
repoFetchTags: $(REPO_FETCH_TAGS)
publishTestResults: variables.PUBLISH_TEST_RESULTS
publishEnvoy: variables.PUBLISH_ENVOY
+ tmpfsDockerDisabled: true
stepsPost:
-
# TODO(phlax): consolidate "fix" paths/jobs
- task: PublishBuildArtifacts@1
inputs:
pathtoPublish: "$(Build.StagingDirectory)/tmp/lint-fixes"
artifactName: "$(CI_TARGET).fixes"
timeoutInMinutes: 10
- condition: and(failed(), eq(variables['CI_TARGET'], 'bazel.clang_tidy'))
-
-- job: coverage
- displayName: "Linux x64"
- dependsOn: []
- condition: |
- and(not(canceled()),
- eq(${{ parameters.runChecks }}, 'true'))
- timeoutInMinutes: 300
- pool: "envoy-x64-large"
- strategy:
- maxParallel: 2
- matrix:
- coverage:
- CI_TARGET: "coverage"
- fuzz_coverage:
- CI_TARGET: "fuzz_coverage"
- steps:
- - template: ../bazel.yml
- parameters:
- managedAgent: false
- ciTarget: bazel.$(CI_TARGET)
- rbe: false
- # /tmp/sandbox_base is a tmpfs in CI environment to optimize large I/O for coverage traces
- bazelBuildExtraOptions: "--define=no_debug_info=1 --linkopt=-Wl,-s --test_env=ENVOY_IP_TEST_VERSIONS=v4only --sandbox_base=/tmp/sandbox_base"
- cacheTestResults: ${{ parameters.cacheTestResults }}
-
- - script: ci/run_envoy_docker.sh 'ci/do_ci.sh $(CI_TARGET)-upload'
- displayName: "Upload $(CI_TARGET) Report to GCS"
- env:
- ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)
- GCP_SERVICE_ACCOUNT_KEY: ${{ parameters.authGCP }}
- GCS_ARTIFACT_BUCKET: ${{ parameters.bucketGCP }}
- BAZEL_BUILD_EXTRA_OPTIONS: "--config=ci"
- BAZEL_REMOTE_CACHE: $(LocalBuildCache)
- ${{ if eq(variables['Build.Reason'], 'PullRequest') }}:
- BAZEL_REMOTE_INSTANCE_BRANCH: "$(System.PullRequest.TargetBranch)"
- ${{ if ne(variables['Build.Reason'], 'PullRequest') }}:
- BAZEL_REMOTE_INSTANCE_BRANCH: "$(Build.SourceBranchName)"
- condition: not(canceled())
+ condition: and(failed(), eq(variables['CI_TARGET'], 'clang_tidy'))
+ - script: ci/run_envoy_docker.sh 'ci/do_ci.sh $(CI_TARGET)-upload'
+ displayName: "Upload $(CI_TARGET) Report to GCS"
+ condition: and(not(canceled()), or(eq(variables['CI_TARGET'], 'coverage'), eq(variables['CI_TARGET'], 'fuzz_coverage')))
+ env:
+ GCS_ARTIFACT_BUCKET: ${{ parameters.bucketGCP }}
- job: complete
displayName: "Checks complete"
- dependsOn: ["bazel", "coverage"]
- pool: x64-nano
+ dependsOn: ["bazel"]
+ pool:
+ vmImage: $(agentUbuntu)
# This condition ensures that this (required) check passes if all of
# the preceding checks either pass or are skipped
# adapted from:
@@ -143,8 +115,7 @@ jobs:
condition: |
and(
eq(variables['Build.Reason'], 'PullRequest'),
- in(dependencies.bazel.result, 'Succeeded', 'SucceededWithIssues', 'Skipped'),
- in(dependencies.coverage.result, 'Succeeded', 'SucceededWithIssues', 'Skipped'))
+ in(dependencies.bazel.result, 'Succeeded', 'SucceededWithIssues', 'Skipped'))
steps:
- checkout: none
- bash: |
diff --git a/.azure-pipelines/stage/linux.yml b/.azure-pipelines/stage/linux.yml
index 80bfe1a0f549e..04ce08fb03899 100644
--- a/.azure-pipelines/stage/linux.yml
+++ b/.azure-pipelines/stage/linux.yml
@@ -6,11 +6,15 @@ parameters:
- name: pool
displayName: "Agent pool"
type: string
- default: envoy-x64-large
+ default: envoy-x64-small
- name: artifactSuffix
displayName: "Artifact suffix"
type: string
default:
+- name: runTests
+ displayName: "Run release tests"
+ type: string
+ default: true
- name: rbe
displayName: "Use RBE"
type: boolean
@@ -19,6 +23,17 @@ parameters:
displayName: "Build timeout"
type: number
default: 120
+- name: bazelBuildExtraOptions
+ type: string
+ default: ""
+
+- name: managedAgent
+ type: boolean
+ default: false
+- name: tmpfsDockerDisabled
+ type: string
+ default: ''
+
- name: runBuild
displayName: "Run build"
@@ -34,20 +49,33 @@ jobs:
timeoutInMinutes: ${{ parameters.timeoutBuild }}
pool: ${{ parameters.pool }}
steps:
- - template: ../bazel.yml
+ - bash: |
+ if [[ "${{ parameters.runTests }}" == "false" ]]; then
+ CI_TARGET="release.server_only"
+ else
+ CI_TARGET="release"
+ fi
+ echo "${CI_TARGET}"
+ echo "##vso[task.setvariable variable=value;isoutput=true]${CI_TARGET}"
+ name: target
+ - template: ../ci.yml
parameters:
- managedAgent: false
- ciTarget: bazel.release
- bazelBuildExtraOptions: "--sandbox_base=/tmp/sandbox_base"
+ artifactName: release
+ managedAgent: ${{ parameters.managedAgent }}
+ ciTarget: $(target.value)
+ cacheName: "release"
+ bazelBuildExtraOptions: ${{ parameters.bazelBuildExtraOptions }}
cacheTestResults: ${{ parameters.cacheTestResults }}
cacheVersion: $(cacheKeyBazel)
artifactSuffix: ${{ parameters.artifactSuffix }}
rbe: ${{ parameters.rbe }}
+ tmpfsDockerDisabled: ${{ parameters.tmpfsDockerDisabled }}
- job: released
displayName: Complete
dependsOn: ["release"]
- pool: x64-nano
+ pool:
+ vmImage: $(agentUbuntu)
# This condition ensures that this (required) job passes if all of
# the preceeding jobs either pass or are skipped
# adapted from:
diff --git a/.azure-pipelines/stage/macos.yml b/.azure-pipelines/stage/macos.yml
deleted file mode 100644
index d049e140eacd3..0000000000000
--- a/.azure-pipelines/stage/macos.yml
+++ /dev/null
@@ -1,55 +0,0 @@
-
-parameters:
-
-# Auth
-- name: authGCP
- type: string
- default: ""
-
-- name: runBuild
- displayName: "Run build"
- type: string
- default: true
-
-jobs:
-- job: test
- displayName: Build and test
- condition: |
- and(not(canceled()),
- eq(${{ parameters.runBuild }}, 'true'))
- timeoutInMinutes: 180
- pool:
- vmImage: "macos-11"
- steps:
- - script: ./ci/mac_ci_setup.sh
- displayName: "Install dependencies"
-
- - script: ./ci/mac_ci_steps.sh
- displayName: "Run Mac CI"
- env:
- BAZEL_BUILD_EXTRA_OPTIONS: "--remote_download_toplevel --flaky_test_attempts=2"
- BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com
- BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance
- GCP_SERVICE_ACCOUNT_KEY: ${{ parameters.authGCP }}
- ENVOY_RBE: 1
-
- - task: PublishTestResults@2
- inputs:
- testResultsFiles: "**/bazel-testlogs/**/test.xml"
- testRunTitle: "macOS"
- timeoutInMinutes: 10
- condition: not(canceled())
-
-- job: tested
- displayName: Complete
- dependsOn: ["test"]
- pool: x64-nano
- # This condition ensures that this (required) job passes if all of
- # the preceeding jobs either pass or are skipped
- # adapted from:
- # https://learn.microsoft.com/en-us/azure/devops/pipelines/process/expressions?view=azure-devops#job-to-job-dependencies-within-one-stage
- condition: and(eq(variables['Build.Reason'], 'PullRequest'), in(dependencies.test.result, 'Succeeded', 'SucceededWithIssues', 'Skipped'))
- steps:
- - checkout: none
- - bash: |
- echo "macos tested"
diff --git a/.azure-pipelines/stage/prechecks.yml b/.azure-pipelines/stage/prechecks.yml
index 6e7c82e577d7e..b699a960eacec 100644
--- a/.azure-pipelines/stage/prechecks.yml
+++ b/.azure-pipelines/stage/prechecks.yml
@@ -25,11 +25,25 @@ parameters:
type: string
default: ""
+# Timeout/s
+- name: timeoutPrechecks
+ type: number
+ # Building the rst from protos can take a while even with RBE if there is
+  # a lot of change - e.g. protobuf changed, or a primitive proto changed.
+ default: 40
+
+- name: runPrechecks
+ displayName: "Run prechecks"
+ type: string
+ default: true
jobs:
- job: prechecks
displayName: Precheck
- timeoutInMinutes: 30
+ timeoutInMinutes: ${{ parameters.timeoutPrechecks }}
+ condition: |
+ and(not(canceled()),
+ eq(${{ parameters.runPrechecks }}, 'true'))
pool:
vmImage: $(agentUbuntu)
variables:
@@ -41,12 +55,15 @@ jobs:
CI_TARGET: "format"
protobuf:
CI_TARGET: "check_and_fix_proto_format"
- publishing:
- CI_TARGET: docs
+ ${{ if eq(variables['Build.Reason'], 'PullRequest') }}:
+ publishing:
+ CI_TARGET: docs
steps:
- - template: ../bazel.yml
+ - template: ../ci.yml
parameters:
+ bazelBuildExtraOptions: --config=docs-ci
ciTarget: $(CI_TARGET)
+ cacheName: $(CI_TARGET)
cacheTestResults: ${{ parameters.cacheTestResults }}
cacheVersion: $(cacheKeyBazel)
publishEnvoy: false
@@ -82,7 +99,7 @@ jobs:
authGPGKey: ${{ parameters.authGPGKey }}
# GNUPGHOME inside the container
pathGPGConfiguredHome: /build/.gnupg
- pathGPGHome: /tmp/envoy-docker-build/.gnupg
+ pathGPGHome: $(Build.StagingDirectory)/.gnupg
- bash: |
set -e
ci/run_envoy_docker.sh "
@@ -90,7 +107,7 @@ jobs:
&& gpg --clearsign /tmp/authority \
&& cat /tmp/authority.asc \
&& gpg --verify /tmp/authority.asc"
- rm -rf /tmp/envoy-docker-build/.gnupg
+ rm -rf $(Build.StagingDirectory)/.gnupg
displayName: "Ensure container CI can sign with GPG"
condition: and(not(canceled()), eq(variables['CI_TARGET'], 'docs'))
@@ -112,12 +129,6 @@ jobs:
ci/run_envoy_docker.sh 'ci/do_ci.sh dockerhub-readme'
displayName: "Dockerhub publishing test"
env:
- ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)
- ENVOY_RBE: "1"
- BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --jobs=$(RbeJobs)"
- BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com
- BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance
- GCP_SERVICE_ACCOUNT_KEY: ${{ parameters.authGCP }}
GCS_ARTIFACT_BUCKET: ${{ parameters.bucketGCP }}
condition: eq(variables['CI_TARGET'], 'docs')
@@ -140,23 +151,17 @@ jobs:
condition: and(failed(), eq(variables['CI_TARGET'], 'check_and_fix_proto_format'))
# Publish docs
- - script: |
- ci/run_envoy_docker.sh 'ci/do_ci.sh docs-upload'
+ - script: ci/run_envoy_docker.sh 'ci/do_ci.sh docs-upload'
displayName: "Upload Docs to GCS"
env:
- ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)
- ENVOY_RBE: "1"
- BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --jobs=$(RbeJobs)"
- BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com
- BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance
- GCP_SERVICE_ACCOUNT_KEY: ${{ parameters.authGCP }}
GCS_ARTIFACT_BUCKET: ${{ parameters.bucketGCP }}
condition: eq(variables['CI_TARGET'], 'docs')
- job: prechecked
displayName: Prechecked
dependsOn: ["prechecks"]
- pool: x64-nano
+ pool:
+ vmImage: $(agentUbuntu)
# This condition ensures that this (required) job passes if all of
# the preceeding jobs either pass or are skipped
# adapted from:
diff --git a/.azure-pipelines/stage/publish.yml b/.azure-pipelines/stage/publish.yml
index d80c1f5057277..30e62ebc362c9 100644
--- a/.azure-pipelines/stage/publish.yml
+++ b/.azure-pipelines/stage/publish.yml
@@ -39,24 +39,12 @@ parameters:
- name: authGPGKey
type: string
default: ""
-- name: authNetlifyURL
- type: string
- default: ""
- name: authDockerUser
type: string
default: ""
- name: authDockerPassword
type: string
default: ""
-- name: authSSHDocsKey
- type: string
- default: ""
-- name: authSSHDocsKeyPublic
- type: string
- default: ""
-- name: authSSHKeyPassphrase
- type: string
- default: ""
- name: runDocker
displayName: "Run Docker"
@@ -71,18 +59,6 @@ parameters:
displayName: "Publish Dockerhub"
type: string
default: false
-- name: publishDocs
- displayName: "Publish Docs"
- type: string
- default: false
-- name: publishDocsLatest
- displayName: "Publish latest docs"
- type: string
- default: false
-- name: publishDocsRelease
- displayName: "Publish release docs"
- type: string
- default: false
- name: publishGithubRelease
displayName: "Publish Github release"
type: string
@@ -101,22 +77,18 @@ jobs:
- task: DownloadBuildArtifacts@0
inputs:
buildType: current
- artifactName: "bazel.release"
- itemPattern: "bazel.release/**/bin/*"
+ artifactName: "release"
+ itemPattern: "release/**/bin/*"
targetPath: $(Build.StagingDirectory)
- - template: ../bazel.yml
+ - template: ../ci.yml
parameters:
ciTarget: docker-upload
- # cacheVersion: $(cacheKeyBazel)
+ cacheName: docker-upload
publishEnvoy: false
publishTestResults: false
- # VERSION.txt is included to refresh Docker images for release
- cacheKeyDocker: "ci/Dockerfile-envoy | VERSION.txt| $(cacheKeyBazelFiles)"
- cacheKeyDockerName: publish_docker
- cacheKeyDockerTmpDir: /var/azpcache
- cacheKeyDockerNoTmpfs: true
- cacheKeyDockerPath: ""
- cacheKeyDockerVersion: "$(cacheKeyDockerBuild)"
+ pathDockerBind: ""
+ tmpfsCacheDisabled: true
+ diskspaceHack: true
env:
GCS_ARTIFACT_BUCKET: ${{ parameters.bucketGCP }}
stepsPre:
@@ -124,37 +96,36 @@ jobs:
echo "disk space at beginning of Docker build:"
df -h
displayName: "Check disk space before Docker build"
+ # TODO(phlax): switch docker <> docker-upload as main task
- bash: |
set -e
-
- mkdir -p linux/amd64 linux/arm64
-
- # x64
- cp -a $(Build.StagingDirectory)/bazel.release/x64/bin/release.tar.zst linux/amd64/release.tar.zst
- cp -a $(Build.StagingDirectory)/bazel.release/x64/bin/schema_validator_tool linux/amd64/schema_validator_tool
-
- # arm64
- cp -a $(Build.StagingDirectory)/bazel.release/arm64/bin/release.tar.zst linux/arm64/release.tar.zst
- cp -a $(Build.StagingDirectory)/bazel.release/arm64/bin/schema_validator_tool linux/arm64/schema_validator_tool
-
- # Debug what files appear to have been downloaded
- find linux -type f -name "*" | xargs ls -l
-
- ci/docker_ci.sh
+ mkdir -p $(Build.StagingDirectory)/envoy
+ rm -rf $(Build.StagingDirectory)/envoy/*
+ mv $(Build.StagingDirectory)/release/* $(Build.StagingDirectory)/envoy
+ ./ci/run_envoy_docker.sh 'ci/do_ci.sh docker'
displayName: Build Docker images
timeoutInMinutes: ${{ parameters.timeoutDockerPublish }}
workingDirectory: $(Build.SourcesDirectory)
env:
- AZP_BRANCH: $(Build.SourceBranch)
- AZP_SHA1: $(Build.SourceVersion)
+ CI_BRANCH: $(Build.SourceBranch)
+ CI_SHA1: $(Build.SourceVersion)
DOCKERHUB_USERNAME: ${{ parameters.authDockerUser }}
DOCKERHUB_PASSWORD: ${{ parameters.authDockerPassword }}
DOCKER_BUILD_TIMEOUT: ${{ parameters.timeoutDockerBuild }}
+ ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)
+ ENVOY_DOCKER_IN_DOCKER: 1
+
stepsPost:
- script: |
- sudo .azure-pipelines/docker/save_cache.sh /var/azpcache true
- sudo rm -rf /var/lib/docker
- displayName: "Cache/save (publish_docker)"
+ ci/run_envoy_docker.sh 'ci/do_ci.sh dockerhub-publish'
+ condition: |
+ and(not(canceled()), succeeded(),
+ eq(${{ parameters.publishDockerhub }}, 'true'))
+ displayName: "Publish Dockerhub description and README"
+ env:
+ GCS_ARTIFACT_BUCKET: ${{ parameters.bucketGCP }}
+ DOCKERHUB_USERNAME: ${{ parameters.authDockerUser }}
+ DOCKERHUB_PASSWORD: ${{ parameters.authDockerPassword }}
- job: package_x64
displayName: Linux debs (x64)
@@ -169,12 +140,14 @@ jobs:
- task: DownloadBuildArtifacts@0
inputs:
buildType: current
- artifactName: "bazel.release"
- itemPattern: "bazel.release/x64/bin/*"
+ artifactName: "release"
+ itemPattern: "release/x64/bin/*"
targetPath: $(Build.StagingDirectory)
- - template: ../bazel.yml
+ - template: ../ci.yml
parameters:
- ciTarget: bazel.distribution
+ ciTarget: distribution
+ cacheName: distribution
+ publishTestResults: false
stepsPre:
- template: ../gpg.yml
parameters:
@@ -195,22 +168,25 @@ jobs:
and(not(canceled()),
eq(${{ parameters.runPackaging }}, 'true'))
timeoutInMinutes: 120
- pool: "envoy-arm-large"
+ pool: "envoy-arm-small"
steps:
- task: DownloadBuildArtifacts@0
inputs:
buildType: current
- artifactName: "bazel.release"
- itemPattern: "bazel.release/arm64/bin/*"
+ artifactName: "release"
+ itemPattern: "release/arm64/bin/*"
targetPath: $(Build.StagingDirectory)
- - template: ../bazel.yml
+ - template: ../ci.yml
parameters:
managedAgent: false
- ciTarget: bazel.distribution
+ ciTarget: distribution
+ cacheName: distribution
rbe: false
artifactSuffix: ".arm64"
bazelBuildExtraOptions: "--sandbox_base=/tmp/sandbox_base"
+ publishTestResults: false
+ tmpfsDockerDisabled: true
stepsPre:
- template: ../gpg.yml
parameters:
@@ -224,83 +200,6 @@ jobs:
set -e
rm -rf $(Build.StagingDirectory)/.gnupg
-- job: docs
- displayName: Publish docs
- dependsOn: []
- condition: |
- and(not(canceled()),
- eq(${{ parameters.publishDocs }}, 'true'))
- pool:
- vmImage: $(agentUbuntu)
- steps:
- - template: ../bazel.yml
- parameters:
- ciTarget: docs
- cacheVersion: $(cacheKeyBazel)
- publishEnvoy: false
- publishTestResults: false
- env:
- AZP_BRANCH: $(Build.SourceBranch)
- stepsPost:
-
- - script: |
- ci/run_envoy_docker.sh 'ci/do_ci.sh dockerhub-publish'
- condition: |
- and(not(canceled()),
- eq(${{ parameters.publishDockerhub }}, 'true'))
- displayName: "Publish Dockerhub description and README"
- env:
- ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)
- ENVOY_RBE: "1"
- BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --jobs=$(RbeJobs)"
- BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com
- BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance
- GCP_SERVICE_ACCOUNT_KEY: ${{ parameters.authGCP }}
- GCS_ARTIFACT_BUCKET: ${{ parameters.bucketGCP }}
- DOCKERHUB_USERNAME: ${{ parameters.authDockerUser }}
- DOCKERHUB_PASSWORD: ${{ parameters.authDockerPassword }}
-
- # Trigger Netlify rebuild of latest docs
- - script: |
- ci/run_envoy_docker.sh 'ci/do_ci.sh docs-upload'
- displayName: "Upload Docs to GCS"
- condition: |
- and(not(canceled()),
- eq(${{ parameters.publishDocsLatest }}, 'true'))
- env:
- ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)
- ENVOY_RBE: "1"
- BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --jobs=$(RbeJobs)"
- BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com
- BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance
- GCP_SERVICE_ACCOUNT_KEY: ${{ parameters.authGCP }}
- GCS_ARTIFACT_BUCKET: ${{ parameters.bucketGCP }}
- - script: ci/run_envoy_docker.sh 'ci/do_ci.sh docs-publish-latest'
- condition: |
- and(not(canceled()),
- eq(${{ parameters.publishDocsLatest }}, 'true'))
- displayName: "Publish latest docs"
- workingDirectory: $(Build.SourcesDirectory)
- env:
- NETLIFY_TRIGGER_URL: ${{ parameters.authNetlifyURL }}
-
- # Publish docs to the website
- - task: InstallSSHKey@0
- condition: |
- and(not(canceled()),
- eq(${{ parameters.publishDocsRelease }}, 'true'))
- inputs:
- hostName: $(authGithubSSHKeyPublic)
- sshPublicKey: "${{ parameters.authSSHDocsKeyPublic }}"
- sshPassphrase: "${{ parameters.authSSHKeyPassphrase }}"
- sshKeySecureFile: "${{ parameters.authSSHDocsKey }}"
- - script: docs/publish.sh
- condition: |
- and(not(canceled()),
- eq(${{ parameters.publishDocsRelease }}, 'true'))
- displayName: "Publish release docs"
- workingDirectory: $(Build.SourcesDirectory)
-
- job: signed_release
displayName: Signed binaries
dependsOn:
@@ -316,18 +215,20 @@ jobs:
- task: DownloadBuildArtifacts@0
inputs:
buildType: current
- artifactName: "bazel.release"
- itemPattern: "bazel.release/**/bin/*"
+ artifactName: "release"
+ itemPattern: "release/**/bin/*"
targetPath: $(Build.StagingDirectory)
- task: DownloadBuildArtifacts@0
inputs:
buildType: current
- artifactName: "bazel.distribution"
- itemPattern: "bazel.distribution/**/packages.*.tar.gz"
+ artifactName: "distribution"
+ itemPattern: "distribution/**/packages.*.tar.gz"
targetPath: $(Build.StagingDirectory)
- - template: ../bazel.yml
+ - template: ../ci.yml
parameters:
ciTarget: release.signed
+ cacheName: release-signed
+ publishTestResults: false
env:
GCS_ARTIFACT_BUCKET: ${{ parameters.bucketGCP }}
stepsPre:
@@ -338,10 +239,17 @@ jobs:
authGPGKey: ${{ parameters.authGPGKey }}
pathGPGConfiguredHome: /build/.gnupg
pathGPGHome: $(Build.StagingDirectory)/.gnupg
+ - bash: |
+ set -e -o pipefail
+ mkdir -p distribution/custom
+ cp -a $(Build.StagingDirectory)/*/*64 distribution/custom/
+ workingDirectory: $(Build.SourcesDirectory)
+
- job: success
- dependsOn: ["docker", "docs", "signed_release"]
+ dependsOn: ["docker", "signed_release"]
displayName: Success (linux artefacts)
- pool: x64-nano
+ pool:
+ vmImage: $(agentUbuntu)
# This condition ensures that this (required) check passes if all of
# the preceding checks either pass or are skipped
# adapted from:
@@ -349,7 +257,6 @@ jobs:
condition: |
and(
in(dependencies.docker.result, 'Succeeded', 'SucceededWithIssues', 'Skipped'),
- in(dependencies.docs.result, 'Succeeded', 'SucceededWithIssues', 'Skipped'),
in(dependencies.signed_release.result, 'Succeeded', 'SucceededWithIssues', 'Skipped'))
steps:
- checkout: none
@@ -366,14 +273,26 @@ jobs:
pool:
vmImage: $(agentUbuntu)
steps:
- - template: ../bazel.yml
+ - task: DownloadSecureFile@1
+ name: WorkflowTriggerKey
+ displayName: 'Download workflow trigger key'
+ inputs:
+ secureFile: '${{ parameters.authGithubWorkflow }}'
+ - bash: |
+ set -e
+ KEY="$(cat $(WorkflowTriggerKey.secureFilePath) | base64 -w0)"
+ echo "##vso[task.setvariable variable=value;isoutput=true]$KEY"
+ name: key
+ - template: ../ci.yml
parameters:
ciTarget: verify.trigger
+ cacheName: verify-trigger
authGithub: "$(key.value)"
cacheVersion: $(cacheKeyBazel)
publishEnvoy: false
publishTestResults: false
env:
+ ENVOY_REPO: $(Build.Repository.Name)
${{ if eq(variables['Build.Reason'], 'PullRequest') }}:
ENVOY_HEAD_REF: "$(Build.SourceBranch)"
ENVOY_BRANCH: "$(System.PullRequest.TargetBranch)"
@@ -398,13 +317,3 @@ jobs:
mkdir -p $(Build.StagingDirectory)/release.signed
mv release.signed.tar.zst $(Build.StagingDirectory)/release.signed
displayName: Fetch signed release
- - task: DownloadSecureFile@1
- name: WorkflowTriggerKey
- displayName: 'Download workflow trigger key'
- inputs:
- secureFile: '${{ parameters.authGithubWorkflow }}'
- - bash: |
- set -e
- KEY="$(cat $(WorkflowTriggerKey.secureFilePath) | base64 -w0)"
- echo "##vso[task.setvariable variable=value;isoutput=true]$KEY"
- name: key
diff --git a/.azure-pipelines/stage/verify.yml b/.azure-pipelines/stage/verify.yml
index 67898770c463b..f429feb4ff441 100644
--- a/.azure-pipelines/stage/verify.yml
+++ b/.azure-pipelines/stage/verify.yml
@@ -12,57 +12,54 @@ jobs:
displayName: Debs (x64)
condition: and(not(canceled()), succeeded(), ne(stageDependencies.env.repo.outputs['changed.mobileOnly'], 'true'), ne(stageDependencies.env.repo.outputs['changed.docsOnly'], 'true'), ne(stageDependencies.env.repo.outputs['changed.examplesOnly'], 'true'))
timeoutInMinutes: 120
- pool:
- vmImage: $(agentUbuntu)
+ pool: envoy-x64-small
steps:
- task: DownloadBuildArtifacts@0
inputs:
buildType: current
- artifactName: "bazel.distribution"
- itemPattern: "bazel.distribution/x64/packages.x64.tar.gz"
+ artifactName: "distribution"
+ itemPattern: "distribution/x64/packages.x64.tar.gz"
downloadType: single
targetPath: $(Build.StagingDirectory)
- - script: ci/run_envoy_docker.sh 'ci/do_ci.sh verify_distro'
- workingDirectory: $(Build.SourcesDirectory)
- env:
- AZP_BRANCH: $(Build.SourceBranch)
- ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)
- ENVOY_DOCKER_IN_DOCKER: 1
- ENVOY_RBE: 1
- BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com
- BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance
- GCP_SERVICE_ACCOUNT_KEY: ${{ parameters.authGCP }}
- displayName: "Verify packages"
+ - template: ../ci.yml
+ parameters:
+ ciTarget: verify_distro
+ cacheName: verify_distro
+ publishTestResults: false
+ tmpfsDockerDisabled: true
+ env:
+ ENVOY_DOCKER_IN_DOCKER: 1
- job: packages_arm64
displayName: Debs (arm64)
condition: and(not(canceled()), succeeded(), ne(stageDependencies.env.repo.outputs['changed.mobileOnly'], 'true'), ne(stageDependencies.env.repo.outputs['changed.docsOnly'], 'true'), ne(stageDependencies.env.repo.outputs['changed.examplesOnly'], 'true'))
timeoutInMinutes: 120
- pool: "envoy-arm-large"
+ pool: "envoy-arm-small"
steps:
- task: DownloadBuildArtifacts@0
inputs:
buildType: current
- artifactName: "bazel.distribution"
- itemPattern: "bazel.distribution/arm64/packages.arm64.tar.gz"
+ artifactName: "distribution"
+ itemPattern: "distribution/arm64/packages.arm64.tar.gz"
downloadType: single
targetPath: $(Build.StagingDirectory)
- - script: ci/run_envoy_docker.sh 'ci/do_ci.sh verify_distro'
- workingDirectory: $(Build.SourcesDirectory)
- env:
- AZP_BRANCH: $(Build.SourceBranch)
- ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)
- ENVOY_DOCKER_IN_DOCKER: 1
- ENVOY_RBE: 1
- BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com
- BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance
- GCP_SERVICE_ACCOUNT_KEY: ${{ parameters.authGCP }}
- displayName: "Verify packages"
+ - template: ../ci.yml
+ parameters:
+ managedAgent: false
+ ciTarget: verify_distro
+ cacheName: verify_distro
+ rbe: false
+ artifactSuffix: ".arm64"
+ publishTestResults: false
+ tmpfsDockerDisabled: true
+ env:
+ ENVOY_DOCKER_IN_DOCKER: 1
- job: verified
displayName: Verification complete
dependsOn: ["packages_x64", "packages_arm64"]
- pool: x64-nano
+ pool:
+ vmImage: $(agentUbuntu)
# This condition ensures that this (required) check passes if all of
# the preceding checks either pass or are skipped
# adapted from:
diff --git a/.azure-pipelines/stage/windows.yml b/.azure-pipelines/stage/windows.yml
deleted file mode 100644
index a59e01d024d32..0000000000000
--- a/.azure-pipelines/stage/windows.yml
+++ /dev/null
@@ -1,119 +0,0 @@
-
-parameters:
-
-# Auth
-- name: authGCP
- type: string
- default: ""
-
-- name: runBuild
- displayName: "Run build"
- type: string
- default: true
-
-jobs:
-- job: release
- displayName: Build and test
- condition: |
- and(not(canceled()),
- eq(${{ parameters.runBuild }}, 'true'))
- timeoutInMinutes: 180
- pool:
- vmImage: "windows-2019"
- steps:
- - task: Cache@2
- inputs:
- key: '"windows.release" | $(cacheKeyBazelFiles)'
- path: $(Build.StagingDirectory)/repository_cache
- continueOnError: true
- - bash: ci/run_envoy_docker.sh ci/windows_ci_steps.sh
- displayName: "Run Windows msvc-cl CI"
- env:
- CI_TARGET: "windows"
- ENVOY_DOCKER_BUILD_DIR: "$(Build.StagingDirectory)"
- ENVOY_RBE: "true"
- BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --config=remote-msvc-cl --jobs=$(RbeJobs) --flaky_test_attempts=2"
- BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com
- BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance
- GCP_SERVICE_ACCOUNT_KEY: ${{ parameters.authGCP }}
-
- - task: PublishTestResults@2
- inputs:
- testResultsFiles: "**/bazel-out/**/testlogs/**/test.xml"
- testRunTitle: "windows"
- searchFolder: $(Build.StagingDirectory)/tmp
- timeoutInMinutes: 10
- condition: not(canceled())
- - task: PublishBuildArtifacts@1
- inputs:
- pathtoPublish: "$(Build.StagingDirectory)/envoy"
- artifactName: windows.release
- timeoutInMinutes: 10
- condition: not(canceled())
-
-- job: docker
- displayName: Build Docker image
- condition: and(not(canceled()), succeeded(), ne(stageDependencies.env.repo.outputs['changed.mobileOnly'], 'true'), ne(stageDependencies.env.repo.outputs['changed.docsOnly'], 'true'), ne(stageDependencies.env.repo.outputs['changed.examplesOnly'], 'true'))
- strategy:
- matrix:
- windows2019:
- imageName: 'windows-2019'
- windowsBuildType: "windows"
- windowsImageBase: "mcr.microsoft.com/windows/servercore"
- windowsImageTag: "ltsc2019"
- windows2022:
- imageName: 'windows-2022'
- windowsBuildType: "windows-ltsc2022"
- windowsImageBase: "mcr.microsoft.com/windows/nanoserver"
- windowsImageTag: "ltsc2022"
- dependsOn: ["release"]
- timeoutInMinutes: 120
- pool:
- vmImage: $(imageName)
- steps:
- - task: DownloadBuildArtifacts@0
- inputs:
- buildType: current
- artifactName: "windows.release"
- itemPattern: "windows.release/envoy_binary.tar.gz"
- downloadType: single
- targetPath: $(Build.StagingDirectory)
- - bash: |
- set -e
- # Convert to Unix-style path so tar doesn't think drive letter is a hostname
- STAGING_DIR="/$(echo '$(Build.StagingDirectory)' | tr -d ':' | tr '\\' '/')"
- mkdir -p windows/amd64 && tar zxf "${STAGING_DIR}/windows.release/envoy_binary.tar.gz" -C ./windows/amd64
- ci/docker_ci.sh
- workingDirectory: $(Build.SourcesDirectory)
- env:
- AZP_BRANCH: $(Build.SourceBranch)
- AZP_SHA1: $(Build.SourceVersion)
- DOCKERHUB_USERNAME: $(DockerUsername)
- DOCKERHUB_PASSWORD: $(DockerPassword)
- WINDOWS_BUILD_TYPE: $(windowsBuildType)
- WINDOWS_IMAGE_BASE: $(windowsImageBase)
- WINDOWS_IMAGE_TAG: $(windowsImageTag)
- - task: PublishBuildArtifacts@1
- inputs:
- pathtoPublish: "$(Build.StagingDirectory)/build_images"
- artifactName: docker_windows
- timeoutInMinutes: 10
- condition: not(canceled())
-
-- job: released
- displayName: Complete
- dependsOn: ["release", "docker"]
- pool: x64-nano
- # This condition ensures that this (required) job passes if all of
- # the preceeding jobs either pass or are skipped
- # adapted from:
- # https://learn.microsoft.com/en-us/azure/devops/pipelines/process/expressions?view=azure-devops#job-to-job-dependencies-within-one-stage
- condition: |
- and(
- eq(variables['Build.Reason'], 'PullRequest'),
- in(dependencies.release.result, 'Succeeded', 'SucceededWithIssues', 'Skipped'),
- in(dependencies.docker.result, 'Succeeded', 'SucceededWithIssues', 'Skipped'))
- steps:
- - checkout: none
- - bash: |
- echo "windows released"
diff --git a/.azure-pipelines/stages.yml b/.azure-pipelines/stages.yml
index 39ca4fc3a8f32..c957a14a4a9eb 100644
--- a/.azure-pipelines/stages.yml
+++ b/.azure-pipelines/stages.yml
@@ -8,18 +8,6 @@ parameters:
default:
- env
- prechecks
-- name: macBuildStageDeps
- displayName: "macOS stage dependencies"
- type: object
- default:
- - env
- - prechecks
-- name: windowsBuildStageDeps
- displayName: "Windows stage dependencies"
- type: object
- default:
- - env
- - prechecks
- name: checkStageDeps
displayName: "Check stage dependencies"
type: object
@@ -29,7 +17,7 @@ parameters:
- name: concurrencyChecks
displayName: "Check concurrency"
type: number
- default: 3
+ default: 10
- name: concurrencyPrechecks
displayName: "Prechecks concurrency"
type: number
@@ -60,6 +48,8 @@ stages:
- stage: prechecks
displayName: Prechecks
dependsOn: ["env"]
+ variables:
+ RUN_PRECHECKS: $[stageDependencies.env.repo.outputs['run.releaseTests']]
jobs:
- template: stage/prechecks.yml
parameters:
@@ -70,23 +60,29 @@ stages:
authGPGKey: $(MaintainerGPGKeySecureFileDownloadPath)
authGPGPath: $(MaintainerGPGKey.secureFilePath)
bucketGCP: $(GcsArtifactBucket)
+ runPrechecks: variables['RUN_PRECHECKS']
- stage: linux_x64
displayName: Linux x64
dependsOn: ${{ parameters.buildStageDeps }}
variables:
RUN_BUILD: $[stageDependencies.env.repo.outputs['run.build']]
+ RUN_TESTS: $[stageDependencies.env.repo.outputs['run.releaseTests']]
jobs:
- template: stage/linux.yml
parameters:
cacheTestResults: ${{ parameters.cacheTestResults }}
+ # these are parsed differently and _must_ be expressed in this way
runBuild: variables['RUN_BUILD']
+ runTests: $(RUN_TESTS)
+ tmpfsDockerDisabled: true
- stage: linux_arm64
displayName: Linux arm64
dependsOn: ${{ parameters.buildStageDeps }}
variables:
RUN_BUILD: $[stageDependencies.env.repo.outputs['run.build']]
+ RUN_TESTS: $[stageDependencies.env.repo.outputs['run.releaseTests']]
jobs:
- template: stage/linux.yml
parameters:
@@ -96,6 +92,8 @@ stages:
timeoutBuild: 180
pool: envoy-arm-large
runBuild: variables['RUN_BUILD']
+ runTests: $(RUN_TESTS)
+ bazelBuildExtraOptions: "--sandbox_base=/tmp/sandbox_base"
- stage: check
displayName: Checks (Linux x64)
@@ -119,9 +117,6 @@ stages:
RUN_PACKAGING: $[stageDependencies.env.repo.outputs['run.packaging']]
PUBLISH_GITHUB_RELEASE: $[stageDependencies.env.repo.outputs['publish.githubRelease']]
PUBLISH_DOCKERHUB: $[stageDependencies.env.repo.outputs['publish.dockerhub']]
- PUBLISH_DOCS: $[stageDependencies.env.repo.outputs['publish.docs']]
- PUBLISH_DOCS_LATEST: $[stageDependencies.env.repo.outputs['publish.docsLatest']]
- PUBLISH_DOCS_RELEASE: $[stageDependencies.env.repo.outputs['publish.docsRelease']]
jobs:
- template: stage/publish.yml
parameters:
@@ -134,18 +129,11 @@ stages:
authGPGPassphrase: $(MaintainerGPGKeyPassphrase)
authGPGKey: $(MaintainerGPGKeySecureFileDownloadPath)
authGPGPath: $(MaintainerGPGKey.secureFilePath)
- authNetlifyURL: $(NetlifyTriggerURL)
- authSSHDocsKeyPublic: $(DocsPublicKey)
- authSSHDocsKey: $(DocsPrivateKey)
- authSSHKeyPassphrase: $(SshDeployKeyPassphrase)
bucketGCP: $(GcsArtifactBucket)
timeoutDockerBuild: ${{ parameters.timeoutDockerBuild }}
timeoutDockerPublish: ${{ parameters.timeoutDockerPublish }}
runDocker: variables['RUN_DOCKER']
runPackaging: variables['RUN_PACKAGING']
- publishDocs: variables['PUBLISH_DOCS']
- publishDocsLatest: variables['PUBLISH_DOCS_LATEST']
- publishDocsRelease: variables['PUBLISH_DOCS_RELEASE']
publishDockerhub: variables['PUBLISH_DOCKERHUB']
publishGithubRelease: variables['PUBLISH_GITHUB_RELEASE']
@@ -158,25 +146,3 @@ stages:
- template: stage/verify.yml
parameters:
authGCP: $(GcpServiceAccountKey)
-
-- stage: macos
- displayName: macOS
- dependsOn: ${{ parameters.macBuildStageDeps }}
- variables:
- RUN_BUILD: $[stageDependencies.env.repo.outputs['run.build']]
- jobs:
- - template: stage/macos.yml
- parameters:
- authGCP: $(GcpServiceAccountKey)
- runBuild: variables['RUN_BUILD']
-
-- stage: windows
- displayName: Windows
- dependsOn: ${{ parameters.windowsBuildStageDeps }}
- variables:
- RUN_BUILD: $[stageDependencies.env.repo.outputs['run.build']]
- jobs:
- - template: stage/windows.yml
- parameters:
- authGCP: $(GcpServiceAccountKey)
- runBuild: variables['RUN_BUILD']
diff --git a/.bazelrc b/.bazelrc
index 17a7fa0b9b4e4..055f7c48197b9 100644
--- a/.bazelrc
+++ b/.bazelrc
@@ -10,9 +10,11 @@
# Startup options cannot be selected via config.
startup --host_jvm_args=-Xmx3g
+fetch --color=yes
run --color=yes
build --color=yes
+build --jobs=HOST_CPUS-1
build --workspace_status_command="bash bazel/get_workspace_status"
build --incompatible_strict_action_env
build --java_runtime_version=remotejdk_11
@@ -40,6 +42,8 @@ build --action_env=BAZEL_FAKE_SCM_REVISION --host_action_env=BAZEL_FAKE_SCM_REVI
build --enable_platform_specific_config
build --test_summary=terse
+build:docs-ci --action_env=DOCS_RST_CHECK=1 --host_action_env=DOCS_RST_CHECK=1
+
# TODO(keith): Remove once these 2 are the default
build --incompatible_config_setting_private_default_visibility
build --incompatible_enforce_config_setting_visibility
@@ -69,8 +73,6 @@ build --@com_googlesource_googleurl//build_config:system_icu=0
# Common flags for sanitizers
build:sanitizer --define tcmalloc=disabled
build:sanitizer --linkopt -ldl
-build:sanitizer --build_tag_filters=-no_san
-build:sanitizer --test_tag_filters=-no_san
# Common flags for Clang
build:clang --action_env=BAZEL_COMPILER=clang
@@ -90,6 +92,8 @@ build:asan --config=sanitizer
# ASAN install its signal handler, disable ours so the stacktrace will be printed by ASAN
build:asan --define signal_trace=disabled
build:asan --define ENVOY_CONFIG_ASAN=1
+build:asan --build_tag_filters=-no_san
+build:asan --test_tag_filters=-no_san
build:asan --copt -fsanitize=address,undefined
build:asan --linkopt -fsanitize=address,undefined
# vptr and function sanitizer are enabled in clang-asan if it is set up via bazel/setup_clang.sh.
@@ -143,12 +147,15 @@ build:clang-tsan --copt -DEVENT__DISABLE_DEBUG_MODE
# https://github.com/abseil/abseil-cpp/issues/760
# https://github.com/google/sanitizers/issues/953
build:clang-tsan --test_env="TSAN_OPTIONS=report_atomic_races=0"
+build:clang-tsan --test_timeout=120,600,1500,4800
# Clang MSAN - this is the base config for remote-msan and docker-msan. To run this config without
# our build image, follow https://github.com/google/sanitizers/wiki/MemorySanitizerLibcxxHowTo
# with libc++ instruction and provide corresponding `--copt` and `--linkopt` as well.
build:clang-msan --action_env=ENVOY_MSAN=1
build:clang-msan --config=sanitizer
+build:clang-msan --build_tag_filters=-no_san
+build:clang-msan --test_tag_filters=-no_san
build:clang-msan --define ENVOY_CONFIG_MSAN=1
build:clang-msan --copt -fsanitize=memory
build:clang-msan --linkopt -fsanitize=memory
@@ -182,6 +189,7 @@ build --test_env=HEAPCHECK=normal --test_env=PPROF_PATH
# Coverage options
coverage --config=coverage
coverage --build_tests_only
+
build:coverage --action_env=BAZEL_USE_LLVM_NATIVE_COVERAGE=1
build:coverage --action_env=GCOV=llvm-profdata
build:coverage --copt=-DNDEBUG
@@ -190,20 +198,31 @@ build:coverage --test_timeout=390,750,1500,5700
build:coverage --define=dynamic_link_tests=true
build:coverage --define=ENVOY_CONFIG_COVERAGE=1
build:coverage --cxxopt="-DENVOY_CONFIG_COVERAGE=1"
-build:coverage --coverage_support=@envoy//bazel/coverage:coverage_support
-build:coverage --test_env=CC_CODE_COVERAGE_SCRIPT=bazel/coverage/collect_cc_coverage.sh
build:coverage --test_env=HEAPCHECK=
build:coverage --combined_report=lcov
-build:coverage --strategy=TestRunner=sandboxed,local
+build:coverage --strategy=TestRunner=remote,sandboxed,local
build:coverage --strategy=CoverageReport=sandboxed,local
build:coverage --experimental_use_llvm_covmap
+build:coverage --experimental_generate_llvm_lcov
+build:coverage --experimental_split_coverage_postprocessing
+build:coverage --experimental_fetch_all_coverage_outputs
build:coverage --collect_code_coverage
-build:coverage --test_tag_filters=-nocoverage
-build:coverage --instrumentation_filter="//source(?!/common/quic/platform)[/:],//envoy[/:],//contrib(?!/.*/test)[/:]"
+build:coverage --instrumentation_filter="^//source(?!/common/quic/platform)[/:],^//envoy[/:],^//contrib(?!/.*/test)[/:]"
+build:coverage --remote_download_minimal
+build:coverage --define=tcmalloc=gperftools
+build:coverage --define=no_debug_info=1
+# `--no-relax` is required for coverage to not err with `relocation R_X86_64_REX_GOTPCRELX`
+build:coverage --linkopt=-Wl,-s,--no-relax
+build:coverage --test_env=ENVOY_IP_TEST_VERSIONS=v4only
+
build:test-coverage --test_arg="-l trace"
build:test-coverage --test_arg="--log-path /dev/null"
+build:test-coverage --test_tag_filters=-nocoverage,-fuzz_target
build:fuzz-coverage --config=plain-fuzzer
build:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh
+build:fuzz-coverage --test_tag_filters=-nocoverage
+
+build:cache-local --remote_cache=grpc://localhost:9092
# Remote execution: https://docs.bazel.build/versions/master/remote-execution.html
build:rbe-toolchain --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
@@ -263,10 +282,6 @@ build:remote --spawn_strategy=remote,sandboxed,local
build:remote --strategy=Javac=remote,sandboxed,local
build:remote --strategy=Closure=remote,sandboxed,local
build:remote --strategy=Genrule=remote,sandboxed,local
-build:remote --remote_timeout=7200
-build:remote --google_default_credentials=true
-build:remote --remote_download_toplevel
-build:remote --nobuild_runfile_links
# Windows bazel does not allow sandboxed as a spawn strategy
build:remote-windows --spawn_strategy=remote,local
@@ -306,9 +321,28 @@ build:remote-clang-cl --config=remote-windows
build:remote-clang-cl --config=clang-cl
build:remote-clang-cl --config=rbe-toolchain-clang-cl
+## Compile-time-options testing
+# Right now, none of the available compile-time options conflict with each other. If this
+# changes, this build type may need to be broken up.
+build:compile-time-options --define=admin_html=disabled
+build:compile-time-options --define=signal_trace=disabled
+build:compile-time-options --define=hot_restart=disabled
+build:compile-time-options --define=google_grpc=disabled
+build:compile-time-options --define=boringssl=fips
+build:compile-time-options --define=log_debug_assert_in_release=enabled
+build:compile-time-options --define=path_normalization_by_default=true
+build:compile-time-options --define=deprecated_features=disabled
+build:compile-time-options --define=tcmalloc=gperftools
+build:compile-time-options --define=zlib=ng
+build:compile-time-options --define=uhv=enabled
+build:compile-time-options --config=libc++20
+build:compile-time-options --test_env=ENVOY_HAS_EXTRA_EXTENSIONS=true
+build:compile-time-options --@envoy//bazel:http3=False
+build:compile-time-options --@envoy//source/extensions/filters/http/kill_request:enabled
+
# Docker sandbox
# NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/main/toolchains/rbe_toolchains_config.bzl#L8
-build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:41c5a05d708972d703661b702a63ef5060125c33
+build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:fdd65c6270a8507a18d5acd6cf19a18cb695e4fa@sha256:3c8a3ce6f90dcfb5d09dc8f79bb01404d3526d420061f9a176e0a8e91e1e573e
build:docker-sandbox --spawn_strategy=docker
build:docker-sandbox --strategy=Javac=docker
build:docker-sandbox --strategy=Closure=docker
@@ -339,16 +373,13 @@ build:docker-tsan --config=rbe-toolchain-clang-libc++
build:docker-tsan --config=rbe-toolchain-tsan
# CI configurations
-build:remote-ci --remote_cache=grpcs://remotebuildexecution.googleapis.com
-build:remote-ci --remote_executor=grpcs://remotebuildexecution.googleapis.com
build:remote-ci --config=ci
+build:remote-ci --remote_download_minimal
+
# Note this config is used by mobile CI also.
build:ci --noshow_progress
build:ci --noshow_loading_progress
-
-# Build Event Service
-build:google-bes --bes_backend=grpcs://buildeventservice.googleapis.com
-build:google-bes --bes_results_url=https://source.cloud.google.com/results/invocations/
+build:ci --test_output=errors
# Fuzz builds
@@ -439,6 +470,71 @@ build:windows --features=fully_static_link
build:windows --features=static_link_msvcrt
build:windows --dynamic_mode=off
+# RBE (Google)
+build:rbe-google --google_default_credentials=true
+build:rbe-google --remote_cache=grpcs://remotebuildexecution.googleapis.com
+build:rbe-google --remote_executor=grpcs://remotebuildexecution.googleapis.com
+build:rbe-google --remote_timeout=7200
+build:rbe-google --remote_instance_name=projects/envoy-ci/instances/default_instance
+
+build:rbe-google-bes --bes_backend=grpcs://buildeventservice.googleapis.com
+build:rbe-google-bes --bes_results_url=https://source.cloud.google.com/results/invocations/
+
+# RBE (Engflow mobile)
+build:rbe-engflow --google_default_credentials=false
+build:rbe-engflow --remote_cache=grpcs://envoy.cluster.engflow.com
+build:rbe-engflow --remote_executor=grpcs://envoy.cluster.engflow.com
+build:rbe-engflow --bes_backend=grpcs://envoy.cluster.engflow.com/
+build:rbe-engflow --bes_results_url=https://envoy.cluster.engflow.com/invocation/
+build:rbe-engflow --credential_helper=*.engflow.com=%workspace%/bazel/engflow-bazel-credential-helper.sh
+build:rbe-engflow --grpc_keepalive_time=30s
+build:rbe-engflow --remote_timeout=3600s
+build:rbe-engflow --bes_timeout=3600s
+build:rbe-engflow --bes_upload_mode=fully_async
+
+build:cache-envoy-engflow --google_default_credentials=false
+build:cache-envoy-engflow --remote_cache=grpcs://morganite.cluster.engflow.com
+build:cache-envoy-engflow --remote_timeout=3600s
+build:cache-envoy-engflow --credential_helper=*.engflow.com=%workspace%/bazel/engflow-bazel-credential-helper.sh
+build:cache-envoy-engflow --grpc_keepalive_time=30s
+build:bes-envoy-engflow --bes_backend=grpcs://morganite.cluster.engflow.com/
+build:bes-envoy-engflow --bes_results_url=https://morganite.cluster.engflow.com/invocation/
+build:bes-envoy-engflow --bes_timeout=3600s
+build:bes-envoy-engflow --bes_upload_mode=fully_async
+build:rbe-envoy-engflow --config=cache-envoy-engflow
+build:rbe-envoy-engflow --config=bes-envoy-engflow
+build:rbe-envoy-engflow --remote_executor=grpcs://morganite.cluster.engflow.com
+build:rbe-envoy-engflow --remote_default_exec_properties=container-image=docker://docker.io/envoyproxy/envoy-build-ubuntu:fdd65c6270a8507a18d5acd6cf19a18cb695e4fa@sha256:3c8a3ce6f90dcfb5d09dc8f79bb01404d3526d420061f9a176e0a8e91e1e573e
+
+#############################################################################
+# debug: Various Bazel debugging flags
+#############################################################################
+# debug/bazel
+common:debug-bazel --announce_rc
+common:debug-bazel -s
+# debug/sandbox
+common:debug-sandbox --verbose_failures
+common:debug-sandbox --sandbox_debug
+# debug/coverage
+common:debug-coverage --action_env=VERBOSE_COVERAGE=true
+common:debug-coverage --test_env=VERBOSE_COVERAGE=true
+common:debug-coverage --test_env=DISPLAY_LCOV_CMD=true
+common:debug-coverage --config=debug-tests
+# debug/tests
+common:debug-tests --test_output=all
+# debug/everything
+common:debug --config=debug-bazel
+common:debug --config=debug-sandbox
+common:debug --config=debug-coverage
+common:debug --config=debug-tests
+
try-import %workspace%/clang.bazelrc
try-import %workspace%/user.bazelrc
try-import %workspace%/local_tsan.bazelrc
+
+
+# Prevent crashes caused by the new version of tcmalloc using the percpu feature
+build --define tcmalloc=gperftools
+
+# Avoid affecting the signal handling in golang-filter
+build --define signal_trace=disabled
diff --git a/.bazelversion b/.bazelversion
index dfda3e0b4f011..91e4a9f262244 100644
--- a/.bazelversion
+++ b/.bazelversion
@@ -1 +1 @@
-6.1.0
+6.3.2
diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index 7dd1f7df667dd..066695f4922a2 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -1,4 +1,4 @@
-FROM gcr.io/envoy-ci/envoy-build:41c5a05d708972d703661b702a63ef5060125c33
+FROM gcr.io/envoy-ci/envoy-build:fdd65c6270a8507a18d5acd6cf19a18cb695e4fa@sha256:2a473cd9808182735d54e03b158975389948b9559b8e8fc624cfafbaf7059e62
ARG USERNAME=vscode
ARG USER_UID=501
diff --git a/.devcontainer/setup.sh b/.devcontainer/setup.sh
index d2a54b474bb17..b50bb1190d661 100755
--- a/.devcontainer/setup.sh
+++ b/.devcontainer/setup.sh
@@ -1,10 +1,8 @@
#!/usr/bin/env bash
-. ci/setup_cache.sh
-trap - EXIT # Don't remove the key file written into a temporary file
-
BAZELRC_FILE=~/.bazelrc bazel/setup_clang.sh /opt/llvm
+# TODO(phlax): use user.bazelrc
# Use generated toolchain config because we know the base container is the one we're using in RBE.
# Not using libc++ here because clangd will raise some tidy issue in libc++ header as of version 9.
echo "build --config=rbe-toolchain-clang" >> ~/.bazelrc
diff --git a/.github/actions/do_ci/action.yml b/.github/actions/do_ci/action.yml
deleted file mode 100644
index 5a024feede039..0000000000000
--- a/.github/actions/do_ci/action.yml
+++ /dev/null
@@ -1,90 +0,0 @@
-inputs:
- target:
- required: true
- type: string
- rbe:
- type: boolean
- default: true
- managed:
- type: boolean
- default: true
-
- auth_bazel_rbe:
- type: string
- default: ''
-
- bazel_extra:
- type: string
- default:
- bazel_local_cache:
- type: string
- default:
- bazel_rbe_cache:
- type: string
- default: grpcs://remotebuildexecution.googleapis.com
- bazel_rbe_instance:
- type: string
- default: projects/envoy-ci/instances/default_instance
- bazel_rbe_jobs:
- type: number
- default: 75
-
- command_prefix:
- type: string
- default: ./ci/run_envoy_docker.sh
- command_ci:
- type: string
- default: ./ci/do_ci.sh
-
- env:
- type: string
-
- GITHUB_TOKEN:
- required: true
-
-runs:
- using: composite
- steps:
- - id: do_ci
- name: 'Run CI target ${{ inputs.target }}'
- run: |
- if [[ "${#INPUT_ENV}" -ne 0 ]]; then
- SOURCETMP="$(mktemp)"
- # TODO(phlax): Fix escaping
- echo "${{ inputs.env }}" > "$SOURCETMP"
- . "$SOURCETMP"
- rm -rf "$SOURCETMP"
- fi
- if [[ "${{ inputs.rbe }}" == 'true' ]]; then
- export ENVOY_RBE=1
- export GCP_SERVICE_ACCOUNT_KEY=${{ inputs.auth_bazel_rbe }}
- export BAZEL_BUILD_EXTRA_OPTIONS="--config=remote-ci --jobs=${{ inputs.bazel_rbe_jobs }} ${{ inputs.bazel_extra }}"
- export BAZEL_REMOTE_CACHE=${{ inputs.bazel_rbe_cache }}"
- export BAZEL_REMOTE_INSTANCE=${{ inputs.bazel_rbe_instance }}"
- else
- export BAZEL_BUILD_EXTRA_OPTIONS="--config=ci ${{ inputs.bazel_extra }}"
- export BAZEL_REMOTE_CACHE="${{ inputs.bazel_local_cache }}"
- if [[ "${{ github.event_name }}" == "pull_request" ]]; then
- export BAZEL_REMOTE_INSTANCE_BRANCH="${{ github.event.base.ref }}"
- else
- export BAZEL_REMOTE_INSTANCE_BRANCH="${{ github.ref }}"
- fi
- fi
-
- if [[ -n "${{ inputs.command_prefix }}" ]]; then
- ${{ inputs.command_prefix }} '${{ inputs.command_ci }} ${{ inputs.target }}'
- else
- ${{ inputs.command_ci }} ${{ inputs.target }}
- fi
-
- if [[ ${{ github.event_name }} == "pull_request" ]]; then
- export BAZEL_FAKE_SCM_REVISION=e3b4a6e9570da15ac1caffdded17a8bebdc7dfc9
- export CI_TARGET_BRANCH="${{ github.event.base.ref }}"
- else
- export CI_TARGET_BRANCH="${{ github.ref }}"
- fi
- shell: bash
- env:
- GITHUB_TOKEN: ${{ inputs.GITHUB_TOKEN }}
- ENVOY_DOCKER_BUILD_DIR: ${{ runner.temp }}
- INPUT_ENV: ${{ inputs.env }}
diff --git a/.github/actions/env/action.yml b/.github/actions/env/action.yml
deleted file mode 100644
index b5d44c56d24f6..0000000000000
--- a/.github/actions/env/action.yml
+++ /dev/null
@@ -1,175 +0,0 @@
-inputs:
- build_image_tag:
- type: string
- required: true
- build_image_repo:
- type: string
- required: true
- build_image_mobile_sha:
- type: string
- required: true
- build_image_sha:
- type: string
- required: true
-
- repo_ref:
- type: string
- repo_ref_sha:
- type: string
- repo_ref_name:
- type: string
-
- trusted_bots:
- type: string
- default: |
- trigger-release-envoy[bot]
-
- check_mobile_run:
- type: boolean
- default: true
-
-outputs:
- build_image_ubuntu:
- value: ${{ steps.build.outputs.build_image_ubuntu }}
- build_image_ubuntu_mobile:
- value: ${{ steps.build.outputs.build_image_ubuntu_mobile }}
-
- mobile_android_build:
- value: ${{ steps.should_run.outputs.mobile_android_build }}
- mobile_android_build_all:
- value: ${{ steps.should_run.outputs.mobile_android_build_all }}
- mobile_android_tests:
- value: ${{ steps.should_run.outputs.mobile_android_tests }}
- mobile_asan:
- value: ${{ steps.should_run.outputs.mobile_asan }}
- mobile_cc_tests:
- value: ${{ steps.should_run.outputs.mobile_cc_tests }}
- mobile_compile_time_options:
- value: ${{ steps.should_run.outputs.mobile_compile_time_options }}
- mobile_coverage:
- value: ${{ steps.should_run.outputs.mobile_coverage }}
- mobile_formatting:
- value: ${{ steps.should_run.outputs.mobile_formatting }}
- mobile_ios_build:
- value: ${{ steps.should_run.outputs.mobile_ios_build }}
- mobile_ios_build_all:
- value: ${{ steps.should_run.outputs.mobile_ios_build_all }}
- mobile_ios_tests:
- value: ${{ steps.should_run.outputs.mobile_ios_tests }}
- mobile_release_validation:
- value: ${{ steps.should_run.outputs.mobile_release_validation }}
- mobile_tsan:
- value: ${{ steps.should_run.outputs.mobile_tsan }}
- repo_ref:
- value: ${{ steps.context.outputs.repo_ref }}
- repo_ref_name:
- value: ${{ steps.context.outputs.repo_ref_name }}
- repo_ref_pr_number:
- value: ${{ steps.context.outputs.repo_ref_pr_number }}
- repo_ref_sha:
- value: ${{ steps.context.outputs.repo_ref_sha }}
- repo_ref_sha_short:
- value: ${{ steps.context.outputs.repo_ref_sha_short }}
- repo_ref_title:
- value: ${{ steps.context.outputs.repo_ref_title }}
- trusted:
- value: ${{ steps.trusted.outputs.trusted }}
- version_dev:
- value: ${{ steps.context.outputs.version_dev }}
- version_patch:
- value: ${{ steps.context.outputs.version_patch }}
-
-runs:
- using: composite
- steps:
-
- - if: ${{ inputs.check_mobile_run != 'false' }}
- id: should_run
- name: 'Check what to run'
- run: ./mobile/tools/what_to_run.sh
- shell: bash
-
- - id: trusted
- name: 'Check if its a trusted run'
- run: |
- TRUSTED=1
- ACTOR="${{ github.actor }}"
- if [[ "$ACTOR" =~ \[bot\] ]]; then
- TRUSTED_BOT=
- TRUSTED_BOTS=(${{ inputs.trusted_bots }})
- for bot in ${TRUSTED_BOTS[@]}; do
- if [[ "$bot" == "$ACTOR" ]]; then
- # Trusted bot account, ie non-PR
- TRUSTED_BOT=1
- break
- fi
- done
- if [[ -z "$TRUSTED_BOT" ]]; then
- echo "Not trusted bot account"
- TRUSTED=
- fi
- fi
- if [[ "${{ github.event_name }}" == "pull_request" ]]; then
- echo "Not trusted pull_request event"
- TRUSTED=
- fi
- if [[ -n "$TRUSTED" ]]; then
- echo "trusted=true" >> "$GITHUB_OUTPUT"
- else
- echo "trusted=false" >> "$GITHUB_OUTPUT"
- fi
- shell: bash
-
- - id: context
- name: 'CI context'
- run: |
- if grep dev VERSION.txt; then
- VERSION_DEV="$(cat VERSION.txt | cut -d- -f2)"
- else
- VERSION_DEV=""
- fi
- VERSION_PATCH="$(cat VERSION.txt | cut -d- -f1 | rev | cut -d. -f1 | rev)"
- # TODO: strip merge from pr names
- REF_NAME=${{ inputs.repo_ref_name || github.ref_name }}
- if [[ "$REF_NAME" =~ ^refs/pull/ ]]; then
- REF_NAME="${REF_NAME:10}"
- REF_PR_NUMBER="$(echo "${REF_NAME}" | cut -d/ -f1)"
- elif [[ "${{ github.event_name }}" == 'pull_request' ]]; then
- REF_PR_NUMBER="$(echo "${REF_NAME}" | cut -d/ -f1)"
- fi
- echo "SET PR NUMBER: ${REF_PR_NUMBER}"
-
- REF="${{ steps.trusted.outputs.trusted != 'true' && inputs.repo_ref || '' }}"
- REF_SHA=${{ inputs.repo_ref_sha || github.event.pull_request.head.sha || github.sha }}
- REF_SHA_SHORT="${REF_SHA:0:7}"
- REF_TITLE=(
- "${{ steps.trusted.outputs.trusted == 'true' && 'postsubmit' || 'pr' }}/"
- "${REF_NAME}"
- "@${REF_SHA_SHORT}")
- REF_TITLE="$(printf %s "${REF_TITLE[@]}" $'\n')"
- {
- echo "repo_ref=$REF"
- echo "repo_ref_name=$REF_NAME"
- echo "repo_ref_pr_number=$REF_PR_NUMBER"
- echo "repo_ref_sha=$REF_SHA"
- echo "repo_ref_title=$REF_TITLE"
- echo "repo_ref_sha_short=$REF_SHA_SHORT"
- echo "version_dev=$VERSION_DEV"
- echo "version_patch=$VERSION_PATCH"
- } >> "$GITHUB_OUTPUT"
- shell: bash
-
- - id: build
- name: 'Check current build images'
- run: |
- {
- echo "build_image_ubuntu=${BUILD_IMAGE_UBUNTU_REPO}:${BUILD_IMAGE_UBUNTU}@sha256:${BUILD_IMAGE_UBUNTU_SHA}"
- echo "build_image_ubuntu_mobile=${BUILD_IMAGE_UBUNTU_REPO}:mobile-${BUILD_IMAGE_UBUNTU}@sha256:${BUILD_IMAGE_UBUNTU_MOBILE_SHA}"
- } >> "$GITHUB_OUTPUT"
- env:
- # TODO(phlax): derive these from a config file
- BUILD_IMAGE_UBUNTU_REPO: ${{ inputs.build_image_repo }}
- BUILD_IMAGE_UBUNTU: ${{ inputs.build_image_tag }}
- BUILD_IMAGE_UBUNTU_SHA: ${{ inputs.build_image_sha }}
- BUILD_IMAGE_UBUNTU_MOBILE_SHA: ${{ inputs.build_image_mobile_sha }}
- shell: bash
diff --git a/.github/actions/pr_notifier/pr_notifier.py b/.github/actions/pr_notifier/pr_notifier.py
deleted file mode 100644
index 5ad39556efe36..0000000000000
--- a/.github/actions/pr_notifier/pr_notifier.py
+++ /dev/null
@@ -1,266 +0,0 @@
-# Script for collecting PRs in need of review, and informing maintainers via
-# slack.
-#
-# By default this runs in "developer mode" which means that it collects PRs
-# associated with maintainers and API reviewers, and spits them out (badly
-# formatted) to the command line.
-#
-# .github/workflows/pr_notifier.yml runs the script with --cron_job
-# which instead sends the collected PRs to the various slack channels.
-#
-# NOTE: Slack IDs can be found in the user's full profile from within Slack.
-
-from __future__ import print_function
-
-import argparse
-import datetime
-import os
-import sys
-
-import github
-from slack_sdk import WebClient
-from slack_sdk.errors import SlackApiError
-
-MAINTAINERS = {
- 'alyssawilk': 'U78RP48V9',
- 'mattklein123': 'U5CALEVSL',
- 'lizan': 'U79E51EQ6',
- 'snowp': 'U93KTPQP6',
- 'ggreenway': 'U78MBV869',
- 'htuch': 'U78E7055Z',
- 'zuercher': 'U78J72Q82',
- 'phlax': 'U017PLM0GNQ',
- 'jmarantz': 'U80HPLBPG',
- 'ravenblackx': 'U02MJHFEX35',
- 'yanavlasov': 'UJHLR5KFS',
- 'RyanTheOptimist': 'U01SW3JC8GP',
- 'adisuissa': 'UT17EMMTP',
- 'KBaichoo': 'U016ZPU8KBK',
- 'wbpcode': 'U017KF5C0Q6',
- 'kyessenov': 'U7KTRAA8M',
- 'keith': 'UGS5P90CF',
- 'abeyad': 'U03CVM7GPM1',
-}
-
-# First pass reviewers who are not maintainers should get
-# notifications but not result in a PR not getting assigned a
-# maintainer owner.
-FIRST_PASS = {
- 'dmitri-d': 'UB1883Q5S',
- 'tonya11en': 'U989BG2CW',
- 'esmet': 'U01BCGBUUAE',
- 'mathetake': 'UG9TD2FSB',
-}
-
-# Only notify API reviewers who aren't maintainers.
-# Maintainers are already notified of pending PRs.
-API_REVIEWERS = {
- 'markdroth': 'UMN8K55A6',
- 'adisuissa': 'UT17EMMTP',
-}
-
-
-def get_slo_hours():
- # on Monday, allow for 24h + 48h
- if datetime.date.today().weekday() == 0:
- return 72
- return 24
-
-
-# Return true if the PR has a waiting tag, false otherwise.
-def is_waiting(labels):
- for label in labels:
- if label.name == 'waiting' or label.name == 'waiting:any':
- return True
- return False
-
-
-def is_contrib(labels):
- return any(label.name == "contrib" for label in labels)
-
-
-# Return true if the PR has an API tag, false otherwise.
-def is_api(labels):
- for label in labels:
- if label.name == 'api':
- return True
- return False
-
-
-# Generate a pr message, bolding the time if it's out-SLO
-def pr_message(pr_age, pr_url, pr_title, delta_days, delta_hours):
- if pr_age < datetime.timedelta(hours=get_slo_hours()):
- return "<%s|%s> has been waiting %s days %s hours\n" % (
- pr_url, pr_title, delta_days, delta_hours)
- else:
- return "<%s|%s> has been waiting *%s days %s hours*\n" % (
- pr_url, pr_title, delta_days, delta_hours)
-
-
-# Adds reminder lines to the appropriate assignee to review the assigned PRs
-# Returns true if one of the assignees is in the primary_assignee_map, false otherwise.
-def add_reminders(
- assignees, assignees_and_prs, message, primary_assignee_map, first_pass_assignee_map):
- has_primary_assignee = False
- for assignee_info in assignees:
- assignee = assignee_info.login
- if assignee in primary_assignee_map:
- has_primary_assignee = True
- elif assignee not in first_pass_assignee_map:
- continue
- if assignee not in assignees_and_prs.keys():
- assignees_and_prs[
- assignee] = "Hello, %s, here are your PR reminders for the day \n" % assignee
- assignees_and_prs[assignee] = assignees_and_prs[assignee] + message
- return has_primary_assignee
-
-
-# Returns true if the PR needs an LGTM from an API shephard.
-def needs_api_review(labels, repo, pr_info):
- # API reviews should always have the label, so don't bother doing an RPC if
- # it's not tagged (this helps avoid github rate limiting)
- if not (is_api(labels)):
- return False
- # repokitten tags each commit as pending unless there has been an API LGTM
- # since the latest API changes. If this PR is tagged pendding it needs an
- # API review, otherwise it's set.
- status = repo.get_commit(pr_info.head.sha).get_statuses()
- return status[0].state == "pending" if status.totalCount else False
-
-
-def track_prs(github_token):
- git = github.Github(github_token)
-
- repo = git.get_repo('envoyproxy/envoy')
-
- # The list of PRs which are not waiting, but are well within review SLO
- recent_prs = []
- # A dict of maintainer : outstanding_pr_string to be sent to slack
- maintainers_and_prs = {}
- # A placeholder for unassigned PRs, to be sent to #maintainers eventually
- maintainers_and_prs['unassigned'] = ""
- # A dict of shephard : outstanding_pr_string to be sent to slack
- api_review_and_prs = {}
- # Out-SLO PRs to be sent to #envoy-maintainer-oncall
- stalled_prs = ""
-
- # Snag all PRs, including drafts
- for pr_info in repo.get_pulls("open", "updated", "desc"):
- labels = pr_info.labels
- assignees = pr_info.assignees
- # If the PR is waiting, continue.
- if is_waiting(labels):
- continue
- # Drafts are not covered by our SLO (repokitteh warns of this)
- if pr_info.draft:
- continue
- # Don't warn for dependabot.
- if pr_info.user.login == 'dependabot[bot]':
- continue
-
- # Update the time based on the time zone delta from github's
- pr_age = pr_info.updated_at - datetime.timedelta(hours=4)
- delta = datetime.datetime.now() - pr_age
- delta_days = delta.days
- delta_hours = delta.seconds // 3600
-
- # If we get to this point, the review may be in SLO - nudge if it's in
- # SLO, nudge in bold if not.
- message = pr_message(delta, pr_info.html_url, pr_info.title, delta_days, delta_hours)
-
- if (needs_api_review(labels, repo, pr_info)):
- add_reminders(pr_info.assignees, api_review_and_prs, message, API_REVIEWERS, [])
-
- # If the PR has been out-SLO for over a day, inform on-call
- if delta > datetime.timedelta(hours=get_slo_hours() + 36):
- stalled_prs = stalled_prs + message
-
- # Add a reminder to each maintainer-assigner on the PR.
- has_maintainer_assignee = add_reminders(
- pr_info.assignees, maintainers_and_prs, message, MAINTAINERS, FIRST_PASS)
-
- # If there was no maintainer, track it as unassigned.
- if not has_maintainer_assignee and not is_contrib(labels):
- maintainers_and_prs['unassigned'] = maintainers_and_prs['unassigned'] + message
-
- # Return the dict of {maintainers : PR notifications},
- # the dict of {api-shephards-who-are-not-maintainers: PR notifications},
- # and stalled PRs
- return maintainers_and_prs, api_review_and_prs, stalled_prs
-
-
-def post_to_assignee(client, assignees_and_messages, assignees_map):
- # Post updates to individual assignees
- for key in assignees_and_messages:
- message = assignees_and_messages[key]
-
- # Only send messages if we have the slack UID
- if key not in assignees_map:
- continue
- uid = assignees_map[key]
-
- # Ship messages off to slack.
- try:
- print(assignees_and_messages[key])
- response = client.conversations_open(users=uid, text="hello")
- channel_id = response["channel"]["id"]
- client.chat_postMessage(channel=channel_id, text=message)
- except SlackApiError as e:
- print("Unexpected error %s", e.response["error"])
-
-
-def post_to_oncall(client, unassigned_prs, out_slo_prs):
- # Post updates to #envoy-maintainer-oncall
- unassigned_prs = maintainers_and_messages['unassigned']
- try:
- client.chat_postMessage(
- channel='#envoy-maintainer-oncall',
- text=("*'Unassigned' PRs* (PRs with no maintainer assigned)\n%s" % unassigned_prs))
- client.chat_postMessage(
- channel='#envoy-maintainer-oncall',
- text=("*Stalled PRs* (PRs with review out-SLO, please address)\n%s" % out_slo_prs))
- issue_link = "https://github.com/envoyproxy/envoy/issues?q=is%3Aissue+is%3Aopen+label%3Atriage"
- client.chat_postMessage(
- channel='#envoy-maintainer-oncall',
- text=(
- "*Untriaged Issues* (please tag and cc area experts)\n<%s|%s>" %
- (issue_link, issue_link)))
- except SlackApiError as e:
- print("Unexpected error %s", e.response["error"])
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--cron_job',
- action="store_true",
- help="true if this is run by the daily cron job, false if run manually by a developer")
- args = parser.parse_args()
-
- github_token = os.getenv('GITHUB_TOKEN')
- if not github_token:
- print('Missing GITHUB_TOKEN: please check github workflow configuration')
- sys.exit(1)
-
- slack_bot_token = os.getenv('SLACK_BOT_TOKEN')
- if not slack_bot_token:
- print(
- 'Missing SLACK_BOT_TOKEN: please export token from https://api.slack.com/apps/A023NPQQ33K/oauth?'
- )
- sys.exit(1)
-
- maintainers_and_messages, shephards_and_messages, stalled_prs = track_prs(github_token)
-
- if not args.cron_job:
- print(maintainers_and_messages)
- print("\n\n\n")
- print(shephards_and_messages)
- print("\n\n\n")
- print(stalled_prs)
- exit(0)
-
- client = WebClient(token=slack_bot_token)
- post_to_oncall(client, maintainers_and_messages['unassigned'], stalled_prs)
- post_to_assignee(client, shephards_and_messages, API_REVIEWERS)
- post_to_assignee(client, maintainers_and_messages, MAINTAINERS)
- post_to_assignee(client, maintainers_and_messages, FIRST_PASS)
diff --git a/.github/actions/pr_notifier/requirements.in b/.github/actions/pr_notifier/requirements.in
deleted file mode 100644
index b27ccacba25ae..0000000000000
--- a/.github/actions/pr_notifier/requirements.in
+++ /dev/null
@@ -1,2 +0,0 @@
-pygithub
-slack_sdk
diff --git a/.github/actions/pr_notifier/requirements.txt b/.github/actions/pr_notifier/requirements.txt
deleted file mode 100644
index f9dfcc84ad240..0000000000000
--- a/.github/actions/pr_notifier/requirements.txt
+++ /dev/null
@@ -1,224 +0,0 @@
-#
-# This file is autogenerated by pip-compile
-# To update, run:
-#
-# pip-compile --generate-hashes .github/actions/pr_notifier/requirements.txt
-#
-certifi==2023.7.22 \
- --hash=sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082 \
- --hash=sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9
- # via requests
-cffi==1.14.5 \
- --hash=sha256:005a36f41773e148deac64b08f233873a4d0c18b053d37da83f6af4d9087b813 \
- --hash=sha256:04c468b622ed31d408fea2346bec5bbffba2cc44226302a0de1ade9f5ea3d373 \
- --hash=sha256:06d7cd1abac2ffd92e65c0609661866709b4b2d82dd15f611e602b9b188b0b69 \
- --hash=sha256:06db6321b7a68b2bd6df96d08a5adadc1fa0e8f419226e25b2a5fbf6ccc7350f \
- --hash=sha256:0857f0ae312d855239a55c81ef453ee8fd24136eaba8e87a2eceba644c0d4c06 \
- --hash=sha256:0f861a89e0043afec2a51fd177a567005847973be86f709bbb044d7f42fc4e05 \
- --hash=sha256:1071534bbbf8cbb31b498d5d9db0f274f2f7a865adca4ae429e147ba40f73dea \
- --hash=sha256:158d0d15119b4b7ff6b926536763dc0714313aa59e320ddf787502c70c4d4bee \
- --hash=sha256:1bf1ac1984eaa7675ca8d5745a8cb87ef7abecb5592178406e55858d411eadc0 \
- --hash=sha256:1f436816fc868b098b0d63b8920de7d208c90a67212546d02f84fe78a9c26396 \
- --hash=sha256:24a570cd11895b60829e941f2613a4f79df1a27344cbbb82164ef2e0116f09c7 \
- --hash=sha256:24ec4ff2c5c0c8f9c6b87d5bb53555bf267e1e6f70e52e5a9740d32861d36b6f \
- --hash=sha256:2894f2df484ff56d717bead0a5c2abb6b9d2bf26d6960c4604d5c48bbc30ee73 \
- --hash=sha256:29314480e958fd8aab22e4a58b355b629c59bf5f2ac2492b61e3dc06d8c7a315 \
- --hash=sha256:293e7ea41280cb28c6fcaaa0b1aa1f533b8ce060b9e701d78511e1e6c4a1de76 \
- --hash=sha256:34eff4b97f3d982fb93e2831e6750127d1355a923ebaeeb565407b3d2f8d41a1 \
- --hash=sha256:35f27e6eb43380fa080dccf676dece30bef72e4a67617ffda586641cd4508d49 \
- --hash=sha256:3c3f39fa737542161d8b0d680df2ec249334cd70a8f420f71c9304bd83c3cbed \
- --hash=sha256:3d3dd4c9e559eb172ecf00a2a7517e97d1e96de2a5e610bd9b68cea3925b4892 \
- --hash=sha256:43e0b9d9e2c9e5d152946b9c5fe062c151614b262fda2e7b201204de0b99e482 \
- --hash=sha256:48e1c69bbacfc3d932221851b39d49e81567a4d4aac3b21258d9c24578280058 \
- --hash=sha256:51182f8927c5af975fece87b1b369f722c570fe169f9880764b1ee3bca8347b5 \
- --hash=sha256:58e3f59d583d413809d60779492342801d6e82fefb89c86a38e040c16883be53 \
- --hash=sha256:5de7970188bb46b7bf9858eb6890aad302577a5f6f75091fd7cdd3ef13ef3045 \
- --hash=sha256:65fa59693c62cf06e45ddbb822165394a288edce9e276647f0046e1ec26920f3 \
- --hash=sha256:681d07b0d1e3c462dd15585ef5e33cb021321588bebd910124ef4f4fb71aef55 \
- --hash=sha256:69e395c24fc60aad6bb4fa7e583698ea6cc684648e1ffb7fe85e3c1ca131a7d5 \
- --hash=sha256:6c97d7350133666fbb5cf4abdc1178c812cb205dc6f41d174a7b0f18fb93337e \
- --hash=sha256:6e4714cc64f474e4d6e37cfff31a814b509a35cb17de4fb1999907575684479c \
- --hash=sha256:72d8d3ef52c208ee1c7b2e341f7d71c6fd3157138abf1a95166e6165dd5d4369 \
- --hash=sha256:8ae6299f6c68de06f136f1f9e69458eae58f1dacf10af5c17353eae03aa0d827 \
- --hash=sha256:8b198cec6c72df5289c05b05b8b0969819783f9418e0409865dac47288d2a053 \
- --hash=sha256:99cd03ae7988a93dd00bcd9d0b75e1f6c426063d6f03d2f90b89e29b25b82dfa \
- --hash=sha256:9cf8022fb8d07a97c178b02327b284521c7708d7c71a9c9c355c178ac4bbd3d4 \
- --hash=sha256:9de2e279153a443c656f2defd67769e6d1e4163952b3c622dcea5b08a6405322 \
- --hash=sha256:9e93e79c2551ff263400e1e4be085a1210e12073a31c2011dbbda14bda0c6132 \
- --hash=sha256:9ff227395193126d82e60319a673a037d5de84633f11279e336f9c0f189ecc62 \
- --hash=sha256:a465da611f6fa124963b91bf432d960a555563efe4ed1cc403ba5077b15370aa \
- --hash=sha256:ad17025d226ee5beec591b52800c11680fca3df50b8b29fe51d882576e039ee0 \
- --hash=sha256:afb29c1ba2e5a3736f1c301d9d0abe3ec8b86957d04ddfa9d7a6a42b9367e396 \
- --hash=sha256:b85eb46a81787c50650f2392b9b4ef23e1f126313b9e0e9013b35c15e4288e2e \
- --hash=sha256:bb89f306e5da99f4d922728ddcd6f7fcebb3241fc40edebcb7284d7514741991 \
- --hash=sha256:cbde590d4faaa07c72bf979734738f328d239913ba3e043b1e98fe9a39f8b2b6 \
- --hash=sha256:cc5a8e069b9ebfa22e26d0e6b97d6f9781302fe7f4f2b8776c3e1daea35f1adc \
- --hash=sha256:cd2868886d547469123fadc46eac7ea5253ea7fcb139f12e1dfc2bbd406427d1 \
- --hash=sha256:d42b11d692e11b6634f7613ad8df5d6d5f8875f5d48939520d351007b3c13406 \
- --hash=sha256:df5052c5d867c1ea0b311fb7c3cd28b19df469c056f7fdcfe88c7473aa63e333 \
- --hash=sha256:f2d45f97ab6bb54753eab54fffe75aaf3de4ff2341c9daee1987ee1837636f1d \
- --hash=sha256:fd78e5fee591709f32ef6edb9a015b4aa1a5022598e36227500c8f4e02328d9c
- # via
- # cryptography
- # pynacl
-charset-normalizer==3.1.0 \
- --hash=sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6 \
- --hash=sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1 \
- --hash=sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e \
- --hash=sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373 \
- --hash=sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62 \
- --hash=sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230 \
- --hash=sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be \
- --hash=sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c \
- --hash=sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0 \
- --hash=sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448 \
- --hash=sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f \
- --hash=sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649 \
- --hash=sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d \
- --hash=sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0 \
- --hash=sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706 \
- --hash=sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a \
- --hash=sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59 \
- --hash=sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23 \
- --hash=sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5 \
- --hash=sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb \
- --hash=sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e \
- --hash=sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e \
- --hash=sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c \
- --hash=sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28 \
- --hash=sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d \
- --hash=sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41 \
- --hash=sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974 \
- --hash=sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce \
- --hash=sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f \
- --hash=sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1 \
- --hash=sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d \
- --hash=sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8 \
- --hash=sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017 \
- --hash=sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31 \
- --hash=sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7 \
- --hash=sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8 \
- --hash=sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e \
- --hash=sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14 \
- --hash=sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd \
- --hash=sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d \
- --hash=sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795 \
- --hash=sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b \
- --hash=sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b \
- --hash=sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b \
- --hash=sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203 \
- --hash=sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f \
- --hash=sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19 \
- --hash=sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1 \
- --hash=sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a \
- --hash=sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac \
- --hash=sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9 \
- --hash=sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0 \
- --hash=sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137 \
- --hash=sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f \
- --hash=sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6 \
- --hash=sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5 \
- --hash=sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909 \
- --hash=sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f \
- --hash=sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0 \
- --hash=sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324 \
- --hash=sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755 \
- --hash=sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb \
- --hash=sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854 \
- --hash=sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c \
- --hash=sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60 \
- --hash=sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84 \
- --hash=sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0 \
- --hash=sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b \
- --hash=sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1 \
- --hash=sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531 \
- --hash=sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1 \
- --hash=sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11 \
- --hash=sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326 \
- --hash=sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df \
- --hash=sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab
- # via requests
-cryptography==41.0.2 \
- --hash=sha256:01f1d9e537f9a15b037d5d9ee442b8c22e3ae11ce65ea1f3316a41c78756b711 \
- --hash=sha256:079347de771f9282fbfe0e0236c716686950c19dee1b76240ab09ce1624d76d7 \
- --hash=sha256:182be4171f9332b6741ee818ec27daff9fb00349f706629f5cbf417bd50e66fd \
- --hash=sha256:192255f539d7a89f2102d07d7375b1e0a81f7478925b3bc2e0549ebf739dae0e \
- --hash=sha256:2a034bf7d9ca894720f2ec1d8b7b5832d7e363571828037f9e0c4f18c1b58a58 \
- --hash=sha256:342f3767e25876751e14f8459ad85e77e660537ca0a066e10e75df9c9e9099f0 \
- --hash=sha256:439c3cc4c0d42fa999b83ded80a9a1fb54d53c58d6e59234cfe97f241e6c781d \
- --hash=sha256:49c3222bb8f8e800aead2e376cbef687bc9e3cb9b58b29a261210456a7783d83 \
- --hash=sha256:674b669d5daa64206c38e507808aae49904c988fa0a71c935e7006a3e1e83831 \
- --hash=sha256:7a9a3bced53b7f09da251685224d6a260c3cb291768f54954e28f03ef14e3766 \
- --hash=sha256:7af244b012711a26196450d34f483357e42aeddb04128885d95a69bd8b14b69b \
- --hash=sha256:7d230bf856164de164ecb615ccc14c7fc6de6906ddd5b491f3af90d3514c925c \
- --hash=sha256:84609ade00a6ec59a89729e87a503c6e36af98ddcd566d5f3be52e29ba993182 \
- --hash=sha256:9a6673c1828db6270b76b22cc696f40cde9043eb90373da5c2f8f2158957f42f \
- --hash=sha256:9b6d717393dbae53d4e52684ef4f022444fc1cce3c48c38cb74fca29e1f08eaa \
- --hash=sha256:9c3fe6534d59d071ee82081ca3d71eed3210f76ebd0361798c74abc2bcf347d4 \
- --hash=sha256:a719399b99377b218dac6cf547b6ec54e6ef20207b6165126a280b0ce97e0d2a \
- --hash=sha256:b332cba64d99a70c1e0836902720887fb4529ea49ea7f5462cf6640e095e11d2 \
- --hash=sha256:d124682c7a23c9764e54ca9ab5b308b14b18eba02722b8659fb238546de83a76 \
- --hash=sha256:d73f419a56d74fef257955f51b18d046f3506270a5fd2ac5febbfa259d6c0fa5 \
- --hash=sha256:f0dc40e6f7aa37af01aba07277d3d64d5a03dc66d682097541ec4da03cc140ee \
- --hash=sha256:f14ad275364c8b4e525d018f6716537ae7b6d369c094805cae45300847e0894f \
- --hash=sha256:f772610fe364372de33d76edcd313636a25684edb94cee53fd790195f5989d14
- # via pyjwt
-deprecated==1.2.13 \
- --hash=sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d \
- --hash=sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d
- # via pygithub
-idna==2.10 \
- --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \
- --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0
- # via requests
-pycparser==2.20 \
- --hash=sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0 \
- --hash=sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705
- # via cffi
-pygithub==1.59.0 \
- --hash=sha256:126bdbae72087d8d038b113aab6b059b4553cb59348e3024bb1a1cae406ace9e \
- --hash=sha256:6e05ff49bac3caa7d1d6177a10c6e55a3e20c85b92424cc198571fd0cf786690
- # via -r requirements.in
-pyjwt[crypto]==2.4.0 \
- --hash=sha256:72d1d253f32dbd4f5c88eaf1fdc62f3a19f676ccbadb9dbc5d07e951b2b26daf \
- --hash=sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba
- # via pygithub
-pynacl==1.4.0 \
- --hash=sha256:06cbb4d9b2c4bd3c8dc0d267416aaed79906e7b33f114ddbf0911969794b1cc4 \
- --hash=sha256:11335f09060af52c97137d4ac54285bcb7df0cef29014a1a4efe64ac065434c4 \
- --hash=sha256:2fe0fc5a2480361dcaf4e6e7cea00e078fcda07ba45f811b167e3f99e8cff574 \
- --hash=sha256:30f9b96db44e09b3304f9ea95079b1b7316b2b4f3744fe3aaecccd95d547063d \
- --hash=sha256:4e10569f8cbed81cb7526ae137049759d2a8d57726d52c1a000a3ce366779634 \
- --hash=sha256:511d269ee845037b95c9781aa702f90ccc36036f95d0f31373a6a79bd8242e25 \
- --hash=sha256:537a7ccbea22905a0ab36ea58577b39d1fa9b1884869d173b5cf111f006f689f \
- --hash=sha256:54e9a2c849c742006516ad56a88f5c74bf2ce92c9f67435187c3c5953b346505 \
- --hash=sha256:757250ddb3bff1eecd7e41e65f7f833a8405fede0194319f87899690624f2122 \
- --hash=sha256:7757ae33dae81c300487591c68790dfb5145c7d03324000433d9a2c141f82af7 \
- --hash=sha256:7c6092102219f59ff29788860ccb021e80fffd953920c4a8653889c029b2d420 \
- --hash=sha256:8122ba5f2a2169ca5da936b2e5a511740ffb73979381b4229d9188f6dcb22f1f \
- --hash=sha256:9c4a7ea4fb81536c1b1f5cc44d54a296f96ae78c1ebd2311bd0b60be45a48d96 \
- --hash=sha256:c914f78da4953b33d4685e3cdc7ce63401247a21425c16a39760e282075ac4a6 \
- --hash=sha256:cd401ccbc2a249a47a3a1724c2918fcd04be1f7b54eb2a5a71ff915db0ac51c6 \
- --hash=sha256:d452a6746f0a7e11121e64625109bc4468fc3100452817001dbe018bb8b08514 \
- --hash=sha256:ea6841bc3a76fa4942ce00f3bda7d436fda21e2d91602b9e21b7ca9ecab8f3ff \
- --hash=sha256:f8851ab9041756003119368c1e6cd0b9c631f46d686b3904b18c0139f4419f80
- # via pygithub
-requests==2.31.0 \
- --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \
- --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1
- # via pygithub
-six==1.16.0 \
- --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
- --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
- # via pynacl
-slack-sdk==3.21.3 \
- --hash=sha256:20829bdc1a423ec93dac903470975ebf3bc76fd3fd91a4dadc0eeffc940ecb0c \
- --hash=sha256:de3c07b92479940b61cd68c566f49fbc9974c8f38f661d26244078f3903bb9cc
- # via -r requirements.in
-urllib3==1.26.6 \
- --hash=sha256:39fb8672126159acb139a7718dd10806104dec1e2f0f6c88aab05d17df10c8d4 \
- --hash=sha256:f57b4c16c62fa2760b7e3d97c35b255512fb6b59a259730f36ba32ce9f8e342f
- # via requests
-wrapt==1.12.1 \
- --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7
- # via deprecated
diff --git a/.github/actions/publish/release/setup/action.yml b/.github/actions/publish/release/setup/action.yml
deleted file mode 100644
index 4e0935710d2db..0000000000000
--- a/.github/actions/publish/release/setup/action.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-inputs:
- ref:
- type: string
- required: true
- bucket:
- type: string
- required: true
-
-runs:
- using: composite
- steps:
- - id: url
- run: |
- echo "base=https://storage.googleapis.com/${{ inputs.bucket }}/${REF:0:7}/release" \
- >> "$GITHUB_OUTPUT"
- env:
- REF: ${{ inputs.ref }}
- shell: bash
- - uses: envoyproxy/toolshed/gh-actions/fetch@actions-v0.0.10
- id: fetch
- with:
- url: "${{ steps.url.outputs.base }}/release.signed.tar.zst"
- - run: |
- mkdir -p ${{ runner.temp }}/release.signed
- mv ${{ steps.fetch.outputs.path }} ${{ runner.temp }}/release.signed
- shell: bash
diff --git a/.github/actions/verify/examples/setup/action.yml b/.github/actions/verify/examples/setup/action.yml
deleted file mode 100644
index 18f3205721ce1..0000000000000
--- a/.github/actions/verify/examples/setup/action.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-inputs:
- ref:
- type: string
- required: true
- bucket:
- type: string
- default: envoy-pr
-
-runs:
- using: composite
- steps:
- - id: url
- run: |
- echo "base=https://storage.googleapis.com/${{ inputs.bucket }}/${REF:0:7}/docker" \
- >> "$GITHUB_OUTPUT"
- env:
- REF: ${{ inputs.ref }}
- shell: bash
- - uses: envoyproxy/toolshed/gh-actions/docker/fetch@actions-v0.0.10
- with:
- url: "${{ steps.url.outputs.base }}/envoy.tar"
- variant: dev
- - uses: envoyproxy/toolshed/gh-actions/docker/fetch@actions-v0.0.10
- with:
- url: "${{ steps.url.outputs.base }}/envoy-contrib.tar"
- variant: contrib-dev
- - uses: envoyproxy/toolshed/gh-actions/docker/fetch@actions-v0.0.10
- with:
- url: "${{ steps.url.outputs.base }}/envoy-google-vrp.tar"
- variant: google-vrp-dev
- - run: docker images | grep envoy
- shell: bash
- - run: |
- export DEBIAN_FRONTEND=noninteractive
- sudo apt-get -qq update -y
- sudo apt-get -qq install -y --no-install-recommends expect
- shell: bash
diff --git a/.github/config.yml b/.github/config.yml
new file mode 100644
index 0000000000000..a9dde4d03896d
--- /dev/null
+++ b/.github/config.yml
@@ -0,0 +1,189 @@
+agent-ubuntu: ubuntu-22.04
+build-image:
+ # Authoritative configuration for build image/s
+ repo: envoyproxy/envoy-build-ubuntu
+ sha: 3c8a3ce6f90dcfb5d09dc8f79bb01404d3526d420061f9a176e0a8e91e1e573e
+ mobile-sha: f47fb698cfda583769b9d28e8d1c58cfc7774d5da4f31cd8190d8975c3850c7e
+ # this is authoritative, but is not currently used in github ci
+ gcr-sha: 2a473cd9808182735d54e03b158975389948b9559b8e8fc624cfafbaf7059e62
+ tag: fdd65c6270a8507a18d5acd6cf19a18cb695e4fa
+
+config:
+ envoy:
+ icon: >-
+ [](#)
+
+checks:
+ # Checks: this configures which _checks_ will be activated or skipped
+ #
+ # The configured _names_ need to match the checks configured for the repo
+ #
+ # Any check that is marked as `required` but is not triggered by the run
+ # config above in a given CI run is marked as `skipped`
+ #
+ # For example if macos is marked as `required: true` but then has a path
+ # selection that means its doesnt run the check will be `skipped` and pass
+ macos:
+ name: Envoy/macOS
+ required: true
+ on-run:
+ - build-macos
+ prechecks:
+ name: Envoy/Prechecks
+ on-run:
+ - precheck-deps
+ required: true
+ # yamllint disable rule:line-length
+ advice:
+ general: |
+ ### Ensuring your commits are signed off
+
+ You can set up DCO using Envoy's git hooks.
+
+ ### Git hooks
+
+ To set this up, do the following:
+
+ ```console
+ $ ./support/bootstrap
+ ```
+
+ If you only want the DCO check you can do the following to disable the
+ other hooks
+
+ ```console
+ $ echo NO_VERIFY=1 > .env
+ ```
+ deps: |
+ ### Advice on updating dependencies
+
+ General information about Envoy's depdendencies [can be found here](https://github.com/envoyproxy/envoy/blob/main/DEPENDENCY_POLICY.md)
+ format: |
+ ### Advice on correct formatting
+
+ Envoy ensures a minimum standard for all files in the repository.
+
+ You are strongly advised to heed the following CI notice:
+
+ ```console
+ Please fix your editor to ensure:
+
+ - no trailing whitespace
+ - no preceding mixed tabs/spaces
+ - all files end with a newline
+ ```
+ # yamllint enable rule:line-length
+ publish:
+ name: >-
+ Envoy/Publish and verify
+ on-run:
+ - publish
+ - verify
+ required: true
+
+run:
+ build-macos:
+ paths:
+ - .bazelrc
+ - .bazelversion
+ - .github/config.yml
+ - api/**/*
+ - bazel/**/*
+ - ci/**/*
+ - configs/**/*
+ - contrib/**/*
+ - envoy/**/*
+ - source/**/*
+ - test/**/*
+ precheck-deps:
+ paths:
+ - .bazelrc
+ - .bazelversion
+ - .github/config.yml
+ - .github/dependabot.yml
+ - bazel/BUILD
+ - tools/dependency/*
+ - "**/*.bzl"
+ - "**/requirements.txt"
+ publish:
+ paths:
+ - .bazelrc
+ - .bazelversion
+ - .github/config.yml
+ - api/**/*
+ - bazel/**/*
+ - ci/**/*
+ - contrib/**/*
+ - envoy/**/*
+ - examples/**/*
+ - source/**/*
+ - tools/**/*
+ - VERSION.txt
+ verify:
+ paths:
+ - .bazelrc
+ - .bazelversion
+ - .github/config.yml
+ - api/**/*
+ - bazel/**/*
+ - ci/**/*
+ - contrib/**/*
+ - envoy/**/*
+ - examples/**/*
+ - source/**/*
+ - tools/**/*
+ - VERSION.txt
+ push: paths
+
+tables:
+ env:
+ collapse: true
+ title: Environment
+ table-title: Request variables
+ filter: |
+ .request
+ | del(.["build-image" as $prefix | keys[] | select(startswith($prefix))])
+ | del(.["version" as $prefix | keys[] | select(startswith($prefix))])
+ | .actor = "
@\(.actor.name)"
+ build-image:
+ collapse: true
+ title: Build image
+ table-title: Container image/s (as used in this CI run)
+ filter: |
+ "https://hub.docker.com/r/envoyproxy/envoy-build-ubuntu/tags?page=1&name=" as $dockerLink
+ | .request["build-image"]
+ | del(.changed)
+ | with_entries(
+ .value as $v
+ | ($v | split(":") | .[1] | split("@") | .[0]) as $tag
+ | .value = "[\($v | split("@") | .[0])](\($dockerLink)\($tag))")
+ build-image-current:
+ collapse: true
+ title: Build image (current)
+ table-title: Current or previous container image
+ filter: |
+ "https://hub.docker.com/r/envoyproxy/envoy-build-ubuntu/tags?page=1&name=" as $dockerLink
+ | if .request["build-image"].changed then
+ .request["build-image-current"]
+ | with_entries(
+ .value as $v
+ | ($v | split(":") | .[1] | split("@") | .[0]) as $tag
+ | .value = "[\($v | split("@") | .[0])](\($dockerLink)\($tag))")
+ else {} end
+ version:
+ collapse: true
+ title: Version
+ table-title: Envoy version (as used in this CI run)
+ filter: |
+ .request.version
+ | del(.changed)
+ version-current:
+ collapse: true
+ title: Version (current)
+ table-title: Current or previous version
+ filter: |
+ if .request.version.changed then
+ .request["version-current"]
+ else
+ {}
+ end
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index fe477923a6a6f..265bea2d2c706 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -9,12 +9,6 @@ updates:
#
# Please ensure any new ones are added here, and any that are removed are removed here also.
-- package-ecosystem: "pip"
- directory: "/.github/actions/pr_notifier"
- schedule:
- interval: "daily"
- time: "06:00"
-
- package-ecosystem: "pip"
directory: "/examples/grpc-bridge/client"
schedule:
@@ -39,12 +33,6 @@ updates:
interval: "daily"
time: "06:00"
-- package-ecosystem: "pip"
- directory: "/mobile/docs"
- schedule:
- interval: "daily"
- time: "06:00"
-
- package-ecosystem: "pip"
directory: "/tools/base"
schedule:
@@ -209,49 +197,27 @@ updates:
time: "06:00"
- package-ecosystem: "gomod"
- directory: "/contrib/golang/filters/http/test/test_data/basic"
+ directory: "/contrib/golang/filters/http/test/test_data"
+ groups:
+ contrib-golang:
+ patterns:
+ - "*"
schedule:
interval: daily
time: "06:00"
- package-ecosystem: "gomod"
- directory: "/contrib/golang/filters/http/test/test_data/dummy"
- schedule:
- interval: daily
- time: "06:00"
-
-- package-ecosystem: "gomod"
- directory: "/contrib/golang/filters/http/test/test_data/echo"
- schedule:
- interval: daily
- time: "06:00"
-
-- package-ecosystem: "gomod"
- directory: "/contrib/golang/filters/http/test/test_data/passthrough"
- schedule:
- interval: daily
- time: "06:00"
-
-- package-ecosystem: "gomod"
- directory: "/contrib/golang/filters/http/test/test_data/routeconfig"
- schedule:
- interval: daily
- time: "06:00"
-
-- package-ecosystem: "gomod"
- directory: "/contrib/golang/router/cluster_specifier/test/test_data/simple"
+ directory: "/contrib/golang/filters/http/test/test_data/access_log"
schedule:
interval: daily
time: "06:00"
- package-ecosystem: "gomod"
directory: "/contrib/golang/filters/network/test/test_data"
- schedule:
- interval: daily
- time: "06:00"
-
-- package-ecosystem: "gomod"
- directory: "/examples/ext_authz/auth/grpc-service"
+ groups:
+ contrib-golang:
+ patterns:
+ - "*"
schedule:
interval: daily
time: "06:00"
diff --git a/.github/workflows/POLICY.md b/.github/workflows/POLICY.md
index 86d775493dc9d..c52488cd22efe 100644
--- a/.github/workflows/POLICY.md
+++ b/.github/workflows/POLICY.md
@@ -40,7 +40,7 @@ Do not allow any bots or app users to do so, unless this is specifically require
For example, you could add a `job` condition to prevent any bots from triggering the workflow:
```yaml
- if: |
+ if: >-
${{
github.repository == 'envoyproxy/envoy'
&& (github.event.schedule
diff --git a/.github/workflows/_cache_docker.yml b/.github/workflows/_cache_docker.yml
deleted file mode 100644
index f0d653cab0248..0000000000000
--- a/.github/workflows/_cache_docker.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-name: Cache prime (docker)
-
-permissions:
- contents: read
-
-on:
- workflow_call:
- inputs:
- image_tag:
- type: string
- required: true
- image_repo:
- type: string
- required: true
- image_sha:
- type: string
- required: true
-
-concurrency:
- group: cache_docker-${{ inputs.image_tag }}
- cancel-in-progress: false
-
-## Docker cache
-#
-# This workflow will only prime the cache, and should be done separately first, prior
-# to any jobs that require it.
-#
-# For a job that does, you can restore with something like:
-#
-# steps:
-# - uses: envoyproxy/toolshed/gh-actions/docker/cache/restore@actions-v0.0.5
-# with:
-# key: "${{ needs.env.outputs.build_image_ubuntu }}"
-#
-
-jobs:
- docker:
- runs-on: ubuntu-22.04
- steps:
- - uses: envoyproxy/toolshed/gh-actions/docker/cache/prime@actions-v0.0.10
- name: Prime Docker cache (${{ inputs.image_repo }}:${{ inputs.image_tag }}@sha256:${{ inputs.image_sha }})
- with:
- image_tag: "${{ inputs.image_repo }}:${{ inputs.image_tag }}@sha256:${{ inputs.image_sha }}"
diff --git a/.github/workflows/_ci.yml b/.github/workflows/_ci.yml
deleted file mode 100644
index c036a726ef335..0000000000000
--- a/.github/workflows/_ci.yml
+++ /dev/null
@@ -1,173 +0,0 @@
-name: Envoy CI
-
-on:
- workflow_call:
- inputs:
- target:
- required: true
- type: string
- rbe:
- type: boolean
- default: true
- managed:
- type: boolean
- default: true
-
- auth_bazel_rbe:
- type: string
- default: ''
-
- bazel_extra:
- type: string
- default:
- bazel_local_cache:
- type: string
- default:
- bazel_rbe_cache:
- type: string
- default: grpcs://remotebuildexecution.googleapis.com
- bazel_rbe_instance:
- type: string
- default: projects/envoy-ci/instances/default_instance
- bazel_rbe_jobs:
- type: number
- default: 75
-
- cache_build_image:
- type: string
-
- command_prefix:
- type: string
- default: ./ci/run_envoy_docker.sh
- command_ci:
- type: string
- default: ./ci/do_ci.sh
-
- diskspace_hack:
- type: boolean
- default: false
-
- run_pre:
- type: string
- default:
- run_pre_with:
- type: string
- default:
-
- run_post:
- type: string
- default:
- run_post_with:
- type: string
- default:
-
- repo_fetch_depth:
- type: number
- default: 1
- repo_ref:
- type: string
- skip:
- type: boolean
- default: false
- trusted:
- type: boolean
- default: false
-
- env:
- type: string
-
-concurrency:
- group: |
- ${{ github.actor != 'trigger-release-envoy[bot]'
- && github.event.inputs.head_ref
- || github.run_id
- }}-${{ github.workflow }}-${{ inputs.target }}
- cancel-in-progress: true
-
-jobs:
- do_ci:
- if: ${{ ! inputs.skip }}
- runs-on: ubuntu-22.04
- name: ${{ inputs.command_ci }} ${{ inputs.target }}
- steps:
- - if: ${{ inputs.cache_build_image }}
- name: Restore Docker cache (${{ inputs.cache_build_image }})
- uses: envoyproxy/toolshed/gh-actions/docker/cache/restore@actions-v0.0.10
- with:
- image_tag: ${{ inputs.cache_build_image }}
-
- # If the run is "trusted" (ie has access to secrets) then it should
- # **not** set the ref and should use the code of the calling context.
- - if: ${{ inputs.repo_ref && inputs.trusted }}
- run: |
- echo '`repo_ref` should not be set for trusted CI runs'
- exit 1
-
- - uses: actions/checkout@v3
- name: Checkout Envoy repository
- with:
- fetch-depth: ${{ inputs.repo_fetch_depth }}
- # WARNING: This allows untrusted code to run!!!
- # If this is set, then anything before or after in the job should be regarded as
- # compromised.
- ref: ${{ ! inputs.trusted && inputs.repo_ref || '' }}
- - name: Add safe directory
- run: git config --global --add safe.directory /__w/envoy/envoy
-
- - if: ${{ inputs.diskspace_hack }}
- name: Cruft removal
- run: |
- echo "Disk space before cruft removal"
- df -h
-
- TO_REMOVE=(
- /opt/hostedtoolcache
- /usr/local/lib/android
- /usr/local/.ghcup)
-
- for removal in "${TO_REMOVE[@]}"; do
- echo "Removing: ${removal} ..."
- sudo rm -rf "$removal"
- done
- - run: |
- echo "disk space at beginning of build:"
- df -h
- name: "Check disk space at beginning"
-
- - if: ${{ inputs.run_pre }}
- name: Run pre action ${{ inputs.run_pre && format('({0})', inputs.run_pre) || '' }}
- uses: envoyproxy/toolshed/gh-actions/using/recurse@actions-v0.0.10
- with:
- uses: ${{ inputs.run_pre }}
- with: ${{ inputs.run_pre_with }}
-
- - uses: ./.github/actions/do_ci
- name: Do CI
- with:
- target: ${{ inputs.target }}
- rbe: ${{ inputs.rbe }}
- managed: ${{ inputs.managed }}
- auth_bazel_rbe: ${{ inputs.auth_bazel_rbe }}
- bazel_extra: ${{ inputs.bazel_extra }}
- bazel_local_cache: ${{ inputs.bazel_local_cache }}
- bazel_rbe_cache: ${{ inputs.bazel_rbe_cache }}
- bazel_rbe_instance: ${{ inputs.bazel_rbe_instance }}
- bazel_rbe_jobs: ${{ inputs.bazel_rbe_jobs }}
- command_prefix: ${{ inputs.command_prefix }}
- command_ci: ${{ inputs.command_ci }}
- env: ${{ inputs.env }}
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
- - if: ${{ inputs.run_post }}
- name: Run post action ${{ inputs.run_pre && format('({0})', inputs.run_post) || '' }}
- uses: envoyproxy/toolshed/gh-actions/using/recurse@actions-v0.0.10
- with:
- uses: ${{ inputs.run_post }}
- with: ${{ inputs.run_post_with }}
-
- - run: |
- echo "disk space at end of build:"
- df -h
- echo
- du -ch "${{ runner.temp }}" | grep -E "[0-9]{2,}M|[0-9]G"
- name: "Check disk space at end"
diff --git a/.github/workflows/_env.yml b/.github/workflows/_env.yml
deleted file mode 100644
index c31814c893ca4..0000000000000
--- a/.github/workflows/_env.yml
+++ /dev/null
@@ -1,185 +0,0 @@
-name: Environment
-
-permissions:
- contents: read
-
-on:
- workflow_call:
- inputs:
- # Authoritative configuration for build image/s
- build_image_repo:
- type: string
- default: envoyproxy/envoy-build-ubuntu
- build_image_sha:
- type: string
- default: 50337314a150ed12447c87c1622eac6f611a069888722fb9a426e21ed161cc26
- build_image_mobile_sha:
- type: string
- default: ca26ff05bd3f3a09468242faaf38ae48315e57f0a87c102352162f95ac620e6f
- build_image_tag:
- type: string
- default: 41c5a05d708972d703661b702a63ef5060125c33
-
- check_mobile_run:
- type: boolean
- default: true
- prime_build_image:
- type: boolean
- default: false
-
- start_check_status:
- type: string
- default:
-
- repo_ref:
- type: string
- default:
- repo_ref_sha:
- type: string
- default:
- repo_ref_name:
- type: string
- default:
-
- outputs:
- agent_ubuntu:
- value: ubuntu-22.04
- build_image_ubuntu:
- value: ${{ jobs.repo.outputs.build_image_ubuntu }}
- build_image_ubuntu_mobile:
- value: ${{ jobs.repo.outputs.build_image_ubuntu_mobile }}
- mobile_android_build:
- value: ${{ jobs.repo.outputs.mobile_android_build }}
- mobile_android_build_all:
- value: ${{ jobs.repo.outputs.mobile_android_build_all }}
- mobile_android_tests:
- value: ${{ jobs.repo.outputs.mobile_android_tests }}
- mobile_asan:
- value: ${{ jobs.repo.outputs.mobile_asan }}
- mobile_cc_tests:
- value: ${{ jobs.repo.outputs.mobile_cc_tests }}
- mobile_compile_time_options:
- value: ${{ jobs.repo.outputs.mobile_compile_time_options }}
- mobile_coverage:
- value: ${{ jobs.repo.outputs.mobile_coverage }}
- mobile_formatting:
- value: ${{ jobs.repo.outputs.mobile_formatting }}
- mobile_ios_build:
- value: ${{ jobs.repo.outputs.mobile_ios_build }}
- mobile_ios_build_all:
- value: ${{ jobs.repo.outputs.mobile_ios_build_all }}
- mobile_ios_tests:
- value: ${{ jobs.repo.outputs.mobile_ios_tests }}
- mobile_release_validation:
- value: ${{ jobs.repo.outputs.mobile_release_validation }}
- mobile_tsan:
- value: ${{ jobs.repo.outputs.mobile_tsan }}
-
- repo_ref:
- value: ${{ jobs.repo.outputs.repo_ref }}
- repo_ref_name:
- value: ${{ jobs.repo.outputs.repo_ref_name }}
- repo_ref_sha:
- value: ${{ jobs.repo.outputs.repo_ref_sha }}
- repo_ref_sha_short:
- value: ${{ jobs.repo.outputs.repo_ref_sha_short }}
- repo_ref_title:
- value: ${{ jobs.repo.outputs.repo_ref_title }}
-
- trusted:
- value: ${{ jobs.repo.outputs.trusted }}
-
- version_dev:
- value: ${{ jobs.repo.outputs.version_dev }}
- version_patch:
- value: ${{ jobs.repo.outputs.version_patch }}
-
-concurrency:
- group: |
- ${{ github.actor != 'trigger-release-envoy[bot]'
- && github.event.inputs.head_ref
- || github.run_id
- }}-${{ github.workflow }}-env
- cancel-in-progress: true
-
-jobs:
- repo:
- if: github.repository == 'envoyproxy/envoy'
- runs-on: ubuntu-22.04
- outputs:
- build_image_ubuntu: ${{ steps.env.outputs.build_image_ubuntu }}
- build_image_ubuntu_mobile: ${{ steps.env.outputs.build_image_ubuntu_mobile }}
- mobile_android_build: ${{ steps.env.outputs.mobile_android_build }}
- mobile_android_build_all: ${{ steps.env.outputs.mobile_android_build_all }}
- mobile_android_tests: ${{ steps.env.outputs.mobile_android_tests }}
- mobile_asan: ${{ steps.env.outputs.mobile_asan }}
- mobile_cc_tests: ${{ steps.env.outputs.mobile_cc_tests }}
- mobile_compile_time_options: ${{ steps.env.outputs.mobile_compile_time_options }}
- mobile_coverage: ${{ steps.env.outputs.mobile_coverage }}
- mobile_formatting: ${{ steps.env.outputs.mobile_formatting }}
- mobile_ios_build: ${{ steps.env.outputs.mobile_ios_build }}
- mobile_ios_build_all: ${{ steps.env.outputs.mobile_ios_build_all }}
- mobile_ios_tests: ${{ steps.env.outputs.mobile_ios_tests }}
- mobile_release_validation: ${{ steps.env.outputs.mobile_release_validation }}
- mobile_tsan: ${{ steps.env.outputs.mobile_tsan }}
- repo_ref: ${{ steps.env.outputs.repo_ref }}
- repo_ref_name: ${{ steps.env.outputs.repo_ref_name }}
- repo_ref_sha: ${{ steps.env.outputs.repo_ref_sha }}
- repo_ref_sha_short: ${{ steps.env.outputs.repo_ref_sha_short }}
- repo_ref_title: ${{ steps.env.outputs.repo_ref_title }}
- trusted: ${{ steps.env.outputs.trusted }}
- version_dev: ${{ steps.env.outputs.version_dev }}
- version_patch: ${{ steps.env.outputs.version_patch }}
- steps:
- - uses: actions/checkout@v3
- name: Checkout Envoy repository
- with:
- fetch-depth: ${{ ! inputs.check_mobile_run && 1 || 0 }}
- - uses: ./.github/actions/env
- name: Generate environment variables
- id: env
- with:
- check_mobile_run: ${{ inputs.check_mobile_run }}
- repo_ref: ${{ inputs.repo_ref }}
- repo_ref_name: ${{ inputs.repo_ref_name }}
- repo_ref_sha: ${{ inputs.repo_ref_sha }}
- build_image_repo: ${{ inputs.build_image_repo }}
- build_image_tag: ${{ inputs.build_image_tag }}
- build_image_mobile_sha: ${{ inputs.build_image_mobile_sha }}
- build_image_sha: ${{ inputs.build_image_sha }}
-
- - name: 'Print env'
- run: |
- echo "version_dev=${{ steps.env.outputs.version_dev }}"
- echo "version_patch=${{ steps.env.outputs.version_patch }}"
- echo "trusted=${{ steps.env.outputs.trusted }}"
- echo "repo_ref=${{ steps.env.outputs.repo_ref }}"
- echo "repo_ref_name=${{ steps.env.outputs.repo_ref_name }}"
- echo "repo_ref_pr_number=${{ steps.env.outputs.repo_ref_pr_number }}"
- echo "repo_ref_sha=${{ steps.env.outputs.repo_ref_sha }}"
- echo "repo_ref_sha_short=${{ steps.env.outputs.repo_ref_sha_short }}"
- echo "repo_ref_title=${{ steps.env.outputs.repo_ref_title }}"
- echo "build_image_ubuntu=${{ steps.env.outputs.build_image_ubuntu }}"
- echo "build_image_ubuntu_mobile=${{ steps.env.outputs.build_image_ubuntu_mobile }}"
- echo
- if [[ -n "${{ steps.env.outputs.repo_ref_pr_number }}" ]]; then
- echo "PR: https://github.com/envoyproxy/envoy/pull/${{ steps.env.outputs.repo_ref_pr_number }}"
- fi
-
- check:
- if: ${{ inputs.start_check_status && github.event_name != 'pull_request' }}
- uses: ./.github/workflows/_workflow-start.yml
- permissions:
- contents: read
- statuses: write
- with:
- workflow_name: ${{ inputs.start_check_status }}
- sha: ${{ inputs.repo_ref_sha }}
-
- cache:
- if: ${{ inputs.prime_build_image }}
- uses: ./.github/workflows/_cache_docker.yml
- with:
- image_repo: ${{ inputs.build_image_repo }}
- image_tag: ${{ inputs.build_image_tag }}
- image_sha: ${{ inputs.build_image_sha }}
diff --git a/.github/workflows/_stage_publish.yml b/.github/workflows/_stage_publish.yml
deleted file mode 100644
index 2b0dcca963ccd..0000000000000
--- a/.github/workflows/_stage_publish.yml
+++ /dev/null
@@ -1,92 +0,0 @@
-name: Publish
-
-permissions:
- contents: read
-
-# The matrices in this config can be combined once the calling workflow has shifted
-# to a `pull_request`/`commit` pattern (ie not `workflow_dispatch`)
-#
-# For now pre/post submit is split between `publish_ci`/`publish`, the latter running
-# only for "trusted" runs and having access to secrets/permissions
-
-on:
- workflow_call:
- inputs:
- trusted:
- type: boolean
- default: false
- build_image_ubuntu:
- type: string
- default: ''
- version_dev:
- type: string
- default: ''
- head_ref:
- type: string
- default: ''
- repo_ref:
- type: string
- given_ref:
- type: string
-
-concurrency:
- group: ${{ github.head_ref || github.run_id }}-${{ github.workflow }}-publish
- cancel-in-progress: true
-
-jobs:
- publish_ci:
- if: ${{ ! inputs.trusted }}
- name: ${{ matrix.name || matrix.target }}
- strategy:
- fail-fast: false
- matrix:
- include:
- - target: publish
- name: github
- run_pre: ./.github/actions/publish/release/setup
- run_pre_with: |
- ref: ${{ inputs.given_ref }}
- bucket: envoy-pr
- env: |
- export ENVOY_PUBLISH_DRY_RUN=1
- uses: ./.github/workflows/_ci.yml
- with:
- target: ${{ matrix.target }}
- rbe: false
- managed: true
- cache_build_image: ${{ inputs.build_image_ubuntu }}
- run_pre: ${{ matrix.run_pre }}
- run_pre_with: ${{ matrix.run_pre_with }}
- env: ${{ matrix.env }}
- trusted: false
- repo_ref: ${{ inputs.repo_ref }}
-
- publish:
- if: ${{ inputs.trusted }}
- name: ${{ matrix.name || matrix.target }}
- permissions:
- contents: write
- strategy:
- fail-fast: false
- matrix:
- include:
- - target: publish
- name: github
- run_pre: ./.github/actions/publish/release/setup
- run_pre_with: |
- ref: ${{ inputs.given_ref }}
- bucket: envoy-postsubmit
- env: |
- if [[ '${{ inputs.version_dev }}' != '' ]]; then
- export ENVOY_PUBLISH_DRY_RUN=1
- fi
- uses: ./.github/workflows/_ci.yml
- with:
- target: ${{ matrix.target }}
- rbe: false
- managed: true
- cache_build_image: ${{ inputs.build_image_ubuntu }}
- run_pre: ${{ matrix.run_pre }}
- run_pre_with: ${{ matrix.run_pre_with }}
- env: ${{ matrix.env }}
- trusted: true
diff --git a/.github/workflows/_stage_verify.yml b/.github/workflows/_stage_verify.yml
deleted file mode 100644
index a9dcf195c5db0..0000000000000
--- a/.github/workflows/_stage_verify.yml
+++ /dev/null
@@ -1,53 +0,0 @@
-name: Verify
-
-permissions:
- contents: read
-
-on:
- workflow_call:
- inputs:
- trusted:
- type: boolean
- default: false
- repo_ref:
- type: string
- given_ref:
- type: string
-
-concurrency:
- group: ${{ github.head_ref || github.run_id }}-${{ github.workflow }}-verify
- cancel-in-progress: true
-
-jobs:
- verify:
- name: ${{ matrix.name || matrix.target }}
- strategy:
- fail-fast: false
- matrix:
- include:
- - target: verify_examples
- name: examples
- rbe: false
- managed: true
- cache_build_image: ""
- command_prefix: ""
- diskspace_hack: true
- run_pre: ./.github/actions/verify/examples/setup
- run_pre_with: |
- bucket: envoy-${{ inputs.trusted && 'postsubmit' || 'pr' }}
- ref: ${{ inputs.given_ref }}
- env: |
- export NO_BUILD_SETUP=1
- uses: ./.github/workflows/_ci.yml
- with:
- target: ${{ matrix.target }}
- rbe: ${{ matrix.rbe }}
- managed: ${{ matrix.managed }}
- cache_build_image: ${{ matrix.cache_build_image }}
- diskspace_hack: ${{ matrix.diskspace_hack }}
- command_prefix: ${{ matrix.command_prefix }}
- run_pre: ${{ matrix.run_pre }}
- run_pre_with: ${{ matrix.run_pre_with }}
- env: ${{ matrix.env }}
- trusted: ${{ inputs.trusted }}
- repo_ref: ${{ ! inputs.trusted && inputs.repo_ref || '' }}
diff --git a/.github/workflows/_workflow-start.yml b/.github/workflows/_workflow-start.yml
deleted file mode 100644
index b4e758778c2b5..0000000000000
--- a/.github/workflows/_workflow-start.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-name: Workflow start
-# This workflow is only required for externally triggered jobs that need to manually
-# set the check status for a commit/PR
-
-permissions:
- contents: read
-
-on:
- workflow_call:
- inputs:
- workflow_name:
- required: true
- type: string
- sha:
- required: true
- type: string
-
-jobs:
- start:
- runs-on: ubuntu-22.04
- permissions:
- statuses: write
- steps:
- - uses: actions/checkout@v3
- - uses: ./.github/actions/env
- id: env
- with:
- check_mobile_run: false
-
- - if: ${{ steps.env.outputs.trusted != 'true' }}
- name: Start status check
- uses: envoyproxy/toolshed/gh-actions/status@actions-v0.0.10
- with:
- authToken: ${{ secrets.GITHUB_TOKEN }}
- context: ${{ inputs.workflow_name }}
- state: 'pending'
- sha: ${{ inputs.sha }}
- target_url: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
- - if: ${{ steps.env.outputs.trusted != 'true' }}
- name: Save the SHA
- env:
- STATE_SHA: ${{ inputs.sha }}
- run: |
- mkdir -p ./sha
- echo $STATE_SHA > ./sha/state_sha
- - if: ${{ steps.env.outputs.trusted != 'true' }}
- uses: actions/upload-artifact@v3
- with:
- name: state_sha
- path: sha/
diff --git a/.github/workflows/check-deps.yml b/.github/workflows/check-deps.yml
index 984a52a57b0ff..b652f213d9079 100644
--- a/.github/workflows/check-deps.yml
+++ b/.github/workflows/check-deps.yml
@@ -1,16 +1,17 @@
name: Check dependencies
+permissions:
+ contents: read
+
on:
schedule:
- - cron: '0 8 * * *'
+ - cron: '0 8 * * *'
workflow_dispatch:
-permissions: read-all
-
jobs:
build:
runs-on: ubuntu-22.04
- if: |
+ if: >-
${{
github.repository == 'envoyproxy/envoy'
&& (github.event.schedule
@@ -34,6 +35,6 @@ jobs:
TODAY_DATE=$(date -u -I"date")
export TODAY_DATE
bazel run //tools/dependency:check --action_env=TODAY_DATE -- -c release_issues --fix
- bazel run //tools/dependency:check --action_env=TODAY_DATE -- -c cves -w error
+ bazel run --//tools/dependency:preload_cve_data //tools/dependency:check --action_env=TODAY_DATE -- -c cves -w error
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/commands.yml b/.github/workflows/commands.yml
deleted file mode 100644
index d1e4339f34ad2..0000000000000
--- a/.github/workflows/commands.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-name: commands
-
-on:
- issue_comment:
- types: [created]
-
-permissions:
- contents: read
-
-jobs:
- retest:
- if: |
- ${{
- github.event.issue.pull_request
- && github.repository == 'envoyproxy/envoy'
- && github.actor != 'repokitteh-read-only[bot]'
- && github.actor != 'dependabot[bot]'
- }}
- name: Retest
- runs-on: ubuntu-22.04
- permissions:
- pull-requests: write
- actions: write
- steps:
- - uses: envoyproxy/toolshed/gh-actions/retest@actions-v0.0.10
- with:
- token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/envoy-dependency.yml b/.github/workflows/envoy-dependency.yml
new file mode 100644
index 0000000000000..50582d9e7393f
--- /dev/null
+++ b/.github/workflows/envoy-dependency.yml
@@ -0,0 +1,171 @@
+name: Envoy/dependency
+
+permissions:
+ contents: read
+
+on:
+ workflow_dispatch:
+ inputs:
+ task:
+ description: Select a task
+ required: true
+ default: bazel
+ type: choice
+ options:
+ - bazel
+ - bazel-api
+ - build-image
+ dependency:
+ description: Dependency to update (if applicable)
+ version:
+ description: Version to set (optional)
+ pr:
+ type: boolean
+ default: true
+ pr_message:
+ description: Additional message for PR, eg to fix an issue (optional)
+
+concurrency:
+ group: ${{ github.run_id }}-${{ github.workflow }}
+ cancel-in-progress: true
+
+
+jobs:
+ update_bazel:
+ if: startsWith(inputs.task, 'bazel')
+ name: >-
+ Update dep
+ (${{ inputs.pr && 'PR/' || '' }}${{ inputs.task == 'bazel' && 'bazel' || 'bazel/api' }}/${{ inputs.dependency }}/${{ inputs.version }})
+ runs-on: ubuntu-22.04
+ steps:
+ - id: checkout
+ name: Checkout Envoy repository
+ uses: envoyproxy/toolshed/gh-actions/github/checkout@actions-v0.0.25
+ with:
+ app_id: ${{ secrets.ENVOY_CI_DEP_APP_ID }}
+ app_key: ${{ secrets.ENVOY_CI_DEP_APP_KEY }}
+ - id: version
+ name: Shorten (possible) SHA
+ uses: envoyproxy/toolshed/gh-actions/str/sub@actions-v0.0.25
+ with:
+ string: ${{ inputs.version }}
+ length: 7
+ min: 40
+ - run: |
+ echo "Updating(${TASK}): ${DEPENDENCY} -> ${VERSION}"
+ bazel run --config=ci //bazel:${TARGET} $DEPENDENCY $VERSION
+ name: Update dependency
+ env:
+ DEPENDENCY: ${{ inputs.dependency }}
+ VERSION: ${{ inputs.version }}
+ TARGET: ${{ inputs.task == 'bazel' && 'update' || 'api-update' }}
+ TASK: ${{ inputs.task == 'bazel' && 'bazel' || 'api/bazel' }}
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ - uses: envoyproxy/toolshed/gh-actions/upload/diff@actions-v0.0.25
+ name: Upload diff
+ with:
+ name: ${{ inputs.dependency }}-${{ steps.version.outputs.string }}
+ - name: Create a PR
+ if: ${{ inputs.pr }}
+ uses: envoyproxy/toolshed/gh-actions/github/pr@actions-v0.0.25
+ with:
+ base: main
+ body: |
+ Created by Envoy dependency bot for @${{ github.actor }}
+
+ ${{ inputs.pr_message }}
+ branch: >-
+ dependency/${{ inputs.task }}/${{ inputs.dependency }}/${{ steps.version.outputs.string }}
+ committer-name: publish-envoy[bot]
+ committer-email: 140627008+publish-envoy[bot]@users.noreply.github.com
+ title: >-
+ ${{ inputs.task == 'bazel' && 'deps' || 'deps/api' }}: Bump `${{ inputs.dependency }}`
+ -> ${{ steps.version.outputs.string }}
+ GITHUB_TOKEN: ${{ steps.checkout.outputs.token }}
+
+ update_build_image:
+ if: github.event.inputs.task == 'build-image'
+ name: Update build image (PR)
+ runs-on: ubuntu-22.04
+ steps:
+ - name: Fetch token for app auth
+ id: appauth
+ uses: envoyproxy/toolshed/gh-actions/appauth@actions-v0.0.23
+ with:
+ app_id: ${{ secrets.ENVOY_CI_DEP_APP_ID }}
+ key: ${{ secrets.ENVOY_CI_DEP_APP_KEY }}
+ - uses: actions/checkout@v4
+ name: Checkout Envoy repository
+ with:
+ path: envoy
+ fetch-depth: 0
+ token: ${{ steps.appauth.outputs.token }}
+ - uses: actions/checkout@v4
+ name: Checkout Envoy build tools repository
+ with:
+ repository: envoyproxy/envoy-build-tools
+ path: build-tools
+ fetch-depth: 0
+ - run: |
+ shas=(
+ tag
+ sha
+ mobile_sha
+ gcr_sha)
+ for sha in "${shas[@]}"; do
+ current_sha=$(bazel run //tools/dependency:build-image-sha "$sha")
+ echo "${sha}=${current_sha}" >> "$GITHUB_OUTPUT"
+ done
+ id: current
+ name: Current SHAs
+ working-directory: envoy
+ - run: |
+ # get current build image version
+ CONTAINER_TAG=$(git log -1 --pretty=format:"%H" "./docker")
+ echo "tag=${CONTAINER_TAG}" >> "$GITHUB_OUTPUT"
+ echo "tag_short=${CONTAINER_TAG::7}" >> "$GITHUB_OUTPUT"
+ id: build-tools
+ name: Build image SHA
+ working-directory: build-tools
+
+ - name: Check Docker SHAs
+ id: build-images
+ uses: envoyproxy/toolshed/gh-actions/docker/shas@actions-v0.0.23
+ with:
+ images: |
+ sha: envoyproxy/envoy-build-ubuntu:${{ steps.build-tools.outputs.tag }}
+ mobile_sha: envoyproxy/envoy-build-ubuntu:mobile-${{ steps.build-tools.outputs.tag }}
+ gcr_sha: gcr.io/envoy-ci/envoy-build:${{ steps.build-tools.outputs.tag }}
+
+ - run: |
+ SHA_REPLACE=(
+ "$CURRENT_ENVOY_TAG:$ENVOY_TAG"
+ "$CURRENT_ENVOY_SHA:${OUTPUT_sha}"
+ "$CURRENT_ENVOY_MOBILE_SHA:${OUTPUT_mobile_sha}"
+ "$CURRENT_ENVOY_GCR_SHA:${OUTPUT_gcr_sha}")
+ echo "replace=${SHA_REPLACE[*]}" >> "$GITHUB_OUTPUT"
+ name: Find SHAs to replace
+ id: shas
+ env:
+ ENVOY_TAG: ${{ steps.build-tools.outputs.tag }}
+ CURRENT_ENVOY_TAG: ${{ steps.current.outputs.tag }}
+ CURRENT_ENVOY_SHA: ${{ steps.current.outputs.sha }}
+ CURRENT_ENVOY_MOBILE_SHA: ${{ steps.current.outputs.mobile_sha }}
+ CURRENT_ENVOY_GCR_SHA: ${{ steps.current.outputs.gcr_sha }}
+ - run: |
+ echo "${SHA_REPLACE}" | xargs bazel run @envoy_toolshed//sha:replace "${PWD}"
+ env:
+ SHA_REPLACE: ${{ steps.shas.outputs.replace }}
+ name: Update SHAs
+ working-directory: envoy
+ - name: Create a PR
+ uses: envoyproxy/toolshed/gh-actions/github/pr@actions-v0.0.23
+ with:
+ base: main
+ body: Created by Envoy dependency bot
+ branch: dependency-envoy/build-image/latest
+ committer-name: publish-envoy[bot]
+ committer-email: 140627008+publish-envoy[bot]@users.noreply.github.com
+ title: 'deps: Bump build images -> `${{ steps.build-tools.outputs.tag_short }}`'
+ GITHUB_TOKEN: ${{ steps.appauth.outputs.token }}
+ working-directory: envoy
diff --git a/.github/workflows/envoy-prechecks.yml b/.github/workflows/envoy-prechecks.yml
deleted file mode 100644
index 67fff9920a8e7..0000000000000
--- a/.github/workflows/envoy-prechecks.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-name: Envoy/prechecks
-
-permissions:
- contents: read
-
-on:
- push:
- branches:
- - main
- - release/v*
- pull_request:
- paths:
- - '**/requirements*.txt'
- - '**/go.mod'
- - '**/*.bzl'
- - 'WORKSPACE'
- - '.github/workflows/envoy-prechecks.yml'
- - '.github/workflows/_*.yml'
-
-concurrency:
- group: ${{ github.event.inputs.head_ref || github.run_id }}-${{ github.workflow }}
- cancel-in-progress: true
-
-jobs:
- env:
- uses: ./.github/workflows/_env.yml
- with:
- prime_build_image: true
- check_mobile_run: false
- permissions:
- contents: read
- statuses: write
-
- prechecks:
- needs:
- - env
- strategy:
- fail-fast: false
- matrix:
- include:
- - target: deps
- rbe: false
- managed: true
- uses: ./.github/workflows/_ci.yml
- name: CI ${{ matrix.target }}
- with:
- target: ${{ matrix.target }}
- rbe: ${{ matrix.rbe }}
- managed: ${{ matrix.managed }}
- cache_build_image: ${{ needs.env.outputs.build_image_ubuntu }}
diff --git a/.github/workflows/envoy-publish.yml b/.github/workflows/envoy-publish.yml
deleted file mode 100644
index 2ec5bd5969bd0..0000000000000
--- a/.github/workflows/envoy-publish.yml
+++ /dev/null
@@ -1,69 +0,0 @@
-name: Publish & verify
-
-on:
- # This runs untrusted code, do not expose secrets in the verify job
- workflow_dispatch:
- inputs:
- ref:
- description: "Git SHA ref to checkout"
- sha:
- description: "Git SHA of commit HEAD (ie last commit of PR)"
- head_ref:
- description: "Ref for grouping PRs"
-
-concurrency:
- group: |
- ${{ github.actor != 'trigger-release-envoy[bot]'
- && github.event.inputs.head_ref
- || github.run_id
- }}-${{ github.workflow }}
- cancel-in-progress: true
-
-permissions:
- contents: read
-
-jobs:
- env:
- if: |
- ${{
- github.repository == 'envoyproxy/envoy'
- && (!contains(github.actor, '[bot]')
- || github.actor == 'trigger-workflow-envoy[bot]'
- || github.actor == 'trigger-release-envoy[bot]')
- }}
- uses: ./.github/workflows/_env.yml
- with:
- check_mobile_run: false
- prime_build_image: true
- start_check_status: Verify/examples
- repo_ref: ${{ inputs.ref }}
- repo_ref_sha: ${{ inputs.sha }}
- repo_ref_name: ${{ inputs.head_ref }}
-
- permissions:
- contents: read
- statuses: write
-
- publish:
- needs:
- - env
- uses: ./.github/workflows/_stage_publish.yml
- name: Publish ${{ needs.env.outputs.repo_ref_title }}
- with:
- build_image_ubuntu: ${{ needs.env.outputs.build_image_ubuntu }}
- trusted: ${{ needs.env.outputs.trusted == 'true' && true || false }}
- version_dev: ${{ needs.env.outputs.version_dev }}
- given_ref: ${{ inputs.ref }}
- repo_ref: ${{ needs.env.outputs.trusted != 'true' && inputs.ref || '' }}
- permissions:
- contents: write
-
- verify:
- uses: ./.github/workflows/_stage_verify.yml
- name: Verify ${{ needs.env.outputs.repo_ref_title }}
- needs:
- - env
- with:
- trusted: ${{ needs.env.outputs.trusted == 'true' && true || false }}
- given_ref: ${{ inputs.ref }}
- repo_ref: ${{ needs.env.outputs.trusted != 'true' && needs.env.outputs.repo_ref || '' }}
diff --git a/.github/workflows/mobile-android_build.yml b/.github/workflows/mobile-android_build.yml
deleted file mode 100644
index 4ce6c329c7d1c..0000000000000
--- a/.github/workflows/mobile-android_build.yml
+++ /dev/null
@@ -1,229 +0,0 @@
-name: android_build
-
-on:
- push:
- branches:
- - main
- pull_request:
-
-concurrency:
- group: ${{ github.head_ref || github.run_id }}-${{ github.workflow }}
- cancel-in-progress: true
-
-jobs:
- env:
- if: ${{ github.repository == 'envoyproxy/envoy' }}
- uses: ./.github/workflows/_env.yml
- secrets: inherit
-
- androidbuild:
- if: ${{ needs.env.outputs.mobile_android_build == 'true' }}
- needs: env
- name: android_build
- runs-on: ${{ needs.env.outputs.agent_ubuntu }}
- timeout-minutes: 90
- container:
- image: ${{ needs.env.outputs.build_image_ubuntu_mobile }}
- env:
- CC: /opt/llvm/bin/clang
- CXX: /opt/llvm/bin/clang++
- steps:
- - uses: actions/checkout@v3
- - name: Add safe directory
- run: git config --global --add safe.directory /__w/envoy/envoy
- - name: 'Build envoy.aar distributable'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw build \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-linux-clang") \
- --fat_apk_cpu=x86_64 \
- --linkopt=-fuse-ld=lld \
- //:android_dist
-
- javahelloworld:
- if: ${{ needs.env.outputs.mobile_android_build_all == 'true' }}
- needs:
- - env
- - androidbuild
- name: java_helloworld
- runs-on: macos-12
- timeout-minutes: 50
- steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-java@5ffc13f4174014e2d4d4572b3d74c3fa61aeb2c2
- with:
- java-version: '8'
- java-package: jdk
- architecture: x64
- distribution: zulu
- - run: cd mobile && ./ci/mac_ci_setup.sh --android
- name: 'Install dependencies'
- - uses: nick-fields/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd
- name: 'Start emulator'
- with:
- timeout_minutes: 10
- max_attempts: 3
- command: ./mobile/ci/start_android_emulator.sh
- # Return to using:
- # cd mobile && ./bazelw mobile-install --fat_apk_cpu=x86_64 --start_app //examples/java/hello_world:hello_envoy
- # When https://github.com/envoyproxy/envoy-mobile/issues/853 is fixed.
- - name: 'Start java app'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw build \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- --fat_apk_cpu=x86_64 \
- //examples/java/hello_world:hello_envoy
- adb install -r --no-incremental bazel-bin/examples/java/hello_world/hello_envoy.apk
- adb shell am start -n io.envoyproxy.envoymobile.helloenvoy/.MainActivity
- - name: 'Check connectivity'
- run: |
- timeout 30 adb logcat -e "received headers with status 301" -m 1 || {
- echo "Failed checking for headers in adb logcat" >&2
- timeout 30 adb logcat || {
- echo "Failed dumping adb logcat" >&2
- }
- exit 1
- }
- kotlinhelloworld:
- if: ${{ needs.env.outputs.mobile_android_build == 'true' }}
- needs:
- - env
- - androidbuild
- name: kotlin_helloworld
- runs-on: macos-12
- timeout-minutes: 50
- steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-java@5ffc13f4174014e2d4d4572b3d74c3fa61aeb2c2
- with:
- java-version: '8'
- java-package: jdk
- architecture: x64
- distribution: zulu
- - name: 'Install dependencies'
- run: cd mobile && ./ci/mac_ci_setup.sh --android
- - uses: nick-fields/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd
- name: 'Start emulator'
- with:
- timeout_minutes: 10
- max_attempts: 3
- command: ./mobile/ci/start_android_emulator.sh
- # Return to using:
- # ./bazelw mobile-install --fat_apk_cpu=x86_64 --start_app //examples/kotlin/hello_world:hello_envoy_kt
- # When https://github.com/envoyproxy/envoy-mobile/issues/853 is fixed.
- - name: 'Start kotlin app'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw build \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- --fat_apk_cpu=x86_64 \
- //examples/kotlin/hello_world:hello_envoy_kt
- adb install -r --no-incremental bazel-bin/examples/kotlin/hello_world/hello_envoy_kt.apk
- adb shell am start -n io.envoyproxy.envoymobile.helloenvoykotlin/.MainActivity
- - name: 'Check connectivity'
- run: |
- timeout 30 adb logcat -e "received headers with status 200" -m 1 || {
- echo "Failed checking for headers in adb logcat" >&2
- timeout 30 adb logcat || {
- echo "Failed dumping adb logcat" >&2
- }
- exit 1
- }
-
- kotlinbaselineapp:
- if: ${{ needs.env.outputs.mobile_android_build_all == 'true' }}
- needs:
- - env
- - androidbuild
- name: kotlin_baseline_app
- runs-on: macos-12
- timeout-minutes: 50
- steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-java@5ffc13f4174014e2d4d4572b3d74c3fa61aeb2c2
- with:
- java-version: '8'
- java-package: jdk
- architecture: x64
- distribution: zulu
- - name: 'Install dependencies'
- run: cd mobile && ./ci/mac_ci_setup.sh --android
- - uses: nick-fields/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd
- name: 'Start emulator'
- with:
- timeout_minutes: 10
- max_attempts: 3
- command: ./mobile/ci/start_android_emulator.sh
- # Return to using:
- # ./bazelw mobile-install --fat_apk_cpu=x86_64 --start_app //examples/kotlin/hello_world:hello_envoy_kt
- # When https://github.com/envoyproxy/envoy-mobile/issues/853 is fixed.
- - name: 'Start kotlin app'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw build \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- --fat_apk_cpu=x86_64 \
- //test/kotlin/apps/baseline:hello_envoy_kt
- adb install -r --no-incremental bazel-bin/test/kotlin/apps/baseline/hello_envoy_kt.apk
- adb shell am start -n io.envoyproxy.envoymobile.helloenvoybaselinetest/.MainActivity
- - name: 'Check connectivity'
- run: |
- timeout 30 adb logcat -e "received headers with status 301" -m 1 || {
- echo "Failed checking for headers in adb logcat" >&2
- timeout 30 adb logcat || {
- echo "Failed dumping adb logcat" >&2
- }
- exit 1
- }
- kotlinexperimentalapp:
- if: ${{ needs.env.outputs.mobile_android_build_all == 'true' }}
- needs:
- - env
- - androidbuild
- name: kotlin_experimental_app
- runs-on: macos-12
- timeout-minutes: 50
- steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-java@5ffc13f4174014e2d4d4572b3d74c3fa61aeb2c2
- with:
- java-version: '8'
- java-package: jdk
- architecture: x64
- distribution: zulu
- - name: 'Install dependencies'
- run: cd mobile && ./ci/mac_ci_setup.sh --android
- - uses: nick-fields/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd
- name: 'Start emulator'
- with:
- timeout_minutes: 10
- max_attempts: 3
- command: ./mobile/ci/start_android_emulator.sh
- # Return to using:
- # ./bazelw mobile-install --fat_apk_cpu=x86_64 --start_app //examples/kotlin/hello_world:hello_envoy_kt
- # When https://github.com/envoyproxy/envoy-mobile/issues/853 is fixed.
- - name: 'Start kotlin app'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw build \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- --fat_apk_cpu=x86_64 \
- --define envoy_mobile_listener=enabled \
- //test/kotlin/apps/experimental:hello_envoy_kt
- adb install -r --no-incremental bazel-bin/test/kotlin/apps/experimental/hello_envoy_kt.apk
- adb shell am start -n io.envoyproxy.envoymobile.helloenvoyexperimentaltest/.MainActivity
- - name: 'Check connectivity'
- run: |
- timeout 30 adb logcat -e "received headers with status 200" -m 1 || {
- echo "Failed checking for headers in adb logcat" >&2
- timeout 30 adb logcat || {
- echo "Failed dumping adb logcat" >&2
- }
- exit 1
- }
diff --git a/.github/workflows/mobile-android_tests.yml b/.github/workflows/mobile-android_tests.yml
deleted file mode 100644
index 917c7f8871e5d..0000000000000
--- a/.github/workflows/mobile-android_tests.yml
+++ /dev/null
@@ -1,104 +0,0 @@
-name: android_tests
-
-on:
- push:
- branches:
- - main
- pull_request:
-
-concurrency:
- group: ${{ github.head_ref || github.run_id }}-${{ github.workflow }}
- cancel-in-progress: true
-
-jobs:
- env:
- uses: ./.github/workflows/_env.yml
- secrets: inherit
-
- kotlintestsmac:
- if: ${{ needs.env.outputs.mobile_android_tests == 'true' }}
- needs: env
- # revert to //test/kotlin/... once fixed
- # https://github.com/envoyproxy/envoy-mobile/issues/1932
- name: kotlin_tests_mac
- runs-on: macos-12
- timeout-minutes: 90
- steps:
- - uses: actions/checkout@v3
- - name: 'Java setup'
- uses: actions/setup-java@5ffc13f4174014e2d4d4572b3d74c3fa61aeb2c2
- with:
- java-version: '8'
- java-package: jdk
- architecture: x64
- distribution: zulu
- - name: 'Install dependencies'
- run: cd mobile && ./ci/mac_ci_setup.sh
- - name: 'Run Kotlin library tests'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw test \
- --test_output=all \
- --build_tests_only \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- --define=signal_trace=disabled \
- //test/kotlin/io/...
- javatestsmac:
- if: ${{ needs.env.outputs.mobile_android_tests == 'true' }}
- needs: env
- name: java_tests_mac
- runs-on: macos-12
- timeout-minutes: 120
- steps:
- - uses: actions/checkout@v3
- - name: 'Java setup'
- uses: actions/setup-java@5ffc13f4174014e2d4d4572b3d74c3fa61aeb2c2
- with:
- java-version: '8'
- java-package: jdk
- architecture: x64
- distribution: zulu
- - name: 'Install dependencies'
- run: cd mobile && ./ci/mac_ci_setup.sh
- - name: 'Run Java library tests'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw test \
- --test_output=all \
- --build_tests_only \
- --config test-android \
- --define envoy_mobile_listener=enabled \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- --define=signal_trace=disabled \
- --define=system-helper=android \
- //test/java/...
- kotlintestslinux:
- if: ${{ needs.env.outputs.mobile_android_tests == 'true' }}
- needs: env
- # Only kotlin tests are executed since with linux:
- # https://github.com/envoyproxy/envoy-mobile/issues/1418.
- name: kotlin_tests_linux
- runs-on: ${{ needs.env.outputs.agent_ubuntu }}
- timeout-minutes: 90
- container:
- image: ${{ needs.env.outputs.build_image_ubuntu_mobile }}
- env:
- CC: /opt/llvm/bin/clang
- CXX: /opt/llvm/bin/clang++
- steps:
- - uses: actions/checkout@v3
- - name: Add safe directory
- run: git config --global --add safe.directory /__w/envoy/envoy
- - name: 'Run Kotlin library integration tests'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw test \
- --test_output=all \
- --build_tests_only \
- --config test-android \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-linux-clang") \
- --define=signal_trace=disabled \
- //test/kotlin/...
diff --git a/.github/workflows/mobile-asan.yml b/.github/workflows/mobile-asan.yml
deleted file mode 100644
index c54a9a028eb45..0000000000000
--- a/.github/workflows/mobile-asan.yml
+++ /dev/null
@@ -1,41 +0,0 @@
-name: mobile_asan
-
-on:
- push:
- branches:
- - main
- pull_request:
-
-concurrency:
- group: ${{ github.head_ref || github.run_id }}-${{ github.workflow }}
- cancel-in-progress: true
-
-jobs:
- env:
- if: ${{ github.repository == 'envoyproxy/envoy' }}
- uses: ./.github/workflows/_env.yml
- secrets: inherit
-
- asan:
- if: ${{ needs.env.outputs.mobile_asan == 'true' }}
- needs: env
- name: asan
- runs-on: ${{ needs.env.outputs.agent_ubuntu }}
- timeout-minutes: 180
- container:
- image: ${{ needs.env.outputs.build_image_ubuntu_mobile }}
- env:
- CC: /opt/llvm/bin/clang
- CXX: /opt/llvm/bin/clang++
- steps:
- - uses: actions/checkout@v3
- - name: Add safe directory
- run: git config --global --add safe.directory /__w/envoy/envoy
- - name: 'Run tests'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw test --test_output=all \
- --test_env=ENVOY_IP_TEST_VERSIONS=v4only \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-linux-asan") \
- //test/common/...
diff --git a/.github/workflows/mobile-cc_tests.yml b/.github/workflows/mobile-cc_tests.yml
deleted file mode 100644
index b9fb3b5cfad1b..0000000000000
--- a/.github/workflows/mobile-cc_tests.yml
+++ /dev/null
@@ -1,41 +0,0 @@
-name: mobile_cc_tests
-
-on:
- push:
- branches:
- - main
- pull_request:
-
-concurrency:
- group: ${{ github.head_ref || github.run_id }}-${{ github.workflow }}
- cancel-in-progress: true
-
-jobs:
- env:
- if: ${{ github.repository == 'envoyproxy/envoy' }}
- uses: ./.github/workflows/_env.yml
- secrets: inherit
-
- cctests:
- if: ${{ needs.env.outputs.mobile_cc_tests == 'true' }}
- needs: env
- name: cc_tests
- runs-on: ${{ needs.env.outputs.agent_ubuntu }}
- timeout-minutes: 120
- container:
- image: ${{ needs.env.outputs.build_image_ubuntu }}
- steps:
- - uses: actions/checkout@v3
- - name: Add safe directory
- run: git config --global --add safe.directory /__w/envoy/envoy
- - env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: 'Run tests'
- # Regression test using the new API listener. TODO(#2711) clean up.
- run: |
- cd mobile && ./bazelw test \
- --action_env=LD_LIBRARY_PATH \
- --test_output=all \
- --copt=-DUSE_API_LISTENER \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-linux") \
- //test/cc/...
diff --git a/.github/workflows/mobile-compile_time_options.yml b/.github/workflows/mobile-compile_time_options.yml
deleted file mode 100644
index f724428c794c9..0000000000000
--- a/.github/workflows/mobile-compile_time_options.yml
+++ /dev/null
@@ -1,126 +0,0 @@
-name: mobile_compile_time_options
-
-on:
- push:
- branches:
- - main
- pull_request:
-
-concurrency:
- group: ${{ github.head_ref || github.run_id }}-${{ github.workflow }}
- cancel-in-progress: true
-
-jobs:
- env:
- if: ${{ github.repository == 'envoyproxy/envoy' }}
- uses: ./.github/workflows/_env.yml
- secrets: inherit
-
- cc_test_no_yaml:
- needs: env
- name: cc_test_no_yaml
- runs-on: ubuntu-20.04
- timeout-minutes: 120
- container:
- image: envoyproxy/envoy-build-ubuntu:41c5a05d708972d703661b702a63ef5060125c33
- steps:
- - uses: actions/checkout@v3
- - name: Add safe directory
- run: git config --global --add safe.directory /__w/envoy/envoy
- - name: 'Running C++ test with YAML disabled'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- # Envoy Mobile build which verifies that the build configuration where YAML is disabled.
- run: |
- cd mobile
- ./bazelw test \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-linux") \
- --config=ci \
- --define=envoy_yaml=disabled \
- --test_env=ENVOY_IP_TEST_VERSIONS=v4only \
- //test/common/integration:client_integration_test --test_output=all
- cc_test:
- needs: env
- name: cc_test
- runs-on: ${{ needs.env.outputs.agent_ubuntu }}
- timeout-minutes: 120
- container:
- image: envoyproxy/envoy-build-ubuntu:41c5a05d708972d703661b702a63ef5060125c33
- steps:
- - uses: actions/checkout@v3
- - name: Add safe directory
- run: git config --global --add safe.directory /__w/envoy/envoy
- - name: 'Running C++ tests'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile
- ./bazelw test \
- --test_output=all \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-linux") \
- --config=ci \
- --define=signal_trace=disabled \
- --define=envoy_mobile_request_compression=disabled \
- --define=envoy_enable_http_datagrams=disabled \
- --define=google_grpc=disabled \
- --@com_envoyproxy_protoc_gen_validate//bazel:template-flavor= \
- $(bazel query //test/cc/... + //test/common/... except //test/common/integration:client_integration_test)
- swift_build:
- if: ${{ needs.env.outputs.mobile_compile_time_options == 'true' }}
- needs: env
- name: swift_build
- runs-on: macos-12
- timeout-minutes: 120
- steps:
- - uses: actions/checkout@v3
- - run: cd mobile && ./ci/mac_ci_setup.sh
- name: 'Install dependencies'
- - name: 'Build Swift library'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile
- ./bazelw shutdown
- ./bazelw build \
- --config=ios \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- --define=signal_trace=disabled \
- --define=envoy_mobile_request_compression=disabled \
- --define=envoy_mobile_stats_reporting=disabled \
- --define=envoy_mobile_swift_cxx_interop=disabled \
- --define=envoy_enable_http_datagrams=disabled \
- --define=google_grpc=disabled \
- --@envoy//bazel:http3=False \
- --@com_envoyproxy_protoc_gen_validate//bazel:template-flavor= \
- //library/swift:ios_framework
- kotlin_build:
- if: ${{ needs.env.outputs.mobile_compile_time_options == 'true' }}
- needs: env
- name: kotlin_build
- runs-on: macos-12
- timeout-minutes: 120
- steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-java@5ffc13f4174014e2d4d4572b3d74c3fa61aeb2c2
- with:
- java-version: '8'
- java-package: jdk
- architecture: x64
- distribution: zulu
- - name: 'Install dependencies'
- run: cd mobile && ./ci/mac_ci_setup.sh --android
- - name: 'Build Kotlin library'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile
- ./bazelw build \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- --fat_apk_cpu=x86_64 \
- --define=signal_trace=disabled \
- --define=envoy_mobile_request_compression=disabled \
- --define=envoy_enable_http_datagrams=disabled \
- --define=google_grpc=disabled \
- --define=envoy_yaml=disabled \
- --@com_envoyproxy_protoc_gen_validate//bazel:template-flavor= \
- //:android_dist
diff --git a/.github/workflows/mobile-core.yml b/.github/workflows/mobile-core.yml
deleted file mode 100644
index a35a77397178d..0000000000000
--- a/.github/workflows/mobile-core.yml
+++ /dev/null
@@ -1,44 +0,0 @@
-name: mobile_core
-
-on:
- push:
- branches:
- - main
- pull_request:
-
-concurrency:
- group: ${{ github.head_ref || github.run_id }}-${{ github.workflow }}
- cancel-in-progress: true
-
-jobs:
- env:
- if: ${{ github.repository == 'envoyproxy/envoy' }}
- uses: ./.github/workflows/_env.yml
- secrets: inherit
-
- unittests:
- if: ${{ github.repository == 'envoyproxy/envoy' }}
- needs: env
- name: unit_tests
- runs-on: ${{ needs.env.outputs.agent_ubuntu }}
- timeout-minutes: 120
- container:
- image: ${{ needs.env.outputs.build_image_ubuntu }}
- steps:
- - uses: actions/checkout@v3
- - name: Ensure no listener leaks
- run: rm source/extensions/listener_managers/listener_manager/listener_manager_impl.h
- - name: Add safe directory
- run: git config --global --add safe.directory /__w/envoy/envoy
- - name: 'Run tests'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw test \
- --build_tests_only \
- --action_env=LD_LIBRARY_PATH \
- --test_env=ENVOY_IP_TEST_VERSIONS=v4only \
- --test_output=all \
- --define envoy_mobile_listener=disabled \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-linux") \
- //test/common/...
diff --git a/.github/workflows/mobile-coverage.yml b/.github/workflows/mobile-coverage.yml
deleted file mode 100644
index afd6a89430883..0000000000000
--- a/.github/workflows/mobile-coverage.yml
+++ /dev/null
@@ -1,49 +0,0 @@
-name: mobile_coverage
-
-on:
- push:
- branches:
- - main
- pull_request:
-
-concurrency:
- group: ${{ github.head_ref || github.run_id }}-${{ github.workflow }}
- cancel-in-progress: true
-
-jobs:
- env:
- if: ${{ github.repository == 'envoyproxy/envoy' }}
- uses: ./.github/workflows/_env.yml
- secrets: inherit
-
- coverage:
- if: ${{ needs.env.outputs.mobile_coverage == 'true' }}
- needs: env
- name: coverage
- runs-on: ${{ needs.env.outputs.agent_ubuntu }}
- timeout-minutes: 120
- defaults:
- run:
- shell: bash
- container:
- image: ${{ needs.env.outputs.build_image_ubuntu }}
- steps:
- - uses: actions/checkout@v3
- - name: Add safe directory
- run: git config --global --add safe.directory /__w/envoy/envoy
- - name: 'Run coverage'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && BAZEL_BUILD_OPTION_LIST="--config=remote-ci-linux-coverage" \
- PATH=/opt/llvm/bin:${PATH} \
- COVERAGE_THRESHOLD=76 \
- ../test/run_envoy_bazel_coverage.sh //test/common/... //test/cc/...
- - name: 'Package coverage'
- run: |
- cd mobile && tar -czf coverage.tar.gz generated/coverage
- - name: 'Upload report'
- uses: actions/upload-artifact@v3
- with:
- name: coverage.tar.gz
- path: mobile/coverage.tar.gz
diff --git a/.github/workflows/mobile-docs.yml b/.github/workflows/mobile-docs.yml
deleted file mode 100644
index b0180a972aa5f..0000000000000
--- a/.github/workflows/mobile-docs.yml
+++ /dev/null
@@ -1,44 +0,0 @@
-name: mobile_docs
-
-on:
- push:
- branches:
- - main
- pull_request:
-
-concurrency:
- group: ${{ github.head_ref || github.run_id }}-${{ github.workflow }}
- cancel-in-progress: true
-
-jobs:
- env:
- if: ${{ github.repository == 'envoyproxy/envoy' }}
- uses: ./.github/workflows/_env.yml
- secrets: inherit
-
- docs:
- if: ${{ github.repository == 'envoyproxy/envoy' }}
- needs: env
- runs-on: ${{ needs.env.outputs.agent_ubuntu }}
- timeout-minutes: 20
- container:
- image: ${{ needs.env.outputs.build_image_ubuntu }}
- steps:
- - uses: actions/checkout@v3
- - name: Add safe directory
- run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
- - name: Generate docs
- run: mobile/docs/build.sh
- - name: Set up deploy key
- if: github.ref == 'refs/heads/main'
- uses: shimataro/ssh-key-action@v2.5.1
- with:
- key: ${{ secrets.ENVOY_MOBILE_WEBSITE_DEPLOY_KEY }}
- known_hosts: unnecessary
- - name: Publish docs
- if: github.ref == 'refs/heads/main'
- run: mobile/docs/publish.sh
- - uses: actions/upload-artifact@v3
- with:
- name: docs
- path: generated/docs
diff --git a/.github/workflows/mobile-format.yml b/.github/workflows/mobile-format.yml
deleted file mode 100644
index 68a871d39d2d6..0000000000000
--- a/.github/workflows/mobile-format.yml
+++ /dev/null
@@ -1,100 +0,0 @@
-name: mobile_format
-
-on:
- push:
- branches:
- - main
- pull_request:
-
-concurrency:
- group: ${{ github.head_ref || github.run_id }}-${{ github.workflow }}
- cancel-in-progress: true
-
-jobs:
- env:
- if: ${{ github.repository == 'envoyproxy/envoy' }}
- uses: ./.github/workflows/_env.yml
- secrets: inherit
-
- formatall:
- if: ${{ needs.env.outputs.mobile_formatting == 'true' }}
- needs: env
- name: format_all
- runs-on: ${{ needs.env.outputs.agent_ubuntu }}
- timeout-minutes: 45
- container:
- image: ${{ needs.env.outputs.build_image_ubuntu }}
- env:
- CLANG_FORMAT: /opt/llvm/bin/clang-format
- BUILDIFIER_BIN: /usr/local/bin/buildifier
- BUILDOZER_BIN: /usr/local/bin/buildozer
- ENVOY_BAZEL_PREFIX: "@envoy"
- steps:
- - uses: actions/checkout@v3
- - name: Add safe directory
- run: git config --global --add safe.directory /__w/envoy/envoy
- - name: 'Run formatters'
- run: cd mobile && ./tools/check_format.sh
- precommit:
- if: ${{ needs.env.outputs.mobile_formatting == 'true' }}
- needs: env
- name: precommit
- runs-on: macos-12
- timeout-minutes: 45
- steps:
- - uses: actions/checkout@v3
- - name: 'Install precommit'
- run: brew install pre-commit
- - name: 'Run precommit'
- run: cd mobile && find mobile/* | pre-commit run --files
- swiftlint:
- if: ${{ needs.env.outputs.mobile_formatting == 'true' }}
- needs: env
- name: swift_lint
- runs-on: ${{ needs.env.outputs.agent_ubuntu }}
- timeout-minutes: 5
- container:
- image: ghcr.io/realm/swiftlint:0.50.3
- steps:
- - uses: actions/checkout@v3
- - name: 'Run Swift Lint (SwiftLint)'
- run: swiftlint lint --strict
- working-directory: mobile
- drstring:
- if: ${{ needs.env.outputs.mobile_formatting == 'true' }}
- needs: env
- name: drstring
- runs-on: macos-12
- timeout-minutes: 10
- steps:
- - uses: actions/checkout@v3
- - name: 'Run DrString'
- env:
- DEVELOPER_DIR: /Applications/Xcode_14.1.app
- run: cd mobile && ./bazelw run @DrString//:drstring check
- kotlinlint:
- if: ${{ needs.env.outputs.mobile_formatting == 'true' }}
- needs: env
- name: kotlin_lint
- runs-on: macos-12
- timeout-minutes: 45
- steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-java@5ffc13f4174014e2d4d4572b3d74c3fa61aeb2c2
- with:
- java-version: '8'
- java-package: jdk
- architecture: x64
- distribution: zulu
- - run: cd mobile && ./ci/mac_ci_setup.sh
- name: 'Install dependencies'
- - name: 'Run Kotlin Lint (Detekt)'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw build \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- //library/kotlin/io/envoyproxy/envoymobile:envoy_lib_lint \
- //examples/kotlin/hello_world:hello_envoy_kt_lint
- - name: 'Run Kotlin Formatter (ktlint)'
- run: cd mobile && ./bazelw build kotlin_format
diff --git a/.github/workflows/mobile-ios_build.yml b/.github/workflows/mobile-ios_build.yml
deleted file mode 100644
index 33ef5fbca5b2f..0000000000000
--- a/.github/workflows/mobile-ios_build.yml
+++ /dev/null
@@ -1,258 +0,0 @@
-name: ios_build
-
-on:
- push:
- branches:
- - main
- pull_request:
-
-concurrency:
- group: ${{ github.head_ref || github.run_id }}-${{ github.workflow }}
- cancel-in-progress: true
-
-jobs:
- env:
- if: ${{ github.repository == 'envoyproxy/envoy' }}
- uses: ./.github/workflows/_env.yml
- secrets: inherit
-
- iosbuild:
- if: ${{ needs.env.outputs.mobile_ios_build == 'true' }}
- needs: env
- name: ios_build
- runs-on: macos-12
- timeout-minutes: 120
- steps:
- - uses: actions/checkout@v3
- - run: cd mobile && ./ci/mac_ci_setup.sh
- name: 'Install dependencies'
- - name: 'Build Envoy.framework distributable'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw shutdown
- ./bazelw build \
- --config=ios \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- //library/swift:ios_framework
- swifthelloworld:
- if: ${{ needs.env.outputs.mobile_ios_build == 'true' }}
- name: swift_helloworld
- needs:
- - env
- - iosbuild
- runs-on: macos-12
- timeout-minutes: 50
- steps:
- - uses: actions/checkout@v3
- - run: cd mobile && ./ci/mac_ci_setup.sh
- name: 'Install dependencies'
- - name: 'Build app'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw build \
- --config=ios \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- //examples/swift/hello_world:app
- - uses: nick-fields/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd
- name: 'Start simulator'
- with:
- timeout_minutes: 5
- max_attempts: 3
- command: ./mobile/ci/start_ios_simulator.sh
- # Run the app in the background and redirect logs.
- - name: 'Run app'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw run \
- --config=ios \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- //examples/swift/hello_world:app &> /tmp/envoy.log &
- - run: sed '/received headers with status 200/q' <(touch /tmp/envoy.log && tail -F /tmp/envoy.log)
- name: 'Check connectivity'
- - run: cat /tmp/envoy.log
- if: ${{ failure() || cancelled() }}
- name: 'Log app run'
- swiftbaselineapp:
- if: ${{ needs.env.outputs.mobile_ios_build_all == 'true' }}
- needs:
- - env
- - iosbuild
- name: swift_baseline_app
- runs-on: macos-12
- timeout-minutes: 50
- steps:
- - uses: actions/checkout@v3
- - run: cd mobile && ./ci/mac_ci_setup.sh
- name: 'Install dependencies'
- - name: 'Build app'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw build \
- --config=ios \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- //test/swift/apps/baseline:app
- - uses: nick-fields/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd
- name: 'Start simulator'
- with:
- timeout_minutes: 5
- max_attempts: 3
- command: ./mobile/ci/start_ios_simulator.sh
- # Run the app in the background and redirect logs.
- - name: 'Run app'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw run \
- --config=ios \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- //test/swift/apps/baseline:app &> /tmp/envoy.log &
- - run: sed '/received headers with status 301/q' <(touch /tmp/envoy.log && tail -F /tmp/envoy.log)
- name: 'Check connectivity'
- - run: cat /tmp/envoy.log
- if: ${{ failure() || cancelled() }}
- name: 'Log app run'
- swiftexperimentalapp:
- if: ${{ needs.env.outputs.mobile_ios_build_all == 'true' }}
- needs:
- - env
- - iosbuild
- name: swift_experimental_app
- runs-on: macos-12
- timeout-minutes: 50
- steps:
- - uses: actions/checkout@v3
- - run: cd mobile && ./ci/mac_ci_setup.sh
- name: 'Install dependencies'
- - name: 'Build app'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw build \
- --config=ios \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- --define=admin_functionality=enabled \
- --define envoy_mobile_listener=enabled \
- //test/swift/apps/experimental:app
- - uses: nick-fields/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd
- name: 'Start simulator'
- with:
- timeout_minutes: 5
- max_attempts: 3
- command: ./mobile/ci/start_ios_simulator.sh
- # Run the app in the background and redirect logs.
- - name: 'Run app'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw run \
- --config=ios \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- --define=admin_functionality=enabled \
- --define envoy_mobile_listener=enabled \
- //test/swift/apps/experimental:app &> /tmp/envoy.log &
- - run: sed '/received headers with status 200/q' <(touch /tmp/envoy.log && tail -F /tmp/envoy.log)
- name: 'Check connectivity'
- - run: cat /tmp/envoy.log
- if: ${{ failure() || cancelled() }}
- name: 'Log app run'
- swiftasyncawait:
- if: ${{ needs.env.outputs.mobile_ios_build_all == 'true' }}
- needs:
- - env
- - iosbuild
- name: swift_async_await
- runs-on: macos-12
- timeout-minutes: 50
- steps:
- - uses: actions/checkout@v3
- - run: cd mobile && ./ci/mac_ci_setup.sh
- name: 'Install dependencies'
- - name: 'Build app'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw build \
- --config=ios \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- //examples/swift/async_await:app
- - uses: nick-fields/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd
- name: 'Start simulator'
- with:
- timeout_minutes: 5
- max_attempts: 3
- command: ./mobile/ci/start_ios_simulator.sh
- # Run the app in the background and redirect logs.
- - name: 'Run app'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw run \
- --config=ios \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- //examples/swift/async_await:app &> /tmp/envoy.log &
- - run: |
- checklogs () {
- sed '/\[2\] Uploaded 7 MB of data/q' <(touch /tmp/envoy.log && tail -F /tmp/envoy.log)
- }
- export -f checklogs
- # TODO(phlax): figure if this needs this long
- timeout 5m bash -c checklogs || {
- retcode=$?
- if [[ "$retcode" != 124 ]]; then
- echo "Command failed" >&2
- elif grep -q "Upload failed" /tmp/envoy.log; then
- echo "Upload failed" >&2
- else
- echo "Upload timed out" >&2
- fi
- exit 1
- }
- if: steps.should_run.outputs.run_ci_job == 'true'
- name: 'Check upload succeeded'
- - run: cat /tmp/envoy.log
- if: ${{ failure() || cancelled() }}
- name: 'Log app run'
- objchelloworld:
- if: ${{ needs.env.outputs.mobile_ios_build_all == 'true' }}
- needs:
- - env
- - iosbuild
- name: objc_helloworld
- runs-on: macos-12
- timeout-minutes: 50
- steps:
- - uses: actions/checkout@v3
- - run: cd mobile && ./ci/mac_ci_setup.sh
- name: 'Install dependencies'
- - name: 'Build app'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw build \
- --config=ios \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- //examples/objective-c/hello_world:app
- - uses: nick-fields/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd
- name: 'Start simulator'
- with:
- timeout_minutes: 5
- max_attempts: 3
- command: ./mobile/ci/start_ios_simulator.sh
- # Run the app in the background and redirect logs.
- - name: 'Run app'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw run \
- --config=ios \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- //examples/objective-c/hello_world:app &> /tmp/envoy.log &
- - run: sed '/received headers with status 301/q' <(touch /tmp/envoy.log && tail -F /tmp/envoy.log)
- name: 'Check connectivity'
- - run: cat /tmp/envoy.log
- if: ${{ failure() || cancelled() }}
- name: 'Log app run'
diff --git a/.github/workflows/mobile-ios_tests.yml b/.github/workflows/mobile-ios_tests.yml
deleted file mode 100644
index 02df1e8d2f6b7..0000000000000
--- a/.github/workflows/mobile-ios_tests.yml
+++ /dev/null
@@ -1,62 +0,0 @@
-name: ios_tests
-
-on:
- push:
- branches:
- - main
- pull_request:
-
-concurrency:
- group: ${{ github.head_ref || github.run_id }}-${{ github.workflow }}
- cancel-in-progress: true
-
-jobs:
- env:
- if: ${{ github.repository == 'envoyproxy/envoy' }}
- uses: ./.github/workflows/_env.yml
- secrets: inherit
-
- swifttests:
- if: ${{ needs.env.outputs.mobile_ios_tests == 'true' }}
- needs: env
- name: swift_tests
- runs-on: macos-12
- timeout-minutes: 120
- steps:
- - uses: actions/checkout@v3
- - name: 'Install dependencies'
- run: cd mobile && ./ci/mac_ci_setup.sh
- - name: 'Run swift library tests'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- # runs with the listener enabled due to IdleTimeoutTest not setting up a test backend.
- run: |
- cd mobile && ./bazelw test \
- --experimental_ui_max_stdouterr_bytes=10485760 \
- --test_output=all \
- --config=ios \
- --define envoy_mobile_listener=enabled \
- --build_tests_only \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- //test/swift/...
- objctests:
- if: ${{ needs.env.outputs.mobile_ios_tests == 'true' }}
- needs: env
- name: c_and_objc_tests
- runs-on: macos-12
- timeout-minutes: 120
- steps:
- - uses: actions/checkout@v3
- - name: 'Install dependencies'
- run: cd mobile && ./ci/mac_ci_setup.sh
- - name: 'Run Objective-C library tests'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw test \
- --test_output=all \
- --config=ios \
- --build_tests_only \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- //test/objective-c/... \
- //test/cc/unit:envoy_config_test
diff --git a/.github/workflows/mobile-perf.yml b/.github/workflows/mobile-perf.yml
deleted file mode 100644
index 754097c2b0aaf..0000000000000
--- a/.github/workflows/mobile-perf.yml
+++ /dev/null
@@ -1,98 +0,0 @@
-name: mobile_perf
-
-on:
- push:
- branches:
- - main
- pull_request:
-
-concurrency:
- group: ${{ github.head_ref || github.run_id }}-${{ github.workflow }}
- cancel-in-progress: true
-
-jobs:
- sizecurrent:
- if: ${{ github.repository == 'envoyproxy/envoy' }}
- name: size_current
- runs-on: ubuntu-22.04
- timeout-minutes: 120
- container:
- image: ${{ needs.env.outputs.build_image_ubuntu }}
- env:
- CC: /opt/llvm/bin/clang
- CXX: /opt/llvm/bin/clang++
- steps:
- - uses: actions/checkout@v3
- - name: Add safe directory
- run: git config --global --add safe.directory /__w/envoy/envoy
- - name: 'Build test binary'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw build \
- --config=sizeopt \
- --config=release-common \
- --config=remote-ci-linux-clang \
- //test/performance:test_binary_size
- - uses: actions/upload-artifact@v3
- with:
- name: sizecurrent
- path: mobile/bazel-bin/test/performance/test_binary_size
- sizemain:
- if: ${{ github.repository == 'envoyproxy/envoy' }}
- name: size_main
- runs-on: ubuntu-22.04
- timeout-minutes: 90
- container:
- image: ${{ needs.env.outputs.build_image_ubuntu }}
- env:
- CC: /opt/llvm/bin/clang
- CXX: /opt/llvm/bin/clang++
- steps:
- - uses: actions/checkout@v3
- - name: Add safe directory
- run: |
- git config --global --add safe.directory /__w/envoy/envoy
- - name: 'Build test binary'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- git checkout main && git pull origin main
- cd mobile && ./bazelw build \
- --config=sizeopt \
- --config=release-common \
- --config=remote-ci-linux-clang \
- //test/performance:test_binary_size
- - uses: actions/upload-artifact@v3
- with:
- name: sizemain
- path: mobile/bazel-bin/test/performance/test_binary_size
- sizecompare:
- if: ${{ github.repository == 'envoyproxy/envoy' }}
- needs:
- - sizecurrent
- - sizemain
- name: size_compare
- runs-on: ubuntu-22.04
- timeout-minutes: 30
- container:
- image: ${{ needs.env.outputs.build_image_ubuntu }}
- steps:
- - uses: actions/checkout@v3
- - uses: actions/download-artifact@v3
- with:
- name: sizecurrent
- path: dist/sizecurrent
- - uses: actions/download-artifact@v3
- with:
- name: sizemain
- path: dist/sizemain
- - name: 'Strip and Zip binary'
- run: |
- ls -lh dist/
- strip -s -o dist/main.stripped dist/sizemain/test_binary_size
- strip -s -o dist/current.stripped dist/sizecurrent/test_binary_size
- zip -9 dist/main.zip dist/main.stripped
- zip -9 dist/current.zip dist/current.stripped
- - name: 'Test size regression'
- run: cd mobile && ./ci/test_size_regression.sh ../dist/main.zip ../dist/current.zip
diff --git a/.github/workflows/mobile-release_validation.yml b/.github/workflows/mobile-release_validation.yml
deleted file mode 100644
index 88286e8a3e81a..0000000000000
--- a/.github/workflows/mobile-release_validation.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-name: mobile_release_validation
-
-on:
- push:
- branches:
- - main
- pull_request:
-
-concurrency:
- group: ${{ github.head_ref || github.run_id }}-${{ github.workflow }}
- cancel-in-progress: true
-
-jobs:
- env:
- if: ${{ github.repository == 'envoyproxy/envoy' }}
- uses: ./.github/workflows/_env.yml
- secrets: inherit
-
- validate_swiftpm_example:
- if: ${{ needs.env.outputs.mobile_release_validation == 'true' }}
- needs: env
- name: validate_swiftpm_example
- runs-on: macos-12
- timeout-minutes: 120
- steps:
- - uses: actions/checkout@v3
- - run: cd mobile && ./ci/mac_ci_setup.sh
- name: 'Install dependencies'
- - name: 'Build xcframework'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw build \
- --config=ios \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-macos") \
- //:ios_xcframework
- # Ignore errors: Bad CRC when unzipping large files: https://bbs.archlinux.org/viewtopic.php?id=153011
- - run: unzip mobile/bazel-bin/library/swift/Envoy.xcframework.zip -d mobile/examples/swift/swiftpm/Packages || true
- name: 'Unzip xcframework'
- - run: xcodebuild -project mobile/examples/swift/swiftpm/EnvoySwiftPMExample.xcodeproj -scheme EnvoySwiftPMExample -destination platform="iOS Simulator,name=iPhone 14 Pro Max,OS=16.1"
- name: 'Build app'
- # TODO(jpsim): Run app and inspect logs to validate
diff --git a/.github/workflows/mobile-traffic_director.yml b/.github/workflows/mobile-traffic_director.yml
deleted file mode 100644
index 85a9bdf0b8925..0000000000000
--- a/.github/workflows/mobile-traffic_director.yml
+++ /dev/null
@@ -1,46 +0,0 @@
-name: mobile_traffic_director
-
-on:
- schedule:
- # Once a day at midnight.
- - cron: '0 0 * * *'
- # Allows manual triggering in the UI. Makes it easier to test.
- workflow_dispatch:
-
-permissions:
- contents: read
-
-concurrency:
- group: ${{ github.head_ref || github.run_id }}-github.workflow
- cancel-in-progress: true
-
-jobs:
- cc_test:
- if: |
- ${{
- github.repository == 'envoyproxy/envoy'
- && (github.event.schedule
- || !contains(github.actor, '[bot]'))
- }}
- name: cc_test
- permissions:
- packages: read
- runs-on: ubuntu-20.04
- timeout-minutes: 120
- steps:
- - name: Checkout repository
- uses: actions/checkout@v3
- - name: Add safe directory
- run: git config --global --add safe.directory /__w/envoy/envoy
- - name: 'Run GcpTrafficDirectorIntegrationTest'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- GCP_JWT_PRIVATE_KEY: ${{ secrets.GCP_SERVICE_ACCOUNT_JWT_TOKEN }}
- ENVOY_IP_TEST_VERSIONS: v4only
- run: |
- cd mobile
- ./bazelw run \
- --config=remote-ci-linux \
- --config=ci \
- --test_output=all \
- //test/non_hermetic:gcp_traffic_director_integration_test
diff --git a/.github/workflows/mobile-tsan.yml b/.github/workflows/mobile-tsan.yml
deleted file mode 100644
index f72a907666c8d..0000000000000
--- a/.github/workflows/mobile-tsan.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-name: mobile_tsan
-
-on:
- push:
- branches:
- - main
- pull_request:
-
-concurrency:
- group: ${{ github.head_ref || github.run_id }}-${{ github.workflow }}
- cancel-in-progress: true
-
-jobs:
- env:
- if: ${{ github.repository == 'envoyproxy/envoy' }}
- uses: ./.github/workflows/_env.yml
- secrets: inherit
-
- tsan:
- if: ${{ needs.env.outputs.mobile_tsan == 'true' }}
- needs: env
- name: tsan
- runs-on: ${{ needs.env.outputs.agent_ubuntu }}
- timeout-minutes: 90
- container:
- image: ${{ needs.env.outputs.build_image_ubuntu_mobile }}
- env:
- CC: /opt/llvm/bin/clang
- CXX: /opt/llvm/bin/clang++
- steps:
- - uses: actions/checkout@v3
- - name: Add safe directory
- run: git config --global --add safe.directory /__w/envoy/envoy
- - name: 'Run tests'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- cd mobile && ./bazelw test \
- --test_output=all \
- --test_env=ENVOY_IP_TEST_VERSIONS=v4only \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-linux-tsan") \
- //test/common/...
diff --git a/.github/workflows/mobile_release.yml b/.github/workflows/mobile_release.yml
deleted file mode 100644
index aaecefb138b6d..0000000000000
--- a/.github/workflows/mobile_release.yml
+++ /dev/null
@@ -1,115 +0,0 @@
-name: mobile_release
-
-on:
- workflow_dispatch:
- schedule:
- # Mondays at 1pm UTC (8am EST)
- - cron: "0 13 * * 1"
-
-jobs:
- android_release_artifacts:
- if: |
- ${{
- github.repository == 'envoyproxy/envoy'
- && (github.event.schedule
- || !contains(github.actor, '[bot]'))
- }}
- name: android_release_artifacts
- runs-on: ubuntu-22.04
- timeout-minutes: 120
- container:
- image: ${{ needs.env.outputs.build_image_ubuntu_mobile }}
- env:
- CC: /opt/llvm/bin/clang
- CXX: /opt/llvm/bin/clang++
- steps:
- - uses: actions/checkout@v3
- with:
- fetch-depth: 0
- - name: Add safe directory
- run: git config --global --add safe.directory /__w/envoy/envoy
- - name: 'Build envoy.aar distributable'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- working-directory: mobile
- run: |
- version="0.5.0.$(date '+%Y%m%d')"
- ./bazelw build \
- $([ -z $GITHUB_TOKEN ] || echo "--config=remote-ci-linux-clang") \
- --remote_header="Authorization=Bearer $GITHUB_TOKEN" \
- --fat_apk_cpu=x86,x86_64,armeabi-v7a,arm64-v8a \
- --define=pom_version="$version" \
- --config=release-android \
- --linkopt=-fuse-ld=lld \
- //:android_dist
- - name: 'Tar artifacts'
- run: |
- tar -czvf envoy_android_aar_sources.tar.gz \
- bazel-bin/library/kotlin/io/envoyproxy/envoymobile/envoy.aar \
- bazel-bin/library/kotlin/io/envoyproxy/envoymobile/envoy-pom.xml \
- bazel-bin/library/kotlin/io/envoyproxy/envoymobile/envoy-sources.jar \
- bazel-bin/library/kotlin/io/envoyproxy/envoymobile/envoy-javadoc.jar
- working-directory: mobile
- - uses: actions/upload-artifact@v3
- with:
- name: envoy_android_aar_sources
- path: mobile/envoy_android_aar_sources.tar.gz
- android_release_deploy:
- name: android_release_deploy
- needs: android_release_artifacts
- runs-on: ubuntu-22.04
- timeout-minutes: 20
- steps:
- - uses: actions/checkout@v3
- with:
- fetch-depth: 0
- - name: Add safe directory
- run: git config --global --add safe.directory /__w/envoy/envoy
- - uses: actions/download-artifact@v3
- with:
- name: envoy_android_aar_sources
- path: .
- - name: Expand archive
- run: |
- tar -xvf envoy_android_aar_sources.tar.gz
- mv bazel-bin/library/kotlin/io/envoyproxy/envoymobile/* .
- - name: 'Configure gpg signing'
- env:
- GPG_KEY: ${{ secrets.EM_GPG_KEY }}
- GPG_KEY_NAME: ${{ secrets.EM_GPG_KEY_NAME }}
- GPG_PASSPHRASE: ${{ secrets.EM_GPG_PASSPHRASE }}
- run: |
- # https://github.com/keybase/keybase-issues/issues/2798
- export GPG_TTY=$(tty)
- # Import gpg keys and warm the passphrase to avoid the gpg
- # passphrase prompt when initating a deploy
- # `--pinentry-mode=loopback` could be needed to ensure we
- # suppress the gpg prompt
- echo $GPG_KEY | base64 --decode > signing-key
- gpg --passphrase $GPG_PASSPHRASE --batch --import signing-key
- shred signing-key
-
- gpg --pinentry-mode=loopback --passphrase $GPG_PASSPHRASE -ab envoy.aar
- gpg --pinentry-mode=loopback --passphrase $GPG_PASSPHRASE -ab envoy-pom.xml
- gpg --pinentry-mode=loopback --passphrase $GPG_PASSPHRASE -ab envoy-javadoc.jar
- gpg --pinentry-mode=loopback --passphrase $GPG_PASSPHRASE -ab envoy-sources.jar
- - name: 'Release to sonatype repository'
- env:
- READWRITE_USER: ${{ secrets.EM_SONATYPE_USER }}
- READWRITE_API_KEY: ${{ secrets.EM_SONATYPE_PASSWORD }}
- SONATYPE_PROFILE_ID: ${{ secrets.EM_SONATYPE_PROFILE_ID }}
- run: |
- version="0.5.0.$(date '+%Y%m%d')"
- python mobile/ci/sonatype_nexus_upload.py \
- --profile_id=$SONATYPE_PROFILE_ID \
- --version=$version \
- --files \
- envoy.aar \
- envoy-pom.xml \
- envoy-sources.jar \
- envoy-javadoc.jar \
- --signed_files \
- envoy.aar.asc \
- envoy-pom.xml.asc \
- envoy-sources.jar.asc \
- envoy-javadoc.jar.asc
diff --git a/.github/workflows/pr_notifier.yml b/.github/workflows/pr_notifier.yml
index f7303a1678d68..df31d16768d9d 100644
--- a/.github/workflows/pr_notifier.yml
+++ b/.github/workflows/pr_notifier.yml
@@ -14,7 +14,7 @@ jobs:
pull-requests: read # for pr_notifier.py
name: PR Notifier
runs-on: ubuntu-22.04
- if: |
+ if: >-
${{
github.repository == 'envoyproxy/envoy'
&& (github.event.schedule
diff --git a/.github/workflows/request.yml b/.github/workflows/request.yml
new file mode 100644
index 0000000000000..a245052db14a4
--- /dev/null
+++ b/.github/workflows/request.yml
@@ -0,0 +1,39 @@
+# This file must live on every branch and pass necessary secrets and permissions
+# to initiate the request
+name: Request
+
+permissions:
+ contents: read
+
+on:
+ pull_request_target:
+ push:
+ branches:
+ - main
+ - release/v*
+
+concurrency:
+ group: |
+ ${{ github.head_ref
+ || github.run_id
+ }}-${{ github.workflow }}-request
+ cancel-in-progress: true
+
+
+jobs:
+ request:
+ # For branches this can be pinned to a specific version if required
+ # NB: `uses` cannot be dynamic so it _must_ be hardcoded anywhere it is read
+ uses: envoyproxy/envoy/.github/workflows/_request.yml@main
+ if: ${{ vars.ENVOY_CI || github.repository == 'envoyproxy/envoy' }}
+ permissions:
+ actions: read
+ contents: read
+ # required for engflow/bazel caching (not yet used)
+ packages: read
+ # required to fetch merge commit
+ pull-requests: read
+ secrets:
+ # these are required to start checks
+ app-key: ${{ secrets.ENVOY_CI_APP_KEY }}
+ app-id: ${{ secrets.ENVOY_CI_APP_ID }}
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index aa6d198d07449..d8f8986bae8a2 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -1,3 +1,8 @@
+name: Prune stale
+
+permissions:
+ contents: read
+
on:
workflow_dispatch:
schedule:
@@ -5,17 +10,17 @@ on:
jobs:
prune_stale:
- permissions:
- issues: write # for actions/stale to close stale issues
- pull-requests: write # for actions/stale to close stale PRs
- name: Prune Stale
- runs-on: ubuntu-22.04
- if: |
+ if: >-
${{
github.repository == 'envoyproxy/envoy'
&& (github.event.schedule
|| !contains(github.actor, '[bot]'))
}}
+ permissions:
+ issues: write # for actions/stale to close stale issues
+ pull-requests: write # for actions/stale to close stale PRs
+ name: Prune stale
+ runs-on: ubuntu-22.04
steps:
- name: Prune Stale
diff --git a/.github/workflows/verify-requirements.in b/.github/workflows/verify-requirements.in
new file mode 100644
index 0000000000000..87de2e955af37
--- /dev/null
+++ b/.github/workflows/verify-requirements.in
@@ -0,0 +1 @@
+yq
diff --git a/.github/workflows/verify-requirements.txt b/.github/workflows/verify-requirements.txt
new file mode 100644
index 0000000000000..2c6e79d55e41c
--- /dev/null
+++ b/.github/workflows/verify-requirements.txt
@@ -0,0 +1,74 @@
+#
+# This file is autogenerated by pip-compile with Python 3.11
+# by the following command:
+#
+# pip-compile --allow-unsafe --generate-hashes verify-requirements.in
+#
+argcomplete==3.2.1 \
+ --hash=sha256:30891d87f3c1abe091f2142613c9d33cac84a5e15404489f033b20399b691fec \
+ --hash=sha256:437f67fb9b058da5a090df505ef9be0297c4883993f3f56cb186ff087778cfb4
+ # via yq
+pyyaml==6.0.1 \
+ --hash=sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5 \
+ --hash=sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc \
+ --hash=sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df \
+ --hash=sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741 \
+ --hash=sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206 \
+ --hash=sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27 \
+ --hash=sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595 \
+ --hash=sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62 \
+ --hash=sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98 \
+ --hash=sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696 \
+ --hash=sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290 \
+ --hash=sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9 \
+ --hash=sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d \
+ --hash=sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6 \
+ --hash=sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867 \
+ --hash=sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47 \
+ --hash=sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486 \
+ --hash=sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6 \
+ --hash=sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3 \
+ --hash=sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007 \
+ --hash=sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938 \
+ --hash=sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0 \
+ --hash=sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c \
+ --hash=sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735 \
+ --hash=sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d \
+ --hash=sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28 \
+ --hash=sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4 \
+ --hash=sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba \
+ --hash=sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8 \
+ --hash=sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5 \
+ --hash=sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd \
+ --hash=sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3 \
+ --hash=sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0 \
+ --hash=sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515 \
+ --hash=sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c \
+ --hash=sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c \
+ --hash=sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924 \
+ --hash=sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34 \
+ --hash=sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43 \
+ --hash=sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859 \
+ --hash=sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673 \
+ --hash=sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54 \
+ --hash=sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a \
+ --hash=sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b \
+ --hash=sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab \
+ --hash=sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa \
+ --hash=sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c \
+ --hash=sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585 \
+ --hash=sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d \
+ --hash=sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f
+ # via yq
+tomlkit==0.12.3 \
+ --hash=sha256:75baf5012d06501f07bee5bf8e801b9f343e7aac5a92581f20f80ce632e6b5a4 \
+ --hash=sha256:b0a645a9156dc7cb5d3a1f0d4bab66db287fcb8e0430bdd4664a095ea16414ba
+ # via yq
+xmltodict==0.13.0 \
+ --hash=sha256:341595a488e3e01a85a9d8911d8912fd922ede5fecc4dce437eb4b6c8d037e56 \
+ --hash=sha256:aa89e8fd76320154a40d19a0df04a4695fb9dc5ba977cbb68ab3e4eb225e7852
+ # via yq
+yq==3.2.3 \
+ --hash=sha256:29c8fe1d36b4f64163f4d01314c6ae217539870f610216dee6025dfb5eafafb1 \
+ --hash=sha256:b50c91894dad9894d1d36ea77d5722d5495cac9482d2351e55089360a90709ae
+ # via -r verify-requirements.in
diff --git a/.github/workflows/workflow-complete.yml b/.github/workflows/workflow-complete.yml
deleted file mode 100644
index e81503bcca993..0000000000000
--- a/.github/workflows/workflow-complete.yml
+++ /dev/null
@@ -1,62 +0,0 @@
-name: Workflow complete
-# This workflow is only required for externally triggered jobs that have manually
-# set the check status for a commit/PR
-
-permissions:
- contents: read
-
-on:
- # Do not run untrusted code here
- workflow_run:
- workflows:
- - Publish & verify
- types:
- - completed
-
-jobs:
- complete:
- if: ${{ github.actor == 'trigger-workflow-envoy[bot]' }}
- runs-on: ubuntu-22.04
- permissions:
- statuses: write
- steps:
- - name: 'Download artifact'
- uses: actions/github-script@v6
- with:
- script: |
- let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
- owner: context.repo.owner,
- repo: context.repo.repo,
- run_id: context.payload.workflow_run.id,
- });
- let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => {
- return artifact.name == "state_sha"
- })[0];
- let download = await github.rest.actions.downloadArtifact({
- owner: context.repo.owner,
- repo: context.repo.repo,
- artifact_id: matchArtifact.id,
- archive_format: 'zip',
- });
- let fs = require('fs');
- fs.writeFileSync(`${process.env.GITHUB_WORKSPACE}/state_sha.zip`, Buffer.from(download.data));
-
- - run: |
- set -e
- unzip state_sha.zip
- STATE_SHA="$(cat state_sha)"
- echo "state_sha=$STATE_SHA" >> "$GITHUB_OUTPUT"
- STATE="${{ github.event.workflow_run.conclusion }}"
- if [[ ${STATE} != "success" ]]; then
- STATE=failure
- fi
- echo "state=${STATE}" >> "$GITHUB_OUTPUT"
- id: job
- - name: Complete status check
- uses: envoyproxy/toolshed/gh-actions/status@actions-v0.0.10
- with:
- authToken: ${{ secrets.GITHUB_TOKEN }}
- context: Verify/examples
- state: ${{ steps.job.outputs.state }}
- sha: ${{ steps.job.outputs.state_sha }}
- target_url: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.event.workflow_run.id }}
diff --git a/.gitignore b/.gitignore
index de313efb4416e..6aad749804db6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -55,3 +55,5 @@ bazel.output.txt
tools/dev/src
distribution/custom
examples/websocket/certs
+/contrib/golang/**/test_data/go.sum
+/contrib/golang/**/test_data/*/go.sum
diff --git a/BUILD b/BUILD
index 8e5e07c3073c0..3b48868fd6f31 100644
--- a/BUILD
+++ b/BUILD
@@ -1,11 +1,20 @@
+load("//bazel:envoy_build_system.bzl", "envoy_package")
+load("//tools/python:namespace.bzl", "envoy_py_namespace")
+
licenses(["notice"]) # Apache 2
+envoy_package()
+
+envoy_py_namespace()
+
exports_files([
"VERSION.txt",
"API_VERSION.txt",
".clang-format",
"pytest.ini",
".coveragerc",
+ "CODEOWNERS",
+ "OWNERS.md",
])
alias(
diff --git a/CODEOWNERS b/CODEOWNERS
index d169a56f97ad8..a8927fdf68680 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -361,6 +361,8 @@ extensions/filters/http/oauth2 @derekargueta @mattklein123
/contrib/vcl/ @florincoras @KfreeZ
/contrib/hyperscan/ @zhxie @soulxu
/contrib/language/ @realtimetodie @realtimetodie
-/contrib/dlb/ @mattklein123 @daixiang0
+# TODO(phlax): move this extension (https://github.com/envoyproxy/envoy/issues/29550)
+/contrib/network/connection_balance/dlb @mattklein123 @daixiang0
/contrib/qat/ @giantcroc @soulxu
/contrib/generic_proxy/ @wbpcode @soulxu @zhaohuabing @rojkov @htuch
+/contrib/mcp_sse_stateful_session/ @jue-yin @UNOWNED
diff --git a/VERSION.txt b/VERSION.txt
index 5db08bf2dc579..127aeda7e58ae 100644
--- a/VERSION.txt
+++ b/VERSION.txt
@@ -1 +1 @@
-1.27.0
+1.27.7
diff --git a/api/BUILD b/api/BUILD
index 201c89aaed00e..6671b20fcc817 100644
--- a/api/BUILD
+++ b/api/BUILD
@@ -73,10 +73,13 @@ proto_library(
visibility = ["//visibility:public"],
deps = [
"//contrib/envoy/extensions/filters/http/dynamo/v3:pkg",
+ "//contrib/envoy/extensions/filters/http/http_dubbo_transcoder/v3:pkg",
"//contrib/envoy/extensions/filters/http/golang/v3alpha:pkg",
"//contrib/envoy/extensions/filters/http/language/v3alpha:pkg",
+ "//contrib/envoy/extensions/filters/http/mcp_sse_stateful_session/v3alpha:pkg",
"//contrib/envoy/extensions/filters/http/squash/v3:pkg",
"//contrib/envoy/extensions/filters/http/sxg/v3alpha:pkg",
+ "//contrib/envoy/extensions/filters/http/llm_inference/v3:pkg",
"//contrib/envoy/extensions/filters/network/client_ssl_auth/v3:pkg",
"//contrib/envoy/extensions/filters/network/generic_proxy/action/v3:pkg",
"//contrib/envoy/extensions/filters/network/generic_proxy/codecs/dubbo/v3:pkg",
@@ -92,6 +95,7 @@ proto_library(
"//contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha:pkg",
"//contrib/envoy/extensions/filters/network/sip_proxy/tra/v3alpha:pkg",
"//contrib/envoy/extensions/filters/network/sip_proxy/v3alpha:pkg",
+ "//contrib/envoy/extensions/http/mcp_sse_stateful_session/envelope/v3alpha:pkg",
"//contrib/envoy/extensions/matching/input_matchers/hyperscan/v3alpha:pkg",
"//contrib/envoy/extensions/network/connection_balance/dlb/v3alpha:pkg",
"//contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha:pkg",
diff --git a/api/CONTRIBUTING.md b/api/CONTRIBUTING.md
index a1e61a7072c45..0ff244623984e 100644
--- a/api/CONTRIBUTING.md
+++ b/api/CONTRIBUTING.md
@@ -23,19 +23,19 @@ documentation.
The documentation can be built locally in the root of https://github.com/envoyproxy/envoy via:
```
-docs/build.sh
+ci/do_ci.sh docs
```
To skip configuration examples validation:
```
-SPHINX_SKIP_CONFIG_VALIDATION=true docs/build.sh
+SPHINX_SKIP_CONFIG_VALIDATION=true ci/do_ci.sh docs
```
Or to use a hermetic Docker container:
```
-./ci/run_envoy_docker.sh './ci/do_ci.sh docs'
+./ci/run_envoy_docker.sh 'ci/do_ci.sh docs'
```
This process builds RST documentation directly from the proto files, merges it with the static RST
diff --git a/api/bazel/BUILD b/api/bazel/BUILD
index 63651c1e5a48e..5ac7a0e55c365 100644
--- a/api/bazel/BUILD
+++ b/api/bazel/BUILD
@@ -1,5 +1,5 @@
+load("@envoy_toolshed//:macros.bzl", "json_data")
load("@io_bazel_rules_go//proto:compiler.bzl", "go_proto_compiler")
-load(":utils.bzl", "json_data")
load(":repository_locations.bzl", "REPOSITORY_LOCATIONS_SPEC")
load(":repository_locations_utils.bzl", "load_repository_locations_spec")
load(
diff --git a/api/bazel/repositories.bzl b/api/bazel/repositories.bzl
index 0bfd9b81063ce..c6ab16fc1fbfa 100644
--- a/api/bazel/repositories.bzl
+++ b/api/bazel/repositories.bzl
@@ -24,6 +24,7 @@ def api_dependencies():
external_http_archive(
name = "com_google_googleapis",
)
+
external_http_archive(
name = "com_github_cncf_udpa",
)
@@ -55,6 +56,10 @@ def api_dependencies():
name = "com_github_chrusty_protoc_gen_jsonschema",
)
+ external_http_archive(
+ name = "envoy_toolshed",
+ )
+
PROMETHEUSMETRICS_BUILD_CONTENT = """
load("@envoy_api//bazel:api_build_system.bzl", "api_cc_py_proto_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl
index 24682a66ac10e..4de08fe6d33d6 100644
--- a/api/bazel/repository_locations.bzl
+++ b/api/bazel/repository_locations.bzl
@@ -17,14 +17,14 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_desc = "protoc plugin to generate polyglot message validators",
project_url = "https://github.com/bufbuild/protoc-gen-validate",
use_category = ["api"],
- sha256 = "f1ec013cfdfffa7a17d75b55d41265dad47d24e0e9d86c02311562e15be52da9",
- version = "1.0.1",
+ sha256 = "0b1b1ea8c248dce8c7592dc1a93e4adebd116f0d68123f8eb34251e7ce410866",
+ version = "1.0.2",
urls = ["https://github.com/bufbuild/protoc-gen-validate/archive/refs/tags/v{version}.zip"],
strip_prefix = "protoc-gen-validate-{version}",
- release_date = "2023-05-09",
+ release_date = "2023-06-26",
implied_untracked_deps = [
"com_github_iancoleman_strcase",
- "com_github_lyft_protoc_gen_star",
+ "com_github_lyft_protoc_gen_star_v2",
"com_github_spf13_afero",
"org_golang_google_genproto",
"org_golang_x_text",
@@ -151,4 +151,18 @@ REPOSITORY_LOCATIONS_SPEC = dict(
use_category = ["build"],
release_date = "2023-05-30",
),
+ envoy_toolshed = dict(
+ project_name = "envoy_toolshed",
+ project_desc = "Tooling, libraries, runners and checkers for Envoy proxy's CI",
+ project_url = "https://github.com/envoyproxy/toolshed",
+ version = "0.1.1",
+ sha256 = "ee759b57270a2747f3f2a3d6ecaad63b834dd9887505a9f1c919d72429dbeffd",
+ strip_prefix = "toolshed-bazel-v{version}/bazel",
+ urls = ["https://github.com/envoyproxy/toolshed/archive/bazel-v{version}.tar.gz"],
+ use_category = ["build"],
+ release_date = "2023-10-21",
+ cpe = "N/A",
+ license = "Apache-2.0",
+ license_url = "https://github.com/envoyproxy/envoy/blob/bazel-v{version}/LICENSE",
+ ),
)
diff --git a/api/bazel/utils.bzl b/api/bazel/utils.bzl
deleted file mode 100644
index 0961f00eb446a..0000000000000
--- a/api/bazel/utils.bzl
+++ /dev/null
@@ -1,18 +0,0 @@
-load("@bazel_skylib//rules:write_file.bzl", "write_file")
-
-def json_data(
- name,
- data,
- visibility = ["//visibility:public"],
- **kwargs):
- """Write a bazel object to a file
-
- The provided `data` object should be json serializable.
- """
- write_file(
- name = name,
- out = "%s.json" % name,
- content = json.encode(data).split("\n"),
- visibility = visibility,
- **kwargs
- )
diff --git a/api/contrib/envoy/extensions/custom_cluster_plugins/cluster_fallback/v3/BUILD b/api/contrib/envoy/extensions/custom_cluster_plugins/cluster_fallback/v3/BUILD
new file mode 100644
index 0000000000000..ee92fb652582e
--- /dev/null
+++ b/api/contrib/envoy/extensions/custom_cluster_plugins/cluster_fallback/v3/BUILD
@@ -0,0 +1,9 @@
+# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
+
+load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
+
+licenses(["notice"]) # Apache 2
+
+api_proto_package(
+ deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
+)
diff --git a/api/contrib/envoy/extensions/custom_cluster_plugins/cluster_fallback/v3/cluster_fallback.proto b/api/contrib/envoy/extensions/custom_cluster_plugins/cluster_fallback/v3/cluster_fallback.proto
new file mode 100644
index 0000000000000..2734e32a0ddcb
--- /dev/null
+++ b/api/contrib/envoy/extensions/custom_cluster_plugins/cluster_fallback/v3/cluster_fallback.proto
@@ -0,0 +1,31 @@
+syntax = "proto3";
+
+package envoy.extensions.custom_cluster_plugins.cluster_fallback.v3;
+
+import "udpa/annotations/sensitive.proto";
+import "udpa/annotations/status.proto";
+import "validate/validate.proto";
+
+option java_package = "io.envoyproxy.envoy.extensions.custom_cluster_plugins.cluster_fallback.v3";
+option java_outer_classname = "ClusterFallbackProto";
+option java_multiple_files = true;
+option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/custom_cluster_plugins/cluster_fallback/v3;cluster_fallbackv3";
+option (udpa.annotations.file_status).package_version_status = ACTIVE;
+
+message ClusterFallbackConfig {
+ message ClusterConfig {
+ string routing_cluster = 1;
+
+ repeated string fallback_clusters = 2;
+ }
+
+ message WeightedClusterConfig {
+ repeated ClusterConfig config = 1;
+ }
+
+ oneof config_specifier {
+ ClusterConfig cluster_config = 1;
+
+ WeightedClusterConfig weighted_cluster_config = 2;
+ }
+}
diff --git a/api/contrib/envoy/extensions/filters/http/http_dubbo_transcoder/v3/BUILD b/api/contrib/envoy/extensions/filters/http/http_dubbo_transcoder/v3/BUILD
new file mode 100644
index 0000000000000..ee92fb652582e
--- /dev/null
+++ b/api/contrib/envoy/extensions/filters/http/http_dubbo_transcoder/v3/BUILD
@@ -0,0 +1,9 @@
+# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
+
+load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
+
+licenses(["notice"]) # Apache 2
+
+api_proto_package(
+ deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
+)
diff --git a/api/contrib/envoy/extensions/filters/http/http_dubbo_transcoder/v3/http_dubbo_transcoder.proto b/api/contrib/envoy/extensions/filters/http/http_dubbo_transcoder/v3/http_dubbo_transcoder.proto
new file mode 100644
index 0000000000000..bde78686e4b3c
--- /dev/null
+++ b/api/contrib/envoy/extensions/filters/http/http_dubbo_transcoder/v3/http_dubbo_transcoder.proto
@@ -0,0 +1,120 @@
+syntax = "proto3";
+
+package envoy.extensions.filters.http.http_dubbo_transcoder.v3;
+
+import "google/protobuf/wrappers.proto";
+
+import "udpa/annotations/status.proto";
+import "udpa/annotations/versioning.proto";
+import "validate/validate.proto";
+
+option java_package = "io.envoyproxy.envoy.extensions.filters.http.http_dubbo_transcoder.v3";
+option java_outer_classname = "HttpDubboTranscoderProto";
+option java_multiple_files = true;
+option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/http_dubbo_transcoder/v3;http_dubbo_transcoderv3";
+option (udpa.annotations.file_status).package_version_status = ACTIVE;
+
+// [#protodoc-title: HTTP Dubbo transcoder]
+// HTTP Dubbo transcoder :ref:`configuration overview `.
+// [#extension: envoy.filters.http.http_dubbo_transcoder]
+message HttpDubboTranscoder {
+ enum UrlUnescapeSpec {
+ // URL path parameters will not decode RFC 6570 reserved characters.
+ // For example, segment `%2f%23/%20%2523` is unescaped to `%2f%23/ %23`.
+ ALL_CHARACTERS_EXCEPT_RESERVED = 0;
+
+ // URL path parameters will be fully URI-decoded except in
+ // cases of single segment matches in reserved expansion, where "%2F" will be
+ // left encoded.
+ // For example, segment `%2f%23/%20%2523` is unescaped to `%2f#/ %23`.
+ ALL_CHARACTERS_EXCEPT_SLASH = 1;
+
+ // URL path parameters will be fully URI-decoded.
+ // For example, segment `%2f%23/%20%2523` is unescaped to `/#/ %23`.
+ ALL_CHARACTERS = 2;
+ }
+
+ message RequestValidateOptions {
+ // By default this transcoder will pass through requests that contain unknown query parameters;
+ // if this option is set to true, such requests will be rejected with 400 Bad Request.
+ bool reject_unknown_query_parameters = 1;
+
+ bool reject_unknown_method = 2;
+ }
+
+ message DubboMethodMapping {
+ enum MatchHttpMethodSpec {
+ ALL_GET = 0;
+ ALL_POST = 1;
+ ALL_PUT = 2;
+ ALL_DELETE = 3;
+ ALL_PATCH = 4;
+ }
+
+ message ParameterMapping {
+ enum ExtractKeySpec {
+ ALL_QUERY_PARAMETER = 0;
+ ALL_HEADER = 1;
+ ALL_PATH = 2;
+ ALL_BODY = 3;
+ }
+
+ ExtractKeySpec extract_key_spec = 1;
+
+ string extract_key = 2;
+
+ string mapping_type = 3;
+ }
+
+ message PathMatcher {
+ string match_pattern = 1;
+
+ MatchHttpMethodSpec match_http_method_spec = 2;
+ }
+
+ message PassthroughSetting {
+ message PassthroughHeaders {
+ repeated string keys = 1;
+ }
+
+ oneof headers_setting {
+ bool passthrough_all_headers = 1;
+
+ PassthroughHeaders passthrough_headers = 2;
+ }
+ }
+
+ string name = 1 [(validate.rules).string = {min_len: 1}];
+
+ PathMatcher path_matcher = 2;
+
+ repeated ParameterMapping parameter_mapping = 3;
+
+ PassthroughSetting passthrough_setting = 4;
+ }
+
+ message DubboServiceMapping {
+ string name = 1 [(validate.rules).string = {min_len: 1}];
+
+ string version = 2;
+
+ repeated DubboMethodMapping method_mapping = 3;
+
+ string group = 4;
+ }
+
+ // Configure the behavior when handling requests that cannot be transcoded.
+ //
+ // By default, the transcoder will silently pass through HTTP requests that are malformed.
+ // This includes requests with unknown query parameters, unregistered paths, etc.
+ RequestValidateOptions request_validation_options = 2;
+
+ // URL unescaping policy.
+ // This spec is only applied when extracting variable with multiple segments in the URL path.
+ // For example, in case of `/foo/{x=*}/bar/{y=prefix/*}/{z=**}` `x` variable is single segment and `y` and `z` are multiple segments.
+ // For a path with `/foo/first/bar/prefix/second/third/fourth`, `x=first`, `y=prefix/second`, `z=third/fourth`.
+ // If this setting is not specified, the value defaults to :ref:`ALL_CHARACTERS_EXCEPT_RESERVED`.
+ UrlUnescapeSpec url_unescape_spec = 3 [(validate.rules).enum = {defined_only: true}];
+
+ repeated DubboServiceMapping services_mapping = 4;
+}
diff --git a/api/contrib/envoy/extensions/filters/http/llm_inference/v3/BUILD b/api/contrib/envoy/extensions/filters/http/llm_inference/v3/BUILD
new file mode 100644
index 0000000000000..ee92fb652582e
--- /dev/null
+++ b/api/contrib/envoy/extensions/filters/http/llm_inference/v3/BUILD
@@ -0,0 +1,9 @@
+# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
+
+load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
+
+licenses(["notice"]) # Apache 2
+
+api_proto_package(
+ deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
+)
diff --git a/api/contrib/envoy/extensions/filters/http/llm_inference/v3/llm_inference.proto b/api/contrib/envoy/extensions/filters/http/llm_inference/v3/llm_inference.proto
new file mode 100644
index 0000000000000..d83e78862095a
--- /dev/null
+++ b/api/contrib/envoy/extensions/filters/http/llm_inference/v3/llm_inference.proto
@@ -0,0 +1,30 @@
+syntax = "proto3";
+
+package envoy.extensions.filters.http.llm_inference.v3;
+
+import "udpa/annotations/status.proto";
+import "validate/validate.proto";
+
+option java_package = "io.envoyproxy.envoy.extensions.filters.http.llm_inference.v3";
+option java_outer_classname = "LlmInferenceProto";
+option java_multiple_files = true;
+option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/llm_inference/v3;llm_inferencev3";
+option (udpa.annotations.file_status).package_version_status = ACTIVE;
+
+message modelParameter {
+ int32 n_threads = 1;
+
+ int32 n_parallel = 2;
+
+ map chat_modelpath = 3;
+
+ map embedding_modelpath = 4;
+}
+
+message modelChosen {
+ string usemodel = 1;
+
+ int32 first_byte_timeout = 2;
+
+ int32 inference_timeout = 3;
+}
diff --git a/examples/grpc-bridge/server/kv/go.mod b/api/contrib/envoy/extensions/filters/http/mcp_proxy/BUILD
similarity index 100%
rename from examples/grpc-bridge/server/kv/go.mod
rename to api/contrib/envoy/extensions/filters/http/mcp_proxy/BUILD
diff --git a/api/contrib/envoy/extensions/filters/http/mcp_sse_stateful_session/v3alpha/BUILD b/api/contrib/envoy/extensions/filters/http/mcp_sse_stateful_session/v3alpha/BUILD
new file mode 100644
index 0000000000000..1c1a6f6b44235
--- /dev/null
+++ b/api/contrib/envoy/extensions/filters/http/mcp_sse_stateful_session/v3alpha/BUILD
@@ -0,0 +1,12 @@
+# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
+
+load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
+
+licenses(["notice"]) # Apache 2
+
+api_proto_package(
+ deps = [
+ "//envoy/config/core/v3:pkg",
+ "@com_github_cncf_udpa//udpa/annotations:pkg",
+ ],
+)
diff --git a/api/contrib/envoy/extensions/filters/http/mcp_sse_stateful_session/v3alpha/mcp_sse_stateful_session.proto b/api/contrib/envoy/extensions/filters/http/mcp_sse_stateful_session/v3alpha/mcp_sse_stateful_session.proto
new file mode 100644
index 0000000000000..cdb4d16dbbbe0
--- /dev/null
+++ b/api/contrib/envoy/extensions/filters/http/mcp_sse_stateful_session/v3alpha/mcp_sse_stateful_session.proto
@@ -0,0 +1,46 @@
+syntax = "proto3";
+
+package envoy.extensions.filters.http.mcp_sse_stateful_session.v3alpha;
+
+import "envoy/config/core/v3/extension.proto";
+
+import "udpa/annotations/status.proto";
+import "validate/validate.proto";
+
+option java_package = "io.envoyproxy.envoy.extensions.filters.http.mcp_sse_stateful_session.v3alpha";
+option java_outer_classname = "McpSseStatefulSessionProto";
+option java_multiple_files = true;
+option go_package = "github.com/envoyproxy/go-control-plane/contrib/envoy/extensions/filters/http/mcp_sse_stateful_session/v3alpha";
+option (udpa.annotations.file_status).package_version_status = ACTIVE;
+
+// [#protodoc-title: Model Context Protocol (MCP) server-sent events (SSE) stateful session filter]
+// MCP SSE Stateful session :ref:`configuration overview `.
+// [#extension: envoy.filters.http.mcp_sse_stateful_session]
+
+//
+message McpSseStatefulSession {
+ // Specifies the implementation of session state. This session state is used to store and retrieve the address of the
+ // upstream host assigned to the session.
+ //
+ // [#extension-category: envoy.http.mcp_sse_stateful_session]
+ config.core.v3.TypedExtensionConfig session_state = 1;
+
+ // Determines whether the HTTP request must be strictly routed to the requested destination. When set to ``true``,
+ // if the requested destination is unavailable, Envoy will return a 503 status code. The default value is ``false``,
+ // which allows Envoy to fall back to its load balancing mechanism. In this case, if the requested destination is not
+ // found, the request will be routed according to the load balancing algorithm.
+ bool strict = 2;
+}
+
+message McpSseStatefulSessionPerRoute {
+ oneof override {
+ option (validate.required) = true;
+
+ // Disable the stateful session filter for this particular vhost or route. If disabled is
+ // specified in multiple per-filter-configs, the most specific one will be used.
+ bool disabled = 1 [(validate.rules).bool = {const: true}];
+
+ // Per-route stateful session configuration that can be served by RDS or static route table.
+ McpSseStatefulSession mcp_sse_stateful_session = 2;
+ }
+}
diff --git a/api/contrib/envoy/extensions/http/mcp_sse_stateful_session/envelope/v3alpha/BUILD b/api/contrib/envoy/extensions/http/mcp_sse_stateful_session/envelope/v3alpha/BUILD
new file mode 100644
index 0000000000000..ee92fb652582e
--- /dev/null
+++ b/api/contrib/envoy/extensions/http/mcp_sse_stateful_session/envelope/v3alpha/BUILD
@@ -0,0 +1,9 @@
+# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
+
+load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
+
+licenses(["notice"]) # Apache 2
+
+api_proto_package(
+ deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
+)
diff --git a/api/contrib/envoy/extensions/http/mcp_sse_stateful_session/envelope/v3alpha/envelope.proto b/api/contrib/envoy/extensions/http/mcp_sse_stateful_session/envelope/v3alpha/envelope.proto
new file mode 100644
index 0000000000000..67a85d00debaf
--- /dev/null
+++ b/api/contrib/envoy/extensions/http/mcp_sse_stateful_session/envelope/v3alpha/envelope.proto
@@ -0,0 +1,75 @@
+syntax = "proto3";
+
+package envoy.extensions.http.mcp_sse_stateful_session.envelope.v3alpha;
+
+import "udpa/annotations/status.proto";
+import "validate/validate.proto";
+
+option java_package = "io.envoyproxy.envoy.extensions.http.mcp_sse_stateful_session.envelope.v3alpha";
+option java_outer_classname = "EnvelopeProto";
+option java_multiple_files = true;
+option go_package = "github.com/envoyproxy/go-control-plane/contrib/envoy/extensions/http/mcp_sse_stateful_session/envelope/v3alpha";
+option (udpa.annotations.file_status).package_version_status = ACTIVE;
+
+// [#protodoc-title: Model Context Protocol (MCP) server-sent events (SSE) stateful session extension]
+
+// The extension implements MCP 241105 spec for SSE-based session tracking.
+// It enables Envoy to handle session context in SSE event streams, allowing session ID
+// and upstream host to be encoded/decoded as required by the protocol.
+//
+// When processing the response from the upstream, Envoy will check if the SSE data stream contains
+// the session context. If the SSE data stream contains the session context, Envoy will join it and
+// the upstream host as new session context using a separator.
+//
+// When processing the request from the downstream, Envoy will check if the url query params contain
+// the session context. If the request contains the session context, Envoy will strip the
+// upstream host from the session context.
+// [#extension: envoy.http.mcp_sse_stateful_session.envelope]
+message EnvelopeSessionState {
+ // The query parameter name used to track the session state in SSE data streams.
+ // If the query parameter specified by this field is present in the SSE data stream,
+ // the upstream host address will be encoded in following format:
+ //
+ // .. code-block:: none
+ //
+ // sessionId={original_value}.{encoded_host}
+ //
+ // Where {encoded_host} is the Base64Url encoded host address.
+ //
+ // When processing the request from downstream, this extension will:
+ // 1. Split the value at the last dot
+ // 2. Decode the host address for upstream routing
+ // 3. Keep only the original session ID in the request
+ //
+ // For example:
+ //
+ // .. code-block:: none
+ //
+ // GET /path?sessionId=original_session_id.{encoded_host}
+ // # after processing:
+ // GET /path?sessionId=original_session_id
+ //
+ // Note: Uses Base64Url encoding for the host address and '.' as separator.
+ string param_name = 1 [(validate.rules).string = {min_len: 1}];
+
+ // The list of patterns to match the chunk end in the SSE data stream.
+ // Any of these patterns matched will be considered as the end of a chunk.
+ // The recommended value is ["\r\n\r\n", "\n\n", "\r\r"].
+ // According to the HTML standard, the end of a server-sent events chunk can be:
+ // - \r\n\r\n (double Carriage-Return Line-Feed)
+ // - \n\n (double Line-Feed)
+ // - \r\r (double Carriage-Return)
+ // https://html.spec.whatwg.org/multipage/server-sent-events.html#parsing-an-event-stream
+ // Customized patterns can be added to match the chunk end pattern.
+ repeated string chunk_end_patterns = 2 [(validate.rules).repeated = {
+ min_items: 1
+ items {string {min_len: 1}}
+ }];
+
+ // The maximum size of the pending chunk.
+ // If the pending chunk size is greater than this value, this filter will be disabled.
+ // This is to prevent the filter from consuming too much memory when the SSE data stream is large.
+ // In normal cases, the sessionId should be the initialize message and be in a small chunk.
+ // The default value is 4KB.
+ int32 max_pending_chunk_size = 3;
+}
diff --git a/api/contrib/envoy/extensions/upstreams/http/dubbo_tcp/v3/BUILD b/api/contrib/envoy/extensions/upstreams/http/dubbo_tcp/v3/BUILD
new file mode 100644
index 0000000000000..ee92fb652582e
--- /dev/null
+++ b/api/contrib/envoy/extensions/upstreams/http/dubbo_tcp/v3/BUILD
@@ -0,0 +1,9 @@
+# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
+
+load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
+
+licenses(["notice"]) # Apache 2
+
+api_proto_package(
+ deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
+)
diff --git a/api/contrib/envoy/extensions/upstreams/http/dubbo_tcp/v3/tcp_connection_pool.proto b/api/contrib/envoy/extensions/upstreams/http/dubbo_tcp/v3/tcp_connection_pool.proto
new file mode 100644
index 0000000000000..4fc0440a22615
--- /dev/null
+++ b/api/contrib/envoy/extensions/upstreams/http/dubbo_tcp/v3/tcp_connection_pool.proto
@@ -0,0 +1,18 @@
+syntax = "proto3";
+
+package envoy.extensions.upstreams.http.dubbo_tcp.v3;
+
+import "udpa/annotations/status.proto";
+
+option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.dubbo_tcp.v3";
+option java_outer_classname = "TcpConnectionPoolProto";
+option java_multiple_files = true;
+option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/upstreams/http/dubbo_tcp/v3;dubbo_tcpv3";
+option (udpa.annotations.file_status).package_version_status = ACTIVE;
+
+// [#protodoc-title: Tcp Connection Pool]
+
+// A connection pool which forwards downstream HTTP as TCP to upstream.
+// [#extension: envoy.upstreams.http.tcp]
+message DubboTcpConnectionPoolProto {
+}
diff --git a/api/envoy/api/v2/core/health_check.proto b/api/envoy/api/v2/core/health_check.proto
index 347ac9c96b909..0b50677829cf8 100644
--- a/api/envoy/api/v2/core/health_check.proto
+++ b/api/envoy/api/v2/core/health_check.proto
@@ -306,4 +306,6 @@ message HealthCheck {
// This allows overriding the cluster TLS settings, just for health check connections.
TlsOptions tls_options = 21;
+
+ bool store_metrics = 127;
}
diff --git a/api/envoy/config/core/v3/health_check.proto b/api/envoy/config/core/v3/health_check.proto
index 2ec258d8ac095..2b6bce6ba61b2 100644
--- a/api/envoy/config/core/v3/health_check.proto
+++ b/api/envoy/config/core/v3/health_check.proto
@@ -426,4 +426,6 @@ message HealthCheck {
// the cluster's :ref:`transport socket `
// will be used for health check socket configuration.
google.protobuf.Struct transport_socket_match_criteria = 23;
+
+ bool store_metrics = 127;
}
diff --git a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto
index e87c9478db635..00ce562f8dfb9 100644
--- a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto
+++ b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto
@@ -391,8 +391,7 @@ message FilterStateRule {
// A map of string keys to requirements. The string key is the string value
// in the FilterState with the name specified in the *name* field above.
- map
- requires = 3;
+ map requires = 3;
}
// This is the Envoy HTTP filter config for JWT authentication.
diff --git a/api/envoy/config/listener/v3/listener_components.proto b/api/envoy/config/listener/v3/listener_components.proto
index 150a6851d523e..2adb8bc2c80ce 100644
--- a/api/envoy/config/listener/v3/listener_components.proto
+++ b/api/envoy/config/listener/v3/listener_components.proto
@@ -45,7 +45,6 @@ message Filter {
// Configuration source specifier for an extension configuration discovery
// service. In case of a failure and without the default configuration, the
// listener closes the connections.
- // [#not-implemented-hide:]
core.v3.ExtensionConfigSource config_discovery = 5;
}
}
diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto
index 014bb0d9261ab..e545a0137ed23 100644
--- a/api/envoy/config/route/v3/route_components.proto
+++ b/api/envoy/config/route/v3/route_components.proto
@@ -219,6 +219,23 @@ message VirtualHost {
// It takes precedence over the route config mirror policy entirely.
// That is, policies are not merged, the most specific non-empty one becomes the mirror policies.
repeated RouteAction.RequestMirrorPolicy request_mirror_policies = 22;
+
+ // If non-empty, a list of server names (such as SNI for the TLS protocol) is used to determine
+ // whether this request is allowed to access this VirtualHost. If not allowed, 421 Misdirected Request will be returned.
+ //
+ // The server name can be matched with wildcard domains, i.e. ``www.example.com`` can be matched with
+ // ``www.example.com``, ``*.example.com`` and ``*.com``.
+ //
+ // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid.
+ //
+ // This is useful when exposing all virtual hosts to arbitrary HCM filters (such as using SRDS), and you want to make
+ // mTLS-protected routes invisible to requests with different SNIs.
+ //
+ // .. attention::
+ //
+ // See the :ref:`FAQ entry ` on how to configure SNI for more
+ // information.
+ repeated string allow_server_names = 101;
}
// A filter-defined action type.
@@ -367,6 +384,7 @@ message Route {
// multiple upstream clusters along with weights that indicate the percentage of
// traffic to be forwarded to each cluster. The router selects an upstream cluster based on the
// weights.
+// [#next-free-field: 102]
message WeightedCluster {
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.WeightedCluster";
@@ -494,6 +512,16 @@ message WeightedCluster {
// ` for how key names map to the underlying implementation.
string runtime_key_prefix = 2;
+ // Name of the cluster specifier plugin to use to determine the cluster for requests on this route.
+ // The cluster specifier plugin name must be defined in the associated
+ // :ref:`cluster specifier plugins `
+ // in the :ref:`name ` field.
+ string cluster_specifier_plugin = 100;
+
+ // Custom cluster specifier plugin configuration to use to determine the cluster for requests
+ // on this route.
+ ClusterSpecifierPlugin inline_cluster_specifier_plugin = 101;
+
oneof random_value_specifier {
// Specifies the header name that is used to look up the random value passed in the request header.
// This is used to ensure consistent cluster picking across multiple proxy levels for weighted traffic.
@@ -725,7 +753,7 @@ message CorsPolicy {
google.protobuf.BoolValue allow_private_network_access = 12;
}
-// [#next-free-field: 42]
+// [#next-free-field: 1001]
message RouteAction {
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction";
@@ -1377,6 +1405,8 @@ message RouteAction {
// Specifies the maximum stream duration for this route.
MaxStreamDuration max_stream_duration = 36;
+
+ InternalActiveRedirectPolicy internal_active_redirect_policy = 1000;
}
// HTTP retry :ref:`architecture overview `.
@@ -2385,6 +2415,109 @@ message InternalRedirectPolicy {
bool allow_cross_scheme_redirect = 4;
}
+// Redirects to the specified URI based on the response code.
+// [#next-free-field: 22]
+message InternalActiveRedirectPolicy {
+ // [#next-free-field: 23]
+ message RedirectPolicy {
+ // An internal redirect is not handled, unless the number of previous internal redirects that a
+ // downstream request has encountered is lower than this value.
+ // In the case where a downstream request is bounced among multiple routes by internal redirect,
+ // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy
+ // `
+ // will pass the redirect back to downstream.
+ //
+ // If not specified, at most one redirect will be followed.
+ google.protobuf.UInt32Value max_internal_redirects = 10;
+
+ // Defines what upstream response codes are allowed to trigger internal redirect.
+ // All response codes support redirection except 200.
+ repeated uint32 redirect_response_codes = 11 [(validate.rules).repeated = {max_items: 50}];
+
+ // The URI of the redirect.
+ oneof redirect_url_rewrite_specifier {
+ option (validate.required) = true;
+
+ string redirect_url = 12 [(validate.rules).string = {min_len: 1}];
+
+ type.matcher.v3.RegexMatchAndSubstitute redirect_url_rewrite_regex = 13;
+ }
+
+ // Specifies a list of predicates that are queried when an upstream response is deemed
+ // to trigger an internal redirect by all other criteria. Any predicate in the list can reject
+ // the redirect, causing the response to be proxied to downstream.
+ repeated core.v3.TypedExtensionConfig predicates = 14;
+
+ // Allow internal redirect to follow a target URI with a different scheme than the value of
+ // x-forwarded-proto. The default is false.
+ bool allow_cross_scheme_redirect = 15;
+
+ // HTTP headers to add to a local reply. This allows the response mapper to append, to add
+ // or to override headers of any local reply before it is sent to a downstream client.
+ repeated core.v3.HeaderValueOption request_headers_to_add = 16
+ [(validate.rules).repeated = {max_items: 1000}];
+
+ // Indicates that during forwarding, the host header will be swapped with
+ // this value.
+ string host_rewrite_literal = 17
+ [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];
+
+ // If true, the host name in the downstream request is used for redirection.
+ bool forced_use_original_host = 20;
+
+ bool forced_add_header_before_route_matcher = 22;
+ }
+
+ // An internal redirect is not handled, unless the number of previous internal redirects that a
+ // downstream request has encountered is lower than this value.
+ // In the case where a downstream request is bounced among multiple routes by internal redirect,
+ // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy
+ // `
+ // will pass the redirect back to downstream.
+ //
+ // If not specified, at most one redirect will be followed.
+ google.protobuf.UInt32Value max_internal_redirects = 1;
+
+ // Defines what upstream response codes are allowed to trigger internal redirect.
+ // All response codes support redirection except 200.
+ repeated uint32 redirect_response_codes = 2 [(validate.rules).repeated = {max_items: 50}];
+
+ // The URI of the redirect.
+ oneof redirect_url_rewrite_specifier {
+ // option (validate.required) = true;
+
+ string redirect_url = 7 [(validate.rules).string = {min_len: 1}];
+
+ type.matcher.v3.RegexMatchAndSubstitute redirect_url_rewrite_regex = 8;
+ }
+
+ // Specifies a list of predicates that are queried when an upstream response is deemed
+ // to trigger an internal redirect by all other criteria. Any predicate in the list can reject
+ // the redirect, causing the response to be proxied to downstream.
+ repeated core.v3.TypedExtensionConfig predicates = 4;
+
+ // Allow internal redirect to follow a target URI with a different scheme than the value of
+ // x-forwarded-proto. The default is false.
+ bool allow_cross_scheme_redirect = 5;
+
+ // HTTP headers to add to a local reply. This allows the response mapper to append, to add
+ // or to override headers of any local reply before it is sent to a downstream client.
+ repeated core.v3.HeaderValueOption request_headers_to_add = 6
+ [(validate.rules).repeated = {max_items: 1000}];
+
+ // Indicates that during forwarding, the host header will be swapped with
+ // this value.
+ string host_rewrite_literal = 9
+ [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];
+
+ // If true, the host name in the downstream request is used for redirection.
+ bool forced_use_original_host = 19;
+
+ bool forced_add_header_before_route_matcher = 21;
+
+ repeated RedirectPolicy policies = 18;
+}
+
// A simple wrapper for an HTTP filter config. This is intended to be used as a wrapper for the
// map value in
// :ref:`VirtualHost.typed_per_filter_config`,
diff --git a/api/envoy/extensions/filters/http/composite/v3/composite.proto b/api/envoy/extensions/filters/http/composite/v3/composite.proto
index 08a72e411b9f7..027c9322532cc 100644
--- a/api/envoy/extensions/filters/http/composite/v3/composite.proto
+++ b/api/envoy/extensions/filters/http/composite/v3/composite.proto
@@ -2,11 +2,14 @@ syntax = "proto3";
package envoy.extensions.filters.http.composite.v3;
+import "envoy/config/core/v3/config_source.proto";
import "envoy/config/core/v3/extension.proto";
import "xds/annotations/v3/status.proto";
+import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";
+import "validate/validate.proto";
option java_package = "io.envoyproxy.envoy.extensions.filters.http.composite.v3";
option java_outer_classname = "CompositeProto";
@@ -32,8 +35,30 @@ message Composite {
option (xds.annotations.v3.message_status).work_in_progress = true;
}
+// Configuration for an extension configuration discovery service with name.
+message DynamicConfig {
+ // The name of the extension configuration. It also serves as a resource name in ExtensionConfigDS.
+ string name = 1 [(validate.rules).string = {min_len: 1}];
+
+ // Configuration source specifier for an extension configuration discovery
+ // service. In case of a failure and without the default configuration,
+ // 500(Internal Server Error) will be returned.
+ config.core.v3.ExtensionConfigSource config_discovery = 2;
+}
+
// Composite match action (see :ref:`matching docs ` for more info on match actions).
// This specifies the filter configuration of the filter that the composite filter should delegate filter interactions to.
message ExecuteFilterAction {
- config.core.v3.TypedExtensionConfig typed_config = 1;
+ // Filter specific configuration which depends on the filter being
+ // instantiated. See the supported filters for further documentation.
+ // Only one of ``typed_config`` or ``dynamic_config`` can be set.
+ // [#extension-category: envoy.filters.http]
+ config.core.v3.TypedExtensionConfig typed_config = 1
+ [(udpa.annotations.field_migrate).oneof_promotion = "config_type"];
+
+ // Dynamic configuration of filter obtained via extension configuration discovery
+ // service.
+ // Only one of ``typed_config`` or ``dynamic_config`` can be set.
+ DynamicConfig dynamic_config = 2
+ [(udpa.annotations.field_migrate).oneof_promotion = "config_type"];
}
diff --git a/api/envoy/extensions/filters/http/custom_response/v3/custom_response.proto b/api/envoy/extensions/filters/http/custom_response/v3/custom_response.proto
index cd28640fefdac..5426e181de057 100644
--- a/api/envoy/extensions/filters/http/custom_response/v3/custom_response.proto
+++ b/api/envoy/extensions/filters/http/custom_response/v3/custom_response.proto
@@ -6,6 +6,7 @@ import "xds/annotations/v3/status.proto";
import "xds/type/matcher/v3/matcher.proto";
import "udpa/annotations/status.proto";
+import "validate/validate.proto";
option java_package = "io.envoyproxy.envoy.extensions.filters.http.custom_response.v3";
option java_outer_classname = "CustomResponseProto";
@@ -105,4 +106,16 @@ message CustomResponse {
// documentation for more information on the matcher trees.
// [#extension-category: envoy.http.custom_response]
xds.type.matcher.v3.Matcher custom_response_matcher = 1;
+
+ // Indicates whether the router filter should cache the body.
+ BufferSettings with_request_body = 101;
+}
+
+
+// Configuration for buffering the request data.
+message BufferSettings {
+ // Sets the maximum size of a message body that the filter will hold in memory.
+ // Exceeding this size does not result in a ``HTTP 413`` error; however, it prevents
+ // the full original body from being used during internal redirection.
+ uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}];
}
diff --git a/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto b/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto
index bf88896e70309..24f65514ee471 100644
--- a/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto
+++ b/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto
@@ -614,8 +614,7 @@ message FilterStateRule {
// A map of string keys to requirements. The string key is the string value
// in the FilterState with the name specified in the ``name`` field above.
- map
- requires = 3;
+ map requires = 3;
}
// This is the Envoy HTTP filter config for JWT authentication.
diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto
index f86be41f0493c..dadd19daa93cd 100644
--- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto
+++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto
@@ -870,6 +870,14 @@ message HttpConnectionManager {
// This should be set to `false` in cases where Envoy's view of the downstream address may not correspond to the
// actual client address, for example, if there's another proxy in front of the Envoy.
google.protobuf.BoolValue add_proxy_protocol_connection_state = 53;
+
+ // The timeout seconds configured here will be set in the "Keep-Alive" response header.
+ // For example, configuring 10s will return the response header "Connection: keep-alive" and "Keep-Alive: timeout=10".
+ // If not specified, the default is 0, which means this behavior is disabled.
+ // The "Keep-Alive" header field is recognized by Mozilla and Apache HTTPClient.
+ // Note that the "Connection" and "Keep-Alive" response headers will only be added when the downstream protocol is HTTP1.0 or HTTP1.1
+ // and it is not an upgrade connection scenario.
+ google.protobuf.Duration keepalive_header_timeout = 1058;
}
// The configuration to customize local reply returned by Envoy.
@@ -1052,11 +1060,34 @@ message ScopedRoutes {
}
}
+ message HostValueExtractor {
+ option (udpa.annotations.versioning).previous_message_type =
+ "envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder."
+ "FragmentBuilder.HostValueExtractor";
+
+ // The maximum number of host superset recomputes. If not specified, defaults to 100.
+ google.protobuf.UInt32Value max_recompute_num = 1;
+ }
+
+ message LocalPortValueExtractor {
+ option (udpa.annotations.versioning).previous_message_type =
+ "envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder."
+ "FragmentBuilder.LocalPortValueExtractor";
+ }
+
+
oneof type {
option (validate.required) = true;
// Specifies how a header field's value should be extracted.
HeaderValueExtractor header_value_extractor = 1;
+
+      // Extract the fragment value from the :authority header, and support recompute with the wildcard domains,
+ // i.e. ``www.example.com`` can be recomputed with ``*.example.com``, then ``*.com``, then ``*``.
+ HostValueExtractor host_value_extractor = 101;
+
+ // Extract the fragment value from local port of the connection.
+ LocalPortValueExtractor local_port_value_extractor = 102;
}
}
@@ -1094,6 +1125,8 @@ message ScopedRoutes {
// in this message.
ScopedRds scoped_rds = 5;
}
+
+ google.protobuf.BoolValue retry_other_scope_when_not_found = 101;
}
message ScopedRds {
diff --git a/api/envoy/extensions/http/custom_response/redirect_policy/v3/redirect_policy.proto b/api/envoy/extensions/http/custom_response/redirect_policy/v3/redirect_policy.proto
index 73cf7ed7a8645..35d235f2df02c 100644
--- a/api/envoy/extensions/http/custom_response/redirect_policy/v3/redirect_policy.proto
+++ b/api/envoy/extensions/http/custom_response/redirect_policy/v3/redirect_policy.proto
@@ -50,8 +50,17 @@ message RedirectPolicy {
// - `prefix_rewrite`
// - `regex_rewrite`
config.route.v3.RedirectAction redirect_action = 2;
+
+ string uri_from_response_header = 106 [(validate.rules).string = {min_len: 1}];
+
+ google.protobuf.BoolValue use_original_request_uri = 107;
}
+ google.protobuf.UInt32Value max_internal_redirects = 108;
+ google.protobuf.BoolValue keep_original_response_code = 109;
+ google.protobuf.BoolValue use_original_request_body = 110;
+ google.protobuf.BoolValue only_redirect_upstream_code = 111;
+
// The new response status code if specified. This is used to override the
// status code of the response from the new upstream if it is not an error status.
google.protobuf.UInt32Value status_code = 3 [(validate.rules).uint32 = {lte: 999 gte: 100}];
diff --git a/api/envoy/service/extension/v3/config_discovery.proto b/api/envoy/service/extension/v3/config_discovery.proto
index 5801f6946b565..c4d4b93a69777 100644
--- a/api/envoy/service/extension/v3/config_discovery.proto
+++ b/api/envoy/service/extension/v3/config_discovery.proto
@@ -18,11 +18,12 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
// [#protodoc-title: Extension config discovery service (ECDS)]
// A service that supports dynamic configuration updates for a specific filter.
-// Currently, ECDS is supported for HTTP filters and Listener filters. Please check
-// :ref:`Extension Config Discovery Service (ECDS) API `.
+// Currently, ECDS is supported for downstream network filters, HTTP filters and Listener filters.
+// Please check :ref:`Extension Config Discovery Service (ECDS) API `.
// The overall extension config discovery service works as follows:
//
-// 1. A filter (:ref:`Listener `
+// 1. A filter (:ref:`Network `,
+// :ref:`Listener `
// or :ref:`HTTP `)
// contains a :ref:`config_discovery ` configuration. This configuration
// includes a :ref:`config_source `,
diff --git a/api/versioning/BUILD b/api/versioning/BUILD
index 52f5060b54d37..80eef01359e7a 100644
--- a/api/versioning/BUILD
+++ b/api/versioning/BUILD
@@ -9,10 +9,14 @@ proto_library(
name = "active_protos",
visibility = ["//visibility:public"],
deps = [
- "//contrib/envoy/extensions/config/v3alpha:pkg",
+ "//contrib/envoy/extensions/custom_cluster_plugins/cluster_fallback/v3:pkg",
"//contrib/envoy/extensions/filters/http/dynamo/v3:pkg",
+ "//contrib/envoy/extensions/filters/http/http_dubbo_transcoder/v3:pkg",
+ "//contrib/envoy/extensions/config/v3alpha:pkg",
"//contrib/envoy/extensions/filters/http/golang/v3alpha:pkg",
"//contrib/envoy/extensions/filters/http/language/v3alpha:pkg",
+ "//contrib/envoy/extensions/filters/http/llm_inference/v3:pkg",
+ "//contrib/envoy/extensions/filters/http/mcp_sse_stateful_session/v3alpha:pkg",
"//contrib/envoy/extensions/filters/http/squash/v3:pkg",
"//contrib/envoy/extensions/filters/http/sxg/v3alpha:pkg",
"//contrib/envoy/extensions/filters/network/client_ssl_auth/v3:pkg",
@@ -30,10 +34,12 @@ proto_library(
"//contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha:pkg",
"//contrib/envoy/extensions/filters/network/sip_proxy/tra/v3alpha:pkg",
"//contrib/envoy/extensions/filters/network/sip_proxy/v3alpha:pkg",
+ "//contrib/envoy/extensions/http/mcp_sse_stateful_session/envelope/v3alpha:pkg",
"//contrib/envoy/extensions/matching/input_matchers/hyperscan/v3alpha:pkg",
"//contrib/envoy/extensions/network/connection_balance/dlb/v3alpha:pkg",
"//contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha:pkg",
"//contrib/envoy/extensions/private_key_providers/qat/v3alpha:pkg",
+ "//contrib/envoy/extensions/upstreams/http/dubbo_tcp/v3:pkg",
"//contrib/envoy/extensions/regex_engines/hyperscan/v3alpha:pkg",
"//contrib/envoy/extensions/router/cluster_specifier/golang/v3alpha:pkg",
"//contrib/envoy/extensions/vcl/v3alpha:pkg",
diff --git a/bazel/BUILD b/bazel/BUILD
index 71db4ba301e45..1398d50fd1ac6 100644
--- a/bazel/BUILD
+++ b/bazel/BUILD
@@ -1,11 +1,12 @@
-load("//bazel:envoy_build_system.bzl", "envoy_package")
-load("//bazel:envoy_internal.bzl", "envoy_select_force_libcpp")
-load("@envoy_api//bazel:utils.bzl", "json_data")
load("@bazel_skylib//lib:selects.bzl", "selects")
load("@bazel_skylib//rules:common_settings.bzl", "bool_flag")
-load(":repository_locations.bzl", "REPOSITORY_LOCATIONS_SPEC")
load("@envoy_api//bazel:repository_locations.bzl", API_REPOSITORY_LOCATIONS_SPEC = "REPOSITORY_LOCATIONS_SPEC")
load("@envoy_api//bazel:repository_locations_utils.bzl", "load_repository_locations_spec", "merge_dicts")
+load("@envoy_toolshed//:macros.bzl", "json_data")
+load("@envoy_toolshed//dependency:macros.bzl", "updater")
+load("//bazel:envoy_build_system.bzl", "envoy_package")
+load("//bazel:envoy_internal.bzl", "envoy_select_force_libcpp")
+load(":repository_locations.bzl", "REPOSITORY_LOCATIONS_SPEC")
licenses(["notice"]) # Apache 2
@@ -41,8 +42,8 @@ genrule(
outs = ["gnu_build_id.ldscript"],
cmd = """
echo --build-id=0x$$(
- grep BUILD_SCM_REVISION bazel-out/volatile-status.txt \\
- | sed 's/^BUILD_SCM_REVISION //') \\
+ grep -E "^BUILD_SCM_REVISION" bazel-out/volatile-status.txt \
+ | sed 's/^BUILD_SCM_REVISION //') \
> $@
""",
# Undocumented attr to depend on workspace status files.
@@ -55,8 +56,8 @@ genrule(
name = "raw_build_id",
outs = ["raw_build_id.ldscript"],
cmd = """
- grep BUILD_SCM_REVISION bazel-out/volatile-status.txt \\
- | sed 's/^BUILD_SCM_REVISION //' \\
+ grep -E "^BUILD_SCM_REVISION" bazel-out/volatile-status.txt \
+ | sed 's/^BUILD_SCM_REVISION //' \
| tr -d '\\n' \\
> $@
""",
@@ -673,6 +674,13 @@ config_setting(
define_values = {"FUZZING_ENGINE": "oss-fuzz"},
)
+# By default we enable the Higress build. If you want to build the
+# community version, build Envoy with the flag '--define higress=false'.
+config_setting(
+ name = "higress",
+ values = {"define": "higress=false"},
+)
+
alias(
name = "fuzzing_engine",
actual = select({
@@ -882,3 +890,28 @@ cc_library(
name = "python_headers",
visibility = ["//visibility:public"],
)
+
+# These can be run as follows:
+#
+# $ bazel run //bazel:update ENVOY_DEP NEW_VERSION
+# $ bazel run //bazel:api-update API_DEP NEW_VERSION
+updater(
+ name = "update",
+ data = ["//tools/dependency:check"],
+ dependencies = "//tools/dependency:filtered-dependencies",
+ post_script = ":version_update_post.sh",
+ pydict = True,
+ tags = ["skip_on_windows"],
+ version_file = ":repository_locations.bzl",
+)
+
+updater(
+ name = "api-update",
+ data = ["//tools/dependency:check"],
+ dependencies = "@envoy_api//bazel:repository_locations",
+ post_script = ":version_update_post.sh",
+ pydict = True,
+ tags = ["skip_on_windows"],
+ version_file = "@envoy_api//bazel:repository_locations.bzl",
+ version_path_replace = "external/envoy_api:api",
+)
diff --git a/bazel/README.md b/bazel/README.md
index 38cd9a9f0df35..34b0a75239d25 100644
--- a/bazel/README.md
+++ b/bazel/README.md
@@ -930,66 +930,14 @@ TEST_TMPDIR=/tmp tools/gen_compilation_database.py
# Running format linting without docker
-The easiest way to run the clang-format check/fix commands is to run them via
-docker, which helps ensure the right toolchain is set up. However you may prefer
-to run clang-format scripts on your workstation directly:
- * It's possible there is a speed advantage
- * Docker itself can sometimes go awry and you then have to deal with that
- * Type-ahead doesn't always work when waiting running a command through docker
-
-To run the tools directly, you must install the correct version of clang. This
-may change over time, check the version of clang in the docker image. You must
-also have 'buildifier' installed from the bazel distribution.
-
Note that if you run the `check_spelling.py` script you will need to have `aspell` installed.
-Edit the paths shown here to reflect the installation locations on your system:
-
-```shell
-export CLANG_FORMAT="$HOME/ext/clang+llvm-14.0.0-x86_64-linux-gnu-ubuntu-18.04/bin/clang-format"
-export BUILDIFIER_BIN="/usr/bin/buildifier"
-```
-
-A relatively easy way to use the correct `clang-format` in your host system is to copy the `clang-format` from the ci docker image.
-
-* Run the ci docker image
-
-```shell
-ci/run_envoy_docker.sh bash
-```
-
-* Get the docker container ID
-
-```shell
-dockerContainerID=$(docker ps | grep envoy-build-ubuntu | awk '{print $1}')
-```
-
-* Copy the `clang-format` to host machine
-
-```shell
-docker cp $dockerContainerID:/opt/llvm/bin/clang-format clang-format-ci
-```
-
-* Ensure that the copied `clang-format` is the default one, by ensuring it is in `$PATH`:
-
-```shell
-cp clang-format-ci /usr/local/bin/clang-format
-```
-
-Alternatively, if you are a non-root user, you can use a bin dir and add that to `$PATH`
-
-```shell
-mkdir bin
-mv clang-format-ci bin/clang-format
-export PATH=$PATH:$PWD/bin/
-```
-
-Once this is set up, you can run clang-format without docker:
+You can run clang-format directly, without docker:
```shell
-./tools/code_format/check_format.py check
+bazel run //tools/code_format:check_format -- check
./tools/spelling/check_spelling_pedantic.py check
-./tools/code_format/check_format.py fix
+bazel run //tools/code_format:check_format -- fix
./tools/spelling/check_spelling_pedantic.py fix
```
diff --git a/bazel/api_binding.bzl b/bazel/api_binding.bzl
index 65ed382836fcd..8d46d4c1827b8 100644
--- a/bazel/api_binding.bzl
+++ b/bazel/api_binding.bzl
@@ -13,7 +13,6 @@ def _default_envoy_api_impl(ctx):
]
for d in api_dirs:
ctx.symlink(ctx.path(ctx.attr.envoy_root).dirname.get_child(ctx.attr.reldir).get_child(d), d)
- ctx.symlink(ctx.path(ctx.attr.envoy_root).dirname.get_child("api").get_child("bazel").get_child("utils.bzl"), "utils.bzl")
_default_envoy_api = repository_rule(
implementation = _default_envoy_api_impl,
diff --git a/bazel/coverage/BUILD b/bazel/coverage/BUILD
index 9aa87d0869687..56f73dc2ad1d4 100644
--- a/bazel/coverage/BUILD
+++ b/bazel/coverage/BUILD
@@ -1,9 +1,3 @@
licenses(["notice"]) # Apache 2
-# TODO(lizan): Add test for this and upstream to upstream Bazel.
-filegroup(
- name = "coverage_support",
- srcs = ["collect_cc_coverage.sh"],
-)
-
exports_files(["fuzz_coverage_wrapper.sh"])
diff --git a/bazel/coverage/collect_cc_coverage.sh b/bazel/coverage/collect_cc_coverage.sh
deleted file mode 100755
index 3f9fd700a8edf..0000000000000
--- a/bazel/coverage/collect_cc_coverage.sh
+++ /dev/null
@@ -1,175 +0,0 @@
-#!/bin/bash -e
-#
-# This is a fork of https://github.com/bazelbuild/bazel/blob/3.1.0/tools/test/collect_cc_coverage.sh
-# to cover most of use cases in Envoy.
-# TODO(lizan): Move this to upstream Bazel
-#
-# Copyright 2016 The Bazel Authors. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script collects code coverage data for C++ sources, after the tests
-# were executed.
-#
-# Bazel C++ code coverage collection support is poor and limited. There is
-# an ongoing effort to improve this (tracking issue #1118).
-#
-# Bazel uses the lcov tool for gathering coverage data. There is also
-# an experimental support for clang llvm coverage, which uses the .profraw
-# data files to compute the coverage report.
-#
-# This script assumes the following environment variables are set:
-# - COVERAGE_DIR Directory containing metadata files needed for
-# coverage collection (e.g. gcda files, profraw).
-# - COVERAGE_MANIFEST Location of the instrumented file manifest.
-# - COVERAGE_GCOV_PATH Location of gcov. This is set by the TestRunner.
-# - COVERAGE_GCOV_OPTIONS Additional options to pass to gcov.
-# - ROOT Location from where the code coverage collection
-# was invoked.
-#
-# The script looks in $COVERAGE_DIR for the C++ metadata coverage files (either
-# gcda or profraw) and uses either lcov or gcov to get the coverage data.
-# The coverage data is placed in $COVERAGE_OUTPUT_FILE.
-
-read -ra COVERAGE_GCOV_OPTIONS <<< "${COVERAGE_GCOV_OPTIONS:-}"
-
-# Checks if clang llvm coverage should be used instead of lcov.
-function uses_llvm() {
- if stat "${COVERAGE_DIR}"/*.profraw >/dev/null 2>&1; then
- return 0
- fi
- return 1
-}
-
-# Returns 0 if gcov must be used, 1 otherwise.
-function uses_gcov() {
- [[ "$GCOV_COVERAGE" -eq "1" ]] && return 0
- return 1
-}
-
-function init_gcov() {
- # Symlink the gcov tool such with a link called gcov. Clang comes with a tool
- # called llvm-cov, which behaves like gcov if symlinked in this way (otherwise
- # we would need to invoke it with "llvm-cov gcov").
- # For more details see https://llvm.org/docs/CommandGuide/llvm-cov.html.
- GCOV="${COVERAGE_DIR}/gcov"
- ln -s "${COVERAGE_GCOV_PATH}" "${GCOV}"
-}
-
-# Computes code coverage data using the clang generated metadata found under
-# $COVERAGE_DIR.
-# Writes the collected coverage into the given output file.
-function llvm_coverage() {
- local output_file="${1}" object_file object_files object_param=()
- shift
- export LLVM_PROFILE_FILE="${COVERAGE_DIR}/%h-%p-%m.profraw"
- "${COVERAGE_GCOV_PATH}" merge -output "${output_file}.data" \
- "${COVERAGE_DIR}"/*.profraw
-
-
- object_files="$(find -L "${RUNFILES_DIR}" -type f -exec file -L {} \; \
- | grep ELF | grep -v "LSB core" | sed 's,:.*,,')"
-
- for object_file in ${object_files}; do
- object_param+=(-object "${object_file}")
- done
-
- llvm-cov export -instr-profile "${output_file}.data" -format=lcov \
- -ignore-filename-regex='.*external/.+' \
- -ignore-filename-regex='/tmp/.+' \
- "${object_param[@]}" | sed 's#/proc/self/cwd/##' > "${output_file}"
-}
-
-# Generates a code coverage report in gcov intermediate text format by invoking
-# gcov and using the profile data (.gcda) and notes (.gcno) files.
-#
-# The profile data files are expected to be found under $COVERAGE_DIR.
-# The notes file are expected to be found under $ROOT.
-#
-# - output_file The location of the file where the generated code coverage
-# report is written.
-function gcov_coverage() {
- local gcda gcno_path line output_file="${1}"
- shift
-
- # Copy .gcno files next to their corresponding .gcda files in $COVERAGE_DIR
- # because gcov expects them to be in the same directory.
- while read -r line; do
- if [[ ${line: -4} == "gcno" ]]; then
- gcno_path=${line}
- gcda="${COVERAGE_DIR}/$(dirname "${gcno_path}")/$(basename "${gcno_path}" .gcno).gcda"
- # If the gcda file was not found we skip generating coverage from the gcno
- # file.
- if [[ -f "$gcda" ]]; then
- # gcov expects both gcno and gcda files to be in the same directory.
- # We overcome this by copying the gcno to $COVERAGE_DIR where the gcda
- # files are expected to be.
- if [ ! -f "${COVERAGE_DIR}/${gcno_path}" ]; then
- mkdir -p "${COVERAGE_DIR}/$(dirname "${gcno_path}")"
- cp "$ROOT/${gcno_path}" "${COVERAGE_DIR}/${gcno_path}"
- fi
- # Invoke gcov to generate a code coverage report with the flags:
- # -i Output gcov file in an intermediate text format.
- # The output is a single .gcov file per .gcda file.
- # No source code is required.
- # -o directory The directory containing the .gcno and
- # .gcda data files.
- # "${gcda"} The input file name. gcov is looking for data files
- # named after the input filename without its extension.
- # gcov produces files called .gcov in the current
- # directory. These contain the coverage information of the source file
- # they correspond to. One .gcov file is produced for each source
- # (or header) file containing code which was compiled to produce the
- # .gcda files.
- # Don't generate branch coverage (-b) because of a gcov issue that
- # segfaults when both -i and -b are used (see
- # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84879).
- "${GCOV}" -i "${COVERAGE_GCOV_OPTIONS[@]}" -o "$(dirname "${gcda}")" "${gcda}"
-
- # Append all .gcov files in the current directory to the output file.
- cat ./*.gcov >> "$output_file"
- # Delete the .gcov files.
- rm ./*.gcov
- fi
- fi
- done < "${COVERAGE_MANIFEST}"
-}
-
-function main() {
- init_gcov
-
- # If llvm code coverage is used, we output the raw code coverage report in
- # the $COVERAGE_OUTPUT_FILE. This report will not be converted to any other
- # format by LcovMerger.
- # TODO(#5881): Convert profdata reports to lcov.
- if uses_llvm; then
- BAZEL_CC_COVERAGE_TOOL="PROFDATA"
- fi
-
- # When using either gcov or lcov, have an output file specific to the test
- # and format used. For lcov we generate a ".dat" output file and for gcov
- # a ".gcov" output file. It is important that these files are generated under
- # COVERAGE_DIR.
- # When this script is invoked by tools/test/collect_coverage.sh either of
- # these two coverage reports will be picked up by LcovMerger and their
- # content will be converted and/or merged with other reports to an lcov
- # format, generating the final code coverage report.
- case "$BAZEL_CC_COVERAGE_TOOL" in
- ("GCOV") gcov_coverage "$COVERAGE_DIR/_cc_coverage.gcov" ;;
- ("PROFDATA") llvm_coverage "$COVERAGE_DIR/_cc_coverage.dat" ;;
- (*) echo "Coverage tool $BAZEL_CC_COVERAGE_TOOL not supported" \
- && exit 1
- esac
-}
-
-main
diff --git a/bazel/dependency_imports.bzl b/bazel/dependency_imports.bzl
index b743a1936d0d8..21ff0abc420c8 100644
--- a/bazel/dependency_imports.bzl
+++ b/bazel/dependency_imports.bzl
@@ -18,7 +18,7 @@ load("@com_google_cel_cpp//bazel:deps.bzl", "parser_deps")
load("@com_github_chrusty_protoc_gen_jsonschema//:deps.bzl", protoc_gen_jsonschema_go_dependencies = "go_dependencies")
# go version for rules_go
-GO_VERSION = "1.18"
+GO_VERSION = "1.20"
JQ_VERSION = "1.6"
YQ_VERSION = "4.24.4"
diff --git a/bazel/engflow-bazel-credential-helper.sh b/bazel/engflow-bazel-credential-helper.sh
new file mode 100755
index 0000000000000..c6c1bd339b624
--- /dev/null
+++ b/bazel/engflow-bazel-credential-helper.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+# Bazel expects the helper to read stdin.
+# See https://github.com/bazelbuild/bazel/pull/17666
+cat /dev/stdin > /dev/null
+
+# `GITHUB_TOKEN` is provided as a secret.
+echo "{\"headers\":{\"Authorization\":[\"Bearer ${GITHUB_TOKEN}\"]}}"
diff --git a/bazel/envoy_binary.bzl b/bazel/envoy_binary.bzl
index 58343f8bb3220..86aea5c49bb0f 100644
--- a/bazel/envoy_binary.bzl
+++ b/bazel/envoy_binary.bzl
@@ -9,6 +9,7 @@ load(
"envoy_select_exported_symbols",
"envoy_stdlib_deps",
"tcmalloc_external_dep",
+ "envoy_select_higress",
)
# Envoy C++ binary targets should be specified with this function.
@@ -86,7 +87,7 @@ def _envoy_linkopts():
"@envoy//bazel:boringssl_fips": [],
"@envoy//bazel:windows_x86_64": [],
"//conditions:default": ["-pie"],
- }) + envoy_select_exported_symbols(["-Wl,-E"])
+ }) + envoy_select_exported_symbols(["-Wl,-E"]) + envoy_select_higress(["-lcrypt"])
def _envoy_stamped_deps():
return select({
diff --git a/bazel/envoy_internal.bzl b/bazel/envoy_internal.bzl
index a1f8f1dc6e50a..62b89d2f0daab 100644
--- a/bazel/envoy_internal.bzl
+++ b/bazel/envoy_internal.bzl
@@ -126,6 +126,7 @@ def envoy_copts(repository, test = False):
_envoy_select_perf_annotation(["-DENVOY_PERF_ANNOTATION"]) + \
_envoy_select_perfetto(["-DENVOY_PERFETTO"]) + \
envoy_select_google_grpc(["-DENVOY_GOOGLE_GRPC"], repository) + \
+ envoy_select_higress(["-DHIGRESS"]) + \
envoy_select_signal_trace(["-DENVOY_HANDLE_SIGNALS"], repository) + \
_envoy_select_path_normalization_by_default(["-DENVOY_NORMALIZE_PATH_BY_DEFAULT"], repository)
@@ -192,6 +193,12 @@ def _envoy_select_perf_annotation(xs):
"//conditions:default": [],
})
+def envoy_select_higress(xs):
+ return select({
+ "@envoy//bazel:higress": [],
+ "//conditions:default": xs,
+ })
+
def _envoy_select_perfetto(xs):
return select({
"@envoy//bazel:enable_perf_tracing": xs,
diff --git a/bazel/envoy_library.bzl b/bazel/envoy_library.bzl
index 152302ed6432b..e12841eb222b5 100644
--- a/bazel/envoy_library.bzl
+++ b/bazel/envoy_library.bzl
@@ -103,6 +103,7 @@ def envoy_cc_library(
tags = [],
deps = [],
strip_include_prefix = None,
+ higress_deps = [],
include_prefix = None,
textual_hdrs = None,
alwayslink = None,
@@ -111,6 +112,11 @@ def envoy_cc_library(
if tcmalloc_dep:
deps += tcmalloc_external_deps(repository)
+ deps = deps + select({
+ "@envoy//bazel:higress": [],
+ "//conditions:default": higress_deps,
+ })
+
# If alwayslink is not specified, allow turning it off via --define=library_autolink=disabled
# alwayslink is defaulted on for envoy_cc_extensions to ensure the REGISTRY macros work.
if alwayslink == None:
diff --git a/bazel/envoy_select.bzl b/bazel/envoy_select.bzl
index 7cd774bd460e4..cfea2b6fda6d4 100644
--- a/bazel/envoy_select.bzl
+++ b/bazel/envoy_select.bzl
@@ -173,7 +173,8 @@ def envoy_select_wasm_v8_bool():
def envoy_select_wasm_wamr(xs):
return select({
"@envoy//bazel:wasm_wamr": xs,
- "//conditions:default": [],
+ "@envoy//bazel:higress": [],
+ "//conditions:default": xs,
})
# Selects the given values depending on the Wasm runtimes enabled in the current build.
diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl
index c331735abe53c..9e13c3fdad299 100644
--- a/bazel/envoy_test.bzl
+++ b/bazel/envoy_test.bzl
@@ -16,6 +16,7 @@ load(
"envoy_select_force_libcpp",
"envoy_stdlib_deps",
"tcmalloc_external_dep",
+ "envoy_select_higress",
)
# Envoy C++ related test infrastructure (that want gtest, gmock, but may be
@@ -72,7 +73,7 @@ def _envoy_test_linkopts():
# TODO(mattklein123): It's not great that we universally link against the following libs.
# In particular, -latomic and -lrt are not needed on all platforms. Make this more granular.
"//conditions:default": ["-pthread", "-lrt", "-ldl"],
- }) + envoy_select_force_libcpp([], ["-lstdc++fs", "-latomic"]) + envoy_dbg_linkopts() + envoy_select_exported_symbols(["-Wl,-E"])
+ }) + envoy_select_force_libcpp([], ["-lstdc++fs", "-latomic"]) + envoy_select_higress(["-lcrypt"]) + envoy_dbg_linkopts() + envoy_select_exported_symbols(["-Wl,-E"])
# Envoy C++ fuzz test targets. These are not included in coverage runs.
def envoy_cc_fuzz_test(
@@ -81,7 +82,6 @@ def envoy_cc_fuzz_test(
dictionaries = [],
repository = "",
size = "medium",
- shard_count = None,
deps = [],
tags = [],
**kwargs):
@@ -121,7 +121,6 @@ def envoy_cc_fuzz_test(
"//conditions:default": ["$(locations %s)" % corpus_name],
}),
data = [corpus_name],
- shard_count = shard_count,
# No fuzzing on macOS or Windows
deps = select({
"@envoy//bazel:apple": [repository + "//test:dummy_main"],
@@ -153,6 +152,7 @@ def envoy_cc_test(
repository = "",
external_deps = [],
deps = [],
+ higress_deps = [],
tags = [],
args = [],
copts = [],
@@ -166,6 +166,11 @@ def envoy_cc_test(
exec_properties = {}):
coverage_tags = tags + ([] if coverage else ["nocoverage"])
+ deps = deps + select({
+ "@envoy//bazel:higress": [],
+ "//conditions:default": higress_deps,
+ })
+
native.cc_test(
name = name,
srcs = srcs,
@@ -200,13 +205,21 @@ def envoy_cc_test_library(
data = [],
external_deps = [],
deps = [],
+ higress_deps = [],
repository = "",
tags = [],
include_prefix = None,
copts = [],
alwayslink = 1,
**kargs):
+
+ deps = deps + select({
+ "@envoy//bazel:higress": [],
+ "//conditions:default": higress_deps,
+ })
+
disable_pch = kargs.pop("disable_pch", True)
+
_envoy_cc_test_infrastructure_library(
name,
srcs,
diff --git a/bazel/external/boringssl_fips.BUILD b/bazel/external/boringssl_fips.BUILD
index 1af9f34b1f020..353b1b43292d3 100644
--- a/bazel/external/boringssl_fips.BUILD
+++ b/bazel/external/boringssl_fips.BUILD
@@ -30,5 +30,5 @@ genrule(
"ssl/libssl.a",
],
cmd = "$(location {}) $(location crypto/libcrypto.a) $(location ssl/libssl.a)".format("@envoy//bazel/external:boringssl_fips.genrule_cmd"),
- exec_tools = ["@envoy//bazel/external:boringssl_fips.genrule_cmd"],
+ tools = ["@envoy//bazel/external:boringssl_fips.genrule_cmd"],
)
diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD
index 41bcc3784c90a..1d04c6659d79f 100644
--- a/bazel/external/quiche.BUILD
+++ b/bazel/external/quiche.BUILD
@@ -1,4 +1,3 @@
-load("@rules_proto//proto:defs.bzl", "proto_library")
load(
"@envoy//bazel:envoy_build_system.bzl",
"envoy_cc_library",
@@ -10,6 +9,7 @@ load(
"envoy_quiche_platform_impl_cc_library",
"envoy_quiche_platform_impl_cc_test_library",
)
+load("@rules_proto//proto:defs.bzl", "proto_library")
licenses(["notice"]) # Apache 2
@@ -3551,15 +3551,15 @@ envoy_cc_library(
srcs = select({
"@envoy//bazel:windows_x86_64": [],
"//conditions:default": [
- "quiche/quic/core/io/event_loop_socket_factory.cc",
"quiche/quic/core/io/event_loop_connecting_client_socket.cc",
+ "quiche/quic/core/io/event_loop_socket_factory.cc",
],
}),
hdrs = select({
"@envoy//bazel:windows_x86_64": [],
"//conditions:default": [
- "quiche/quic/core/io/event_loop_socket_factory.h",
"quiche/quic/core/io/event_loop_connecting_client_socket.h",
+ "quiche/quic/core/io/event_loop_socket_factory.h",
],
}),
copts = quiche_copts,
diff --git a/bazel/external/quiche_sequencer_fix.patch b/bazel/external/quiche_sequencer_fix.patch
new file mode 100644
index 0000000000000..b4203e92b6e31
--- /dev/null
+++ b/bazel/external/quiche_sequencer_fix.patch
@@ -0,0 +1,16 @@
+# Fix https://github.com/envoyproxy/envoy-setec/issues/1496#issue-2251291349
+
+diff --git a/quiche/quic/core/quic_stream_sequencer_buffer.cc b/quiche/quic/core/quic_stream_sequencer_buffer.cc
+index d364d61bc..0966af4b0 100644
+--- a/quiche/quic/core/quic_stream_sequencer_buffer.cc
++++ b/quiche/quic/core/quic_stream_sequencer_buffer.cc
+@@ -388,7 +388,8 @@ bool QuicStreamSequencerBuffer::PeekRegion(QuicStreamOffset offset,
+
+ // Determine if entire block has been received.
+ size_t end_block_idx = GetBlockIndex(FirstMissingByte());
+- if (block_idx == end_block_idx) {
++ if (block_idx == end_block_idx &&
++ block_offset < GetInBlockOffset(FirstMissingByte())) {
+ // Only read part of block before FirstMissingByte().
+ iov->iov_len = GetInBlockOffset(FirstMissingByte()) - block_offset;
+ } else {
diff --git a/bazel/external/quiche_stream_fix.patch b/bazel/external/quiche_stream_fix.patch
new file mode 100644
index 0000000000000..b5a777a3af083
--- /dev/null
+++ b/bazel/external/quiche_stream_fix.patch
@@ -0,0 +1,50 @@
+# Fix https://github.com/envoyproxy/envoy-setec/issues/1496#issuecomment-2064844217
+
+diff --git a/quiche/quic/core/http/quic_spdy_stream.cc b/quiche/quic/core/http/quic_spdy_stream.cc
+index 4a5c2ede2..d69895055 100644
+--- a/quiche/quic/core/http/quic_spdy_stream.cc
++++ b/quiche/quic/core/http/quic_spdy_stream.cc
+@@ -1865,6 +1865,18 @@ bool QuicSpdyStream::AreHeaderFieldValuesValid(
+ return true;
+ }
+
++void QuicSpdyStream::StopReading() {
++ QuicStream::StopReading();
++ if (GetQuicReloadableFlag(
++ quic_stop_reading_also_stops_header_decompression) &&
++ VersionUsesHttp3(transport_version()) && !fin_received() &&
++ spdy_session_->qpack_decoder()) {
++ // Clean up Qpack decoding states.
++ spdy_session_->qpack_decoder()->OnStreamReset(id());
++ qpack_decoded_headers_accumulator_.reset();
++ }
++}
++
+ void QuicSpdyStream::OnInvalidHeaders() { Reset(QUIC_BAD_APPLICATION_PAYLOAD); }
+
+ void QuicSpdyStream::CloseReadSide() {
+diff --git a/quiche/quic/core/http/quic_spdy_stream.h b/quiche/quic/core/http/quic_spdy_stream.h
+index 10c34b10f..5c0cb0128 100644
+--- a/quiche/quic/core/http/quic_spdy_stream.h
++++ b/quiche/quic/core/http/quic_spdy_stream.h
+@@ -117,6 +117,7 @@ class QUICHE_EXPORT QuicSpdyStream
+
+ // QuicStream implementation
+ void OnClose() override;
++ void StopReading() override;
+
+ // Override to maybe close the write side after writing.
+ void OnCanWrite() override;
+diff --git a/quiche/quic/core/quic_flags_list.h b/quiche/quic/core/quic_flags_list.h
+index d2b1864ee..044d9f8ce 100644
+--- a/quiche/quic/core/quic_flags_list.h
++++ b/quiche/quic/core/quic_flags_list.h
+@@ -117,6 +117,8 @@ QUIC_FLAG(quic_reloadable_flag_quic_bbr2_probe_two_rounds, true)
+ QUIC_FLAG(quic_reloadable_flag_quic_bbr2_simplify_inflight_hi, true)
+ // When true, the BBR4 copt sets the extra_acked window to 20 RTTs and BBR5 sets it to 40 RTTs.
+ QUIC_FLAG(quic_reloadable_flag_quic_bbr2_extra_acked_window, true)
++// If true, QUIC stream will not continue decompressing buffer headers after StopReading() called.
++QUIC_FLAG(quic_reloadable_flag_quic_stop_reading_also_stops_header_decompression, true)
+
+ #endif
+
diff --git a/bazel/external/rapidjson.BUILD b/bazel/external/rapidjson.BUILD
index 6138f5fa351fa..9ec0d38a5b226 100644
--- a/bazel/external/rapidjson.BUILD
+++ b/bazel/external/rapidjson.BUILD
@@ -7,5 +7,5 @@ cc_library(
includes = ["include"],
# rapidjson is only needed to build external dependency of the Zipkin tracer.
# For Envoy source code plese use source/common/json/json_loader.h
- visibility = ["@io_opencensus_cpp//opencensus/exporters/trace/zipkin:__pkg__"],
+ visibility = ["//visibility:public"],
)
diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD
index 67caf394a1076..b87c01db5fc98 100644
--- a/bazel/foreign_cc/BUILD
+++ b/bazel/foreign_cc/BUILD
@@ -1,5 +1,5 @@
-load("//bazel:envoy_build_system.bzl", "envoy_cmake", "envoy_package")
load("@rules_foreign_cc//foreign_cc:configure.bzl", "configure_make")
+load("//bazel:envoy_build_system.bzl", "envoy_cmake", "envoy_package")
licenses(["notice"]) # Apache 2
@@ -56,7 +56,7 @@ configure_make(
name = "librdkafka_build",
configure_in_place = True,
configure_options = ["--disable-ssl --disable-gssapi --disable-lz4-ext --disable-zstd --disable-curl && cp Makefile.config src/.. && cp config.h src/.."],
- lib_source = "@edenhill_librdkafka//:all",
+ lib_source = "@confluentinc_librdkafka//:all",
out_static_libs = [
"librdkafka.a",
"librdkafka++.a",
@@ -65,7 +65,7 @@ configure_make(
targets = [
"ARFLAGS='' libs install-subdirs",
],
- alwayslink = True,
+ alwayslink = False,
)
cc_library(
@@ -465,24 +465,164 @@ envoy_cmake(
}),
)
+envoy_cmake(
+ name = "llvm_15_0_7",
+ cache_entries = {
+ # Disable both: BUILD and INCLUDE, since some of the INCLUDE
+ # targets build code instead of only generating build files.
+ "LLVM_BUILD_BENCHMARKS": "off",
+ "LLVM_BUILD_DOCS": "off",
+ "LLVM_BUILD_EXAMPLES": "off",
+ "LLVM_BUILD_TESTS": "off",
+ "LLVM_BUILD_TOOLS": "off",
+ "LLVM_ENABLE_IDE": "off",
+ "LLVM_ENABLE_LIBEDIT": "off",
+ "LLVM_ENABLE_LIBXML2": "off",
+ "LLVM_ENABLE_TERMINFO": "off",
+ "LLVM_ENABLE_ZLIB": "off",
+ "LLVM_ENABLE_ZSTD": "off",
+ "LLVM_INCLUDE_BENCHMARKS": "off",
+ "LLVM_INCLUDE_DOCS": "off",
+ "LLVM_INCLUDE_EXAMPLES": "off",
+ "LLVM_INCLUDE_TESTS": "off",
+ "LLVM_INCLUDE_TOOLS": "off",
+ "LLVM_TARGETS_TO_BUILD": "X86",
+ "LLVM_USE_PERF": "on",
+ "CMAKE_CXX_FLAGS": "-Wno-unused-command-line-argument",
+ },
+ generate_args = ["-GNinja"] + select({
+ # `lld` doesn't work on MacOS
+ "@platforms//os:linux": ["-DLLVM_USE_LINKER=lld"],
+ "//conditions:default": [],
+ }) + select({
+ "//bazel:dbg_build": ["-DCMAKE_BUILD_TYPE=Debug"],
+ "//conditions:default": ["-DCMAKE_BUILD_TYPE=MinSizeRel"],
+ }),
+ lib_source = "@org_llvm_llvm_15_0_7//:all",
+ out_data_dirs = [
+ "bin",
+ "include",
+ "lib",
+ "libexec",
+ "share",
+ ],
+ out_static_libs = [
+ # How to get the library list:
+ # build LLVM with "-DLLVM_INCLUDE_TOOLS=ON"
+ # cd bin and run "./llvm-config --libnames"
+ "libLLVMWindowsManifest.a",
+ "libLLVMXRay.a",
+ "libLLVMLibDriver.a",
+ "libLLVMDlltoolDriver.a",
+ "libLLVMCoverage.a",
+ "libLLVMLineEditor.a",
+ "libLLVMX86Disassembler.a",
+ "libLLVMX86AsmParser.a",
+ "libLLVMX86CodeGen.a",
+ "libLLVMX86Desc.a",
+ "libLLVMX86Info.a",
+ "libLLVMOrcJIT.a",
+ "libLLVMMCJIT.a",
+ "libLLVMJITLink.a",
+ "libLLVMInterpreter.a",
+ "libLLVMExecutionEngine.a",
+ "libLLVMRuntimeDyld.a",
+ "libLLVMOrcTargetProcess.a",
+ "libLLVMOrcShared.a",
+ "libLLVMDWP.a",
+ "libLLVMSymbolize.a",
+ "libLLVMDebugInfoPDB.a",
+ "libLLVMDebugInfoGSYM.a",
+ "libLLVMOption.a",
+ "libLLVMObjectYAML.a",
+ "libLLVMMCA.a",
+ "libLLVMMCDisassembler.a",
+ "libLLVMLTO.a",
+ "libLLVMPasses.a",
+ "libLLVMCFGuard.a",
+ "libLLVMCoroutines.a",
+ "libLLVMObjCARCOpts.a",
+ "libLLVMipo.a",
+ "libLLVMVectorize.a",
+ "libLLVMLinker.a",
+ "libLLVMInstrumentation.a",
+ "libLLVMFrontendOpenMP.a",
+ "libLLVMFrontendOpenACC.a",
+ "libLLVMExtensions.a",
+ "libLLVMDWARFLinker.a",
+ "libLLVMGlobalISel.a",
+ "libLLVMMIRParser.a",
+ "libLLVMAsmPrinter.a",
+ "libLLVMDebugInfoMSF.a",
+ "libLLVMDebugInfoDWARF.a",
+ "libLLVMSelectionDAG.a",
+ "libLLVMCodeGen.a",
+ "libLLVMIRReader.a",
+ "libLLVMAsmParser.a",
+ "libLLVMInterfaceStub.a",
+ "libLLVMFileCheck.a",
+ "libLLVMFuzzMutate.a",
+ "libLLVMTarget.a",
+ "libLLVMScalarOpts.a",
+ "libLLVMInstCombine.a",
+ "libLLVMAggressiveInstCombine.a",
+ "libLLVMTransformUtils.a",
+ "libLLVMBitWriter.a",
+ "libLLVMAnalysis.a",
+ "libLLVMProfileData.a",
+ "libLLVMObject.a",
+ "libLLVMTextAPI.a",
+ "libLLVMMCParser.a",
+ "libLLVMMC.a",
+ "libLLVMDebugInfoCodeView.a",
+ "libLLVMBitReader.a",
+ "libLLVMCore.a",
+ "libLLVMRemarks.a",
+ "libLLVMBitstreamReader.a",
+ "libLLVMBinaryFormat.a",
+ "libLLVMTableGen.a",
+ "libLLVMSupport.a",
+ "libLLVMDemangle.a",
+ "libLLVMPerfJITEvents.a",
+ ],
+ working_directory = "llvm",
+)
+
envoy_cmake(
name = "wamr",
cache_entries = {
- "WAMR_BUILD_AOT": "0",
- "WAMR_BUILD_FAST_INTERP": "1",
+ # aot/jit by default
+ "LLVM_DIR": "$EXT_BUILD_DEPS/copy_llvm_15_0_7/llvm/lib/cmake/llvm",
+ "WAMR_BUILD_AOT": "1",
+ "WAMR_BUILD_FAST_INTERP": "0",
"WAMR_BUILD_INTERP": "1",
- "WAMR_BUILD_JIT": "0",
+ "WAMR_BUILD_JIT": "1",
+ # disable WASI
"WAMR_BUILD_LIBC_WASI": "0",
- "WAMR_BUILD_MULTI_MODULE": "0",
+ "WAMR_BUILD_LIBC_BUILTIN": "0",
+ # MVP
+ "WAMR_BUILD_BULK_MEMORY": "1",
+ "WAMR_BUILD_REF_TYPES": "1",
+ # only for jit and aot
"WAMR_BUILD_SIMD": "0",
"WAMR_BUILD_TAIL_CALL": "1",
"WAMR_BUILD_WASM_CACHE": "0",
- "WAMR_DISABLE_HW_BOUND_CHECK": "0",
- "WAMR_DISABLE_STACK_HW_BOUND_CHECK": "1",
+ "WAMR_BUILD_MULTI_MODULE": "0",
+ # enable below to enhance development experience
+ # name section
+ "WAMR_BUILD_CUSTOM_NAME_SECTION": "1",
+ "WAMR_BUILD_LOAD_CUSTOM_SECTION": "1",
+ # output call stack if meet a trap
+ "WAMR_BUILD_DUMP_CALL_STACK": "1",
+ # linux perf. only for jit and aot
+ "WAMR_BUILD_LINUX_PERF": "1",
+ # avoid conflicts between os_thread_signal_init and the signal stack in the golang filter.
+ "WAMR_DISABLE_HW_BOUND_CHECK": "1",
},
lib_source = "@com_github_wamr//:all",
out_static_libs = ["libvmlib.a"],
tags = ["skip_on_windows"],
+ deps = [":llvm_15_0_7"],
)
envoy_cmake(
@@ -570,3 +710,24 @@ envoy_cmake(
}),
working_directory = "build/cmake",
)
+
+envoy_cmake(
+ name = "llama",
+ cache_entries = {
+ "CMAKE_INSTALL_LIBDIR": "lib",
+ "BUILD_SHARED_LIBS": "off",
+ "CMAKE_BUILD_TYPE": "Release"
+ },
+ linkopts = ["-fopenmp"],
+ lib_source = "@com_github_ggerganov_llama//:all",
+ out_static_libs = select({
+ "//conditions:default": [
+ "libllama.a",
+ "libggml.a",
+ ],
+ }),
+ tags = ["skip_on_windows"],
+ postfix_script = select({
+ "//conditions:default": "rm -rf $INSTALLDIR/include/common && mkdir $INSTALLDIR/include/common && cp -rL $EXT_BUILD_ROOT/external/com_github_ggerganov_llama/common/* $INSTALLDIR/include/common",
+ }),
+)
diff --git a/bazel/foreign_cc/nghttp2.patch b/bazel/foreign_cc/nghttp2.patch
index d1cbab6356e5b..511e2a2e4b29b 100644
--- a/bazel/foreign_cc/nghttp2.patch
+++ b/bazel/foreign_cc/nghttp2.patch
@@ -14,3 +14,171 @@ diff -u -r a/CMakeLists.txt b/CMakeLists.txt
endif()
# AC_TYPE_UINT8_T
# AC_TYPE_UINT16_T
+diff --git a/doc/Makefile.am b/doc/Makefile.am
+index 7d7f31c6..ce50d89e 100644
+--- a/doc/Makefile.am
++++ b/doc/Makefile.am
+@@ -74,6 +74,7 @@ APIDOCS= \
+ nghttp2_option_set_peer_max_concurrent_streams.rst \
+ nghttp2_option_set_server_fallback_rfc7540_priorities.rst \
+ nghttp2_option_set_user_recv_extension_type.rst \
++ nghttp2_option_set_max_continuations.rst \
+ nghttp2_option_set_max_outbound_ack.rst \
+ nghttp2_option_set_max_settings.rst \
+ nghttp2_option_set_stream_reset_rate_limit.rst \
+diff --git a/lib/includes/nghttp2/nghttp2.h b/lib/includes/nghttp2/nghttp2.h
+index 7910db23..a54efbfd 100644
+--- a/lib/includes/nghttp2/nghttp2.h
++++ b/lib/includes/nghttp2/nghttp2.h
+@@ -440,7 +440,12 @@ typedef enum {
+ * exhaustion on server side to send these frames forever and does
+ * not read network.
+ */
+- NGHTTP2_ERR_FLOODED = -904
++ NGHTTP2_ERR_FLOODED = -904,
++ /**
++ * When a local endpoint receives too many CONTINUATION frames
++ * following a HEADER frame.
++ */
++ NGHTTP2_ERR_TOO_MANY_CONTINUATIONS = -905,
+ } nghttp2_error;
+
+ /**
+@@ -2773,6 +2778,17 @@ NGHTTP2_EXTERN void
+ nghttp2_option_set_stream_reset_rate_limit(nghttp2_option *option,
+ uint64_t burst, uint64_t rate);
+
++/**
++ * @function
++ *
++ * This function sets the maximum number of CONTINUATION frames
++ * following an incoming HEADER frame. If more than those frames are
++ * received, the remote endpoint is considered to be misbehaving and
++ * session will be closed. The default value is 8.
++ */
++NGHTTP2_EXTERN void nghttp2_option_set_max_continuations(nghttp2_option *option,
++ size_t val);
++
+ /**
+ * @function
+ *
+diff --git a/lib/nghttp2_helper.c b/lib/nghttp2_helper.c
+index 93dd4754..b3563d98 100644
+--- a/lib/nghttp2_helper.c
++++ b/lib/nghttp2_helper.c
+@@ -336,6 +336,8 @@ const char *nghttp2_strerror(int error_code) {
+ "closed";
+ case NGHTTP2_ERR_TOO_MANY_SETTINGS:
+ return "SETTINGS frame contained more than the maximum allowed entries";
++ case NGHTTP2_ERR_TOO_MANY_CONTINUATIONS:
++ return "Too many CONTINUATION frames following a HEADER frame";
+ default:
+ return "Unknown error code";
+ }
+diff --git a/lib/nghttp2_option.c b/lib/nghttp2_option.c
+index 43d4e952..53144b9b 100644
+--- a/lib/nghttp2_option.c
++++ b/lib/nghttp2_option.c
+@@ -150,3 +150,8 @@ void nghttp2_option_set_stream_reset_rate_limit(nghttp2_option *option,
+ option->stream_reset_burst = burst;
+ option->stream_reset_rate = rate;
+ }
++
++void nghttp2_option_set_max_continuations(nghttp2_option *option, size_t val) {
++ option->opt_set_mask |= NGHTTP2_OPT_MAX_CONTINUATIONS;
++ option->max_continuations = val;
++}
+diff --git a/lib/nghttp2_option.h b/lib/nghttp2_option.h
+index 2259e184..c89cb97f 100644
+--- a/lib/nghttp2_option.h
++++ b/lib/nghttp2_option.h
+@@ -71,6 +71,7 @@ typedef enum {
+ NGHTTP2_OPT_SERVER_FALLBACK_RFC7540_PRIORITIES = 1 << 13,
+ NGHTTP2_OPT_NO_RFC9113_LEADING_AND_TRAILING_WS_VALIDATION = 1 << 14,
+ NGHTTP2_OPT_STREAM_RESET_RATE_LIMIT = 1 << 15,
++ NGHTTP2_OPT_MAX_CONTINUATIONS = 1 << 16,
+ } nghttp2_option_flag;
+
+ /**
+@@ -98,6 +99,10 @@ struct nghttp2_option {
+ * NGHTTP2_OPT_MAX_SETTINGS
+ */
+ size_t max_settings;
++ /**
++ * NGHTTP2_OPT_MAX_CONTINUATIONS
++ */
++ size_t max_continuations;
+ /**
+ * Bitwise OR of nghttp2_option_flag to determine that which fields
+ * are specified.
+diff --git a/lib/nghttp2_session.c b/lib/nghttp2_session.c
+index ce21caf9..18949528 100644
+--- a/lib/nghttp2_session.c
++++ b/lib/nghttp2_session.c
+@@ -496,6 +496,7 @@ static int session_new(nghttp2_session **session_ptr,
+ (*session_ptr)->max_send_header_block_length = NGHTTP2_MAX_HEADERSLEN;
+ (*session_ptr)->max_outbound_ack = NGHTTP2_DEFAULT_MAX_OBQ_FLOOD_ITEM;
+ (*session_ptr)->max_settings = NGHTTP2_DEFAULT_MAX_SETTINGS;
++ (*session_ptr)->max_continuations = NGHTTP2_DEFAULT_MAX_CONTINUATIONS;
+
+ if (option) {
+ if ((option->opt_set_mask & NGHTTP2_OPT_NO_AUTO_WINDOW_UPDATE) &&
+@@ -584,6 +585,10 @@ static int session_new(nghttp2_session **session_ptr,
+ option->stream_reset_burst,
+ option->stream_reset_rate);
+ }
++
++ if (option->opt_set_mask & NGHTTP2_OPT_MAX_CONTINUATIONS) {
++ (*session_ptr)->max_continuations = option->max_continuations;
++ }
+ }
+
+ rv = nghttp2_hd_deflate_init2(&(*session_ptr)->hd_deflater,
+@@ -6778,6 +6783,8 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in,
+ }
+ }
+ session_inbound_frame_reset(session);
++
++ session->num_continuations = 0;
+ }
+ break;
+ }
+@@ -6899,6 +6906,10 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in,
+ }
+ #endif /* DEBUGBUILD */
+
++ if (++session->num_continuations > session->max_continuations) {
++ return NGHTTP2_ERR_TOO_MANY_CONTINUATIONS;
++ }
++
+ readlen = inbound_frame_buf_read(iframe, in, last);
+ in += readlen;
+
+diff --git a/lib/nghttp2_session.h b/lib/nghttp2_session.h
+index b119329a..ef8f7b27 100644
+--- a/lib/nghttp2_session.h
++++ b/lib/nghttp2_session.h
+@@ -110,6 +110,10 @@ typedef struct {
+ #define NGHTTP2_DEFAULT_STREAM_RESET_BURST 1000
+ #define NGHTTP2_DEFAULT_STREAM_RESET_RATE 33
+
++/* The default max number of CONTINUATION frames following an incoming
++ HEADER frame. */
++#define NGHTTP2_DEFAULT_MAX_CONTINUATIONS 8
++
+ /* Internal state when receiving incoming frame */
+ typedef enum {
+ /* Receiving frame header */
+@@ -290,6 +294,12 @@ struct nghttp2_session {
+ size_t max_send_header_block_length;
+ /* The maximum number of settings accepted per SETTINGS frame. */
+ size_t max_settings;
++ /* The maximum number of CONTINUATION frames following an incoming
++ HEADER frame. */
++ size_t max_continuations;
++ /* The number of CONTINUATION frames following an incoming HEADER
++ frame. This variable is reset when END_HEADERS flag is seen. */
++ size_t num_continuations;
+ /* Next Stream ID. Made unsigned int to detect >= (1 << 31). */
+ uint32_t next_stream_id;
+ /* The last stream ID this session initiated. For client session,
diff --git a/bazel/get_workspace_status b/bazel/get_workspace_status
index ca5159e6dea90..bc43475f01aca 100755
--- a/bazel/get_workspace_status
+++ b/bazel/get_workspace_status
@@ -23,6 +23,7 @@
if [ -f SOURCE_VERSION ]
then
echo "BUILD_SCM_REVISION $(cat SOURCE_VERSION)"
+ echo "ENVOY_BUILD_SCM_REVISION $(cat SOURCE_VERSION)"
echo "STABLE_BUILD_SCM_REVISION $(cat SOURCE_VERSION)"
echo "BUILD_SCM_STATUS Distribution"
exit 0
@@ -30,11 +31,13 @@ fi
if [[ -n "$BAZEL_FAKE_SCM_REVISION" ]]; then
echo "BUILD_SCM_REVISION $BAZEL_FAKE_SCM_REVISION"
+ echo "ENVOY_BUILD_SCM_REVISION $BAZEL_FAKE_SCM_REVISION"
echo "STABLE_BUILD_SCM_REVISION $BAZEL_FAKE_SCM_REVISION"
else
# The code below presents an implementation that works for git repository
git_rev=$(git rev-parse HEAD) || exit 1
echo "BUILD_SCM_REVISION ${git_rev}"
+ echo "ENVOY_BUILD_SCM_REVISION ${git_rev}"
echo "STABLE_BUILD_SCM_REVISION ${git_rev}"
fi
diff --git a/bazel/grpc.patch b/bazel/grpc.patch
index c8872879824c6..4608049f1bf8e 100644
--- a/bazel/grpc.patch
+++ b/bazel/grpc.patch
@@ -23,4 +23,17 @@ index 1bb970e049..81265483e9 100644
+ "-layering_check",
],
)
-
+
+diff --git a/src/core/lib/channel/channel_args.h b/src/core/lib/channel/channel_args.h
+index 38bb070213..b53086e680 100644
+--- a/src/core/lib/channel/channel_args.h
++++ b/src/core/lib/channel/channel_args.h
+@@ -284,7 +284,7 @@ class ChannelArgs {
+
+ class Value {
+ public:
+- explicit Value(int n) : rep_(reinterpret_cast(n), &int_vtable_) {}
++ explicit Value(int n) : rep_(reinterpret_cast(static_cast(n)), &int_vtable_) {}
+ explicit Value(std::string s)
+ : rep_(RefCountedString::Make(s).release(), &string_vtable_) {}
+ explicit Value(Pointer p) : rep_(std::move(p)) {}
diff --git a/bazel/protobuf_hash_cache.patch b/bazel/protobuf_hash_cache.patch
new file mode 100644
index 0000000000000..13cef2cbfa644
--- /dev/null
+++ b/bazel/protobuf_hash_cache.patch
@@ -0,0 +1,462 @@
+diff --git a/src/google/protobuf/BUILD.bazel b/src/google/protobuf/BUILD.bazel
+index 77ed2309f..825189ca5 100644
+--- a/src/google/protobuf/BUILD.bazel
++++ b/src/google/protobuf/BUILD.bazel
+@@ -504,6 +504,7 @@ cc_library(
+ "@com_google_absl//absl/synchronization",
+ "@com_google_absl//absl/time",
+ "@utf8_range//:utf8_validity",
++ "@com_github_cyan4973_xxhash//:xxhash",
+ ],
+ )
+
+diff --git a/src/google/protobuf/message.cc b/src/google/protobuf/message.cc
+index fc474dd7c..4db68a09d 100644
+--- a/src/google/protobuf/message.cc
++++ b/src/google/protobuf/message.cc
+@@ -34,6 +34,7 @@
+
+ #include "google/protobuf/message.h"
+
++#include
+ #include
+ #include
+
+@@ -60,7 +61,8 @@
+ #include "google/protobuf/unknown_field_set.h"
+ #include "google/protobuf/wire_format.h"
+ #include "google/protobuf/wire_format_lite.h"
+-
++#include "google/protobuf/dynamic_message.h"
++#include "xxhash.h"
+
+ // Must be included last.
+ #include "google/protobuf/port_def.inc"
+@@ -74,6 +76,93 @@ namespace internal {
+ // defined in generated_message_reflection.cc
+ void RegisterFileLevelMetadata(const DescriptorTable* descriptor_table);
+
++// Helper function to extract type name from Any type_url
++std::string ExtractTypeNameFromUrl(const std::string& type_url) {
++ size_t last_slash = type_url.find_last_of('/');
++ if (last_slash != std::string::npos && last_slash + 1 < type_url.length()) {
++ return type_url.substr(last_slash + 1);
++ }
++ return type_url; // Fallback to full URL if parsing fails
++}
++
++// Helper function to check if map value is message type
++bool IsMapValueMessageTyped(const FieldDescriptor* map_field) {
++ return map_field->message_type()->field(1)->cpp_type() ==
++ FieldDescriptor::CPPTYPE_MESSAGE;
++}
++
++// Helper function to hash a single field value
++uint64_t HashFieldValue(const Reflection* reflection, const Message& message,
++ const FieldDescriptor* field, int index = -1) {
++ switch (field->cpp_type()) {
++ case FieldDescriptor::CPPTYPE_MESSAGE:
++ if (index >= 0) {
++ const Message& sub_message = reflection->GetRepeatedMessage(message, field, index);
++ return sub_message.GetCachedHashValue();
++ } else if (reflection->HasField(message, field)) {
++ const Message& sub_message = reflection->GetMessage(message, field);
++ return sub_message.GetCachedHashValue();
++ }
++ return 0;
++ case FieldDescriptor::CPPTYPE_INT32:{
++ int32_t val = index >= 0 ? reflection->GetRepeatedInt32(message, field, index)
++ : reflection->GetInt32(message, field);
++ return XXH64(&val, sizeof(val), 0);
++ }
++ case FieldDescriptor::CPPTYPE_INT64:{
++ int64_t val = index >= 0 ? reflection->GetRepeatedInt64(message, field, index)
++ : reflection->GetInt64(message, field);
++ return XXH64(&val, sizeof(val), 0);
++ }
++ case FieldDescriptor::CPPTYPE_UINT32:{
++ uint32_t val = index >= 0 ? reflection->GetRepeatedUInt32(message, field, index)
++ : reflection->GetUInt32(message, field);
++ return XXH64(&val, sizeof(val), 0);
++ }
++ case FieldDescriptor::CPPTYPE_UINT64:{
++ uint64_t val = index >= 0 ? reflection->GetRepeatedUInt64(message, field, index)
++ : reflection->GetUInt64(message, field);
++ return XXH64(&val, sizeof(val), 0);
++ }
++ case FieldDescriptor::CPPTYPE_DOUBLE:{
++ double val = index >= 0 ? reflection->GetRepeatedDouble(message, field, index)
++ : reflection->GetDouble(message, field);
++ return XXH64(&val, sizeof(val), 0);
++ }
++ case FieldDescriptor::CPPTYPE_FLOAT:{
++ float val = index >= 0 ? reflection->GetRepeatedFloat(message, field, index)
++ : reflection->GetFloat(message, field);
++ return XXH64(&val, sizeof(val), 0);
++ }
++ case FieldDescriptor::CPPTYPE_BOOL:{
++ bool val = index >= 0 ? reflection->GetRepeatedBool(message, field, index)
++ : reflection->GetBool(message, field);
++ return XXH64(&val, sizeof(val), 0);
++ }
++ case FieldDescriptor::CPPTYPE_ENUM:{
++ int32_t val = index >= 0 ? reflection->GetRepeatedEnumValue(message, field, index)
++ : reflection->GetEnumValue(message, field);
++ return XXH64(&val, sizeof(val), 0);
++ }
++ case FieldDescriptor::CPPTYPE_STRING:{
++ std::string val = index >= 0 ? reflection->GetRepeatedString(message, field, index)
++ : reflection->GetString(message, field);
++ return XXH64(val.data(), val.size(), 0);
++ }
++ default:{
++ if(index >= 0){
++ fprintf(stderr, "Message::HashFieldValue: Unexpected repeated field type: %d\n", field->cpp_type());
++ const Message& sub_message = reflection->GetRepeatedMessage(message, field, index);
++ return sub_message.GetCachedHashValue();
++ } else if (reflection->HasField(message, field)){
++ fprintf(stderr, "Message::HashFieldValue: Unexpected field type: %d\n", field->cpp_type());
++ const Message& sub_message = reflection->GetMessage(message, field);
++ return sub_message.GetCachedHashValue();
++ }
++ return 0;
++ }
++ }
++}
+ } // namespace internal
+
+ using internal::DownCast;
+@@ -215,6 +304,296 @@ uint64_t Message::GetInvariantPerBuild(uint64_t salt) {
+ return salt;
+ }
+
++// Hash computation methods implementation
++uint64_t Message::ComputeHashValue() const {
++
++ const Reflection* reflection = GetReflection();
++ const Descriptor* descriptor = GetDescriptor();
++
++ // Use a stable hash seed that's consistent across runs
++ // This ensures deterministic hashing regardless of memory layout
++ uint64_t hash = 0x9e3779b97f4a7c15; // xxhash seed
++
++ // Hash the descriptor type
++ hash = XXH64(descriptor->full_name().data(), descriptor->full_name().size(), hash);
++
++ // Special handling for google.protobuf.Any type
++ if (descriptor->full_name() == "google.protobuf.Any") {
++ // For Any types, we need to hash the unpacked content to ensure consistency
++ // This mimics TextFormat's approach of expanding Any messages
++ const Reflection* reflection = GetReflection();
++ const FieldDescriptor* type_url_field = descriptor->FindFieldByNumber(1);
++ const FieldDescriptor* value_field = descriptor->FindFieldByNumber(2);
++
++ if (type_url_field && value_field &&
++ reflection->HasField(*this, type_url_field) &&
++ reflection->HasField(*this, value_field)) {
++
++ std::string type_url = reflection->GetString(*this, type_url_field);
++ std::string serialized_value = reflection->GetString(*this, value_field);
++
++ // Hash the type URL
++ hash = XXH64(type_url.data(), type_url.size(), hash);
++ /*
++ // Try to parse and hash the unpacked message for consistency
++ // This ensures that Any messages with same content produce same hash
++ // regardless of serialization order in the value field
++ try {
++ // Create a temporary message from the serialized value
++ DynamicMessageFactory factory;
++ const Descriptor* value_descriptor =
++ factory.GetPrototype(descriptor)->GetDescriptor()->file()->pool()
++ ->FindMessageTypeByName(internal::ExtractTypeNameFromUrl(type_url));
++
++ if (value_descriptor) {
++ std::unique_ptr unpacked_message(
++ factory.GetPrototype(value_descriptor)->New());
++ if (unpacked_message->ParseFromString(serialized_value)) {
++ // Hash the unpacked message content
++ uint64_t unpacked_message_hash = unpacked_message->GetCachedHashValue();
++ hash = XXH64(&unpacked_message_hash, sizeof(unpacked_message_hash), hash);
++ } else {
++ fprintf(stderr, "Message::ComputeHashValue: Parsing failed for Any message: %s\n", serialized_value.c_str());
++ // If parsing fails, hash the raw serialized value
++ hash = XXH64(serialized_value.data(), serialized_value.size(), hash);
++ }
++ } else {
++ fprintf(stderr, "Message::ComputeHashValue: Type not found: %s\n", type_url.c_str());
++ // If type not found, hash the raw serialized value
++ hash = XXH64(serialized_value.data(), serialized_value.size(), hash);
++ }
++ } catch (e) {
++ fprintf(stderr, "Message::ComputeHashValue: Error parsing Any message: %s\n", e.what());
++ // If any error occurs, fall back to hashing the raw value
++ hash = XXH64(serialized_value.data(), serialized_value.size(), hash);
++ }
++ */
++
++ // Skip the any parsing and just hash the serialized value
++ hash = XXH64(serialized_value.data(), serialized_value.size(), hash);
++
++ // Skip normal field processing for Any types since we've handled them specially
++ return hash;
++ }
++ }
++
++ // Iterate through all fields and hash their values recursively
++ std::vector fields;
++ reflection->ListFields(*this, &fields);
++
++ // Sort fields by field number to ensure consistent order
++ // Use stable_sort for deterministic ordering across runs
++ std::stable_sort(fields.begin(), fields.end(),
++ [](const FieldDescriptor* a, const FieldDescriptor* b) {
++ if (a->number() != b->number()) {
++ return a->number() < b->number(); // Primary: field number
++ }
++ // Secondary: field name for stability when field numbers are equal
++ return a->name() < b->name();
++ });
++
++ for (const FieldDescriptor* field : fields) {
++ // Hash field number and type
++ uint32_t field_number = field->number();
++ uint32_t field_type = field->type();
++ hash = XXH64(&field_number, sizeof(field_number), hash);
++ hash = XXH64(&field_type, sizeof(field_type), hash);
++
++ if (field->is_repeated()) {
++ // Handle repeated fields using RepeatedFieldAccessor for consistent access
++ const internal::RepeatedFieldAccessor* accessor = reflection->RepeatedFieldAccessor(field);
++ void* repeated_field_data = reflection->RepeatedFieldData(const_cast(this), field,
++ field->cpp_type(),
++ field->message_type());
++ int size = accessor->Size(repeated_field_data);
++ hash = XXH64(&size, sizeof(size), hash);
++
++ if (field->is_map()) {
++ // For map fields, use MapField to access the underlying map data
++ // This provides better performance and guarantees consistent ordering
++
++ // Get key and value field descriptors
++ const Descriptor* map_entry_desc = field->message_type();
++ const FieldDescriptor* key_field = map_entry_desc->field(0); // key field
++ const FieldDescriptor* value_field = map_entry_desc->field(1); // value field
++
++ // Check if map value is message type
++ bool is_value_message = internal::IsMapValueMessageTyped(field);
++
++ std::vector> map_entries;
++
++ // Use MapIterator to iterate through the map
++ for (MapIterator iter = reflection->MapBegin(const_cast(this), field);
++ iter != reflection->MapEnd(const_cast(this), field);
++ ++iter) {
++
++ const MapKey& key = iter.GetKey();
++ const MapValueRef& value = iter.GetValueRef();
++
++ uint64_t key_hash = 0;
++ uint64_t value_hash = 0;
++
++ // Hash key based on its type
++ switch (key_field->cpp_type()) {
++ case FieldDescriptor::CPPTYPE_STRING: {
++ std::string key_str = key.GetStringValue();
++ key_hash = XXH64(key_str.data(), key_str.size(), 0);
++ break;
++ }
++ case FieldDescriptor::CPPTYPE_INT64: {
++ int64_t key_int = key.GetInt64Value();
++ key_hash = XXH64(&key_int, sizeof(key_int), 0);
++ break;
++ }
++ case FieldDescriptor::CPPTYPE_INT32: {
++ int32_t key_int = key.GetInt32Value();
++ key_hash = XXH64(&key_int, sizeof(key_int), 0);
++ break;
++ }
++ case FieldDescriptor::CPPTYPE_UINT64: {
++ uint64_t key_int = key.GetUInt64Value();
++ key_hash = XXH64(&key_int, sizeof(key_int), 0);
++ break;
++ }
++ case FieldDescriptor::CPPTYPE_UINT32: {
++ uint32_t key_int = key.GetUInt32Value();
++ key_hash = XXH64(&key_int, sizeof(key_int), 0);
++ break;
++ }
++ case FieldDescriptor::CPPTYPE_BOOL: {
++ bool key_bool = key.GetBoolValue();
++ key_hash = XXH64(&key_bool, sizeof(key_bool), 0);
++ break;
++ }
++ default:
++ // Should not reach here for valid map key types
++ fprintf(stderr, "Message::ComputeHashValue: Unexpected map key type: %d\n", key_field->cpp_type());
++ break;
++ }
++
++ // Hash value based on its type
++ if (is_value_message) {
++ // For message values, use GetCachedHashValue
++ const Message& value_msg = value.GetMessageValue();
++ value_hash = value_msg.GetCachedHashValue();
++ } else {
++ // For primitive values, hash directly
++ switch (value_field->cpp_type()) {
++ case FieldDescriptor::CPPTYPE_STRING: {
++ std::string value_str = value.GetStringValue();
++ value_hash = XXH64(value_str.data(), value_str.size(), 0);
++ break;
++ }
++ case FieldDescriptor::CPPTYPE_INT64: {
++ int64_t value_int = value.GetInt64Value();
++ value_hash = XXH64(&value_int, sizeof(value_int), 0);
++ break;
++ }
++ case FieldDescriptor::CPPTYPE_INT32: {
++ int32_t value_int = value.GetInt32Value();
++ value_hash = XXH64(&value_int, sizeof(value_int), 0);
++ break;
++ }
++ case FieldDescriptor::CPPTYPE_UINT64: {
++ uint64_t value_int = value.GetUInt64Value();
++ value_hash = XXH64(&value_int, sizeof(value_int), 0);
++ break;
++ }
++ case FieldDescriptor::CPPTYPE_UINT32: {
++ uint32_t value_int = value.GetUInt32Value();
++ value_hash = XXH64(&value_int, sizeof(value_int), 0);
++ break;
++ }
++ case FieldDescriptor::CPPTYPE_DOUBLE: {
++ double value_double = value.GetDoubleValue();
++ value_hash = XXH64(&value_double, sizeof(value_double), 0);
++ break;
++ }
++ case FieldDescriptor::CPPTYPE_FLOAT: {
++ float value_float = value.GetFloatValue();
++ value_hash = XXH64(&value_float, sizeof(value_float), 0);
++ break;
++ }
++ case FieldDescriptor::CPPTYPE_BOOL: {
++ bool value_bool = value.GetBoolValue();
++ value_hash = XXH64(&value_bool, sizeof(value_bool), 0);
++ break;
++ }
++ case FieldDescriptor::CPPTYPE_ENUM: {
++ int32_t value_enum = value.GetEnumValue();
++ value_hash = XXH64(&value_enum, sizeof(value_enum), 0);
++ break;
++ }
++ default:
++ // Should not reach here for valid map value types
++ fprintf(stderr, "Message::ComputeHashValue: Unexpected map value type: %d\n", value_field->cpp_type());
++ break;
++ }
++ }
++
++ map_entries.emplace_back(key_hash, value_hash);
++ }
++
++ // Sort map entries by key hash for consistent ordering
++ // MapField provides consistent iteration order, but we still sort for extra safety
++ std::stable_sort(map_entries.begin(), map_entries.end(),
++ [](const auto& a, const auto& b) {
++ if (a.first != b.first) {
++ return a.first < b.first; // Primary: key hash
++ }
++ return a.second < b.second; // Secondary: value hash
++ });
++
++ // Hash sorted map entries
++ for (const auto& entry : map_entries) {
++ hash = XXH64(&entry.first, sizeof(entry.first), hash);
++ hash = XXH64(&entry.second, sizeof(entry.second), hash);
++ }
++ } else {
++ // Handle regular repeated fields (non-map) using RepeatedFieldAccessor
++ for (int i = 0; i < size; ++i) {
++ // Use a simplified approach: directly use HashFieldValue with index
++ uint64_t hash_value = internal::HashFieldValue(reflection, *this, field, i);
++ hash = XXH64(&hash_value, sizeof(hash_value), hash);
++ }
++ }
++ } else {
++ // Handle singular fields
++ uint64_t field_value = internal::HashFieldValue(reflection, *this, field);
++ hash = XXH64(&field_value, sizeof(field_value), hash);
++ }
++ }
++
++ // Hash unknown fields if present
++ if (_internal_metadata_.have_unknown_fields()) {
++ const UnknownFieldSet& unknown_fields = reflection->GetUnknownFields(*this);
++ // Use field count and space used for unknown fields hash
++ uint32_t field_count = unknown_fields.field_count();
++ uint64_t space_used = unknown_fields.SpaceUsedLong();
++ hash = XXH64(&field_count, sizeof(field_count), hash);
++ hash = XXH64(&space_used, sizeof(space_used), hash);
++ }
++
++ return hash;
++}
++
++uint64_t Message::GetCachedHashValue() const {
++ if (!hash_cached_) {
++ cached_hash_value_ = ComputeHashValue();
++ hash_cached_ = true;
++ }
++ return cached_hash_value_;
++}
++
++bool Message::HasCachedHashValue() const {
++ return hash_cached_;
++}
++
++void Message::SetCachedHashValue(uint64_t hash_value) const {
++ cached_hash_value_ = hash_value;
++ hash_cached_ = true;
++}
++
+ namespace internal {
+ void* CreateSplitMessageGeneric(Arena* arena, const void* default_split,
+ size_t size, const void* message,
+diff --git a/src/google/protobuf/message.h b/src/google/protobuf/message.h
+index 6c5e24f9d..b9078785c 100644
+--- a/src/google/protobuf/message.h
++++ b/src/google/protobuf/message.h
+@@ -362,6 +362,22 @@ class PROTOBUF_EXPORT Message : public MessageLite {
+ uint8_t* _InternalSerialize(uint8_t* target,
+ io::EpsCopyOutputStream* stream) const override;
+
++ // Hash computation methods ----------------------------------------
++ // Optimized hash computation with caching support
++
++ // Compute hash value for this message using recursive hashing
++ // This avoids serialization and provides better performance
++ uint64_t ComputeHashValue() const;
++
++ // Get cached hash value if available, otherwise compute and cache it
++ uint64_t GetCachedHashValue() const;
++
++ // Set cached hash value
++ void SetCachedHashValue(uint64_t hash_value) const;
++
++ // Check if hash value is cached
++ bool HasCachedHashValue() const;
++
+ private:
+ // This is called only by the default implementation of ByteSize(), to
+ // update the cached size. If you override ByteSize(), you do not need
+@@ -418,6 +434,9 @@ class PROTOBUF_EXPORT Message : public MessageLite {
+ size_t MaybeComputeUnknownFieldsSize(size_t total_size,
+ internal::CachedSize* cached_size) const;
+
++ // Hash caching support
++ mutable uint64_t cached_hash_value_ = 0;
++ mutable bool hash_cached_ = false;
+
+ protected:
+ static uint64_t GetInvariantPerBuild(uint64_t salt);
diff --git a/bazel/python_dependencies.bzl b/bazel/python_dependencies.bzl
index 0033a53645475..ea50bf30ba386 100644
--- a/bazel/python_dependencies.bzl
+++ b/bazel/python_dependencies.bzl
@@ -1,7 +1,10 @@
load("@rules_python//python:pip.bzl", "pip_parse")
load("@python3_11//:defs.bzl", "interpreter")
+load("@envoy_toolshed//:packages.bzl", "load_packages")
def envoy_python_dependencies():
+ # TODO(phlax): rename base_pip3 -> pip3 and remove this
+ load_packages()
pip_parse(
name = "base_pip3",
python_interpreter_target = interpreter,
diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl
index a33c90baa229d..e1bb52062301d 100644
--- a/bazel/repositories.bzl
+++ b/bazel/repositories.bzl
@@ -1,4 +1,6 @@
load(":dev_binding.bzl", "envoy_dev_binding")
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository", "new_git_repository")
load("@envoy_api//bazel:envoy_http_archive.bzl", "envoy_http_archive")
load("@envoy_api//bazel:external_deps.bzl", "load_repository_locations")
load(":repository_locations.bzl", "PROTOC_VERSIONS", "REPOSITORY_LOCATIONS_SPEC")
@@ -6,6 +8,8 @@ load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_languag
PPC_SKIP_TARGETS = ["envoy.filters.http.lua"]
+DARWIN_SKIP_TARGETS = []
+
WINDOWS_SKIP_TARGETS = [
"envoy.extensions.http.cache.file_system_http_cache",
"envoy.filters.http.file_system_buffer",
@@ -108,13 +112,14 @@ envoy_entry_point(
name = "get_project_json",
pkg = "envoy.base.utils",
script = "envoy.project_data",
+ init_data = [":__init__.py"],
)
genrule(
name = "project",
outs = ["project.json"],
cmd = """
- $(location :get_project_json) . > $@
+ $(location :get_project_json) $$(dirname $(location @envoy//:VERSION.txt)) > $@
""",
tools = [
":get_project_json",
@@ -132,6 +137,7 @@ envoy_entry_point(
],
pkg = "envoy.base.utils",
script = "envoy.project",
+ init_data = [":__init__.py"],
)
envoy_entry_point(
@@ -142,6 +148,7 @@ envoy_entry_point(
],
pkg = "envoy.base.utils",
script = "envoy.project",
+ init_data = [":__init__.py"],
)
envoy_entry_point(
@@ -152,6 +159,7 @@ envoy_entry_point(
],
pkg = "envoy.base.utils",
script = "envoy.project",
+ init_data = [":__init__.py"],
)
envoy_entry_point(
@@ -162,6 +170,7 @@ envoy_entry_point(
],
pkg = "envoy.base.utils",
script = "envoy.project",
+ init_data = [":__init__.py"],
)
envoy_entry_point(
@@ -172,6 +181,7 @@ envoy_entry_point(
],
pkg = "envoy.base.utils",
script = "envoy.project",
+ init_data = [":__init__.py"],
)
''')
@@ -254,8 +264,9 @@ def envoy_dependencies(skip_targets = []):
# semi-standard in the Bazel community, intended to avoid both duplicate
# dependencies and name conflicts.
_com_github_axboe_liburing()
+ _com_github_bazel_buildtools()
_com_github_c_ares_c_ares()
- _com_github_circonus_labs_libcircllhist()
+ _com_github_openhistogram_libcircllhist()
_com_github_cyan4973_xxhash()
_com_github_datadog_dd_trace_cpp()
_com_github_mirror_tclap()
@@ -267,6 +278,7 @@ def envoy_dependencies(skip_targets = []):
_com_github_google_libprotobuf_mutator()
_com_github_google_libsxg()
_com_github_google_tcmalloc()
+ _com_github_ggerganov_llama()
_com_github_gperftools_gperftools()
_com_github_grpc_grpc()
_com_github_unicode_org_icu()
@@ -336,6 +348,7 @@ def envoy_dependencies(skip_targets = []):
_kafka_deps()
_org_llvm_llvm()
+ _org_llvm_llvm_15_0_7()
_com_github_wamr()
_com_github_wavm_wavm()
_com_github_wasmtime()
@@ -370,14 +383,14 @@ def _boringssl_fips():
build_file = "@envoy//bazel/external:boringssl_fips.BUILD",
)
-def _com_github_circonus_labs_libcircllhist():
+def _com_github_openhistogram_libcircllhist():
external_http_archive(
- name = "com_github_circonus_labs_libcircllhist",
+ name = "com_github_openhistogram_libcircllhist",
build_file = "@envoy//bazel/external:libcircllhist.BUILD",
)
native.bind(
name = "libcircllhist",
- actual = "@com_github_circonus_labs_libcircllhist//:libcircllhist",
+ actual = "@com_github_openhistogram_libcircllhist//:libcircllhist",
)
def _com_github_axboe_liburing():
@@ -390,6 +403,13 @@ def _com_github_axboe_liburing():
actual = "@envoy//bazel/foreign_cc:liburing",
)
+def _com_github_bazel_buildtools():
+ # TODO(phlax): Add binary download
+ # cf: https://github.com/bazelbuild/buildtools/issues/367
+ external_http_archive(
+ name = "com_github_bazelbuild_buildtools",
+ )
+
def _com_github_c_ares_c_ares():
external_http_archive(
name = "com_github_c_ares_c_ares",
@@ -693,6 +713,10 @@ def _com_github_tencent_rapidjson():
name = "com_github_tencent_rapidjson",
build_file = "@envoy//bazel/external:rapidjson.BUILD",
)
+ native.bind(
+ name = "rapidjson",
+ actual = "@com_github_tencent_rapidjson//:rapidjson",
+ )
def _com_github_nlohmann_json():
external_http_archive(
@@ -720,6 +744,10 @@ def _com_github_alibaba_hessian2_codec():
name = "hessian2_codec_codec_impl",
actual = "@com_github_alibaba_hessian2_codec//hessian2:codec_impl_lib",
)
+ native.bind(
+ name = "hessian2_codec_object_impl",
+ actual = "@com_github_alibaba_hessian2_codec//hessian2:object_lib",
+ )
def _com_github_ncopa_suexec():
external_http_archive(
@@ -874,7 +902,8 @@ def _com_google_protobuf():
external_http_archive(
"com_google_protobuf",
- patches = ["@envoy//bazel:protobuf.patch"],
+ patches = ["@envoy//bazel:protobuf.patch",
+ "@envoy//bazel:protobuf_hash_cache.patch"],
patch_args = ["-p1"],
)
@@ -989,6 +1018,11 @@ def _com_github_google_quiche():
external_http_archive(
name = "com_github_google_quiche",
patch_cmds = ["find quiche/ -type f -name \"*.bazel\" -delete"],
+ patches = [
+ "@envoy//bazel/external:quiche_sequencer_fix.patch",
+ "@envoy//bazel/external:quiche_stream_fix.patch",
+ ],
+ patch_args = ["-p1"],
build_file = "@envoy//bazel/external:quiche.BUILD",
)
native.bind(
@@ -1207,6 +1241,17 @@ def _com_github_google_tcmalloc():
actual = "@com_github_google_tcmalloc//tcmalloc:malloc_extension",
)
+def _com_github_ggerganov_llama():
+ external_http_archive(
+ name = "com_github_ggerganov_llama",
+ build_file_content = BUILD_ALL_CONTENT,
+ )
+
+ native.bind(
+ name = "llama",
+ actual = "@envoy//bazel/foreign_cc:llama",
+ )
+
def _com_github_gperftools_gperftools():
external_http_archive(
name = "com_github_gperftools_gperftools",
@@ -1229,6 +1274,16 @@ def _org_llvm_llvm():
actual = "@envoy//bazel/foreign_cc:llvm",
)
+def _org_llvm_llvm_15_0_7():
+ external_http_archive(
+ name = "org_llvm_llvm_15_0_7",
+ build_file_content = BUILD_ALL_CONTENT,
+ )
+ native.bind(
+ name = "llvm-15_0_7",
+ actual = "@envoy//bazel/foreign_cc:llvm_15_0_7",
+ )
+
def _com_github_wamr():
external_http_archive(
name = "com_github_wamr",
@@ -1320,7 +1375,7 @@ filegroup(
# This archive provides Kafka C/CPP client used by mesh filter to communicate with upstream
# Kafka clusters.
external_http_archive(
- name = "edenhill_librdkafka",
+ name = "confluentinc_librdkafka",
build_file_content = BUILD_ALL_CONTENT,
# (adam.kotwasinski) librdkafka bundles in cJSON, which is also bundled in by libvppinfra.
# For now, let's just drop this dependency from Kafka, as it's used only for monitoring.
diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl
index e4454e3d454bd..bfcbeb1eef4ae 100644
--- a/bazel/repository_locations.bzl
+++ b/bazel/repository_locations.bzl
@@ -68,6 +68,17 @@ REPOSITORY_LOCATIONS_SPEC = dict(
license = "Apache-2.0",
license_url = "https://github.com/bazelbuild/rules_apple/blob/{version}/LICENSE",
),
+ com_github_bazelbuild_buildtools = dict(
+ project_name = "Bazel build tools",
+ project_desc = "Developer tools for working with Google's bazel buildtool.",
+ project_url = "https://github.com/bazelbuild/buildtools",
+ version = "6.3.3",
+ sha256 = "42968f9134ba2c75c03bb271bd7bb062afb7da449f9b913c96e5be4ce890030a",
+ release_date = "2023-08-25",
+ strip_prefix = "buildtools-{version}",
+ urls = ["https://github.com/bazelbuild/buildtools/archive/v{version}.tar.gz"],
+ use_category = ["test_only"],
+ ),
rules_fuzzing = dict(
project_name = "Fuzzing Rules for Bazel",
project_desc = "Bazel rules for fuzz tests",
@@ -91,11 +102,11 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "envoy-build-tools",
project_desc = "Common build tools shared by the Envoy/UDPA ecosystem",
project_url = "https://github.com/envoyproxy/envoy-build-tools",
- version = "49a27300e7b480955d3a6000eea159ff52998b52",
- sha256 = "67fbba8f4329e16f693f9fabaa6e430eddb3f27b80186df884d5b801208be8d9",
+ version = "f727ec142156c8076384a35c0e2d51da3c1d7813",
+ sha256 = "72510592f34f3fd6269c5fdd2286465a05ce6ca438ac1faebfdb88ed309fe9da",
strip_prefix = "envoy-build-tools-{version}",
urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/{version}.tar.gz"],
- release_date = "2023-05-16",
+ release_date = "2023-10-16",
use_category = ["build"],
license = "Apache-2.0",
license_url = "https://github.com/envoyproxy/envoy-build-tools/blob/{version}/LICENSE",
@@ -217,19 +228,19 @@ REPOSITORY_LOCATIONS_SPEC = dict(
license = "c-ares",
license_url = "https://github.com/c-ares/c-ares/blob/cares-{underscore_version}/LICENSE.md",
),
- com_github_circonus_labs_libcircllhist = dict(
+ com_github_openhistogram_libcircllhist = dict(
project_name = "libcircllhist",
- project_desc = "An implementation of Circonus log-linear histograms",
- project_url = "https://github.com/circonus-labs/libcircllhist",
+ project_desc = "An implementation of OpenHistogram log-linear histograms",
+ project_url = "https://github.com/openhistogram/libcircllhist",
version = "39f9db724a81ba78f5d037f1cae79c5a07107c8e",
sha256 = "fd2492f6cc1f8734f8f57be8c2e7f2907e94ee2a4c02445ce59c4241fece144b",
strip_prefix = "libcircllhist-{version}",
- urls = ["https://github.com/circonus-labs/libcircllhist/archive/{version}.tar.gz"],
+ urls = ["https://github.com/openhistogram/libcircllhist/archive/{version}.tar.gz"],
use_category = ["controlplane", "observability_core", "dataplane_core"],
release_date = "2019-05-21",
cpe = "N/A",
license = "Apache-2.0",
- license_url = "https://github.com/circonus-labs/libcircllhist/blob/{version}/LICENSE",
+ license_url = "https://github.com/openhistogram/libcircllhist/blob/{version}/LICENSE",
),
com_github_cyan4973_xxhash = dict(
project_name = "xxHash",
@@ -347,6 +358,18 @@ REPOSITORY_LOCATIONS_SPEC = dict(
license = "Apache-2.0",
license_url = "https://github.com/google/tcmalloc/blob/{version}/LICENSE",
),
+ com_github_ggerganov_llama = dict(
+ project_name = "llama.cpp",
+ project_desc = "LLM inference in C/C++",
+ project_url = "https://github.com/ggerganov/llama.cpp",
+ version = "947538acb8617756a092042ff7e58db18dde05ec",
+ sha256 = "566ec06009584be8303d5d4b0070ccb0b531695fef3008019e1db97bb7c427c4",
+ strip_prefix = "llama.cpp-{version}",
+ urls = ["https://github.com/ggerganov/llama.cpp/archive/{version}.zip"],
+ use_category = ["dataplane_core"],
+ release_date = "2024-09-06",
+ cpe = "N/A",
+ ),
com_github_gperftools_gperftools = dict(
project_name = "gperftools",
project_desc = "tcmalloc and profiling libraries",
@@ -365,12 +388,12 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "gRPC",
project_desc = "gRPC C core library",
project_url = "https://grpc.io",
- version = "1.56.2",
- sha256 = "931f07db9d48cff6a6007c1033ba6d691fe655bea2765444bc1ad974dfc840aa",
+ version = "1.59.4",
+ sha256 = "6edc67c2ad200c5b618c421f6e8c1b734a4aa3e741975e683491da03390ebf63",
strip_prefix = "grpc-{version}",
urls = ["https://github.com/grpc/grpc/archive/v{version}.tar.gz"],
use_category = ["dataplane_core", "controlplane"],
- release_date = "2023-07-14",
+ release_date = "2024-02-05",
cpe = "cpe:2.3:a:grpc:grpc:*",
license = "Apache-2.0",
license_url = "https://github.com/grpc/grpc/blob/v{version}/LICENSE",
@@ -398,17 +421,17 @@ REPOSITORY_LOCATIONS_SPEC = dict(
com_github_intel_ipp_crypto_crypto_mb = dict(
project_name = "libipp-crypto",
project_desc = "Intel® Integrated Performance Primitives Cryptography",
- project_url = "https://github.com/intel/ipp-crypto",
+ project_url = "https://github.com/intel/cryptography-primitives",
version = "2021.6",
- sha256 = "632cc5ba54413eeab575682619c05d247e9b7f2fc58ea3e5f4a02bdcab3e6b78",
- strip_prefix = "ipp-crypto-ippcp_{version}",
- urls = ["https://github.com/intel/ipp-crypto/archive/ippcp_{version}.tar.gz"],
+ sha256 = "a52bf15208d493adb846994f2ce928bd02c74fd8ff3a2def2fca7b072d67e6bf",
+ strip_prefix = "cryptography-primitives-ippcp_{version}",
+ urls = ["https://github.com/intel/cryptography-primitives/archive/ippcp_{version}.tar.gz"],
release_date = "2022-08-09",
use_category = ["dataplane_ext"],
extensions = ["envoy.tls.key_providers.cryptomb"],
cpe = "cpe:2.3:a:intel:cryptography_for_intel_integrated_performance_primitives:*",
license = "Apache-2.0",
- license_url = "https://github.com/intel/ipp-crypto/blob/ippcp_{version}/LICENSE",
+ license_url = "https://github.com/intel/cryptography-primitives/blob/ippcp_{version}/LICENSE",
),
com_github_intel_qatlib = dict(
project_name = "qatlib",
@@ -445,12 +468,12 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "Nghttp2",
project_desc = "Implementation of HTTP/2 and its header compression algorithm HPACK in C",
project_url = "https://nghttp2.org",
- version = "1.55.1",
- sha256 = "e12fddb65ae3218b4edc083501519379928eba153e71a1673b185570f08beb96",
+ version = "1.59.0",
+ sha256 = "90fd27685120404544e96a60ed40398a3457102840c38e7215dc6dec8684470f",
strip_prefix = "nghttp2-{version}",
urls = ["https://github.com/nghttp2/nghttp2/releases/download/v{version}/nghttp2-{version}.tar.gz"],
use_category = ["controlplane", "dataplane_core"],
- release_date = "2023-07-14",
+ release_date = "2024-01-21",
cpe = "cpe:2.3:a:nghttp2:nghttp2:*",
license = "MIT",
license_url = "https://github.com/nghttp2/nghttp2/blob/v{version}/LICENSE",
@@ -495,7 +518,6 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "skywalking-data-collect-protocol",
project_desc = "Data Collect Protocols of Apache SkyWalking",
project_url = "https://github.com/apache/skywalking-data-collect-protocol",
- name = "skywalking_data_collect_protocol",
sha256 = "49bd689b9c1c0ea12064bd35581689cef7835e5ac15d335dc425fbfc2029aa90",
urls = ["https://github.com/apache/skywalking-data-collect-protocol/archive/v{version}.tar.gz"],
strip_prefix = "skywalking-data-collect-protocol-{version}",
@@ -592,8 +614,6 @@ REPOSITORY_LOCATIONS_SPEC = dict(
],
release_date = "2021-12-28",
cpe = "N/A",
- license = "MIT",
- license_url = "https://github.com/adrian-thurston/colm/blob/{version}/COPYING",
),
net_colm_open_source_ragel = dict(
project_name = "Ragel",
@@ -639,16 +659,16 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "Boost",
project_desc = "Boost C++ source libraries",
project_url = "https://www.boost.org/",
- version = "1.78.0",
- sha256 = "94ced8b72956591c4775ae2207a9763d3600b30d9d7446562c552f0a14a63be7",
+ version = "1.84.0",
+ sha256 = "a5800f405508f5df8114558ca9855d2640a2de8f0445f051fa1c7c3383045724",
strip_prefix = "boost_{underscore_version}",
- urls = ["https://boostorg.jfrog.io/artifactory/main/release/{version}/source/boost_{underscore_version}.tar.gz"],
+ urls = ["https://archives.boost.io/release/{version}/source/boost_{underscore_version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = [
"envoy.matching.input_matchers.hyperscan",
"envoy.regex_engines.hyperscan",
],
- release_date = "2021-12-08",
+ release_date = "2023-12-13",
cpe = "cpe:2.3:a:boost:boost:*",
license = "Boost",
license_url = "https://github.com/boostorg/boost/blob/boost-{version}/LICENSE_1_0.txt",
@@ -738,16 +758,14 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "hessian2-codec",
project_desc = "hessian2-codec is a C++ library for hessian2 codec",
project_url = "https://github.com/alibaba/hessian2-codec.git",
- version = "e9bb36e206f2c5054b50d11f88bb1b95c77766f8",
- sha256 = "82743dcbf2bd624a68eb2c0d54963ea87446eba4eb08c117744f0669ddc70786",
+ version = "dd8e05487a27b367b90ce81f4e6e6f62d693a212",
+ sha256 = "93260c54406e11b7be078a7ea120f7ab0df475c733e68d010fde400c5c8c8162",
strip_prefix = "hessian2-codec-{version}",
urls = ["https://github.com/alibaba/hessian2-codec/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = ["envoy.filters.network.dubbo_proxy"],
- release_date = "2022-10-10",
+ release_date = "2021-04-05",
cpe = "N/A",
- license = "Apache-2.0",
- license_url = "https://github.com/alibaba/hessian2-codec/blob/{version}/LICENSE",
),
com_github_tencent_rapidjson = dict(
project_name = "RapidJSON",
@@ -978,22 +996,38 @@ REPOSITORY_LOCATIONS_SPEC = dict(
release_date = "2021-07-09",
use_category = ["dataplane_ext"],
extensions = [
- "envoy.wasm.runtime.wamr",
"envoy.wasm.runtime.wavm",
],
cpe = "cpe:2.3:a:llvm:*:*",
license = "Apache-2.0",
license_url = "https://github.com/llvm/llvm-project/blob/llvmorg-{version}/llvm/LICENSE.TXT",
),
+ org_llvm_llvm_15_0_7 = dict(
+ project_name = "LLVM_15_0_7",
+ project_desc = "LLVM Compiler Infrastructure",
+ project_url = "https://llvm.org",
+ version = "15.0.7",
+ sha256 = "8b5fcb24b4128cf04df1b0b9410ce8b1a729cb3c544e6da885d234280dedeac6",
+ strip_prefix = "llvm-project-{version}.src",
+ urls = ["https://github.com/llvm/llvm-project/releases/download/llvmorg-{version}/llvm-project-{version}.src.tar.xz"],
+ release_date = "2023-01-12",
+ use_category = ["dataplane_ext"],
+ extensions = [
+ "envoy.wasm.runtime.wamr",
+ ],
+ cpe = "cpe:2.3:a:llvm:*:*",
+ license = "Apache-2.0",
+ license_url = "https://github.com/llvm/llvm-project/blob/llvmorg-{version}/llvm/LICENSE.TXT",
+ ),
com_github_wamr = dict(
project_name = "Webassembly Micro Runtime",
project_desc = "A standalone runtime with a small footprint for WebAssembly",
project_url = "https://github.com/bytecodealliance/wasm-micro-runtime",
- version = "WAMR-1.2.2",
- sha256 = "d328fc1e19c54cfdb4248b861de54b62977b9b85c0a40eaaeb9cd9b628c0c788",
+ version = "WAMR-2.0.0",
+ sha256 = "7663a34b61d6d0ff90778d9be37efde92e2f28ec9baad89f7b18555f0db435ab",
strip_prefix = "wasm-micro-runtime-{version}",
urls = ["https://github.com/bytecodealliance/wasm-micro-runtime/archive/{version}.tar.gz"],
- release_date = "2023-05-16",
+ release_date = "2024-04-23",
use_category = ["dataplane_ext"],
extensions = ["envoy.wasm.runtime.wamr"],
cpe = "N/A",
@@ -1065,8 +1099,8 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "curl",
project_desc = "Library for transferring data with URLs",
project_url = "https://curl.haxx.se",
- version = "8.0.1",
- sha256 = "5fd29000a4089934f121eff456101f0a5d09e2a3e89da1d714adf06c4be887cb",
+ version = "8.4.0",
+ sha256 = "816e41809c043ff285e8c0f06a75a1fa250211bbfb2dc0a037eeef39f1a9e427",
strip_prefix = "curl-{version}",
urls = ["https://github.com/curl/curl/releases/download/curl-{underscore_version}/curl-{version}.tar.gz"],
use_category = ["dataplane_ext", "observability_ext"],
@@ -1076,7 +1110,7 @@ REPOSITORY_LOCATIONS_SPEC = dict(
"envoy.grpc_credentials.aws_iam",
"envoy.tracers.opencensus",
],
- release_date = "2023-03-20",
+ release_date = "2023-10-11",
cpe = "cpe:2.3:a:haxx:libcurl:*",
license = "curl",
license_url = "https://github.com/curl/curl/blob/curl-{underscore_version}/COPYING",
@@ -1131,7 +1165,7 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_url = "https://quiche.googlesource.com/googleurl",
# Static snapshot of https://quiche.googlesource.com/googleurl/+archive/dd4080fec0b443296c0ed0036e1e776df8813aa7.tar.gz
version = "dd4080fec0b443296c0ed0036e1e776df8813aa7",
- sha256 = "59f14d4fb373083b9dc8d389f16bbb817b5f936d1d436aa67e16eb6936028a51",
+ sha256 = "fc694942e8a7491dcc1dde1bddf48a31370a1f46fef862bc17acf07c34dc6325",
urls = ["https://storage.googleapis.com/quiche-envoy-integration/{version}.tar.gz"],
use_category = ["controlplane", "dataplane_core"],
extensions = [],
@@ -1250,41 +1284,41 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "Kafka (source)",
project_desc = "Open-source distributed event streaming platform",
project_url = "https://kafka.apache.org",
- version = "3.4.0",
- sha256 = "9eeaf83ffddb85d253a2441a29ba6be0a563cd3d6eb9ddf0eeb8d6e2f49c0ef7",
+ version = "3.5.1",
+ sha256 = "9715589a02148fb21bc80d79f29763dbd371457bedcbbeab3db4f5c7fdd2d29c",
strip_prefix = "kafka-{version}/clients/src/main/resources/common/message",
urls = ["https://github.com/apache/kafka/archive/{version}.zip"],
use_category = ["dataplane_ext"],
extensions = ["envoy.filters.network.kafka_broker", "envoy.filters.network.kafka_mesh"],
- release_date = "2023-01-31",
+ release_date = "2023-07-14",
cpe = "cpe:2.3:a:apache:kafka:*",
license = "Apache-2.0",
license_url = "https://github.com/apache/kafka/blob/{version}/LICENSE",
),
- edenhill_librdkafka = dict(
+ confluentinc_librdkafka = dict(
project_name = "Kafka (C/C++ client)",
project_desc = "C/C++ client for Apache Kafka (open-source distributed event streaming platform)",
- project_url = "https://github.com/edenhill/librdkafka",
- version = "2.2.0",
- sha256 = "af9a820cbecbc64115629471df7c7cecd40403b6c34bfdbb9223152677a47226",
+ project_url = "https://github.com/confluentinc/librdkafka",
+ version = "2.3.0",
+ sha256 = "2d49c35c77eeb3d42fa61c43757fcbb6a206daa560247154e60642bcdcc14d12",
strip_prefix = "librdkafka-{version}",
- urls = ["https://github.com/edenhill/librdkafka/archive/v{version}.tar.gz"],
+ urls = ["https://github.com/confluentinc/librdkafka/archive/v{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = ["envoy.filters.network.kafka_mesh"],
- release_date = "2023-07-12",
+ release_date = "2023-10-25",
cpe = "N/A",
license = "librdkafka",
- license_url = "https://github.com/edenhill/librdkafka/blob/v{version}/LICENSE",
+ license_url = "https://github.com/confluentinc/librdkafka/blob/v{version}/LICENSE",
),
kafka_server_binary = dict(
project_name = "Kafka (server binary)",
project_desc = "Open-source distributed event streaming platform",
project_url = "https://kafka.apache.org",
- version = "3.4.0",
- sha256 = "67025feb03eb963a8852d4adc5b2810744f493a672c5992728955e38bed43da8",
+ version = "3.5.1",
+ sha256 = "f7b74d544023f2c0ec52a179de59975cb64e34ea03650d829328b407b560e4da",
strip_prefix = "kafka_2.13-{version}",
urls = ["https://archive.apache.org/dist/kafka/{version}/kafka_2.13-{version}.tgz"],
- release_date = "2023-01-31",
+ release_date = "2023-07-21",
use_category = ["test_only"],
),
kafka_python_client = dict(
@@ -1303,11 +1337,11 @@ REPOSITORY_LOCATIONS_SPEC = dict(
proxy_wasm_cpp_sdk = dict(
project_name = "WebAssembly for Proxies (C++ SDK)",
project_desc = "WebAssembly for Proxies (C++ SDK)",
- project_url = "https://github.com/proxy-wasm/proxy-wasm-cpp-sdk",
- version = "e30535b7c0cd3126e6401bc3769063a74bbb37be",
- sha256 = "94e474ebea782225821224734ed5992fa749301e12e06b6520b8b4d4e1c05ffc",
+ project_url = "https://github.com/higress-group/proxy-wasm-cpp-sdk",
+ version = "47bb9cd141a151415ad6a597ed60c78bea2ce0b7",
+ sha256 = "cab5efa54c0cec8eb17c0a2f6ce72b9cd84ebba2b332e919187f963a5d7cfaa1",
strip_prefix = "proxy-wasm-cpp-sdk-{version}",
- urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-sdk/archive/{version}.tar.gz"],
+ urls = ["https://github.com/higress-group/proxy-wasm-cpp-sdk/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = [
"envoy.access_loggers.wasm",
@@ -1321,19 +1355,17 @@ REPOSITORY_LOCATIONS_SPEC = dict(
"envoy.wasm.runtime.wavm",
"envoy.wasm.runtime.wasmtime",
],
- release_date = "2022-03-15",
+ release_date = "2021-06-24",
cpe = "N/A",
- license = "Apache-2.0",
- license_url = "https://github.com/proxy-wasm/proxy-wasm-cpp-sdk/blob/{version}/LICENSE",
),
proxy_wasm_cpp_host = dict(
project_name = "WebAssembly for Proxies (C++ host implementation)",
project_desc = "WebAssembly for Proxies (C++ host implementation)",
- project_url = "https://github.com/proxy-wasm/proxy-wasm-cpp-host",
- version = "5d76116c449d6892b298b7ae79a84ef1cf5752bf",
- sha256 = "a5825a1a5bbd5b0178c6189b227d5cf4370ac713a883b41f6a54edd768a03cb7",
+ project_url = "https://github.com/higress-group/proxy-wasm-cpp-host",
+ version = "04ef279d83a39d507d882bb35e3199abcecfe5af",
+ sha256 = "2573ecab4f3c12c10a61f2e34a69a3c4d6f20525c9ae07bcaac72b0a9921df78",
strip_prefix = "proxy-wasm-cpp-host-{version}",
- urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-host/archive/{version}.tar.gz"],
+ urls = ["https://github.com/higress-group/proxy-wasm-cpp-host/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = [
"envoy.access_loggers.wasm",
@@ -1347,10 +1379,8 @@ REPOSITORY_LOCATIONS_SPEC = dict(
"envoy.wasm.runtime.wavm",
"envoy.wasm.runtime.wasmtime",
],
- release_date = "2023-06-01",
+ release_date = "2024-05-18",
cpe = "N/A",
- license = "Apache-2.0",
- license_url = "https://github.com/proxy-wasm/proxy-wasm-cpp-host/blob/{version}/LICENSE",
),
proxy_wasm_rust_sdk = dict(
project_name = "WebAssembly for Proxies (Rust SDK)",
@@ -1384,12 +1414,13 @@ REPOSITORY_LOCATIONS_SPEC = dict(
project_name = "Bazel rust rules",
project_desc = "Bazel rust rules (used by Wasm)",
project_url = "https://github.com/bazelbuild/rules_rust",
- version = "0.25.1",
- sha256 = "4a9cb4fda6ccd5b5ec393b2e944822a62e050c7c06f1ea41607f14c4fdec57a2",
- urls = ["https://github.com/bazelbuild/rules_rust/releases/download/{version}/rules_rust-v{version}.tar.gz"],
+ version = "0.27.0",
+ strip_prefix = "rules_rust-{version}",
+ sha256 = "d9a3981f4ef18ced850341bc05c7e2a506006a47a0207b6f7191f271cb893233",
+ urls = ["https://github.com/bazelbuild/rules_rust/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = ["envoy.wasm.runtime.wasmtime"],
- release_date = "2023-07-05",
+ release_date = "2023-08-31",
cpe = "N/A",
license = "Apache-2.0",
license_url = "https://github.com/bazelbuild/rules_rust/blob/{version}/LICENSE.txt",
diff --git a/bazel/rules_java.patch b/bazel/rules_java.patch
new file mode 100644
index 0000000000000..91bd69eb69fa7
--- /dev/null
+++ b/bazel/rules_java.patch
@@ -0,0 +1,293 @@
+diff --git a/java/repositories.bzl b/java/repositories.bzl
+index 7e5b939..e8d10b3 100644
+--- a/java/repositories.bzl
++++ b/java/repositories.bzl
+@@ -88,7 +88,7 @@ def remote_jdk8_repos(name = ""):
+ maybe(
+ remote_java_repository,
+ name = "remote_jdk8_linux_aarch64",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:linux",
+ "@platforms//cpu:aarch64",
+ ],
+@@ -103,7 +103,7 @@ def remote_jdk8_repos(name = ""):
+ maybe(
+ remote_java_repository,
+ name = "remote_jdk8_linux_s390x",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:linux",
+ "@platforms//cpu:s390x",
+ ],
+@@ -117,7 +117,7 @@ def remote_jdk8_repos(name = ""):
+ maybe(
+ remote_java_repository,
+ name = "remote_jdk8_linux",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:linux",
+ "@platforms//cpu:x86_64",
+ ],
+@@ -132,7 +132,7 @@ def remote_jdk8_repos(name = ""):
+ maybe(
+ remote_java_repository,
+ name = "remote_jdk8_macos_aarch64",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:macos",
+ "@platforms//cpu:aarch64",
+ ],
+@@ -146,7 +146,7 @@ def remote_jdk8_repos(name = ""):
+ maybe(
+ remote_java_repository,
+ name = "remote_jdk8_macos",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:macos",
+ "@platforms//cpu:x86_64",
+ ],
+@@ -161,7 +161,7 @@ def remote_jdk8_repos(name = ""):
+ maybe(
+ remote_java_repository,
+ name = "remote_jdk8_windows",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:windows",
+ "@platforms//cpu:x86_64",
+ ],
+@@ -189,7 +189,7 @@ def remote_jdk11_repos():
+ maybe(
+ remote_java_repository,
+ name = "remotejdk11_linux",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:linux",
+ "@platforms//cpu:x86_64",
+ ],
+@@ -205,7 +205,7 @@ def remote_jdk11_repos():
+ maybe(
+ remote_java_repository,
+ name = "remotejdk11_linux_aarch64",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:linux",
+ "@platforms//cpu:aarch64",
+ ],
+@@ -221,7 +221,7 @@ def remote_jdk11_repos():
+ maybe(
+ remote_java_repository,
+ name = "remotejdk11_linux_ppc64le",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:linux",
+ "@platforms//cpu:ppc",
+ ],
+@@ -237,7 +237,7 @@ def remote_jdk11_repos():
+ maybe(
+ remote_java_repository,
+ name = "remotejdk11_linux_s390x",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:linux",
+ "@platforms//cpu:s390x",
+ ],
+@@ -253,7 +253,7 @@ def remote_jdk11_repos():
+ maybe(
+ remote_java_repository,
+ name = "remotejdk11_macos",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:macos",
+ "@platforms//cpu:x86_64",
+ ],
+@@ -269,7 +269,7 @@ def remote_jdk11_repos():
+ maybe(
+ remote_java_repository,
+ name = "remotejdk11_macos_aarch64",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:macos",
+ "@platforms//cpu:aarch64",
+ ],
+@@ -285,7 +285,7 @@ def remote_jdk11_repos():
+ maybe(
+ remote_java_repository,
+ name = "remotejdk11_win",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:windows",
+ "@platforms//cpu:x86_64",
+ ],
+@@ -301,7 +301,7 @@ def remote_jdk11_repos():
+ maybe(
+ remote_java_repository,
+ name = "remotejdk11_win_arm64",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:windows",
+ "@platforms//cpu:arm64",
+ ],
+@@ -318,7 +318,7 @@ def remote_jdk17_repos():
+ maybe(
+ remote_java_repository,
+ name = "remotejdk17_linux",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:linux",
+ "@platforms//cpu:x86_64",
+ ],
+@@ -334,7 +334,7 @@ def remote_jdk17_repos():
+ maybe(
+ remote_java_repository,
+ name = "remotejdk17_linux_aarch64",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:linux",
+ "@platforms//cpu:aarch64",
+ ],
+@@ -350,7 +350,7 @@ def remote_jdk17_repos():
+ maybe(
+ remote_java_repository,
+ name = "remotejdk17_linux_s390x",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:linux",
+ "@platforms//cpu:s390x",
+ ],
+@@ -366,7 +366,7 @@ def remote_jdk17_repos():
+ maybe(
+ remote_java_repository,
+ name = "remotejdk17_linux_ppc64le",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:linux",
+ "@platforms//cpu:ppc",
+ ],
+@@ -382,7 +382,7 @@ def remote_jdk17_repos():
+ maybe(
+ remote_java_repository,
+ name = "remotejdk17_macos",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:macos",
+ "@platforms//cpu:x86_64",
+ ],
+@@ -398,7 +398,7 @@ def remote_jdk17_repos():
+ maybe(
+ remote_java_repository,
+ name = "remotejdk17_macos_aarch64",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:macos",
+ "@platforms//cpu:aarch64",
+ ],
+@@ -413,7 +413,7 @@ def remote_jdk17_repos():
+ maybe(
+ remote_java_repository,
+ name = "remotejdk17_win",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:windows",
+ "@platforms//cpu:x86_64",
+ ],
+@@ -428,7 +428,7 @@ def remote_jdk17_repos():
+ maybe(
+ remote_java_repository,
+ name = "remotejdk17_win_arm64",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:windows",
+ "@platforms//cpu:arm64",
+ ],
+@@ -446,7 +446,7 @@ def remote_jdk20_repos():
+ maybe(
+ remote_java_repository,
+ name = "remotejdk20_linux",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:linux",
+ "@platforms//cpu:x86_64",
+ ],
+@@ -462,7 +462,7 @@ def remote_jdk20_repos():
+ maybe(
+ remote_java_repository,
+ name = "remotejdk20_linux_aarch64",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:linux",
+ "@platforms//cpu:aarch64",
+ ],
+@@ -478,7 +478,7 @@ def remote_jdk20_repos():
+ maybe(
+ remote_java_repository,
+ name = "remotejdk20_macos",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:macos",
+ "@platforms//cpu:x86_64",
+ ],
+@@ -494,7 +494,7 @@ def remote_jdk20_repos():
+ maybe(
+ remote_java_repository,
+ name = "remotejdk20_macos_aarch64",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:macos",
+ "@platforms//cpu:aarch64",
+ ],
+@@ -509,7 +509,7 @@ def remote_jdk20_repos():
+ maybe(
+ remote_java_repository,
+ name = "remotejdk20_win",
+- target_compatible_with = [
++ exec_compatible_with = [
+ "@platforms//os:windows",
+ "@platforms//cpu:x86_64",
+ ],
+diff --git a/toolchains/remote_java_repository.bzl b/toolchains/remote_java_repository.bzl
+index 86916ec..5521fcf 100644
+--- a/toolchains/remote_java_repository.bzl
++++ b/toolchains/remote_java_repository.bzl
+@@ -32,20 +32,20 @@ _toolchain_config = repository_rule(
+ },
+ )
+
+-def remote_java_repository(name, version, target_compatible_with = None, prefix = "remotejdk", **kwargs):
++def remote_java_repository(name, version, exec_compatible_with = None, prefix = "remotejdk", **kwargs):
+ """Imports a JDK from a http archive and creates runtime toolchain definitions for it.
+
+ Register the toolchains defined by this macro via `register_toolchains("@//:all")`, where
+ `` is the value of the `name` parameter.
+
+- Toolchain resolution is determined with target_compatible_with
++ Toolchain resolution is determined with exec_compatible_with
+ parameter and constrained with --java_runtime_version flag either having value
+ of "version" or "{prefix}_{version}" parameters.
+
+ Args:
+ name: A unique name for this rule.
+ version: Version of the JDK imported.
+- target_compatible_with: Target platform constraints (CPU and OS) for this JDK.
++ exec_compatible_with: Execution platform constraints (CPU and OS) for this JDK.
+ prefix: Optional alternative prefix for configuration flag value used to determine this JDK.
+ **kwargs: Refer to http_archive documentation
+ """
+@@ -77,7 +77,7 @@ alias(
+ )
+ toolchain(
+ name = "toolchain",
+- target_compatible_with = {target_compatible_with},
++ exec_compatible_with = {exec_compatible_with},
+ target_settings = [":version_or_prefix_version_setting"],
+ toolchain_type = "@bazel_tools//tools/jdk:runtime_toolchain_type",
+ toolchain = "{toolchain}",
+@@ -85,7 +85,7 @@ toolchain(
+ """.format(
+ prefix = prefix,
+ version = version,
+- target_compatible_with = target_compatible_with,
++ exec_compatible_with = exec_compatible_with,
+ toolchain = "@{repo}//:jdk".format(repo = name),
+ ),
+ )
diff --git a/bazel/toolchains/BUILD b/bazel/toolchains/BUILD
deleted file mode 100644
index e6a6833650289..0000000000000
--- a/bazel/toolchains/BUILD
+++ /dev/null
@@ -1,17 +0,0 @@
-licenses(["notice"]) # Apache 2
-
-platform(
- name = "rbe_ubuntu_clang_platform",
- parents = ["@rbe_ubuntu_clang//config:platform"],
- remote_execution_properties = """
- {PARENT_REMOTE_EXECUTION_PROPERTIES}
- properties: {
- name: "dockerAddCapabilities"
- value: "SYS_PTRACE,NET_RAW,NET_ADMIN"
- }
- properties: {
- name: "dockerNetwork"
- value: "standard"
- }
- """,
-)
diff --git a/bazel/version_update_post.sh b/bazel/version_update_post.sh
new file mode 100644
index 0000000000000..ac877c1861f30
--- /dev/null
+++ b/bazel/version_update_post.sh
@@ -0,0 +1,78 @@
+#!/bin/bash -e
+
+set -o pipefail
+
+
+EXISTING_DATE="$("${JQ}" -r ".${DEP}.release_date" "${DEP_DATA}")"
+DATE_SEARCH="release_date = \"${EXISTING_DATE}\","
+DEP_CHECK="${DEP_CHECK:-tools/dependency/check}"
+
+find_date_line () {
+ local match match_ln date_match_ln
+ # This needs to find the correct date to replace
+ match="$(\
+ grep -n "${DEP_SEARCH}" "${VERSION_FILE}" \
+ | cut -d: -f-2)"
+ match_ln="$(\
+ echo "${match}" \
+ | cut -d: -f1)"
+ match_ln="$((match_ln + 1))"
+ date_match_ln="$(\
+ tail -n "+${match_ln}" "${VERSION_FILE}" \
+ | grep -n "${DATE_SEARCH}" \
+ | head -n1 \
+ | cut -d: -f1)"
+ date_match_ln="$((match_ln + date_match_ln - 1))"
+ printf '%s' "$date_match_ln"
+}
+
+update_date () {
+ local match_ln search replace
+ match_ln="$1"
+ search="$2"
+ replace="$3"
+ echo "Updating date(${match_ln}): ${search} -> ${replace}"
+ sed -i "${match_ln}s/${search}/${replace}/" "$VERSION_FILE"
+}
+
+get_new_date () {
+ # create a repository_locations with just the dep and with updated version
+ tmpfile="$(mktemp)"
+ # shellcheck disable=SC2016
+ "$JQ" --arg new_version "$VERSION" \
+ --arg existing_version "$EXISTING_VERSION" \
+ --arg dep "$DEP" \
+ 'if has($dep) then .[$dep].version = $new_version | .[$dep].urls |= map(gsub($existing_version; $new_version)) else . end' \
+ "$DEP_DATA" > "$tmpfile"
+ output="$(\
+ "$DEP_CHECK" \
+ --repository_locations="$tmpfile" \
+ --path "${BUILD_WORKSPACE_DIRECTORY}" \
+ -c release_dates 2>&1)"
+ echo "$output" \
+ | grep -E "^Mismatch" \
+ | grep "$DEP" \
+ | cut -d= -f2 \
+ | xargs || {
+ cat "$tmpfile" >&2
+ echo "$output" >&2
+ rm "$tmpfile"
+ exit 1
+ }
+ rm "$tmpfile"
+}
+
+post_version_update () {
+ local date_ln new_date
+ if [[ "$EXISTING_VERSION" == "$VERSION" ]]; then
+ echo "Nothing to update" >&2
+ exit 0
+ fi
+ date_ln="$(find_date_line)"
+ new_date="$(get_new_date)"
+ if [[ -z "$new_date" ]]; then
+ echo "Unable to retrieve date" >&2
+ exit 1
+ fi
+ update_date "$date_ln" "$EXISTING_DATE" "$new_date"
+}
diff --git a/changelogs/1.24.11.yaml b/changelogs/1.24.11.yaml
new file mode 100644
index 0000000000000..c5c5e55329bb6
--- /dev/null
+++ b/changelogs/1.24.11.yaml
@@ -0,0 +1,19 @@
+date: October 10, 2023
+
+behavior_changes:
+- area: http
+ change: |
+ Close HTTP/2 and HTTP/3 connections that prematurely reset streams. The runtime key
+ ``overload.premature_reset_min_stream_lifetime_seconds`` determines the interval where received stream
+ reset is considered premature (with 1 second default). The runtime key ``overload.premature_reset_total_stream_count``,
+ with the default value of 500, determines the number of requests received from a connection before the check for premature
+ resets is applied. The connection is disconnected if more than 50% of resets are premature.
+ Setting the runtime key ``envoy.restart_features.send_goaway_for_premature_rst_streams`` to ``false`` completely disables
+ this check.
+- area: http
+ change: |
+ Add runtime flag ``http.max_requests_per_io_cycle`` for setting the limit on the number of HTTP requests processed
+ from a single connection in a single I/O cycle. Requests over this limit are processed in subsequent I/O cycles. This
+ mitigates CPU starvation by connections that simultaneously send high number of requests by allowing requests from other
+ connections to make progress. This runtime value can be set to 1 in the presence of abusive HTTP/2 or HTTP/3 connections.
+ By default this limit is disabled.
diff --git a/changelogs/1.24.12.yaml b/changelogs/1.24.12.yaml
new file mode 100644
index 0000000000000..4beae10fad69d
--- /dev/null
+++ b/changelogs/1.24.12.yaml
@@ -0,0 +1,7 @@
+date: October 16, 2023
+
+bug_fixes:
+- area: http
+ change: |
+ Fixed a bug where processing of deferred streams with the value of ``http.max_requests_per_io_cycle`` more than 1,
+ can cause a crash.
diff --git a/changelogs/1.25.10.yaml b/changelogs/1.25.10.yaml
new file mode 100644
index 0000000000000..087ad323021df
--- /dev/null
+++ b/changelogs/1.25.10.yaml
@@ -0,0 +1,27 @@
+date: October 10, 2023
+
+behavior_changes:
+- area: http
+  change: |
+    Close HTTP/2 and HTTP/3 connections that prematurely reset streams. The runtime key
+    ``overload.premature_reset_min_stream_lifetime_seconds`` determines the interval where received stream
+    reset is considered premature (with 1 second default). The runtime key ``overload.premature_reset_total_stream_count``,
+    with the default value of 500, determines the number of requests received from a connection before the check for premature
+    resets is applied. The connection is disconnected if more than 50% of resets are premature.
+    Setting the runtime key ``envoy.restart_features.send_goaway_for_premature_rst_streams`` to ``false`` completely disables
+    this check.
+- area: http
+  change: |
+    Add runtime flag ``http.max_requests_per_io_cycle`` for setting the limit on the number of HTTP requests processed
+    from a single connection in a single I/O cycle. Requests over this limit are processed in subsequent I/O cycles. This
+    mitigates CPU starvation by connections that simultaneously send high number of requests by allowing requests from other
+    connections to make progress. This runtime value can be set to 1 in the presence of abusive HTTP/2 or HTTP/3 connections.
+    By default this limit is disabled.
+
+bug_fixes:
+- area: tls
+ change: |
+ fixed a bug where handshake may fail when both private key provider and cert validation are set.
+- area: docker/publishing
+ change: |
+ Update base images to resolve various glibc vulnerabilities.
diff --git a/changelogs/1.25.11.yaml b/changelogs/1.25.11.yaml
new file mode 100644
index 0000000000000..4beae10fad69d
--- /dev/null
+++ b/changelogs/1.25.11.yaml
@@ -0,0 +1,7 @@
+date: October 16, 2023
+
+bug_fixes:
+- area: http
+ change: |
+ Fixed a bug where processing of deferred streams with the value of ``http.max_requests_per_io_cycle`` more than 1,
+ can cause a crash.
diff --git a/changelogs/1.26.5.yaml b/changelogs/1.26.5.yaml
new file mode 100644
index 0000000000000..5f248d665be67
--- /dev/null
+++ b/changelogs/1.26.5.yaml
@@ -0,0 +1,24 @@
+date: October 10, 2023
+
+behavior_changes:
+- area: http
+ change: |
+ Close HTTP/2 and HTTP/3 connections that prematurely reset streams. The runtime key
+ ``overload.premature_reset_min_stream_lifetime_seconds`` determines the interval where received stream
+ reset is considered premature (with 1 second default). The runtime key ``overload.premature_reset_total_stream_count``,
+ with the default value of 500, determines the number of requests received from a connection before the check for premature
+ resets is applied. The connection is disconnected if more than 50% of resets are premature.
+ Setting the runtime key ``envoy.restart_features.send_goaway_for_premature_rst_streams`` to ``false`` completely disables
+ this check.
+- area: http
+ change: |
+ Add runtime flag ``http.max_requests_per_io_cycle`` for setting the limit on the number of HTTP requests processed
+ from a single connection in a single I/O cycle. Requests over this limit are processed in subsequent I/O cycles. This
+ mitigates CPU starvation by connections that simultaneously send high number of requests by allowing requests from other
+ connections to make progress. This runtime value can be set to 1 in the presence of abusive HTTP/2 or HTTP/3 connections.
+ By default this limit is disabled.
+
+bug_fixes:
+- area: tls
+ change: |
+ fixed a bug where handshake may fail when both private key provider and cert validation are set.
diff --git a/changelogs/1.26.6.yaml b/changelogs/1.26.6.yaml
new file mode 100644
index 0000000000000..a5caeaa72fa50
--- /dev/null
+++ b/changelogs/1.26.6.yaml
@@ -0,0 +1,10 @@
+date: October 17, 2023
+
+bug_fixes:
+- area: tracing
+ change: |
+ Fixed a bug in the Datadog tracer where Datadog's "operation name" field would contain what should be in the "resource name" field.
+- area: http
+ change: |
+ Fixed a bug where processing of deferred streams with the value of ``http.max_requests_per_io_cycle`` more than 1,
+ can cause a crash.
diff --git a/changelogs/1.26.7.yaml b/changelogs/1.26.7.yaml
new file mode 100644
index 0000000000000..48e27387880c7
--- /dev/null
+++ b/changelogs/1.26.7.yaml
@@ -0,0 +1,28 @@
+date: February 9, 2024
+
+bug_fixes:
+- area: buffer
+ change: |
+ Fixed a bug (https://github.com/envoyproxy/envoy/issues/28760) that the internal listener causes an undefined
+ behavior due to the unintended release of the buffer memory.
+- area: http
+ change: |
+ Fixed recursion when HTTP connection is disconnected due to a high number of premature resets.
+- area: proxy protocol
+ change: |
+ fixed a crash when Envoy is configured for PROXY protocol on both a listener and cluster, and the listener receives
+ a PROXY protocol header with address type LOCAL (typically used for health checks).
+- area: proxy_protocol
+ change: |
+ Fix crash due to uncaught exception when the operating system does not support an address type (such as IPv6) that is
+ received in a proxy protocol header. Connections will instead be dropped/reset.
+- area: proxy_protocol
+ change: |
+ Fixed a bug where TLVs with non utf8 characters were inserted as protobuf values into filter metadata circumventing
+ ext_authz checks when ``failure_mode_allow`` is set to ``true``.
+- area: http
+ change: |
+ Fixed crash when HTTP request idle and per try timeouts occurs within backoff interval.
+- area: url matching
+ change: |
+ Fixed excessive CPU utilization when using regex URL template matcher.
diff --git a/changelogs/1.26.8.yaml b/changelogs/1.26.8.yaml
new file mode 100644
index 0000000000000..a59f0acb0ad0c
--- /dev/null
+++ b/changelogs/1.26.8.yaml
@@ -0,0 +1,13 @@
+date: April 4, 2024
+
+bug_fixes:
+- area: http2
+ change: |
+ Update nghttp2 to resolve CVE-2024-30255 (https://github.com/envoyproxy/envoy/security/advisories/GHSA-j654-3ccm-vfmm).
+
+new_features:
+- area: google_grpc
+ change: |
+ Added an off-by-default runtime flag
+ ``envoy.reloadable_features.google_grpc_disable_tls_13`` to disable TLSv1.3
+ usage by gRPC SDK for ``google_grpc`` services.
diff --git a/changelogs/1.27.0.yaml b/changelogs/1.27.0.yaml
new file mode 100644
index 0000000000000..b6e64f270e821
--- /dev/null
+++ b/changelogs/1.27.0.yaml
@@ -0,0 +1,575 @@
+date: July 26, 2023
+
+behavior_changes:
+- area: build
+ change: |
+ Moved the subset, ring_hash, and maglev LB code into extensions. If you use these load balancers and override
+ :repo:`bazel/extensions_build_config.bzl` you will need to include them explicitly.
+- area: build
+ change: |
+ Moved xDS code extensions. If you use the xDS and override :repo:`bazel/extensions_build_config.bzl` you will
+ need to include the new config_subscriptions explicitly.
+- area: http
+ change: |
+ When ``append_x_forwarded_host`` is enabled for a given route action it is now only appended iff it is different from the last
+ value in the list. This resolves issues where a retry caused the same value to be appended multiple times. This
+ behavioral change can be temporarily reverted by setting runtime guard ``envoy_reloadable_features_append_xfh_idempotent`` to ``false``.
+- area: ext_proc
+ change: |
+ The proto field :ref:`value ` type is string.
+    This makes it unable to support encoding non-utf8 characters in the ``HeaderValue`` message.
+ To support sending header value with non-utf8 characters, a new proto field is added in the HeaderValue message:
+ :ref:`raw_value `.
+ The header values are now encoded in this ``raw_value`` field when Envoy ext_proc filter sending
+ and receiving messages from the ext_proc server. This behavioral change can be temporarily
+ reverted by setting the runtime guard ``envoy_reloadable_features_send_header_raw_value`` to ``false``.
+- area: ext_proc
+ change: |
+ Apply header mutation rules from the ext_proc config to the ``ImmediateResponse``. This behavior change can be temporarily
+ reverted by setting the runtime guard ``envoy_reloadable_features_immediate_response_use_filter_mutation_rule`` to false.
+- area: active health check
+ change: |
+ Preserve the active-health check status of a host after a cluster/assignment update. This is now preserved in cases
+ where the assignment updates a host's locality. This behavioral change can be temporarily reverted by setting the
+ runtime flag ``envoy.reloadable_features.keep_endpoint_active_hc_status_on_locality_update`` to ``false``.
+- area: quic
+ change: |
+ Add a default false runtime flag ``envoy.reloadable_features.quic_reject_all`` to disable QUIC listener if needed.
+- area: stats tls
+ change: |
+ Fixed metric tag extraction so that TLS parameters are properly extracted from the stats, both for listeners and clusters.
+ This changes the Prometheus names from
+ ``envoy_listener_ssl_ciphers_ECDHE_RSA_AES128_GCM_SHA256{envoy_listener_address="0.0.0.0_10000"}`` to
+ ``envoy_listener_ssl_ciphers{envoy_listener_address="0.0.0.0_10000", envoy_ssl_cipher="ECDHE_RSA_AES128_GCM_SHA256"}``, and
+ similar for ``envoy_listener_ssl_versions_TLSv1_2``, ``envoy_cluster_ssl_versions_TLSv1_2``, ``envoy_listener_ssl_curves_P_256``,
+ ``envoy_cluster_ssl_curves_P_256``, ``envoy_listener_ssl_sigalgs_rsa_pss_rsae_sha256``.
+
+minor_behavior_changes:
+- area: connection pool
+ change: |
+ Increase granularity mapping connection pool failures to specific stream failure reasons to make it more transparent why
+ the stream is reset when a connection pool's connection fails.
+- area: custom response
+ change: |
+ The filter now traverses matchers from most specific to least specific per filter config till a match is found for the response.
+- area: http1
+ change: |
+ Allowing mixed case schemes in absolute urls (e.g. HtTp://www.google.com). Mixed case schemes will be normalized to
+ the lower cased equivalents before being forwarded upstream. This behavior can be reverted by setting runtime flag
+ ``envoy.reloadable_features.allow_absolute_url_with_mixed_scheme`` to false.
+- area: http1
+ change: |
+ The HTTP1 server-side codec no longer considers encoding 1xx headers as
+ starting the response. This allows the codec to raise protocol errors,
+ sending detailed local replies instead of just closing the connection. This
+ behavior can be reverted by setting runtime flag
+ ``envoy.reloadable_features.http1_allow_codec_error_response_after_1xx_headers``
+ to ``false``.
+- area: dns
+ change: |
+ Changing the DNS cache to use ``host:port`` as the cache key rather than ``host``. This allows a
+ downstream DFP filter to serve both secure and insecure clusters. This behavioral change
+ can be reverted by setting runtime flag ``envoy.reloadable_features.dfp_mixed_scheme`` to ``false``.
+- area: uhv
+ change: |
+ Preserve case of %-encoded triplets in the default header validator. This behavior can be reverted by setting runtime flag
+ ``envoy.reloadable_features.uhv_preserve_url_encoded_case`` to ``false``, in which case %-encoded triplets are normalized
+    to uppercase characters. This setting is only applicable when the Universal Header Validator is enabled and has no effect otherwise.
+- area: uhv
+ change: |
+ Allow malformed URL encoded triplets in the default header validator. This behavior can be reverted by setting runtime flag
+ ``envoy.reloadable_features.uhv_allow_malformed_url_encoding`` to ``false``, in which case requests with malformed URL encoded triplets
+    in path are rejected. This setting is only applicable when the Universal Header Validator is enabled and has no effect otherwise.
+- area: ext_proc
+ change: |
+ When :ref:`clear_route_cache ` is set, ext_proc will check
+    for header mutations before clearing the route cache. Failures due to this check will be counted under the
+ ``clear_route_cache_ignored`` stat.
+- area: aws
+ change: |
+ Added support for fetching credentials from the AWS credentials file, which only happens if credentials cannot be fetched
+ from environment variables. This behavioral change can be reverted by setting runtime guard
+ ``envoy.reloadable_features.enable_aws_credentials_file`` to ``false``.
+- area: http cookies
+ change: |
+ Changed internal format of http cookie to protobuf and added expiry timestamp. Processing expired cookie
+ results in selection of a new upstream host and sending a new cookie to the client. Previous format of
+ the cookie is still accepted, but is planned to be obsoleted in the future.
+ This behavior change can be reverted by setting
+ ``envoy.reloadable_features.stateful_session_encode_ttl_in_cookie`` to ``false``.
+- area: overload manager
+ change: |
+ Changed behavior of the overload manager to error on unknown overload
+ manager actions. Prior it would silently fail. This change can be reverted
+ temporarily by setting the runtime guard
+ ``envoy.reloadable_features.overload_manager_error_unknown_action`` to
+ false.
+- area: router
+ change: |
+ Added check for existing metadata before setting metadata due to ``auto_sni``, ``auto_san_validation``, or
+ ``override_auto_sni_header`` to prevent triggering ``ENVOY_BUG`` when an earlier filter has set the metadata.
+- area: resource_monitors
+ change: |
+ Changed behavior of the fixed heap monitor to count unused mapped pages as
+ free memory. This change can be reverted temporarily by setting the runtime guard
+ ``envoy.reloadable_features.count_unused_mapped_pages_as_free`` to ``false``.
+- area: ext_proc
+ change: |
+ Filter metadata containing ext proc stats has been moved from ``ext-proc-logging-info`` to a namespace corresponding
+ to the name of the ext_proc filter.
+- area: stats
+ change: |
+    Added new type of gauge with type hidden. These stats are hidden from admin/stats-sinks but can be shown with a
+ query-parameter of ``/stats?hidden=include`` or ``/stats?hidden=showonly``.
+- area: ext_authz
+ change: |
+ Forward :ref:`typed_filter_metadata ` selected by
+ ``typed_metadata_context_namespaces`` and :ref:`filter_metadata `
+ selected by
+ :ref:`metadata_context_namespaces `
+    from connection metadata to external auth service. This is in addition to the current behavior of forwarding request metadata.
+ In the event of both connection and request metadata containing the requested metadata the request value will be provided.
+- area: eds
+ change: |
+    Added the ability to specify multiple addresses for a host in an EDS cluster. Connections to the host with more than one
+ address will be established using the Happy Eyeballs algorithm.
+- area: upstream
+ change: |
+ Changed behavior of the unpausing connect with 2xx status codes. This change can be reverted temporarily by
+ setting the runtime guard ``envoy.reloadable_features.upstream_allow_connect_with_2xx`` to ``false``.
+- area: http
+ change: |
+ Round trip time will not be refreshed for every request by default. And if this is necessary, it can be
+ enabled by setting runtime guard ``envoy.reloadable_features.refresh_rtt_after_request`` to ``true``.
+- area: http
+ change: |
+    Envoy will now lower case scheme values by default. This behavioral change can be temporarily reverted
+ by setting runtime guard ``envoy.reloadable_features.lowercase_scheme`` to ``false``.
+
+bug_fixes:
+- area: oauth2
+ change: |
+ The Max-Age attribute of Set-Cookie HTTP response header was being assigned a value representing Seconds Since
+    the Epoch, causing cookies to expire in ~53 years. This was fixed and now it is being assigned a value representing
+ the number of seconds until the cookie expires.
+ This behavioral change can be temporarily reverted by setting runtime guard
+ ``envoy.reloadable_features.oauth_use_standard_max_age_value`` to ``false``.
+- area: tls
+ change: |
+ Fix build FIPS compliance when using both FIPS mode and Wasm extensions (``--define boringssl=fips`` and ``--define wasm=v8``).
+- area: http
+ change: |
+    Switched Envoy internal scheme checks from case sensitive to case insensitive. This behavioral change can be temporarily
+ reverted by setting runtime guard ``envoy.reloadable_features.handle_uppercase_scheme`` to ``false``.
+
+ Fix `CVE-2023-35944 `_.
+
+- area: ext_authz
+ change: |
+ Fix a bug where the ext_authz filter will ignore the request body when the
+ :ref:`pack_as_bytes ` is set to ``true`` and
+ HTTP authorization service is configured.
+- area: ext_authz
+ change: |
+ Fix a bug where the ext_authz filter will remove non UTF-8 characters from the body of a request when configured
+ to use :ref:`http_service `, if configured
+ to send the body.
+- area: router
+ change: |
+ Fixed the bug that updating :ref:`scope_key_builder
+ `
+ of SRDS config doesn't work and multiple HCM share the same ``scope_key_builder``.
+- area: http
+ change: |
+ The :ref:`is_optional
+ `
+ field of HTTP filter can only be used for configuration loading of
+ :ref:`HTTP filter `
+ and will be ignored for loading of route or virtual host level filter config. This behavioral change
+ can be temporarily reverted by setting runtime guard
+ ``envoy.reloadable_features.ignore_optional_option_from_hcm_for_route_config`` to ``false``.
+ You can also use
+ :ref:`route/virtual host optional flag `
+ as a replacement of the feature.
+- area: logging
+ change: |
+ Do not display GRPC_STATUS_NUMBER for non gRPC requests.
+ This behavioral change can be temporarily reverted by setting runtime guard
+ ``envoy.reloadable_features.validate_grpc_header_before_log_grpc_status`` to ``false``.
+- area: boringssl
+ change: |
+ Fixed the crash that occurs when contrib is compiled with ``boringssl=fips`` defined.
+- area: oauth2
+ change: |
+ The ``httpOnly`` attribute for ``Set-Cookie`` for tokens in HTTP response header was missing,
+ causing tokens to be accessible from the JavaScript making the apps vulnerable.
+ This was fixed now by marking the cookie as ``httpOnly``.
+ This behavioral change can be temporarily reverted by setting runtime guard
+ ``envoy.reloadable_features.oauth_make_token_cookie_httponly`` to ``false``.
+
+ Fix `CVE-2023-35941 `_.
+
+- area: opentelemetry/grpc/access log
+ change: |
+ Fixed a bug in the open telemetry access logger. This logger now uses the
+ server scope for stats instead of the listener's global scope. This fixes a
+ use-after-free that can occur if the listener is drained but the cached
+ gRPC access logger uses the listener's global scope for stats.
+
+ Fix `CVE-2023-35942 `_.
+
+- area: dependency
+ change: |
+ Update Wasmtime and related deps -> 9.0.3 to resolve
+ `CVE-2023-30624 `_.
+- area: dependency
+ change: |
+    Update C-ares -> 1.19.1 to resolve:
+
+ - `CVE-2023-31130 `_.
+ - `CVE-2023-31147 `_.
+ - `CVE-2023-31124 `_.
+ - `CVE-2023-32067 `_.
+- area: tcp_proxy
+ change: |
+ Fixed assert crash when multiple ``readDisable`` are called for TCP tunneling
+ scenarios, by allowing multiple calls. This will also cause stats that indicate
+ disable or enable of downstream read to be flushed only once per actual disabling
+ or enabling.
+- area: redis_proxy
+ change: |
+ Fixes a bug where route properties such as ``key_formatter``,
+ ``prefix`` and ``remove_prefix`` do not take effect when configured for :ref:`catch_all_route
+ `.
+- area: upstream
+ change: |
+    Fixes a bug where the ``healthStatus()`` method of host returns incorrect health status
+ when the host status is updated by the EDS.
+- area: upstream
+ change: |
+    Fixes a bug where the ``healthStatus()`` method of host returns unmatched health status
+ with the ``coarseHealth()`` method.
+- area: original_dst
+ change: |
+ Fixes an issue with the ``ORIGINAL_DST`` cluster cleanup timer lifetime, which
+ can occur if the cluster is removed while the timer is armed.
+- area: maglev loadbalancer
+ change: |
+ Fixes maglev stability problem. Previously, maglev returns slightly different backend assignment from the same backends and keys.
+- area: redis
+ change: |
+ Fixes a bug where redis transactions do not work properly when redis traffic is mirrored.
+- area: http2
+ change: |
+ Fix memory leak in nghttp2 when scheduled requests are cancelled due to the ``GOAWAY`` frame being received from the
+ upstream service.
+- area: cors
+ change: |
+ Fix a use-after-free bug that occurs in the CORS filter if the ``origin`` header is removed between
+ request header decoding and response header encoding.
+
+ Fix `CVE-2023-35943 `_.
+
+- area: oauth2
+ change: |
+ Fixed a cookie validator bug that meant the HMAC calculation could be the same for different payloads.
+
+ This prevents malicious clients from constructing credentials with permanent validity in some specific scenarios.
+- area: postgres
+ change: |
+ Enable parsing when using upstream SSL.
+
+removed_config_or_runtime:
+- area: http
+ change: |
+ Removed runtime key ``envoy.reloadable_features.closer_shadow_behavior`` and legacy code paths.
+- area: http
+ change: |
+ Removed runtime key ``envoy.reloadable_features.allow_upstream_filters`` and legacy code paths.
+- area: quic
+ change: |
+ Removed runtime key ``envoy.reloadable_features.quic_defer_send_in_response_to_packet`` and legacy code paths.
+- area: upstream
+ change: |
+ Removed runtime key ``envoy.reloadable_features.fix_hash_key`` and legacy code paths.
+- area: logging
+ change: |
+ Removed runtime key ``envoy.reloadable_features.correct_remote_address`` and legacy code paths.
+- area: http
+ change: |
+ Removed runtime key ``envoy.reloadable_features.http_response_half_close`` and legacy code paths.
+- area: udp
+ change: |
+ Removed runtime key ``envoy.reloadable_features.udp_proxy_connect`` and legacy code paths.
+- area: header_formatters
+ change: |
+ Removed runtime key ``envoy.reloadable_features.unified_header_formatter`` and legacy code paths.
+- area: tls
+ change: |
+ Remove runtime key ``envoy.reloadable_features.tls_async_cert_validation`` and legacy code paths.
+- area: config
+ change: |
+ Removed runtime key ``envoy.reloadable_features.delta_xds_subscription_state_tracking_fix`` and legacy code paths.
+- area: http
+ change: |
+ Removed runtime key ``envoy.reloadable_features.http_strip_fragment_from_path_unsafe_if_disabled`` and legacy code paths.
+- area: grpc_stats
+ change: |
+ Removed runtime key ``envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default`` and legacy code paths.
+
+new_features:
+- area: golang
+ change: |
+ Added new :ref:`l4 golang network filter `.
+- area: access_log
+ change: |
+ Added ``%ACCESS_LOG_TYPE%`` substitution string, to help distinguishing between access log records and when they are being
+ recorded. Please refer to the access log configuration documentation for more information.
+- area: access_log
+ change: |
+ Added :ref:`CEL ` access log formatter to print CEL expression.
+- area: access_log
+ change: |
+ (QUIC only) Added support for %BYTES_RETRANSMITTED% and %PACKETS_RETRANSMITTED%.
+- area: access_log
+ change: |
+    Added :ref:`DisableBuiltinLabels
+ `
+ to disable envoy builtin resource labels.
+- area: dynamic_forward_proxy
+ change: |
+ Added :ref:`sub_clusters_config
+ ` to enable
+ independent sub cluster for each host:port, with STRICT_DNS cluster type.
+- area: http
+ change: |
+ Added runtime feature ``envoy.reloadable_features.max_request_headers_size_kb`` to override the default value of
+ :ref:`max request headers size
+ `.
+- area: http
+ change: |
+ Added support for CONNECT-UDP (RFC 9298). Can be disabled by setting runtime feature
+ ``envoy.reloadable_features.enable_connect_udp_support`` to false.
+- area: listeners
+ change: |
+ Added :ref:`max_connections_to_accept_per_socket_event
+ `
+ that sets the maximum number of new connections to be accepted per socket
+ event on a listener. If there are more connections to be accepted beyond
+ the maximum, the remaining connections would be processed in later
+ dispatcher loop iterations. Added listener histogram
+ ``connections_accepted_per_socket_event`` to allow users to empirically
+ determine an appropriate configuration for their deployment.
+- area: load shed point
+ change: |
+ Added load shed point ``envoy.load_shed_points.http_connection_manager_decode_headers`` that rejects new http streams
+ by sending a local reply.
+- area: load shed point
+ change: |
+ Added load shed point ``envoy.load_shed_points.http1_server_abort_dispatch`` that rejects HTTP1 server processing of requests.
+- area: load shed point
+ change: |
+ Added load shed point ``envoy.load_shed_points.http2_server_go_away_on_dispatch`` that sends
+ ``GOAWAY`` for HTTP2 server processing of requests. When a ``GOAWAY`` frame is submitted by
+ this the counter ``http2.goaway_sent`` will be incremented.
+- area: matchers
+ change: |
+ Added :ref:`RuntimeFraction ` input
+ matcher. It allows matching hash of the input on a runtime key.
+- area: stat_sinks
+ change: |
+ Added ``envoy.stat_sinks.open_telemetry`` stats_sink, that supports flushing metrics by the OTLP protocol,
+ for supported Open Telemetry collectors.
+- area: redis_proxy
+ change: |
+ Added new configuration field :ref:`key_formatter
+ ` to format redis key.
+ The field supports using %KEY% as a formatter command for substituting the redis key as part of the substitution formatter expression.
+- area: stats
+ change: |
+ Added config :ref:`enable_deferred_creation_stats
+ `.
+ When set to ``true``, enables deferred instantiation on supported stats structures.
+- area: ratelimit
+ change: |
+ Added new configuration field :ref:`domain
+ ` to allow for setting rate limit domains on a
+ per-route basis.
+- area: tls_inspector
+ change: |
+ Added histogram ``bytes_processed`` which records the number of bytes of
+ the tls_inspector processed while analyzing for tls usage. In cases where
+ the connection uses tls this records the tls client hello size. In cases
+ where the connection doesn't use tls this records the amount of bytes the
+ tls_inspector processed until it realized the connection was not using tls.
+- area: tls_inspector
+ change: |
+ Added new configuration field :ref:`initial_read_buffer_size
+ `
+ to allow users to tune the buffer size requested by the filter. If
+ configured, and the filter needs additional bytes, the filter will double
+ the number of bytes requested up to the default 64KiB maximum.
+- area: access_log
+ change: |
+ Added access log filter :ref:`log_type_filter `
+ to filter access log records based on the type of the record.
+- area: ext_proc
+ change: |
+ Added new configuration field
+ :ref:`disable_clear_route_cache `
+    to prevent the ext_proc filter from clearing the route cache. Failures to clear from setting this field will be counted under the
+ ``clear_route_cache_disabled`` stat.
+- area: ext_proc
+ change: |
+ Added new configuration field
+ :ref:`allow_mode_override `
+ If set to true, the filter config
+ :ref:`processing_mode `
+ can be overridden by the
+ :ref:`mode_override `
+ in the response message from the external processing server.
+ If not set, the ``mode_override`` API in the response message will be ignored.
+- area: ext_proc
+ change: |
+ :ref:`forward_rules `
+ to only allow headers matching the rules to be forwarded to the external processing server.
+- area: redis_proxy
+ change: |
+ Added new field :ref:`connection_rate_limit
+ `
+ to limit reconnection rate to redis server to avoid reconnection storm.
+- area: match_delegate
+ change: |
+ Added :ref:`per route configuration
+ ` to the
+ :ref:`ExtensionWithMatcher
+ ` filter.
+ This allows the associated matcher to be defined on a per-route basis.
+- area: match_delegate
+ change: |
+ If no matcher is set the :ref:`ExtensionWithMatcher
+ ` filter is now set to skip rather than erroring out.
+- area: access_log
+ change: |
+ Added additional HCM access log option :ref:`flush_log_on_tunnel_successfully_established
+ `.
+ Enabling this option will write a log to all access loggers when HTTP tunnels (e.g. Websocket and ``CONNECT``)
+ are successfully established.
+- area: admin
+ change: |
+ Adds a new admin stats html bucket-mode ``detailed`` to generate all recorded buckets and summary percentiles.
+- area: http
+ change: |
+ Add support to the route/virtual host level
+ :ref:`is_optional ` field.
+ A route/virtual host level per filter config can be marked as optional, which means that if
+ the filter fails to load, the configuration will not be rejected.
+- area: upstream
+ change: |
+ Added :ref:`cluster provided extension
+ `
+ to support the :ref:`load balancer policy `.
+- area: fault
+ change: |
+ Added new field ``envoy.extensions.filters.http.fault.v3.HTTPFault.filter_metadata`` to aid in logging.
+ Metadata will be stored in StreamInfo dynamic metadata under a namespace corresponding to the name of the fault filter.
+- area: load_balancing
+ change: |
+ Added new option
+ :ref:`weighted_priority_health `
+ to compute the health of a :ref:`priority level ` by using
+ :ref:`load balancing weight `
+ instead of the count of healthy hosts.
+- area: application_logs
+ change: |
+ Added bootstrap option
+ :ref:`application_log_format `
+ to enable setting application log format as JSON structure.
+- area: application_logs
+ change: |
+ Added bootstrap option
+ :ref:`application_log_format `
+ to enable setting application log text format from config.
+- area: ext_proc
+ change: |
+ Added new field :ref:`filter_metadata `
+ and :ref:`CEL input matcher `.
+- area: tls
+ change: |
+ Added support for hot-reloading CRL file when the file changes on disk.
+ This works with dynamic secrets when
+ :ref:`CertificateValidationContext `
+ is delivered via SDS.
+- area: http
+ change: |
+ Added support for configuring additional :ref:`cookie attributes `.
+- area: http
+ change: |
+ Added support for the route/virtual host level
+ :ref:`disabled ` field.
+ A route/virtual host level per filter config can be marked as disabled, which means that
+ the filter will be disabled in a specific route/virtual host.
+- area: health_check
+ change: |
+ Added host related information :ref:`metadata ` and
+ :ref:`locality ` to
+ the :ref:`health check event ` definition.
+- area: zookeeper
+ change: |
+ Added the ``addWatch`` opcode support to the ZooKeeper proxy filter.
+- area: config
+ change: |
+ Added a statistic :ref:`warming_state ` to indicate the current warming state of a cluster.
+- area: access_log
+ change: |
+ Added bytes snapshotting for upstream and downstream logging that will be reset after every periodic log. Downstream
+ periodic loggers should read ``BytesMeter::bytesAtLastDownstreamPeriodicLog()``, and upstream periodic loggers should read
+ ``BytesMeter::bytesAtLastUpstreamPeriodicLog()``.
+- area: lds
+ change: |
+ Pause SRDS when LDS is updated.
+- area: http
+ change: |
+ Added :ref:`outbound_control_frames_active ` and :ref:`outbound_frames_active `
+ statistic.
+- area: original_dst
+ change: |
+ Filter state is pulled from request context first (if available), then falls back to connection context. Added ability to pick host
+ from dynamic metadata using :ref:`metadata_key `.
+ Same behavior - looks in request context first (if available), falls back to connection context.
+- area: tls
+ change: |
+ Added support to configure the new config option
+ :ref:`enforce_rsa_key_usage `.
+ This can be used to override its configuration in BoringSSL. It is currently default to false but expected to be changed
+ to true by default in a future release. ``ssl.was_key_usage_invalid`` is added to :ref:`listener metrics `
+ and will be incremented for certificate configurations that would fail if this option were set to true.
+- area: http
+ change: |
+ Added ``OVERWRITE_IF_EXISTS`` header manipulation keyword to overwrite a header only when it exists before manipulation.
+- area: tls
+ change: |
+ Added FIPS compliant build for arm64.
+
+deprecated:
+- area: access_log
+ change: |
+ Deprecated (1.25.0) :ref:`intermediate_log_entry `
+ in favor of :ref:`access_log_type `.
+- area: health_check
+ change: |
+ Deprecated the :ref:`HealthCheck event_log_path ` in favor of
+ :ref:`HealthCheck event_logger extension `.
+- area: stats
+ change: |
+ Added :ref:`enable_deferred_creation_stats
+ `.
+ support for ``ClusterTrafficStats``.
+- area: access_log
+ change: |
+ Added ``%DOWNSTREAM_LOCAL_DNS_SAN%``, ``%DOWNSTREAM_PEER_DNS_SAN%``, ``%DOWNSTREAM_LOCAL_IP_SAN%``
+ and ``%DOWNSTREAM_PEER_IP_SAN%`` substitution formatters.
diff --git a/changelogs/1.27.1.yaml b/changelogs/1.27.1.yaml
new file mode 100644
index 0000000000000..a6ce592912136
--- /dev/null
+++ b/changelogs/1.27.1.yaml
@@ -0,0 +1,30 @@
+date: October 11, 2023
+
+behavior_changes:
+- area: http
+ change: |
+ Close HTTP/2 and HTTP/3 connections that prematurely reset streams. The runtime key
+ ``overload.premature_reset_min_stream_lifetime_seconds`` determines the interval where received stream
+ reset is considered premature (with 1 second default). The runtime key ``overload.premature_reset_total_stream_count``,
+ with the default value of 500, determines the number of requests received from a connection before the check for premature
+ resets is applied. The connection is disconnected if more than 50% of resets are premature.
+ Setting the runtime key ``envoy.restart_features.send_goaway_for_premature_rst_streams`` to ``false`` completely disables
+ this check.
+- area: http
+ change: |
+ Add runtime flag ``http.max_requests_per_io_cycle`` for setting the limit on the number of HTTP requests processed
+ from a single connection in a single I/O cycle. Requests over this limit are processed in subsequent I/O cycles. This
+ mitigates CPU starvation by connections that simultaneously send high number of requests by allowing requests from other
+ connections to make progress. This runtime value can be set to 1 in the presence of abusive HTTP/2 or HTTP/3 connections.
+ By default this limit is disabled.
+
+bug_fixes:
+- area: connection limit
+ change: |
+ Fixed a use-after-free bug in the connection limit filter.
+- area: tls
+ change: |
+ Fixed a bug where handshake may fail when both private key provider and cert validation are set.
+- area: docker/publishing
+ change: |
+ Update base images to resolve various glibc vulnerabilities.
diff --git a/changelogs/1.27.2.yaml b/changelogs/1.27.2.yaml
new file mode 100644
index 0000000000000..91d3633c01549
--- /dev/null
+++ b/changelogs/1.27.2.yaml
@@ -0,0 +1,10 @@
+date: October 16, 2023
+
+bug_fixes:
+- area: tracing
+ change: |
+ Fixed a bug in the Datadog tracer where Datadog's "operation name" field would contain what should be in the "resource name" field.
+- area: http
+ change: |
+ Fixed a bug where processing of deferred streams with the value of ``http.max_requests_per_io_cycle`` more than 1,
+ can cause a crash.
diff --git a/changelogs/1.27.3.yaml b/changelogs/1.27.3.yaml
new file mode 100644
index 0000000000000..a67d0b6cabb28
--- /dev/null
+++ b/changelogs/1.27.3.yaml
@@ -0,0 +1,52 @@
+date: February 9, 2024
+
+minor_behavior_changes:
+- area: access_log
+ change: |
+ When emitting grpc logs, only downstream filter state was used. Now, both downstream and upstream filter states will be tried
+ to find the keys configured in filter_state_objects_to_log.
+
+bug_fixes:
+- area: buffer
+ change: |
+ Fixed a bug (https://github.com/envoyproxy/envoy/issues/28760) that the internal listener causes an undefined
+ behavior due to the unintended release of the buffer memory.
+- area: http
+ change: |
+ Fixed recursion when HTTP connection is disconnected due to a high number of premature resets.
+- area: grpc
+ change: |
+ Fixed a bug in gRPC async client cache which intermittently causes CPU spikes due to busy loop in timer expiration.
+- area: tracing
+ change: |
+ Fixed a bug where Datadog spans tagged as errors would not have the appropriate error property set.
+- area: tracing
+ change: |
+ Fixed a bug where child spans produced by the Datadog tracer would have an incorrect operation name.
+- area: tracing
+ change: |
+ Fixed a bug that caused the Datadog tracing extension to drop traces that
+ should be kept on account of an extracted sampling decision.
+- area: proxy protocol
+ change: |
+ Fixed a crash when Envoy is configured for PROXY protocol on both a listener and cluster, and the listener receives
+ a PROXY protocol header with address type LOCAL (typically used for health checks).
+- area: proxy_protocol
+ change: |
+ Fix crash due to uncaught exception when the operating system does not support an address type (such as IPv6) that is
+ received in a proxy protocol header. Connections will instead be dropped/reset.
+- area: proxy_protocol
+ change: |
+ Fixed a bug where TLVs with non utf8 characters were inserted as protobuf values into filter metadata circumventing
+ ext_authz checks when ``failure_mode_allow`` is set to ``true``.
+- area: tls
+ change: |
+ Fix crash due to uncaught exception when the operating system does not support an address type (such as IPv6) that is
+ received in an mTLS client cert IP SAN. These SANs will be ignored. This applies only when using formatter
+ ``%DOWNSTREAM_PEER_IP_SAN%``.
+- area: http
+ change: |
+ Fixed crash when HTTP request idle and per try timeouts occurs within backoff interval.
+- area: url matching
+ change: |
+ Fixed excessive CPU utilization when using regex URL template matcher.
diff --git a/changelogs/1.27.4.yaml b/changelogs/1.27.4.yaml
new file mode 100644
index 0000000000000..73d73f7b7a331
--- /dev/null
+++ b/changelogs/1.27.4.yaml
@@ -0,0 +1,20 @@
+date: April 4, 2024
+
+behavior_changes:
+- area: http2
+ change: |
+ Discard the ``Host`` header if the ``:authority`` header was received to bring Envoy into compliance with
+ https://www.rfc-editor.org/rfc/rfc9113#section-8.3.1 This behavioral change can be reverted by setting runtime flag
+ ``envoy.reloadable_features.http2_discard_host_header`` to false.
+
+bug_fixes:
+- area: http2
+ change: |
+ Update nghttp2 to resolve CVE-2024-30255 (https://github.com/envoyproxy/envoy/security/advisories/GHSA-j654-3ccm-vfmm).
+
+new_features:
+- area: google_grpc
+ change: |
+ Added an off-by-default runtime flag
+ ``envoy.reloadable_features.google_grpc_disable_tls_13`` to disable TLSv1.3
+ usage by gRPC SDK for ``google_grpc`` services.
diff --git a/changelogs/1.27.5.yaml b/changelogs/1.27.5.yaml
new file mode 100644
index 0000000000000..ec9a51b0eb0a8
--- /dev/null
+++ b/changelogs/1.27.5.yaml
@@ -0,0 +1,7 @@
+date: April 18, 2024
+
+bug_fixes:
+- area: tls
+ change: |
+ Fix a RELEASE_ASSERT when using :ref:`auto_sni `
+ if the downstream request ``:authority`` was longer than 255 characters.
diff --git a/changelogs/1.27.6.yaml b/changelogs/1.27.6.yaml
new file mode 100644
index 0000000000000..cc73ba5da9a92
--- /dev/null
+++ b/changelogs/1.27.6.yaml
@@ -0,0 +1,33 @@
+date: June 4, 2024
+
+bug_fixes:
+- area: router
+ change: |
+ Fix a timing issue when upstream requests are empty when decoding data and send local reply when that happens. This is
+ controlled by ``envoy_reloadable_features_send_local_reply_when_no_buffer_and_upstream_request``.
+- area: quic
+ change: |
+ Applied 2 QUICHE patches for crash bugs in ``QuicSpdyStream`` ``OnDataAvailable()`` and ``OnInitialHeaderComplete()``.
+- area: quic
+ change: |
+ Fixed crash bug when QUIC downstream stream was read closed and then timed out.
+- area: decompression
+ change: |
+ Fixed a bug where Envoy will go into an endless loop when using the brotli decompressor. If the input stream has
+ redundant data, the decompressor will loop forever.
+- area: websocket
+ change: |
+ Only 101 is considered a successful response for websocket handshake for HTTP/1.1, and Envoy as a proxy will proxy the response
+ header from upstream to downstream and then close the request if other status is received. This behavior can be
+ reverted by ``envoy_reloadable_features_check_switch_protocol_websocket_handshake``.
+- area: async http client
+ change: |
+ Added one option to disable the response body buffering for mirror request. Also introduced a 32MB cap for the response
+ buffer, which can be changed by the runtime flag ``http.async_response_buffer_limit`` based on the product needs.
+
+removed_config_or_runtime:
+# *Normally occurs at the end of the* :ref:`deprecation period `
+
+new_features:
+
+deprecated:
diff --git a/changelogs/current.yaml b/changelogs/current.yaml
index b6e64f270e821..26d799cbd618f 100644
--- a/changelogs/current.yaml
+++ b/changelogs/current.yaml
@@ -1,575 +1,32 @@
-date: July 26, 2023
-
-behavior_changes:
-- area: build
- change: |
- Moved the subset, ring_hash, and maglev LB code into extensions. If you use these load balancers and override
- :repo:`bazel/extensions_build_config.bzl` you will need to include them explicitly.
-- area: build
- change: |
- Moved xDS code extensions. If you use the xDS and override :repo:`bazel/extensions_build_config.bzl` you will
- need to include the new config_subscriptions explicitly.
-- area: http
- change: |
- When ``append_x_forwarded_host`` is enabled for a given route action it is now only appended iff it is different from the last
- value in the list. This resolves issues where a retry caused the same value to be appended multiple times. This
- behavioral change can be temporarily reverted by setting runtime guard ``envoy_reloadable_features_append_xfh_idempotent`` to ``false``.
-- area: ext_proc
- change: |
- The proto field :ref:`value ` type is string.
- This make it unable to support enconding non-utf8 characters in the ``HeaderValue`` message.
- To support sending header value with non-utf8 characters, a new proto field is added in the HeaderValue message:
- :ref:`raw_value `.
- The header values are now encoded in this ``raw_value`` field when Envoy ext_proc filter sending
- and receiving messages from the ext_proc server. This behavioral change can be temporarily
- reverted by setting the runtime guard ``envoy_reloadable_features_send_header_raw_value`` to ``false``.
-- area: ext_proc
- change: |
- Apply header mutation rules from the ext_proc config to the ``ImmediateResponse``. This behavior change can be temporarily
- reverted by setting the runtime guard ``envoy_reloadable_features_immediate_response_use_filter_mutation_rule`` to false.
-- area: active health check
- change: |
- Preserve the active-health check status of a host after a cluster/assignment update. This is now preserved in cases
- where the assignment updates a host's locality. This behavioral change can be temporarily reverted by setting the
- runtime flag ``envoy.reloadable_features.keep_endpoint_active_hc_status_on_locality_update`` to ``false``.
-- area: quic
- change: |
- Add a default false runtime flag ``envoy.reloadable_features.quic_reject_all`` to disable QUIC listener if needed.
-- area: stats tls
- change: |
- Fixed metric tag extraction so that TLS parameters are properly extracted from the stats, both for listeners and clusters.
- This changes the Prometheus names from
- ``envoy_listener_ssl_ciphers_ECDHE_RSA_AES128_GCM_SHA256{envoy_listener_address="0.0.0.0_10000"}`` to
- ``envoy_listener_ssl_ciphers{envoy_listener_address="0.0.0.0_10000", envoy_ssl_cipher="ECDHE_RSA_AES128_GCM_SHA256"}``, and
- similar for ``envoy_listener_ssl_versions_TLSv1_2``, ``envoy_cluster_ssl_versions_TLSv1_2``, ``envoy_listener_ssl_curves_P_256``,
- ``envoy_cluster_ssl_curves_P_256``, ``envoy_listener_ssl_sigalgs_rsa_pss_rsae_sha256``.
-
-minor_behavior_changes:
-- area: connection pool
- change: |
- Increase granularity mapping connection pool failures to specific stream failure reasons to make it more transparent why
- the stream is reset when a connection pool's connection fails.
-- area: custom response
- change: |
- The filter now traverses matchers from most specific to least specific per filter config till a match is found for the response.
-- area: http1
- change: |
- Allowing mixed case schemes in absolute urls (e.g. HtTp://www.google.com). Mixed case schemes will be normalized to
- the lower cased equivalents before being forwarded upstream. This behavior can be reverted by setting runtime flag
- ``envoy.reloadable_features.allow_absolute_url_with_mixed_scheme`` to false.
-- area: http1
- change: |
- The HTTP1 server-side codec no longer considers encoding 1xx headers as
- starting the response. This allows the codec to raise protocol errors,
- sending detailed local replies instead of just closing the connection. This
- behavior can be reverted by setting runtime flag
- ``envoy.reloadable_features.http1_allow_codec_error_response_after_1xx_headers``
- to ``false``.
-- area: dns
- change: |
- Changing the DNS cache to use ``host:port`` as the cache key rather than ``host``. This allows a
- downstream DFP filter to serve both secure and insecure clusters. This behavioral change
- can be reverted by setting runtime flag ``envoy.reloadable_features.dfp_mixed_scheme`` to ``false``.
-- area: uhv
- change: |
- Preserve case of %-encoded triplets in the default header validator. This behavior can be reverted by setting runtime flag
- ``envoy.reloadable_features.uhv_preserve_url_encoded_case`` to ``false``, in which case %-encoded triplets are normalized
- to uppercase characters. This setting is only applicable when the Unversal Header Validator is enabled and has no effect otherwise.
-- area: uhv
- change: |
- Allow malformed URL encoded triplets in the default header validator. This behavior can be reverted by setting runtime flag
- ``envoy.reloadable_features.uhv_allow_malformed_url_encoding`` to ``false``, in which case requests with malformed URL encoded triplets
- in path are rejected. This setting is only applicable when the Unversal Header Validator is enabled and has no effect otherwise.
-- area: ext_proc
- change: |
- When :ref:`clear_route_cache ` is set, ext_proc will check
- for header mutations beforce clearing the route cache. Failures due to this check will be counted under the
- ``clear_route_cache_ignored`` stat.
-- area: aws
- change: |
- Added support for fetching credentials from the AWS credentials file, which only happens if credentials cannot be fetched
- from environment variables. This behavioral change can be reverted by setting runtime guard
- ``envoy.reloadable_features.enable_aws_credentials_file`` to ``false``.
-- area: http cookies
- change: |
- Changed internal format of http cookie to protobuf and added expiry timestamp. Processing expired cookie
- results in selection of a new upstream host and sending a new cookie to the client. Previous format of
- the cookie is still accepted, but is planned to be obsoleted in the future.
- This behavior change can be reverted by setting
- ``envoy.reloadable_features.stateful_session_encode_ttl_in_cookie`` to ``false``.
-- area: overload manager
- change: |
- Changed behavior of the overload manager to error on unknown overload
- manager actions. Prior it would silently fail. This change can be reverted
- temporarily by setting the runtime guard
- ``envoy.reloadable_features.overload_manager_error_unknown_action`` to
- false.
-- area: router
- change: |
- Added check for existing metadata before setting metadata due to ``auto_sni``, ``auto_san_validation``, or
- ``override_auto_sni_header`` to prevent triggering ``ENVOY_BUG`` when an earlier filter has set the metadata.
-- area: resource_monitors
- change: |
- Changed behavior of the fixed heap monitor to count unused mapped pages as
- free memory. This change can be reverted temporarily by setting the runtime guard
- ``envoy.reloadable_features.count_unused_mapped_pages_as_free`` to ``false``.
-- area: ext_proc
- change: |
- Filter metadata containing ext proc stats has been moved from ``ext-proc-logging-info`` to a namespace corresponding
- to the name of the ext_proc filter.
-- area: stats
- change: |
- Added new type of gauge with type hidden. These stats are hidden from admin/stats-sinks but can shown with a
- query-parameter of ``/stats?hidden=include`` or ``/stats?hidden=showonly``.
-- area: ext_authz
- change: |
- Forward :ref:`typed_filter_metadata ` selected by
- ``typed_metadata_context_namespaces`` and :ref:`filter_metadata `
- selected by
- :ref:`metadata_context_namespaces `
- from connection metadata to external auth service. This is addition to the current behavior of forwarding request metadata.
- In the event of both connection and request metadata containing the requested metadata the request value will be provided.
-- area: eds
- change: |
- Added the ability to specify mulitple addresses for a host in an EDS cluster. Connections to the host with more than one
- address will be established using the Happy Eyeballs algorithm.
-- area: upstream
- change: |
- Changed behavior of the unpausing connect with 2xx status codes. This change can be reverted temporarily by
- setting the runtime guard ``envoy.reloadable_features.upstream_allow_connect_with_2xx`` to ``false``.
-- area: http
- change: |
- Round trip time will not be refreshed for every request by default. And if this is necessary, it can be
- enabled by setting runtime guard ``envoy.reloadable_features.refresh_rtt_after_request`` to ``true``.
-- area: http
- change: |
- Envoy will now lower case scheme values by default. This behaviorial change can be temporarily reverted
- by setting runtime guard ``envoy.reloadable_features.lowercase_scheme`` to ``false``.
+date: June 28, 2024
bug_fixes:
-- area: oauth2
- change: |
- The Max-Age attribute of Set-Cookie HTTP response header was being assigned a value representing Seconds Since
- the Epoch, causing cookies to expire in ~53 years. This was fixed an now it is being assigned a value representing
- the number of seconds until the cookie expires.
- This behavioral change can be temporarily reverted by setting runtime guard
- ``envoy.reloadable_features.oauth_use_standard_max_age_value`` to ``false``.
-- area: tls
- change: |
- Fix build FIPS compliance when using both FIPS mode and Wasm extensions (``--define boringssl=fips`` and ``--define wasm=v8``).
- area: http
change: |
- Switched Envoy internal scheme checks from case sensitive to case insensitive. This behaviorial change can be temporarily
- reverted by setting runtime guard ``envoy.reloadable_features.handle_uppercase_scheme`` to ``false``.
-
- Fix `CVE-2023-35944 `_.
-
-- area: ext_authz
- change: |
- Fix a bug where the ext_authz filter will ignore the request body when the
- :ref:`pack_as_bytes ` is set to ``true`` and
- HTTP authorization service is configured.
-- area: ext_authz
- change: |
- Fix a bug where the ext_authz filter will remove non UTF-8 characters from the body of a request when configured
- to use :ref:`http_service `, if configured
- to send the body.
-- area: router
- change: |
- Fixed the bug that updating :ref:`scope_key_builder
- `
- of SRDS config doesn't work and multiple HCM share the same ``scope_key_builder``.
-- area: http
- change: |
- The :ref:`is_optional
- `
- field of HTTP filter can only be used for configuration loading of
- :ref:`HTTP filter `
- and will be ignored for loading of route or virtual host level filter config. This behavioral change
- can be temporarily reverted by setting runtime guard
- ``envoy.reloadable_features.ignore_optional_option_from_hcm_for_route_config`` to ``false``.
- You can also use
- :ref:`route/virtual host optional flag `
- as a replacement of the feature.
-- area: logging
- change: |
- Do not display GRPC_STATUS_NUMBER for non gRPC requests.
- This behavioral change can be temporarily reverted by setting runtime guard
- ``envoy.reloadable_features.validate_grpc_header_before_log_grpc_status`` to ``false``.
-- area: boringssl
- change: |
- Fixed the crash that occurs when contrib is compiled with ``boringssl=fips`` defined.
-- area: oauth2
- change: |
- The ``httpOnly`` attribute for ``Set-Cookie`` for tokens in HTTP response header was missing,
- causing tokens to be accessible from the JavaScript making the apps vulnerable.
- This was fixed now by marking the cookie as ``httpOnly``.
- This behavioral change can be temporarily reverted by setting runtime guard
- ``envoy.reloadable_features.oauth_make_token_cookie_httponly`` to ``false``.
-
- Fix `CVE-2023-35941 `_.
-
-- area: opentelemetry/grpc/access log
- change: |
- Fixed a bug in the open telemetry access logger. This logger now uses the
- server scope for stats instead of the listener's global scope. This fixes a
- use-after-free that can occur if the listener is drained but the cached
- gRPC access logger uses the listener's global scope for stats.
-
- Fix `CVE-2023-35942 `_.
-
-- area: dependency
- change: |
- Update Wasmtime and related deps -> 9.0.3 to resolve
- `CVE-2023-30624 `_.
-- area: dependency
- change: |
- Update C-ares -> 1.91.1 to resolve:
-
- - `CVE-2023-31130 `_.
- - `CVE-2023-31147 `_.
- - `CVE-2023-31124 `_.
- - `CVE-2023-32067 `_.
-- area: tcp_proxy
- change: |
- Fixed assert crash when multiple ``readDisable`` are called for TCP tunneling
- scenarios, by allowing multiple calls. This will also cause stats that indicate
- disable or enable of downstream read to be flushed only once per actual disabling
- or enabling.
-- area: redis_proxy
- change: |
- Fixes a bug where route properties such as ``key_formatter``,
- ``prefix`` and ``remove_prefix`` do not take effect when configured for :ref:`catch_all_route
- `.
-- area: upstream
- change: |
- Fixes a bug where the ``healthStatus()`` method of host return incorrect health status
- when the host status is updated by the EDS.
-- area: upstream
- change: |
- Fixes a bug where the ``healthStatus()`` method of host return unmatched health status
- with the ``coarseHealth()`` method.
-- area: original_dst
- change: |
- Fixes an issue with the ``ORIGINAL_DST`` cluster cleanup timer lifetime, which
- can occur if the cluster is removed while the timer is armed.
-- area: maglev loadbalancer
- change: |
- Fixes maglev stability problem. Previously, maglev returns slightly different backend assignment from the same backends and keys.
-- area: redis
- change: |
- Fixes a bug where redis transactions do not work properly when redis traffic is mirrored.
-- area: http2
- change: |
- Fix memory leak in nghttp2 when scheduled requests are cancelled due to the ``GOAWAY`` frame being received from the
- upstream service.
-- area: cors
- change: |
- Fix a use-after-free bug that occurs in the CORS filter if the ``origin`` header is removed between
- request header decoding and response header encoding.
-
- Fix `CVE-2023-35943 `_.
-
-- area: oauth2
- change: |
- Fixed a cookie validator bug that meant the HMAC calculation could be the same for different payloads.
-
- This prevents malicious clients from constructing credentials with permanent validity in some specific scenarios.
-- area: postgres
- change: |
- Enable parsing when using upstream SSL.
-
+ Fixed a bug where additional :ref:`cookie attributes `
+ are not sent properly to clients.
+# *Changes expected to improve the state of the world and are unlikely to have negative effects*
removed_config_or_runtime:
-- area: http
- change: |
- Removed runtime key ``envoy.reloadable_features.closer_shadow_behavior`` and legacy code paths.
-- area: http
- change: |
- Removed runtime key ``envoy.reloadable_features.allow_upstream_filters`` and legacy code paths.
-- area: quic
- change: |
- Removed runtime key ``envoy.reloadable_features.quic_defer_send_in_response_to_packet`` and legacy code paths.
-- area: upstream
- change: |
- Removed runtime key ``envoy.reloadable_features.fix_hash_key`` and legacy code paths.
-- area: logging
- change: |
- Removed runtime key ``envoy.reloadable_features.correct_remote_address`` and legacy code paths.
-- area: http
- change: |
- Removed runtime key ``envoy.reloadable_features.http_response_half_close`` and legacy code paths.
-- area: udp
- change: |
- Removed runtime key ``envoy.reloadable_features.udp_proxy_connect`` and legacy code paths.
-- area: header_formatters
- change: |
- Removed runtime key ``envoy.reloadable_features.unified_header_formatter`` and legacy code paths.
-- area: tls
- change: |
- Remove runtime key ``envoy.reloadable_features.tls_async_cert_validation`` and legacy code paths.
-- area: config
- change: |
- Removed runtime key ``envoy.reloadable_features.delta_xds_subscription_state_tracking_fix`` and legacy code paths.
-- area: http
- change: |
- Removed runtime key ``envoy.reloadable_features.http_strip_fragment_from_path_unsafe_if_disabled`` and legacy code paths.
-- area: grpc_stats
- change: |
- Removed runtime key ``envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default`` and legacy code paths.
+# *Normally occurs at the end of the* :ref:`deprecation period `
new_features:
-- area: golang
- change: |
- Added new :ref:`l4 golang network filter `.
-- area: access_log
- change: |
- Added ``%ACCESS_LOG_TYPE%`` substitution string, to help distinguishing between access log records and when they are being
- recorded. Please refer to the access log configuration documentation for more information.
-- area: access_log
- change: |
- Added :ref:`CEL ` access log formatter to print CEL expression.
-- area: access_log
- change: |
- (QUIC only) Added support for %BYTES_RETRANSMITTED% and %PACKETS_RETRANSMITTED%.
-- area: access_log
- change: |
- Added :ref:`DisableBuiltinLables
- `
- to disable envoy builtin resource labels.
-- area: dynamic_forward_proxy
- change: |
- Added :ref:`sub_clusters_config
- ` to enable
- independent sub cluster for each host:port, with STRICT_DNS cluster type.
-- area: http
- change: |
- Added runtime feature ``envoy.reloadable_features.max_request_headers_size_kb`` to override the default value of
- :ref:`max request headers size
- `.
-- area: http
- change: |
- Added support for CONNECT-UDP (RFC 9298). Can be disabled by setting runtime feature
- ``envoy.reloadable_features.enable_connect_udp_support`` to false.
-- area: listeners
- change: |
- Added :ref:`max_connections_to_accept_per_socket_event
- `
- that sets the maximum number of new connections to be accepted per socket
- event on a listener. If there are more connections to be accepted beyond
- the maximum, the remaining connections would be processed in later
- dispatcher loop iterations. Added listener histogram
- ``connections_accepted_per_socket_event`` to allow users to empirically
- determine an appropriate configuration for their deployment.
-- area: load shed point
- change: |
- Added load shed point ``envoy.load_shed_points.http_connection_manager_decode_headers`` that rejects new http streams
- by sending a local reply.
-- area: load shed point
- change: |
- Added load shed point ``envoy.load_shed_points.http1_server_abort_dispatch`` that rejects HTTP1 server processing of requests.
-- area: load shed point
- change: |
- Added load shed point ``envoy.load_shed_points.http2_server_go_away_on_dispatch`` that sends
- ``GOAWAY`` for HTTP2 server processing of requests. When a ``GOAWAY`` frame is submitted by
- this the counter ``http2.goaway_sent`` will be incremented.
-- area: matchers
- change: |
- Added :ref:`RuntimeFraction ` input
- matcher. It allows matching hash of the input on a runtime key.
-- area: stat_sinks
- change: |
- Added ``envoy.stat_sinks.open_telemetry`` stats_sink, that supports flushing metrics by the OTLP protocol,
- for supported Open Telemetry collectors.
-- area: redis_proxy
- change: |
- Added new configuration field :ref:`key_formatter
- ` to format redis key.
- The field supports using %KEY% as a formatter command for substituting the redis key as part of the substitution formatter expression.
-- area: stats
- change: |
- Added config :ref:`enable_deferred_creation_stats
- `.
- When set to ``true``, enables deferred instantiation on supported stats structures.
-- area: ratelimit
- change: |
- Added new configuration field :ref:`domain
- ` to allow for setting rate limit domains on a
- per-route basis.
-- area: tls_inspector
- change: |
- Added histogram ``bytes_processed`` which records the number of bytes of
- the tls_inspector processed while analyzing for tls usage. In cases where
- the connection uses tls this records the tls client hello size. In cases
- where the connection doesn't use tls this records the amount of bytes the
- tls_inspector processed until it realized the connection was not using tls.
-- area: tls_inspector
- change: |
- Added new configuration field :ref:`initial_read_buffer_size
- `
- to allow users to tune the buffer size requested by the filter. If
- configured, and the filter needs additional bytes, the filter will double
- the number of bytes requested up to the default 64KiB maximum.
-- area: access_log
- change: |
- Added access log filter :ref:`log_type_filter `
- to filter access log records based on the type of the record.
-- area: ext_proc
- change: |
- Added new configuration field
- :ref:`disable_clear_route_cache `
- to force the ext_proc filter from clearing the route cache. Failures to clear from setting this field will be counted under the
- ``clear_route_cache_disabled`` stat.
-- area: ext_proc
- change: |
- Added new configuration field
- :ref:`allow_mode_override `
- If set to true, the filter config
- :ref:`processing_mode `
- can be overridden by the
- :ref:`mode_override `
- in the response message from the external processing server.
- If not set, the ``mode_override`` API in the response message will be ignored.
-- area: ext_proc
- change: |
- :ref:`forward_rules `
- to only allow headers matching the rules to be forwarded to the external processing server.
-- area: redis_proxy
- change: |
- Added new field :ref:`connection_rate_limit
- `
- to limit reconnection rate to redis server to avoid reconnection storm.
-- area: match_delegate
- change: |
- Added :ref:`per route configuration
- ` to the
- :ref:`ExtensionWithMatcher
- ` filter.
- Which allows the associated matcher to be defined on a per route basis.
-- area: match_delegate
- change: |
- If no matcher is set the :ref:`ExtensionWithMatcher
- ` filter is now set to skip rather than erroring out.
- area: access_log
change: |
- Added additional HCM access log option :ref:`flush_log_on_tunnel_successfully_established
- `.
- Enabling this option will write a log to all access loggers when HTTP tunnels (e.g. Websocket and ``CONNECT``)
- are successfully established.
-- area: admin
- change: |
- Adds a new admin stats html bucket-mode ``detailed`` to generate all recorded buckets and summary percentiles.
-- area: http
- change: |
- Add support to the route/virtual host level
- :ref:`is_optional ` field.
- A route/virtual host level per filter config can be marked as optional, which means that if
- the filter fails to load, the configuration will no be rejected.
-- area: upstream
- change: |
- Added :ref:`cluster provided extension
- `
- to suppport the :ref:`load balancer policy `.
-- area: fault
+ added %RESPONSE_FLAGS_LONG% substitution string, that will output a pascal case string representing the response flags.
+ The output response flags will correspond with %RESPONSE_FLAGS%, only with a long textual string representation.
+- area: extension_discovery_service
change: |
- Added new field ``envoy.extensions.filters.http.fault.v3.HTTPFault.filter_metadata`` to aid in logging.
- Metadata will be stored in StreamInfo dynamic metadata under a namespace corresponding to the name of the fault filter.
-- area: load_balancing
- change: |
- Added new option
- :ref:`weighted_priority_health `
- to compute the health of a :ref:`priority level ` by using
- :ref:`load balancing weight `
- instead of the count of healthy hosts.
-- area: application_logs
- change: |
- Added bootstrap option
- :ref:`application_log_format `
- to enable setting application log format as JSON structure.
-- area: application_logs
- change: |
- Added bootstrap option
- :ref:`application_log_format `
- to enable setting application log text format from config.
-- area: ext_proc
- change: |
- Added new field ``filter_metadata `
- and :ref:`CEL input matcher `.
-- area: tls
- change: |
- Added support for hot-reloading CRL file when the file changes on disk.
- This works with dynamic secrets when
- :ref:`CertificateValidationContext `
- is delivered via SDS.
-- area: http
- change: |
- Added support for configuring additional :ref:`cookie attributes `.
-- area: http
- change: |
- Added support for the route/virtual host level
- :ref:`disabled ` field.
- A route/virtual host level per filter config can be marked as disabled, which means that
- the filter will be disabled in a specific route/virtual host.
-- area: health_check
- change: |
- Added host related information :ref:`metadata ` and
- :ref:`locality ` to
- the :ref:`health check event ` definition.
-- area: zookeeper
- change: |
- Added the ``addWatch`` opcode support to the ZooKeeper proxy filter.
-- area: config
- change: |
- added a statistic :ref:`warming_state ` to indicate the current warming state of a cluster.
+ added ECDS support for :ref:`downstream network filters`.
- area: access_log
change: |
- Added bytes snapshotting for upstream and downstream logging that will be reset after every periodic log. Downstream
- periodic loggers should read ``BytesMeter::bytesAtLastDownstreamPeriodicLog()``, and upstream periodic loggers should read
- ``BytesMeter::bytesAtLastUpstreamPeriodicLog()``.
-- area: lds
- change: |
- Pause SRDS when LDS is updated.
-- area: http
- change: |
- Added :ref:`outbound_control_frames_active ` and :ref:`outbound_frames_active `
- statistic.
-- area: original_dst
- change: |
- Filter state is pulled from request context first (if available), then falls back to connection context. Added ability to pick host
- from dynamic metadata using :ref:`metadata_key `.
- Same behavior - looks in request context first (if available), falls back to connection context.
-- area: tls
- change: |
- Added support to configure the new config option
- :ref:`enforce_rsa_key_usage `.
- This can be used to override its configuration in BoringSSL. It is currently default to false but expected to be changed
- to true by default in a future release. ``ssl.was_key_usage_invalid`` is added to :ref:`listener metrics `
- and will be incremented for certificate configurations that would fail if this option were set to true.
-- area: http
- change: |
- Added ``OVERWRITE_IF_EXISTS`` header manipulation keyword to overwrite a header only when it exists before manipulation.
-- area: tls
+ Added support for logging upstream connection establishment duration in the
+ :ref:`%COMMON_DURATION% ` access log
+ formatter operator. The following time points were added: ``%US_CX_BEG%``,
+ ``%US_CX_END%``, ``%US_HS_END%``.
+- area: golang
change: |
- Added FIPS compliant build for arm64.
+ added http golang filter config destroy callback. When a config gets deleted from envoy, the go plugin calls the
+ Destroy function on the config instance. config should implement the new
+ github.com/envoyproxy/envoy/contrib/golang/common/go/api.Config interface, implementing the Destroy function.
deprecated:
-- area: access_log
- change: |
- Deprecated (1.25.0) :ref:`intermediate_log_entry `
- in favour of :ref:`access_log_type `.
-- area: health_check
- change: |
- deprecated the :ref:`HealthCheck event_log_path ` in favor of
- :ref:`HealthCheck event_logger extension `.
-- area: stats
- change: |
- Added :ref:`enable_deferred_creation_stats
- `.
- support for ``ClusterTrafficStats``.
-- area: access_log
- change: |
- Added ``%DOWNSTREAM_LOCAL_DNS_SAN%``, ``%DOWNSTREAM_PEER_DNS_SAN%``, ``%DOWNSTREAM_LOCAL_IP_SAN%``
- and ``%DOWNSTREAM_PEER_IP_SAN%`` substitution formatters.
diff --git a/ci/Dockerfile-envoy b/ci/Dockerfile-envoy
index 9ea1d1a06cceb..befae3be07553 100644
--- a/ci/Dockerfile-envoy
+++ b/ci/Dockerfile-envoy
@@ -1,5 +1,5 @@
ARG BUILD_OS=ubuntu
-ARG BUILD_TAG=20.04
+ARG BUILD_TAG=20.04@sha256:874aca52f79ae5f8258faff03e10ce99ae836f6e7d2df6ecd3da5c1cad3a912b
ARG ENVOY_VRP_BASE_IMAGE=envoy-base
@@ -58,8 +58,7 @@ COPY --chown=0:0 --chmod=755 \
# STAGE: envoy-distroless
-# gcr.io/distroless/base-nossl-debian11:nonroot
-FROM gcr.io/distroless/base-nossl-debian11:nonroot@sha256:f10e1fbf558c630a4b74a987e6c754d45bf59f9ddcefce090f6b111925996767 AS envoy-distroless
+FROM gcr.io/distroless/base-nossl-debian12:nonroot@sha256:8a09e5752fb3ab9c9534fcc627eb1f451cd9bcfe66a6b149df62dcb84fb841a6 AS envoy-distroless
EXPOSE 10000
ENTRYPOINT ["/usr/local/bin/envoy"]
CMD ["-c", "/etc/envoy/envoy.yaml"]
diff --git a/ci/README.md b/ci/README.md
index 143b8f3235991..7bfec04f0fda0 100644
--- a/ci/README.md
+++ b/ci/README.md
@@ -5,7 +5,7 @@ and an image based on Windows2019.
## Ubuntu Envoy image
-The Ubuntu based Envoy Docker image at [`envoyproxy/envoy-build:`](https://hub.docker.com/r/envoyproxy/envoy-build/) is used for CI checks,
+The Ubuntu based Envoy Docker image at [`envoyproxy/envoy-build-ubuntu:`](https://hub.docker.com/r/envoyproxy/envoy-build/) is used for CI checks,
where `` is specified in [`envoy_build_sha.sh`](https://github.com/envoyproxy/envoy/blob/main/ci/envoy_build_sha.sh). Developers
may work with the latest build image SHA in [envoy-build-tools](https://github.com/envoyproxy/envoy-build-tools/blob/main/toolchains/rbe_toolchains_config.bzl#L8)
repo to provide a self-contained environment for building Envoy binaries and running tests that reflects the latest built Ubuntu Envoy image.
@@ -13,9 +13,14 @@ Moreover, the Docker image at [`envoyproxy/envoy:dev-`](https://hub.docker
The `` corresponds to the main commit at which the binary was compiled. Lastly, `envoyproxy/envoy:dev` contains an Envoy
binary built from the latest tip of main that passed tests.
-## Alpine Envoy image
+## Distroless Envoy image
+
+Minimal images based on a [distroless](https://github.com/GoogleContainerTools/distroless) allow for quicker deployment of Envoy.
+
+The Distroless base image is only built with symbols stripped.
+
+## Debug Envoy image
-Minimal images based on Alpine Linux allow for quicker deployment of Envoy. The Alpine base image is only built with symbols stripped.
To get the binary with symbols, use the corresponding Ubuntu based debug image. The image is pushed with two different tags:
`` and `latest`. Parallel to the Ubuntu images above, `` corresponds to the
main commit at which the binary was compiled, and `latest` corresponds to a binary built from the latest tip of main that passed tests.
@@ -81,7 +86,7 @@ ENVOY_DOCKER_PULL=true ./ci/run_envoy_docker.sh
An example basic invocation to build a developer version of the Envoy static binary (using the Bazel `fastbuild` type) is:
```bash
-./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.dev'
+./ci/run_envoy_docker.sh './ci/do_ci.sh dev'
```
The Envoy binary can be found in `/tmp/envoy-docker-build/envoy/source/exe/envoy-fastbuild` on the Docker host. You
@@ -89,22 +94,29 @@ can control this by setting `ENVOY_DOCKER_BUILD_DIR` in the environment, e.g. to
generate the binary in `~/build/envoy/source/exe/envoy-fastbuild` you can run:
```bash
-ENVOY_DOCKER_BUILD_DIR=~/build ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.dev'
+ENVOY_DOCKER_BUILD_DIR=~/build ./ci/run_envoy_docker.sh './ci/do_ci.sh dev'
```
For a release version of the Envoy binary you can run:
```bash
-./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.release.server_only'
+./ci/run_envoy_docker.sh './ci/do_ci.sh release.server_only'
```
The build artifact can be found in `/tmp/envoy-docker-build/envoy/source/exe/envoy` (or wherever
`$ENVOY_DOCKER_BUILD_DIR` points).
+To enable the previous behavior of the `release.server_only` target where the final binary was copied to a tar.gz file
+(e.g. envoy-binary.tar.gz), you can run:
+
+ ```bash
+ ./ci/run_envoy_docker.sh './ci/do_ci.sh release.server_only.binary'
+ ```
+
For a debug version of the Envoy binary you can run:
```bash
-./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.debug.server_only'
+./ci/run_envoy_docker.sh './ci/do_ci.sh debug.server_only'
```
The build artifact can be found in `/tmp/envoy-docker-build/envoy/source/exe/envoy-debug` (or wherever
@@ -119,33 +131,34 @@ the BAZEL_BUILD_EXTRA_OPTIONS environment variable
The `./ci/run_envoy_docker.sh './ci/do_ci.sh '` targets are:
-* `bazel.api` — build and run API tests under `-c fastbuild` with clang.
-* `bazel.asan` — build and run tests under `-c dbg --config=clang-asan` with clang.
-* `bazel.asan ` — build and run a specified test or test dir under `-c dbg --config=clang-asan` with clang.
-* `bazel.debug` — build Envoy static binary and run tests under `-c dbg`.
-* `bazel.debug ` — build Envoy static binary and run a specified test or test dir under `-c dbg`.
-* `bazel.debug.server_only` — build Envoy static binary under `-c dbg`.
-* `bazel.dev` — build Envoy static binary and run tests under `-c fastbuild` with clang.
-* `bazel.dev ` — build Envoy static binary and run a specified test or test dir under `-c fastbuild` with clang.
-* `bazel.dev.contrib` — build Envoy static binary with contrib and run tests under `-c fastbuild` with clang.
-* `bazel.dev.contrib ` — build Envoy static binary with contrib and run a specified test or test dir under `-c fastbuild` with clang.
-* `bazel.release` — build Envoy static binary and run tests under `-c opt` with clang.
-* `bazel.release ` — build Envoy static binary and run a specified test or test dir under `-c opt` with clang.
-* `bazel.release.server_only` — build Envoy static binary under `-c opt` with clang.
-* `bazel.sizeopt` — build Envoy static binary and run tests under `-c opt --config=sizeopt` with clang.
-* `bazel.sizeopt ` — build Envoy static binary and run a specified test or test dir under `-c opt --config=sizeopt` with clang.
-* `bazel.sizeopt.server_only` — build Envoy static binary under `-c opt --config=sizeopt` with clang.
-* `bazel.coverage` — build and run tests under `-c dbg` with gcc, generating coverage information in `$ENVOY_DOCKER_BUILD_DIR/envoy/generated/coverage/coverage.html`.
-* `bazel.coverage ` — build and run a specified test or test dir under `-c dbg` with gcc, generating coverage information in `$ENVOY_DOCKER_BUILD_DIR/envoy/generated/coverage/coverage.html`. Specify `//contrib/...` to get contrib coverage.
-* `bazel.msan` — build and run tests under `-c dbg --config=clang-msan` with clang.
-* `bazel.msan ` — build and run a specified test or test dir under `-c dbg --config=clang-msan` with clang.
-* `bazel.tsan` — build and run tests under `-c dbg --config=clang-tsan` with clang.
-* `bazel.tsan ` — build and run a specified test or test dir under `-c dbg --config=clang-tsan` with clang.
-* `bazel.fuzz` — build and run fuzz tests under `-c dbg --config=asan-fuzzer` with clang.
-* `bazel.fuzz ` — build and run a specified fuzz test or test dir under `-c dbg --config=asan-fuzzer` with clang. If specifying a single fuzz test, must use the full target name with "_with_libfuzzer" for ``.
-* `bazel.compile_time_options` — build Envoy and run tests with various compile-time options toggled to their non-default state, to ensure they still build.
-* `bazel.compile_time_options ` — build Envoy and run a specified test or test dir with various compile-time options toggled to their non-default state, to ensure they still build.
-* `bazel.clang_tidy ` — build and run clang-tidy specified source files, if no files specified, runs against the diff with the last GitHub commit.
+* `api` — build and run API tests under `-c fastbuild` with clang.
+* `asan` — build and run tests under `-c dbg --config=clang-asan` with clang.
+* `asan ` — build and run a specified test or test dir under `-c dbg --config=clang-asan` with clang.
+* `debug` — build Envoy static binary and run tests under `-c dbg`.
+* `debug ` — build Envoy static binary and run a specified test or test dir under `-c dbg`.
+* `debug.server_only` — build Envoy static binary under `-c dbg`.
+* `docker` — build Docker images, expects `release` or `release.server_only` to have been run first.
+* `dev` — build Envoy static binary and run tests under `-c fastbuild` with clang.
+* `dev ` — build Envoy static binary and run a specified test or test dir under `-c fastbuild` with clang.
+* `dev.contrib` — build Envoy static binary with contrib and run tests under `-c fastbuild` with clang.
+* `dev.contrib ` — build Envoy static binary with contrib and run a specified test or test dir under `-c fastbuild` with clang.
+* `release` — build Envoy static binary and run tests under `-c opt` with clang.
+* `release ` — build Envoy static binaries and run a specified test or test dir under `-c opt` with clang.
+* `release.server_only` — build Envoy static binaries under `-c opt` with clang.
+* `sizeopt` — build Envoy static binary and run tests under `-c opt --config=sizeopt` with clang.
+* `sizeopt ` — build Envoy static binary and run a specified test or test dir under `-c opt --config=sizeopt` with clang.
+* `sizeopt.server_only` — build Envoy static binary under `-c opt --config=sizeopt` with clang.
+* `coverage` — build and run tests under `-c dbg` with gcc, generating coverage information in `$ENVOY_DOCKER_BUILD_DIR/envoy/generated/coverage/coverage.html`.
+* `coverage ` — build and run a specified test or test dir under `-c dbg` with gcc, generating coverage information in `$ENVOY_DOCKER_BUILD_DIR/envoy/generated/coverage/coverage.html`. Specify `//contrib/...` to get contrib coverage.
+* `msan` — build and run tests under `-c dbg --config=clang-msan` with clang.
+* `msan ` — build and run a specified test or test dir under `-c dbg --config=clang-msan` with clang.
+* `tsan` — build and run tests under `-c dbg --config=clang-tsan` with clang.
+* `tsan ` — build and run a specified test or test dir under `-c dbg --config=clang-tsan` with clang.
+* `fuzz` — build and run fuzz tests under `-c dbg --config=asan-fuzzer` with clang.
+* `fuzz ` — build and run a specified fuzz test or test dir under `-c dbg --config=asan-fuzzer` with clang. If specifying a single fuzz test, must use the full target name with "_with_libfuzzer" for ``.
+* `compile_time_options` — build Envoy and run tests with various compile-time options toggled to their non-default state, to ensure they still build.
+* `compile_time_options ` — build Envoy and run a specified test or test dir with various compile-time options toggled to their non-default state, to ensure they still build.
+* `clang_tidy ` — build and run clang-tidy specified source files, if no files specified, runs against the diff with the last GitHub commit.
* `check_proto_format`— check configuration, formatting and build issues in API proto files.
* `fix_proto_format`— fix configuration, formatting and build issues in API proto files.
* `format`— run validation, linting and formatting tools.
diff --git a/ci/build_setup.sh b/ci/build_setup.sh
index f4a94398f1bf6..00f4c2c752278 100755
--- a/ci/build_setup.sh
+++ b/ci/build_setup.sh
@@ -28,6 +28,7 @@ export ENVOY_BUILD_FILTER_EXAMPLE="${ENVOY_BUILD_FILTER_EXAMPLE:-0}"
read -ra BAZEL_BUILD_EXTRA_OPTIONS <<< "${BAZEL_BUILD_EXTRA_OPTIONS:-}"
read -ra BAZEL_EXTRA_TEST_OPTIONS <<< "${BAZEL_EXTRA_TEST_OPTIONS:-}"
+read -ra BAZEL_STARTUP_EXTRA_OPTIONS <<< "${BAZEL_STARTUP_EXTRA_OPTIONS:-}"
read -ra BAZEL_OPTIONS <<< "${BAZEL_OPTIONS:-}"
echo "ENVOY_SRCDIR=${ENVOY_SRCDIR}"
@@ -86,7 +87,6 @@ fi
export ENVOY_TEST_TMPDIR="${ENVOY_TEST_TMPDIR:-$BUILD_DIR/tmp}"
export LLVM_ROOT="${LLVM_ROOT:-/opt/llvm}"
export PATH=${LLVM_ROOT}/bin:${PATH}
-export CLANG_FORMAT="${CLANG_FORMAT:-clang-format}"
if [[ -f "/etc/redhat-release" ]]; then
BAZEL_BUILD_EXTRA_OPTIONS+=("--copt=-DENVOY_IGNORE_GLIBCXX_USE_CXX11_ABI_ERROR=1")
@@ -105,25 +105,20 @@ trap cleanup EXIT
_bazel="$(which bazel)"
BAZEL_STARTUP_OPTIONS=(
+ "${BAZEL_STARTUP_EXTRA_OPTIONS[@]}"
"--output_user_root=${BUILD_DIR}/bazel_root"
"--output_base=${BUILD_DIR}/bazel_root/base")
bazel () {
- # echo "RUNNING BAZEL (${PWD}): ${BAZEL_STARTUP_OPTIONS[*]} <> ${*}" >&2
- "$_bazel" "${BAZEL_STARTUP_OPTIONS[@]}" "$@"
+ local startup_options
+ read -ra startup_options <<< "${BAZEL_STARTUP_OPTION_LIST:-}"
+ # echo "RUNNING BAZEL (${PWD}): ${startup_options[*]} <> ${*}" >&2
+ "$_bazel" "${startup_options[@]}" "$@"
}
export _bazel
export -f bazel
-if [[ -n "$BAZEL_NO_CACHE_TEST_RESULTS" ]]; then
- VERSION_DEV="$(cut -d- -f2 "${ENVOY_SRCDIR}/VERSION.txt")"
- # Use uncached test results for non-release commits to a branch.
- if [[ $VERSION_DEV == "dev" ]]; then
- BAZEL_EXTRA_TEST_OPTIONS+=("--nocache_test_results")
- fi
-fi
-
# Use https://docs.bazel.build/versions/master/command-line-reference.html#flag--experimental_repository_cache_hardlinks
# to save disk space.
BAZEL_GLOBAL_OPTIONS=(
@@ -134,8 +129,6 @@ BAZEL_BUILD_OPTIONS=(
"${BAZEL_GLOBAL_OPTIONS[@]}"
"--verbose_failures"
"--experimental_generate_json_trace_profile"
- "--test_output=errors"
- "--action_env=CLANG_FORMAT"
"${BAZEL_BUILD_EXTRA_OPTIONS[@]}"
"${BAZEL_EXTRA_TEST_OPTIONS[@]}")
@@ -191,9 +184,6 @@ mkdir -p "${ENVOY_FAILED_TEST_LOGS}"
export ENVOY_BUILD_PROFILE="${ENVOY_BUILD_DIR}"/generated/build-profile
mkdir -p "${ENVOY_BUILD_PROFILE}"
-export BUILDIFIER_BIN="${BUILDIFIER_BIN:-/usr/local/bin/buildifier}"
-export BUILDOZER_BIN="${BUILDOZER_BIN:-/usr/local/bin/buildozer}"
-
if [[ "${ENVOY_BUILD_FILTER_EXAMPLE}" == "true" ]]; then
# shellcheck source=ci/filter_example_setup.sh
. "$(dirname "$0")"/filter_example_setup.sh
diff --git a/ci/do_ci.sh b/ci/do_ci.sh
index b0a48d6b8aaa4..e7211b1babf91 100755
--- a/ci/do_ci.sh
+++ b/ci/do_ci.sh
@@ -4,13 +4,10 @@
set -e
-
# TODO(phlax): Clarify and/or integrate SRCDIR and ENVOY_SRCDIR
export SRCDIR="${SRCDIR:-$PWD}"
export ENVOY_SRCDIR="${ENVOY_SRCDIR:-$PWD}"
-# shellcheck source=ci/setup_cache.sh
-. "$(dirname "$0")"/setup_cache.sh
# shellcheck source=ci/build_setup.sh
. "$(dirname "$0")"/build_setup.sh
@@ -19,6 +16,66 @@ echo "building for ${ENVOY_BUILD_ARCH}"
cd "${SRCDIR}"
+# Its better to fetch too little rather than too much, as whatever is
+# actually used is what will be cached.
+# Fetching is mostly for robustness rather than optimization.
+FETCH_TARGETS=(
+ @bazel_tools//tools/jdk:remote_jdk11
+ @envoy_build_tools//...
+ //tools/gsutil
+ //tools/zstd)
+FETCH_BUILD_TARGETS=(
+ //contrib/exe/...
+ //distribution/...
+ //source/exe/...)
+FETCH_GCC_TARGETS=(
+ //source/exe/...)
+# TODO(phlax): add this as a general cache
+# this fetches a bit too much for some of the targets
+# but its not really possible to filter their needs so move
+# to a shared precache
+FETCH_TEST_TARGETS=(
+ @nodejs//...
+ //test/...)
+FETCH_ALL_TEST_TARGETS=(
+ @com_github_google_quiche//:ci_tests
+ "${FETCH_TEST_TARGETS[@]}")
+FETCH_API_TARGETS=(
+ @envoy_api//...
+ //tools/api_proto_plugin/...
+ //tools/protoprint/...
+ //tools/protoxform/...
+ //tools/type_whisperer/...
+ //tools/testdata/protoxform/...)
+FETCH_DOCS_TARGETS+=(
+ //docs/...)
+FETCH_FORMAT_TARGETS+=(
+ //tools/code_format/...)
+FETCH_PROTO_TARGETS=(
+ @com_github_bufbuild_buf//:bin/buf
+ //tools/proto_format/...)
+
+retry () {
+ local n wait iterations
+ wait="${1}"
+ iterations="${2}"
+ shift 2
+ n=0
+ until [ "$n" -ge "$iterations" ]; do
+ "${@}" \
+ && break
+ n=$((n+1))
+ if [[ "$n" -lt "$iterations" ]]; then
+ sleep "$wait"
+ echo "Retrying ..."
+ else
+ echo "Fetch failed"
+ exit 1
+ fi
+ done
+}
+
+
if [[ "${ENVOY_BUILD_ARCH}" == "x86_64" ]]; then
BUILD_ARCH_DIR="/linux/amd64"
elif [[ "${ENVOY_BUILD_ARCH}" == "aarch64" ]]; then
@@ -29,12 +86,14 @@ else
fi
function collect_build_profile() {
- declare -g build_profile_count=${build_profile_count:-1}
- mv -f \
- "$(bazel info "${BAZEL_BUILD_OPTIONS[@]}" output_base)/command.profile.gz" \
- "${ENVOY_BUILD_PROFILE}/${build_profile_count}-$1.profile.gz" \
- || :
- ((build_profile_count++))
+ local output_base
+ declare -g build_profile_count=${build_profile_count:-1}
+ output_base="$(bazel info "${BAZEL_BUILD_OPTIONS[@]}" output_base)"
+ mv -f \
+ "${output_base}/command.profile.gz" \
+ "${ENVOY_BUILD_PROFILE}/${build_profile_count}-$1.profile.gz" \
+ || :
+ ((build_profile_count++))
}
function bazel_with_collection() {
@@ -104,7 +163,7 @@ function bazel_binary_build() {
# The COMPILE_TYPE variable is redundant in this case and is only here for
# readability. It is already set in the .bazelrc config for sizeopt.
COMPILE_TYPE="opt"
- CONFIG_ARGS="--config=sizeopt"
+ CONFIG_ARGS=("--config=sizeopt")
elif [[ "${BINARY_TYPE}" == "fastbuild" ]]; then
COMPILE_TYPE="fastbuild"
fi
@@ -122,7 +181,7 @@ function bazel_binary_build() {
# This is a workaround for https://github.com/bazelbuild/bazel/issues/11834
[[ -n "${ENVOY_RBE}" ]] && rm -rf bazel-bin/"${ENVOY_BIN}"*
- bazel build "${BAZEL_BUILD_OPTIONS[@]}" --remote_download_toplevel -c "${COMPILE_TYPE}" "${BUILD_TARGET}" ${CONFIG_ARGS}
+ bazel build "${BAZEL_BUILD_OPTIONS[@]}" --remote_download_toplevel -c "${COMPILE_TYPE}" "${BUILD_TARGET}" "${CONFIG_ARGS[@]}"
collect_build_profile "${BINARY_TYPE}"_build
# Copy the built envoy binary somewhere that we can access outside of the
@@ -132,14 +191,14 @@ function bazel_binary_build() {
if [[ "${COMPILE_TYPE}" == "dbg" || "${COMPILE_TYPE}" == "opt" ]]; then
# Generate dwp file for debugging since we used split DWARF to reduce binary
# size
- bazel build "${BAZEL_BUILD_OPTIONS[@]}" --remote_download_toplevel -c "${COMPILE_TYPE}" "${BUILD_DEBUG_INFORMATION}" ${CONFIG_ARGS}
+ bazel build "${BAZEL_BUILD_OPTIONS[@]}" --remote_download_toplevel -c "${COMPILE_TYPE}" "${BUILD_DEBUG_INFORMATION}" "${CONFIG_ARGS[@]}"
# Copy the debug information
cp -f bazel-bin/"${ENVOY_BIN}".dwp "${FINAL_DELIVERY_DIR}"/envoy.dwp
fi
# Validation tools for the tools image.
bazel build "${BAZEL_BUILD_OPTIONS[@]}" --remote_download_toplevel -c "${COMPILE_TYPE}" \
- //test/tools/schema_validator:schema_validator_tool ${CONFIG_ARGS}
+ //test/tools/schema_validator:schema_validator_tool "${CONFIG_ARGS[@]}"
# Build su-exec utility
bazel build "${BAZEL_BUILD_OPTIONS[@]}" --remote_download_toplevel -c "${COMPILE_TYPE}" external:su-exec
@@ -167,25 +226,27 @@ function run_ci_verify () {
CI_TARGET=$1
shift
+if [[ "$CI_TARGET" =~ bazel.* ]]; then
+ ORIG_CI_TARGET="$CI_TARGET"
+ CI_TARGET="$(echo "${CI_TARGET}" | cut -d. -f2-)"
+ echo "Using \`${ORIG_CI_TARGET}\` is deprecated, please use \`${CI_TARGET}\`"
+fi
+
if [[ $# -ge 1 ]]; then
COVERAGE_TEST_TARGETS=("$@")
TEST_TARGETS=("$@")
else
# Coverage test will add QUICHE tests by itself.
COVERAGE_TEST_TARGETS=("//test/...")
- if [[ "$CI_TARGET" == "bazel.release" ]]; then
+ if [[ "${CI_TARGET}" == "release" ]]; then
# We test contrib on release only.
COVERAGE_TEST_TARGETS=("${COVERAGE_TEST_TARGETS[@]}" "//contrib/...")
- elif [[ "${CI_TARGET}" == "bazel.msan" ]]; then
+ elif [[ "${CI_TARGET}" == "msan" ]]; then
COVERAGE_TEST_TARGETS=("${COVERAGE_TEST_TARGETS[@]}" "-//test/extensions/...")
fi
TEST_TARGETS=("${COVERAGE_TEST_TARGETS[@]}" "@com_github_google_quiche//:ci_tests")
fi
-if [[ "$CI_TARGET" =~ bazel.* ]]; then
- CI_TARGET="$(echo "${CI_TARGET}" | cut -d. -f2-)"
-fi
-
case $CI_TARGET in
api)
# Use libstdc++ because the API booster links to prebuilt libclang*/libLLVM* installed in /opt/llvm/lib,
@@ -217,9 +278,7 @@ case $CI_TARGET in
;&
api.go)
- if [[ -z "$NO_BUILD_SETUP" ]]; then
- setup_clang_toolchain
- fi
+ setup_clang_toolchain
GO_IMPORT_BASE="github.com/envoyproxy/go-control-plane"
GO_TARGETS=(@envoy_api//...)
read -r -a GO_PROTOS <<< "$(bazel query "${BAZEL_GLOBAL_OPTIONS[@]}" "kind('go_proto_library', ${GO_TARGETS[*]})" | tr '\n' ' ')"
@@ -325,26 +384,16 @@ case $CI_TARGET in
}
;;
+ clean|expunge)
+ setup_clang_toolchain
+ if [[ "$CI_TARGET" == "expunge" ]]; then
+ CLEAN_ARGS+=(--expunge)
+ fi
+ bazel clean "${BAZEL_GLOBAL_OPTIONS[@]}" "${CLEAN_ARGS[@]}"
+ ;;
+
compile_time_options)
- # Right now, none of the available compile-time options conflict with each other. If this
- # changes, this build type may need to be broken up.
- COMPILE_TIME_OPTIONS=(
- "--define" "admin_html=disabled"
- "--define" "signal_trace=disabled"
- "--define" "hot_restart=disabled"
- "--define" "google_grpc=disabled"
- "--define" "boringssl=fips"
- "--define" "log_debug_assert_in_release=enabled"
- "--define" "path_normalization_by_default=true"
- "--define" "deprecated_features=disabled"
- "--define" "tcmalloc=gperftools"
- "--define" "zlib=ng"
- "--define" "uhv=enabled"
- "--@envoy//bazel:http3=False"
- "--@envoy//source/extensions/filters/http/kill_request:enabled"
- "--test_env=ENVOY_HAS_EXTRA_EXTENSIONS=true"
- "--remote_download_minimal"
- "--config=libc++20")
+ # See `compile-time-options` in `.bazelrc`
setup_clang_toolchain
# This doesn't go into CI but is available for developer convenience.
echo "bazel with different compiletime options build with tests..."
@@ -354,8 +403,8 @@ case $CI_TARGET in
echo "Building and testing with wasm=wamr: ${TEST_TARGETS[*]}"
bazel_with_collection \
test "${BAZEL_BUILD_OPTIONS[@]}" \
+ --config=compile-time-options \
--define wasm=wamr \
- "${COMPILE_TIME_OPTIONS[@]}" \
-c fastbuild \
"${TEST_TARGETS[@]}" \
--test_tag_filters=-nofips \
@@ -363,10 +412,9 @@ case $CI_TARGET in
echo "Building and testing with wasm=wasmtime: and admin_functionality and admin_html disabled ${TEST_TARGETS[*]}"
bazel_with_collection \
test "${BAZEL_BUILD_OPTIONS[@]}" \
+ --config=compile-time-options \
--define wasm=wasmtime \
- --define admin_html=disabled \
--define admin_functionality=disabled \
- "${COMPILE_TIME_OPTIONS[@]}" \
-c fastbuild \
"${TEST_TARGETS[@]}" \
--test_tag_filters=-nofips \
@@ -374,8 +422,8 @@ case $CI_TARGET in
echo "Building and testing with wasm=wavm: ${TEST_TARGETS[*]}"
bazel_with_collection \
test "${BAZEL_BUILD_OPTIONS[@]}" \
+ --config=compile-time-options \
--define wasm=wavm \
- "${COMPILE_TIME_OPTIONS[@]}" \
-c fastbuild \
"${TEST_TARGETS[@]}" \
--test_tag_filters=-nofips \
@@ -384,28 +432,28 @@ case $CI_TARGET in
# these tests under "-c opt" to save time in CI.
bazel_with_collection \
test "${BAZEL_BUILD_OPTIONS[@]}" \
+ --config=compile-time-options \
--define wasm=wavm \
- "${COMPILE_TIME_OPTIONS[@]}" \
-c opt \
@envoy//test/common/common:assert_test \
@envoy//test/server:server_test
# "--define log_fast_debug_assert_in_release=enabled" must be tested with a release build, so run only these tests under "-c opt" to save time in CI. This option will test only ASSERT()s without SLOW_ASSERT()s, so additionally disable "--define log_debug_assert_in_release" which compiles in both.
bazel_with_collection \
test "${BAZEL_BUILD_OPTIONS[@]}" \
+ --config=compile-time-options \
--define wasm=wavm \
- "${COMPILE_TIME_OPTIONS[@]}" \
-c opt \
@envoy//test/common/common:assert_test \
--define log_fast_debug_assert_in_release=enabled \
--define log_debug_assert_in_release=disabled
echo "Building binary with wasm=wavm... and logging disabled"
bazel build "${BAZEL_BUILD_OPTIONS[@]}" \
- --define wasm=wavm \
- --define enable_logging=disabled \
- "${COMPILE_TIME_OPTIONS[@]}" \
- -c fastbuild \
- @envoy//source/exe:envoy-static \
- --build_tag_filters=-nofips
+ --config=compile-time-options \
+ --define wasm=wavm \
+ --define enable_logging=disabled \
+ -c fastbuild \
+ @envoy//source/exe:envoy-static \
+ --build_tag_filters=-nofips
collect_build_profile build
;;
@@ -415,9 +463,8 @@ case $CI_TARGET in
if [[ "$CI_TARGET" == "fuzz_coverage" ]]; then
export FUZZ_COVERAGE=true
fi
- # We use custom BAZEL_BUILD_OPTIONS here to cover profiler's code.
- BAZEL_BUILD_OPTION_LIST="${BAZEL_BUILD_OPTIONS[*]} --define tcmalloc=gperftools" \
- "${ENVOY_SRCDIR}/test/run_envoy_bazel_coverage.sh" \
+ export BAZEL_GRPC_LOG="${ENVOY_BUILD_DIR}/grpc.log"
+ "${ENVOY_SRCDIR}/test/run_envoy_bazel_coverage.sh" \
"${COVERAGE_TEST_TARGETS[@]}"
collect_build_profile coverage
;;
@@ -469,6 +516,7 @@ case $CI_TARGET in
TODAY_DATE=$(date -u -I"date")
export TODAY_DATE
bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/dependency:check \
+ --//tools/dependency:preload_cve_data \
--action_env=TODAY_DATE \
-- -v warn \
-c cves release_dates releases
@@ -507,20 +555,26 @@ case $CI_TARGET in
# Extract the Envoy binary from the tarball
mkdir -p distribution/custom
if [[ "${ENVOY_BUILD_ARCH}" == "x86_64" ]]; then
- ENVOY_RELEASE_TARBALL="/build/bazel.release/x64/bin/release.tar.zst"
+ ENVOY_RELEASE_TARBALL="/build/release/x64/bin/release.tar.zst"
else
- ENVOY_RELEASE_TARBALL="/build/bazel.release/arm64/bin/release.tar.zst"
+ ENVOY_RELEASE_TARBALL="/build/release/arm64/bin/release.tar.zst"
fi
bazel run "${BAZEL_BUILD_OPTIONS[@]}" \
//tools/zstd \
-- --stdout \
-d "$ENVOY_RELEASE_TARBALL" \
| tar xfO - envoy > distribution/custom/envoy
+ bazel run "${BAZEL_BUILD_OPTIONS[@]}" \
+ //tools/zstd \
+ -- --stdout \
+ -d "$ENVOY_RELEASE_TARBALL" \
+ | tar xfO - envoy-contrib > distribution/custom/envoy-contrib
# Build the packages
bazel build "${BAZEL_BUILD_OPTIONS[@]}" \
--remote_download_toplevel \
-c opt \
--//distribution:envoy-binary=//distribution:custom/envoy \
+ --//distribution:envoy-contrib-binary=//distribution:custom/envoy-contrib \
//distribution:packages.tar.gz
if [[ "${ENVOY_BUILD_ARCH}" == "x86_64" ]]; then
cp -a bazel-bin/distribution/packages.tar.gz "${ENVOY_BUILD_DIR}/packages.x64.tar.gz"
@@ -529,21 +583,51 @@ case $CI_TARGET in
fi
;;
- docs)
- setup_clang_toolchain
- echo "generating docs..."
- # Build docs.
- "${ENVOY_SRCDIR}/docs/build.sh"
- ;;
-
- docs-upload)
- setup_clang_toolchain
- "${ENVOY_SRCDIR}/ci/upload_gcs_artifact.sh" /source/generated/docs docs
- ;;
-
- docs-publish-latest)
- BUILD_SHA=$(git rev-parse HEAD)
- curl -X POST -d "$BUILD_SHA" "$NETLIFY_TRIGGER_URL"
+ docker)
+ # This is limited to linux x86/arm64 and expects `release` or `release.server_only` to have
+ # been run first.
+ if ! docker ps &> /dev/null; then
+ echo "Unable to build with Docker. If you are running with ci/run_envoy_docker.sh" \
+ "you should set ENVOY_DOCKER_IN_DOCKER=1"
+ exit 1
+ fi
+ if [[ -z "$CI_SHA1" ]]; then
+ CI_SHA1="$(git rev-parse HEAD~1)"
+ export CI_SHA1
+ fi
+ ENVOY_ARCH_DIR="$(dirname "${ENVOY_BUILD_DIR}")"
+ ENVOY_TARBALL_DIR="${ENVOY_TARBALL_DIR:-${ENVOY_ARCH_DIR}}"
+ _PLATFORMS=()
+ PLATFORM_NAMES=(
+ x64:linux/amd64
+ arm64:linux/arm64)
+ # TODO(phlax): avoid copying bins
+ for platform_name in "${PLATFORM_NAMES[@]}"; do
+ path="$(echo "${platform_name}" | cut -d: -f1)"
+ platform="$(echo "${platform_name}" | cut -d: -f2)"
+ bin_folder="${ENVOY_TARBALL_DIR}/${path}/bin"
+ if [[ ! -e "${bin_folder}/release.tar.zst" ]]; then
+ continue
+ fi
+ _PLATFORMS+=("$platform")
+ if [[ -e "$platform" ]]; then
+ rm -rf "$platform"
+ fi
+ mkdir -p "${platform}"
+ cp -a "${bin_folder}"/* "$platform"
+ done
+ if [[ -z "${_PLATFORMS[*]}" ]]; then
+ echo "No tarballs found in ${ENVOY_TARBALL_DIR}, did you run \`release\` first?" >&2
+ exit 1
+ fi
+ PLATFORMS="$(IFS=, ; echo "${_PLATFORMS[*]}")"
+ export DOCKER_PLATFORM="$PLATFORMS"
+ if [[ -z "${DOCKERHUB_PASSWORD}" && "${#_PLATFORMS[@]}" -eq 1 && -z $ENVOY_DOCKER_SAVE_IMAGE ]]; then
+ # if you are not pushing the images and there is only one platform
+ # then load to Docker (ie local build)
+ export DOCKER_LOAD_IMAGES=1
+ fi
+ "${ENVOY_SRCDIR}/ci/docker_ci.sh"
;;
docker-upload)
@@ -560,10 +644,86 @@ case $CI_TARGET in
dockerhub-readme)
setup_clang_toolchain
bazel build "${BAZEL_BUILD_OPTIONS[@]}" \
+ --remote_download_toplevel \
//distribution/dockerhub:readme
cat bazel-bin/distribution/dockerhub/readme.md
;;
+ docs)
+ setup_clang_toolchain
+ echo "generating docs..."
+ # Build docs.
+ [[ -z "${DOCS_OUTPUT_DIR}" ]] && DOCS_OUTPUT_DIR=generated/docs
+ rm -rf "${DOCS_OUTPUT_DIR}"
+ mkdir -p "${DOCS_OUTPUT_DIR}"
+ if [[ -n "${CI_TARGET_BRANCH}" ]] || [[ -n "${SPHINX_QUIET}" ]]; then
+ export SPHINX_RUNNER_ARGS="-v warn"
+ BAZEL_BUILD_OPTIONS+=("--action_env=SPHINX_RUNNER_ARGS")
+ fi
+ if [[ -n "${DOCS_BUILD_RST}" ]]; then
+ bazel "${BAZEL_STARTUP_OPTIONS[@]}" build "${BAZEL_BUILD_OPTIONS[@]}" //docs:rst
+ cp bazel-bin/docs/rst.tar.gz "$DOCS_OUTPUT_DIR"/envoy-docs-rst.tar.gz
+ fi
+ DOCS_OUTPUT_DIR="$(realpath "$DOCS_OUTPUT_DIR")"
+ bazel "${BAZEL_STARTUP_OPTIONS[@]}" run \
+ "${BAZEL_BUILD_OPTIONS[@]}" \
+ --//tools/tarball:target=//docs:html \
+ //tools/tarball:unpack \
+ "$DOCS_OUTPUT_DIR"
+ ;;
+
+ docs-upload)
+ setup_clang_toolchain
+ "${ENVOY_SRCDIR}/ci/upload_gcs_artifact.sh" /source/generated/docs docs
+ ;;
+
+ fetch|fetch-*)
+ case $CI_TARGET in
+ fetch)
+ targets=("${FETCH_TARGETS[@]}")
+ ;;
+ fetch-check_and_fix_proto_format)
+ targets=("${FETCH_PROTO_TARGETS[@]}")
+ ;;
+ fetch-docs)
+ targets=("${FETCH_DOCS_TARGETS[@]}")
+ ;;
+ fetch-format)
+ targets=("${FETCH_FORMAT_TARGETS[@]}")
+ ;;
+ fetch-gcc)
+ targets=("${FETCH_GCC_TARGETS[@]}")
+ ;;
+ fetch-release)
+ targets=(
+ "${FETCH_BUILD_TARGETS[@]}"
+ "${FETCH_ALL_TEST_TARGETS[@]}")
+ ;;
+ fetch-*coverage)
+ targets=("${FETCH_TEST_TARGETS[@]}")
+ ;;
+ fetch-*san|fetch-compile_time_options)
+ targets=("${FETCH_ALL_TEST_TARGETS[@]}")
+ ;;
+ fetch-api)
+ targets=("${FETCH_API_TARGETS[@]}")
+ ;;
+ *)
+ exit 0
+ ;;
+ esac
+ setup_clang_toolchain
+ FETCH_ARGS=(
+ --noshow_progress
+ --noshow_loading_progress)
+ echo "Fetching ${targets[*]} ..."
+ retry 15 10 bazel \
+ fetch \
+ "${BAZEL_GLOBAL_OPTIONS[@]}" \
+ "${FETCH_ARGS[@]}" \
+ "${targets[@]}"
+ ;;
+
fix_proto_format)
# proto_format.sh needs to build protobuf.
setup_clang_toolchain
@@ -600,6 +760,11 @@ case $CI_TARGET in
bazel_envoy_binary_build fastbuild
;;
+ info)
+ setup_clang_toolchain
+ bazel info "${BAZEL_BUILD_OPTIONS[@]}"
+ ;;
+
msan)
ENVOY_STDLIB=libc++
setup_clang_toolchain
@@ -620,6 +785,8 @@ case $CI_TARGET in
publish)
setup_clang_toolchain
BUILD_SHA="$(git rev-parse HEAD)"
+ ENVOY_COMMIT="${ENVOY_COMMIT:-${BUILD_SHA}}"
+ ENVOY_REPO="${ENVOY_REPO:-envoyproxy/envoy}"
VERSION_DEV="$(cut -d- -f2 < VERSION.txt)"
PUBLISH_ARGS=(
--publish-commitish="$BUILD_SHA"
@@ -629,21 +796,28 @@ case $CI_TARGET in
fi
bazel run "${BAZEL_BUILD_OPTIONS[@]}" \
@envoy_repo//:publish \
- -- "${PUBLISH_ARGS[@]}"
- ;;
-
- release)
- # When testing memory consumption, we want to test against exact byte-counts
- # where possible. As these differ between platforms and compile options, we
- # define the 'release' builds as canonical and test them only in CI, so the
- # toolchain is kept consistent. This ifdef is checked in
- # test/common/stats/stat_test_utility.cc when computing
- # Stats::TestUtil::MemoryTest::mode().
- if [[ "${ENVOY_BUILD_ARCH}" == "x86_64" ]]; then
- BAZEL_BUILD_OPTIONS+=("--test_env=ENVOY_MEMORY_TEST_EXACT=true")
+ -- --repo="$ENVOY_REPO" \
+ "${PUBLISH_ARGS[@]}"
+ ;;
+
+ release|release.server_only)
+ if [[ "$CI_TARGET" == "release" ]]; then
+ # When testing memory consumption, we want to test against exact byte-counts
+ # where possible. As these differ between platforms and compile options, we
+ # define the 'release' builds as canonical and test them only in CI, so the
+ # toolchain is kept consistent. This ifdef is checked in
+ # test/common/stats/stat_test_utility.cc when computing
+ # Stats::TestUtil::MemoryTest::mode().
+ if [[ "${ENVOY_BUILD_ARCH}" == "x86_64" ]]; then
+ BAZEL_BUILD_OPTIONS+=("--test_env=ENVOY_MEMORY_TEST_EXACT=true")
+ fi
fi
setup_clang_toolchain
ENVOY_BINARY_DIR="${ENVOY_BUILD_DIR}/bin"
+ if [[ -e "${ENVOY_BINARY_DIR}" ]]; then
+ echo "Existing output directory found (${ENVOY_BINARY_DIR}), removing ..."
+ rm -rf "${ENVOY_BINARY_DIR}"
+ fi
mkdir -p "$ENVOY_BINARY_DIR"
# As the binary build package enforces compiler options, adding here to ensure the tests and distribution build
# reuse settings and any already compiled artefacts, the bundle itself will always be compiled
@@ -651,18 +825,22 @@ case $CI_TARGET in
BAZEL_RELEASE_OPTIONS=(
--stripopt=--strip-all
-c opt)
- # Run release tests
- echo "Testing with:"
- echo " targets: ${TEST_TARGETS[*]}"
- echo " build options: ${BAZEL_BUILD_OPTIONS[*]}"
- echo " release options: ${BAZEL_RELEASE_OPTIONS[*]}"
- bazel_with_collection \
- test "${BAZEL_BUILD_OPTIONS[@]}" \
- --remote_download_minimal \
- "${BAZEL_RELEASE_OPTIONS[@]}" \
- "${TEST_TARGETS[@]}"
+ if [[ "$CI_TARGET" == "release" ]]; then
+ # Run release tests
+ echo "Testing with:"
+ echo " targets: ${TEST_TARGETS[*]}"
+ echo " build options: ${BAZEL_BUILD_OPTIONS[*]}"
+ echo " release options: ${BAZEL_RELEASE_OPTIONS[*]}"
+ bazel_with_collection \
+ test "${BAZEL_BUILD_OPTIONS[@]}" \
+ --remote_download_minimal \
+ "${BAZEL_RELEASE_OPTIONS[@]}" \
+ "${TEST_TARGETS[@]}"
+ fi
# Build release binaries
- bazel build "${BAZEL_BUILD_OPTIONS[@]}" "${BAZEL_RELEASE_OPTIONS[@]}" \
+ bazel build "${BAZEL_BUILD_OPTIONS[@]}" \
+ "${BAZEL_RELEASE_OPTIONS[@]}" \
+ --remote_download_outputs=toplevel \
//distribution/binary:release
# Copy release binaries to binary export directory
cp -a \
@@ -677,9 +855,10 @@ case $CI_TARGET in
cp -a \
bazel-bin/test/tools/schema_validator/schema_validator_tool.stripped \
"${ENVOY_BINARY_DIR}/schema_validator_tool"
+ echo "Release files created in ${ENVOY_BINARY_DIR}"
;;
- release.server_only)
+ release.server_only.binary)
setup_clang_toolchain
echo "bazel release build..."
bazel_envoy_binary_build release
@@ -688,9 +867,6 @@ case $CI_TARGET in
release.signed)
echo "Signing binary packages..."
setup_clang_toolchain
- # The default config expects these files
- mkdir -p distribution/custom
- cp -a /build/bazel.*/*64 distribution/custom/
bazel build "${BAZEL_BUILD_OPTIONS[@]}" //distribution:signed
cp -a bazel-bin/distribution/release.signed.tar.zst "${BUILD_DIR}/envoy/"
"${ENVOY_SRCDIR}/ci/upload_gcs_artifact.sh" "${BUILD_DIR}/envoy" release
@@ -737,10 +913,12 @@ case $CI_TARGET in
;;
verify_distro)
+ # this can be required if any python deps require compilation
+ setup_clang_toolchain
if [[ "${ENVOY_BUILD_ARCH}" == "x86_64" ]]; then
- PACKAGE_BUILD=/build/bazel.distribution/x64/packages.x64.tar.gz
+ PACKAGE_BUILD=/build/distribution/x64/packages.x64.tar.gz
else
- PACKAGE_BUILD=/build/bazel.distribution/arm64/packages.arm64.tar.gz
+ PACKAGE_BUILD=/build/distribution/arm64/packages.arm64.tar.gz
fi
bazel run "${BAZEL_BUILD_OPTIONS[@]}" \
//distribution:verify_packages \
@@ -756,7 +934,6 @@ case $CI_TARGET in
WORKFLOW="envoy-publish.yml"
# * Note on vars *
# `ENVOY_REPO`: Should always be envoyproxy/envoy unless testing
- # `ENVOY_BRANCH`: Target branch for PRs, source branch for others
# `COMMIT`: This may be a merge commit in a PR
# `ENVOY_COMMIT`: The actual last commit of branch/PR
# `ENVOY_HEAD_REF`: must also be set in PRs to provide a unique key for job grouping,
@@ -764,9 +941,10 @@ case $CI_TARGET in
COMMIT="$(git rev-parse HEAD)"
ENVOY_COMMIT="${ENVOY_COMMIT:-${COMMIT}}"
ENVOY_REPO="${ENVOY_REPO:-envoyproxy/envoy}"
+ # Note: CI is always called in main, the CI request is matched from there
echo "Trigger workflow (${WORKFLOW})"
echo " Repo: ${ENVOY_REPO}"
- echo " Branch: ${ENVOY_BRANCH}"
+ echo " Branch: main"
echo " Ref: ${COMMIT}"
echo " Inputs:"
echo " sha: ${ENVOY_COMMIT}"
@@ -779,7 +957,7 @@ case $CI_TARGET in
-- --repo="$ENVOY_REPO" \
--trigger-app-id="$GITHUB_APP_ID" \
--trigger-installation-id="$GITHUB_INSTALL_ID" \
- --trigger-ref="$ENVOY_BRANCH" \
+ --trigger-ref="main" \
--trigger-workflow="$WORKFLOW" \
--trigger-inputs="$INPUTS"
;;
diff --git a/ci/docker_ci.sh b/ci/docker_ci.sh
index 3845486acf07e..fdf6cdaf74b56 100755
--- a/ci/docker_ci.sh
+++ b/ci/docker_ci.sh
@@ -14,22 +14,23 @@ set -e
# DOCKERHUB_PASSWORD=mypassword
#
## Set these to simulate types of CI run
-# AZP_SHA1=MOCKSHA
-# AZP_BRANCH=refs/heads/main
-# AZP_BRANCH=refs/heads/release/v1.43
-# AZP_BRANCH=refs/tags/v1.77.3
+# CI_SHA1=MOCKSHA
+# CI_BRANCH=refs/heads/main
+# CI_BRANCH=refs/heads/release/v1.43
+# CI_BRANCH=refs/tags/v1.77.3
##
# Workaround for https://github.com/envoyproxy/envoy/issues/26634
DOCKER_BUILD_TIMEOUT="${DOCKER_BUILD_TIMEOUT:-400}"
+DOCKER_PLATFORM="${DOCKER_PLATFORM:-linux/arm64,linux/amd64}"
function is_windows() {
[[ -n "$DOCKER_FAKE_WIN" ]] || [[ "$(uname -s)" == *NT* ]]
}
if [[ -n "$DOCKER_CI_DRYRUN" ]]; then
- AZP_SHA1="${AZP_SHA1:-MOCKSHA}"
+ CI_SHA1="${CI_SHA1:-MOCKSHA}"
if is_windows; then
WINDOWS_IMAGE_BASE="${WINDOWS_IMAGE_BASE:-mcr.microsoft.com/windows/fakecore}"
@@ -50,7 +51,7 @@ fi
if [[ "$ENVOY_VERSION" =~ $DEV_VERSION_REGEX ]]; then
# Dev version
IMAGE_POSTFIX="-dev"
- IMAGE_NAME="${AZP_SHA1}"
+ IMAGE_NAME="${CI_SHA1}"
else
# Non-dev version
IMAGE_POSTFIX=""
@@ -58,12 +59,14 @@ else
fi
# Only push images for main builds, and non-dev release branch builds
-if [[ -n "$DOCKERHUB_USERNAME" ]] && [[ -n "$DOCKERHUB_PASSWORD" ]]; then
- if [[ "${AZP_BRANCH}" == "${MAIN_BRANCH}" ]]; then
+if [[ -n "$DOCKER_LOAD_IMAGES" ]]; then
+ LOAD_IMAGES=1
+elif [[ -n "$DOCKERHUB_USERNAME" ]] && [[ -n "$DOCKERHUB_PASSWORD" ]]; then
+ if [[ "${CI_BRANCH}" == "${MAIN_BRANCH}" ]]; then
echo "Pushing images for main."
PUSH_IMAGES_TO_REGISTRY=1
- elif [[ "${AZP_BRANCH}" =~ ${RELEASE_BRANCH_REGEX} ]] && ! [[ "$ENVOY_VERSION" =~ $DEV_VERSION_REGEX ]]; then
- echo "Pushing images for release branch ${AZP_BRANCH}."
+ elif [[ "${CI_BRANCH}" =~ ${RELEASE_BRANCH_REGEX} ]] && ! [[ "$ENVOY_VERSION" =~ $DEV_VERSION_REGEX ]]; then
+ echo "Pushing images for release branch ${CI_BRANCH}."
PUSH_IMAGES_TO_REGISTRY=1
else
echo 'Ignoring non-release branch for docker push.'
@@ -72,7 +75,7 @@ else
echo 'No credentials for docker push.'
fi
-ENVOY_DOCKER_IMAGE_DIRECTORY="${ENVOY_DOCKER_IMAGE_DIRECTORY:-${BUILD_STAGINGDIRECTORY:-.}/build_images}"
+ENVOY_DOCKER_IMAGE_DIRECTORY="${ENVOY_DOCKER_IMAGE_DIRECTORY:-${BUILD_DIR:-.}/build_images}"
# This prefix is altered for the private security images on setec builds.
DOCKER_IMAGE_PREFIX="${DOCKER_IMAGE_PREFIX:-envoyproxy/envoy}"
if [[ -z "$DOCKER_CI_DRYRUN" ]]; then
@@ -84,7 +87,7 @@ config_env() {
echo ">> BUILDX: install"
echo "> docker run --rm --privileged tonistiigi/binfmt --install all"
echo "> docker buildx rm multi-builder 2> /dev/null || :"
- echo "> docker buildx create --use --name multi-builder --platform linux/arm64,linux/amd64"
+ echo "> docker buildx create --use --name multi-builder --platform ${DOCKER_PLATFORM}"
if [[ -n "$DOCKER_CI_DRYRUN" ]]; then
return
@@ -95,7 +98,7 @@ config_env() {
# Remove older build instance
docker buildx rm multi-builder 2> /dev/null || :
- docker buildx create --use --name multi-builder --platform linux/arm64,linux/amd64
+ docker buildx create --use --name multi-builder --platform "${DOCKER_PLATFORM}"
}
if is_windows; then
@@ -152,7 +155,7 @@ build_platforms() {
elif [[ "${build_type}" == *-google-vrp ]]; then
echo -n "linux/amd64"
else
- echo -n "linux/arm64,linux/amd64"
+ echo -n "$DOCKER_PLATFORM"
fi
}
@@ -210,7 +213,10 @@ build_and_maybe_push_image () {
args+=(
"--sbom=false"
"--provenance=false")
- if [[ "${image_type}" =~ debug ]]; then
+ if [[ -n "$LOAD_IMAGES" ]]; then
+ action="BUILD+LOAD"
+ args+=("--load")
+ elif [[ "${image_type}" =~ debug ]]; then
# For linux if its the debug image then push immediately for release branches,
# otherwise just test the build
if [[ -n "$PUSH_IMAGES_TO_REGISTRY" ]]; then
@@ -341,7 +347,7 @@ tag_variants () {
# Only push latest on main/dev builds.
if [[ "$ENVOY_VERSION" =~ $DEV_VERSION_REGEX ]]; then
- if [[ "${AZP_BRANCH}" == "${MAIN_BRANCH}" ]]; then
+ if [[ "${CI_BRANCH}" == "${MAIN_BRANCH}" ]]; then
variant_type="latest"
fi
else
diff --git a/ci/envoy_build_sha.sh b/ci/envoy_build_sha.sh
index e2923189e35e2..03d8eb936ab9c 100644
--- a/ci/envoy_build_sha.sh
+++ b/ci/envoy_build_sha.sh
@@ -1,4 +1,12 @@
#!/bin/bash
-ENVOY_BUILD_SHA=$(grep envoyproxy/envoy-build-ubuntu "$(dirname "$0")"/../.bazelrc | sed -e 's#.*envoyproxy/envoy-build-ubuntu:\(.*\)#\1#' | uniq)
+
+ENVOY_BUILD_CONTAINER="$(grep envoyproxy/envoy-build-ubuntu "$(dirname "$0")"/../.bazelrc | sed -e 's#.*envoyproxy/envoy-build-ubuntu:\(.*\)#\1#' | uniq)"
+ENVOY_BUILD_SHA="$(echo "${ENVOY_BUILD_CONTAINER}" | cut -d@ -f1)"
+ENVOY_BUILD_CONTAINER_SHA="$(echo "${ENVOY_BUILD_CONTAINER}" | cut -d@ -f2)"
+
+if [[ -n "$ENVOY_BUILD_CONTAINER_SHA" ]]; then
+ ENVOY_BUILD_CONTAINER_SHA="${ENVOY_BUILD_CONTAINER_SHA:7}"
+fi
+
[[ $(wc -l <<< "${ENVOY_BUILD_SHA}" | awk '{$1=$1};1') == 1 ]] || (echo ".bazelrc envoyproxy/envoy-build-ubuntu hashes are inconsistent!" && exit 1)
diff --git a/ci/filter_example_setup.sh b/ci/filter_example_setup.sh
index f0605cadb2369..94511ac5babeb 100644
--- a/ci/filter_example_setup.sh
+++ b/ci/filter_example_setup.sh
@@ -16,10 +16,10 @@ ENVOY_FILTER_EXAMPLE_TESTS=(
if [[ ! -d "${ENVOY_FILTER_EXAMPLE_SRCDIR}/.git" ]]; then
rm -rf "${ENVOY_FILTER_EXAMPLE_SRCDIR}"
- git clone https://github.com/envoyproxy/envoy-filter-example.git "${ENVOY_FILTER_EXAMPLE_SRCDIR}"
+ git clone -q https://github.com/envoyproxy/envoy-filter-example.git "${ENVOY_FILTER_EXAMPLE_SRCDIR}"
fi
-(cd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" && git fetch origin && git checkout -f "${ENVOY_FILTER_EXAMPLE_GITSHA}")
+(cd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" && git fetch -q origin && git checkout -q -f "${ENVOY_FILTER_EXAMPLE_GITSHA}")
sed -e "s|{ENVOY_SRCDIR}|${ENVOY_SRCDIR}|" "${ENVOY_SRCDIR}"/ci/WORKSPACE.filter.example > "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/WORKSPACE
mkdir -p "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/bazel
diff --git a/ci/format_pre.sh b/ci/format_pre.sh
index c85da1c6ae3d4..f4627dea89d8b 100755
--- a/ci/format_pre.sh
+++ b/ci/format_pre.sh
@@ -57,14 +57,13 @@ CURRENT=spelling
"${ENVOY_SRCDIR}/tools/spelling/check_spelling_pedantic.py" --mark check
# TODO(phlax): move clang/buildifier checks to bazel rules (/aspects)
-if [[ -n "$AZP_BRANCH" ]]; then
+if [[ -n "$CI_BRANCH" ]]; then
CURRENT=check_format_test
"${ENVOY_SRCDIR}/tools/code_format/check_format_test_helper.sh" --log=WARN
fi
CURRENT=check_format
-echo "Running ${ENVOY_SRCDIR}/tools/code_format/check_format.py"
-time "${ENVOY_SRCDIR}/tools/code_format/check_format.py" fix --fail_on_diff
+bazel "${BAZEL_STARTUP_OPTIONS[@]}" run "${BAZEL_BUILD_OPTIONS[@]}" //tools/code_format:check_format -- fix --fail_on_diff
if [[ "${#FAILED[@]}" -ne "0" ]]; then
echo "${BASH_ERR_PREFIX}TESTS FAILED:" >&2
diff --git a/ci/mac_ci_setup.sh b/ci/mac_ci_setup.sh
index 2d9ad14caa702..2646a366e9572 100755
--- a/ci/mac_ci_setup.sh
+++ b/ci/mac_ci_setup.sh
@@ -29,20 +29,23 @@ function install {
}
function retry () {
- local returns=1 i=1
- while ((i<=HOMEBREW_RETRY_ATTEMPTS)); do
+ local returns=1 i=1 attempts
+ attempts="${1}"
+ interval="${2}"
+ shift 2
+ while ((i<=attempts)); do
if "$@"; then
returns=0
break
else
- sleep "$HOMEBREW_RETRY_INTERVAL";
+ sleep "$interval";
((i++))
fi
done
return "$returns"
}
-if ! retry brew update; then
+if ! retry "$HOMEBREW_RETRY_ATTEMPTS" "$HOMEBREW_RETRY_INTERVAL" brew update; then
# Do not exit early if update fails.
echo "Failed to update homebrew"
fi
@@ -53,13 +56,4 @@ do
is_installed "${DEP}" || install "${DEP}"
done
-# Required as bazel and a foreign bazelisk are installed in the latest macos vm image, we have
-# to unlink/overwrite them to install bazelisk
-echo "Installing bazelisk"
-brew reinstall --force bazelisk
-if ! brew link --overwrite bazelisk; then
- echo "Failed to install and link bazelisk"
- exit 1
-fi
-
-bazel version
+retry 5 2 bazel version
diff --git a/ci/mac_ci_steps.sh b/ci/mac_ci_steps.sh
index 2ab857c72970a..dc779a665c713 100755
--- a/ci/mac_ci_steps.sh
+++ b/ci/mac_ci_steps.sh
@@ -11,9 +11,6 @@ trap finish EXIT
echo "disk space at beginning of build:"
df -h
-# shellcheck source=ci/setup_cache.sh
-. "$(dirname "$0")"/setup_cache.sh
-
read -ra BAZEL_BUILD_EXTRA_OPTIONS <<< "${BAZEL_BUILD_EXTRA_OPTIONS:-}"
read -ra BAZEL_EXTRA_TEST_OPTIONS <<< "${BAZEL_EXTRA_TEST_OPTIONS:-}"
diff --git a/ci/run_envoy_docker.sh b/ci/run_envoy_docker.sh
index a158d851b5878..a8a5fe1bf5760 100755
--- a/ci/run_envoy_docker.sh
+++ b/ci/run_envoy_docker.sh
@@ -65,7 +65,12 @@ fi
# The IMAGE_ID defaults to the CI hash but can be set to an arbitrary image ID (found with 'docker
# images').
-[[ -z "${IMAGE_ID}" ]] && IMAGE_ID="${ENVOY_BUILD_SHA}"
+if [[ -z "${IMAGE_ID}" ]]; then
+ IMAGE_ID="${ENVOY_BUILD_SHA}"
+ if ! is_windows && [[ -n "$ENVOY_BUILD_CONTAINER_SHA" ]]; then
+ IMAGE_ID="${ENVOY_BUILD_SHA}@sha256:${ENVOY_BUILD_CONTAINER_SHA}"
+ fi
+fi
[[ -z "${ENVOY_DOCKER_BUILD_DIR}" ]] && ENVOY_DOCKER_BUILD_DIR="${DEFAULT_ENVOY_DOCKER_BUILD_DIR}"
# Replace backslash with forward slash for Windows style paths
ENVOY_DOCKER_BUILD_DIR="${ENVOY_DOCKER_BUILD_DIR//\\//}"
@@ -81,13 +86,13 @@ VOLUMES=(
-v "${ENVOY_DOCKER_BUILD_DIR}":"${BUILD_DIR_MOUNT_DEST}"
-v "${SOURCE_DIR}":"${SOURCE_DIR_MOUNT_DEST}")
-if ! is_windows && [[ -n "$ENVOY_DOCKER_IN_DOCKER" ]]; then
+if [[ -n "$ENVOY_DOCKER_IN_DOCKER" || -n "$ENVOY_SHARED_TMP_DIR" ]]; then
# Create a "shared" directory that has the same path in/outside the container
# This allows the host docker engine to see artefacts using a temporary path created inside the container,
# at the same path.
# For example, a directory created with `mktemp -d --tmpdir /tmp/bazel-shared` can be mounted as a volume
# from within the build container.
- SHARED_TMP_DIR=/tmp/bazel-shared
+ SHARED_TMP_DIR="${ENVOY_SHARED_TMP_DIR:-/tmp/bazel-shared}"
mkdir -p "${SHARED_TMP_DIR}"
chmod +rwx "${SHARED_TMP_DIR}"
VOLUMES+=(-v "${SHARED_TMP_DIR}":"${SHARED_TMP_DIR}")
@@ -97,13 +102,10 @@ if [[ -n "${ENVOY_DOCKER_PULL}" ]]; then
time docker pull "${ENVOY_BUILD_IMAGE}"
fi
-
# Since we specify an explicit hash, docker-run will pull from the remote repo if missing.
docker run --rm \
"${ENVOY_DOCKER_OPTIONS[@]}" \
"${VOLUMES[@]}" \
- -e AZP_BRANCH \
- -e AZP_COMMIT_SHA \
-e HTTP_PROXY \
-e HTTPS_PROXY \
-e NO_PROXY \
@@ -113,15 +115,18 @@ docker run --rm \
-e BAZEL_EXTRA_TEST_OPTIONS \
-e BAZEL_FAKE_SCM_REVISION \
-e BAZEL_REMOTE_CACHE \
+ -e BAZEL_STARTUP_EXTRA_OPTIONS \
+ -e CI_BRANCH \
+ -e CI_SHA1 \
-e CI_TARGET_BRANCH \
-e DOCKERHUB_USERNAME \
-e DOCKERHUB_PASSWORD \
+ -e ENVOY_DOCKER_SAVE_IMAGE \
-e ENVOY_STDLIB \
-e BUILD_REASON \
- -e BAZEL_NO_CACHE_TEST_RESULTS \
-e BAZEL_REMOTE_INSTANCE \
- -e GOOGLE_BES_PROJECT_ID \
-e GCP_SERVICE_ACCOUNT_KEY \
+ -e GCP_SERVICE_ACCOUNT_KEY_PATH \
-e NUM_CPUS \
-e ENVOY_BRANCH \
-e ENVOY_RBE \
@@ -134,12 +139,12 @@ docker run --rm \
-e ENVOY_HEAD_REF \
-e ENVOY_PUBLISH_DRY_RUN \
-e ENVOY_REPO \
+ -e ENVOY_TARBALL_DIR \
-e SYSTEM_PULLREQUEST_PULLREQUESTNUMBER \
-e GCS_ARTIFACT_BUCKET \
-e GITHUB_TOKEN \
-e GITHUB_APP_ID \
-e GITHUB_INSTALL_ID \
- -e NETLIFY_TRIGGER_URL \
-e BUILD_SOURCEBRANCHNAME \
-e BAZELISK_BASE_URL \
-e ENVOY_BUILD_ARCH \
diff --git a/ci/setup_cache.sh b/ci/setup_cache.sh
deleted file mode 100755
index 6c770323eb355..0000000000000
--- a/ci/setup_cache.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-
-set -e
-
-if [[ -n "${GCP_SERVICE_ACCOUNT_KEY:0:1}" ]]; then
- # mktemp will create a tempfile with u+rw permission minus umask, it will not be readable by all
- # users by default.
- GCP_SERVICE_ACCOUNT_KEY_FILE=$(mktemp -t gcp_service_account.XXXXXX.json)
-
- gcp_service_account_cleanup() {
- echo "Deleting service account key file..."
- rm -rf "${GCP_SERVICE_ACCOUNT_KEY_FILE}"
- }
-
- trap gcp_service_account_cleanup EXIT
-
- bash -c 'echo "${GCP_SERVICE_ACCOUNT_KEY}"' | base64 --decode > "${GCP_SERVICE_ACCOUNT_KEY_FILE}"
-
- export BAZEL_BUILD_EXTRA_OPTIONS+=" --google_credentials=${GCP_SERVICE_ACCOUNT_KEY_FILE}"
-
- if [[ -n "${GOOGLE_BES_PROJECT_ID}" ]]; then
- export BAZEL_BUILD_EXTRA_OPTIONS+=" --config=google-bes --bes_instance_name=${GOOGLE_BES_PROJECT_ID}"
- fi
-
-fi
-
-if [[ -n "${BAZEL_REMOTE_CACHE}" ]]; then
- export BAZEL_BUILD_EXTRA_OPTIONS+=" --remote_cache=${BAZEL_REMOTE_CACHE}"
- echo "Set up bazel remote read/write cache at ${BAZEL_REMOTE_CACHE}."
-
- if [[ -z "${ENVOY_RBE}" ]]; then
- export BAZEL_BUILD_EXTRA_OPTIONS+=" --jobs=HOST_CPUS*.99 --remote_timeout=600"
- echo "using local build cache."
- # Normalize branches - `release/vX.xx`, `vX.xx`, `vX.xx.x` -> `vX.xx`
- BRANCH_NAME="$(echo "${CI_TARGET_BRANCH}" | cut -d/ -f2 | cut -d. -f-2)"
- if [[ "$BRANCH_NAME" == "merge" ]]; then
- # Manually run PR commit - there is no easy way of telling which branch
- # it is, so just set it to `main` - otherwise it tries to cache as `branch/merge`
- BRANCH_NAME=main
- fi
- BAZEL_REMOTE_INSTANCE="branch/${BRANCH_NAME}"
- fi
-
- if [[ -n "${BAZEL_REMOTE_INSTANCE}" ]]; then
- export BAZEL_BUILD_EXTRA_OPTIONS+=" --remote_instance_name=${BAZEL_REMOTE_INSTANCE}"
- echo "instance_name: ${BAZEL_REMOTE_INSTANCE}."
- fi
-else
- echo "No remote cache is set, skipping setup remote cache."
-fi
diff --git a/ci/test_docker_ci.sh b/ci/test_docker_ci.sh
index 6bfa4479aa4b2..bd9748aa2b05f 100755
--- a/ci/test_docker_ci.sh
+++ b/ci/test_docker_ci.sh
@@ -54,7 +54,7 @@ _test () {
fi
export ENVOY_VERSION="${version}"
- export AZP_BRANCH="$branch"
+ export CI_BRANCH="$branch"
# this should be ignored if the non-push
export DOCKERHUB_USERNAME=DHUSER
export DOCKERHUB_PASSWORD=DHPASSWORD
@@ -68,13 +68,13 @@ _test () {
if [[ "$DOCKER_CI_TEST_COMMIT" ]]; then
echo "COMMIT(${name}): > ${testdata}"
- echo " DOCKER_FAKE_WIN=${DOCKER_FAKE_WIN} ENVOY_VERSION=${version} ENVOY_DOCKER_IMAGE_DIRECTORY=/non/existent/test/path AZP_BRANCH=${branch} DOCKER_CI_DRYRUN=1 ./ci/docker_ci.sh | grep -E \"^>\""
+ echo " DOCKER_FAKE_WIN=${DOCKER_FAKE_WIN} ENVOY_VERSION=${version} ENVOY_DOCKER_IMAGE_DIRECTORY=/non/existent/test/path CI_BRANCH=${branch} DOCKER_CI_DRYRUN=1 ./ci/docker_ci.sh | grep -E \"^>\""
./ci/docker_ci.sh | grep -E "^>" > "$testdata"
return
fi
echo "TEST(${name}): <> ${testdata}"
- echo " DOCKER_FAKE_WIN=${DOCKER_FAKE_WIN} ENVOY_VERSION=${version} ENVOY_DOCKER_IMAGE_DIRECTORY=/non/existent/test/path AZP_BRANCH=${branch} DOCKER_CI_DRYRUN=1 ./ci/docker_ci.sh | grep -E \"^>\""
+ echo " DOCKER_FAKE_WIN=${DOCKER_FAKE_WIN} ENVOY_VERSION=${version} ENVOY_DOCKER_IMAGE_DIRECTORY=/non/existent/test/path CI_BRANCH=${branch} DOCKER_CI_DRYRUN=1 ./ci/docker_ci.sh | grep -E \"^>\""
generated="$(mktemp)"
./ci/docker_ci.sh | grep -E "^>" > "$generated"
diff --git a/ci/upload_gcs_artifact.sh b/ci/upload_gcs_artifact.sh
index 6367184a408b5..339a4e98dc4dc 100755
--- a/ci/upload_gcs_artifact.sh
+++ b/ci/upload_gcs_artifact.sh
@@ -7,27 +7,17 @@ if [[ -z "${GCS_ARTIFACT_BUCKET}" ]]; then
exit 1
fi
-if [[ -z "${GCP_SERVICE_ACCOUNT_KEY}" ]]; then
- echo "GCP key is not set, not uploading artifacts."
- exit 1
-fi
-
read -ra BAZEL_STARTUP_OPTIONS <<< "${BAZEL_STARTUP_OPTION_LIST:-}"
read -ra BAZEL_BUILD_OPTIONS <<< "${BAZEL_BUILD_OPTION_LIST:-}"
-remove_key () {
- rm -rf "$KEYFILE"
-}
-
-trap remove_key EXIT
-
-# Fail when service account key is not specified
-KEYFILE="$(mktemp)"
-bash -c 'echo ${GCP_SERVICE_ACCOUNT_KEY}' | base64 --decode > "$KEYFILE"
+if [[ ! -s "${GCP_SERVICE_ACCOUNT_KEY_PATH}" ]]; then
+ echo "GCP key is not set, not uploading artifacts."
+ exit 1
+fi
cat < ~/.boto
[Credentials]
-gs_service_key_file=${KEYFILE}
+gs_service_key_file=${GCP_SERVICE_ACCOUNT_KEY_PATH}
EOF
SOURCE_DIRECTORY="$1"
diff --git a/ci/verify_examples.sh b/ci/verify_examples.sh
index f710f497e0728..62b57d5ed2e9c 100755
--- a/ci/verify_examples.sh
+++ b/ci/verify_examples.sh
@@ -13,6 +13,8 @@ FLAKY_SANDBOXES=(
double-proxy
# https://github.com/envoyproxy/envoy/issues/28543
golang-network
+ # https://github.com/envoyproxy/envoy/issues/31347
+ local_ratelimit
# https://github.com/envoyproxy/envoy/issues/28541
wasm-cc
# https://github.com/envoyproxy/envoy/issues/28546
diff --git a/ci/windows_ci_steps.sh b/ci/windows_ci_steps.sh
index 58fd0a9a81d58..c16d7392602ac 100755
--- a/ci/windows_ci_steps.sh
+++ b/ci/windows_ci_steps.sh
@@ -11,9 +11,6 @@ trap finish EXIT
echo "disk space at beginning of build:"
df -h
-# shellcheck source=ci/setup_cache.sh
-. "$(dirname "$0")"/setup_cache.sh
-
[ -z "${ENVOY_SRCDIR}" ] && export ENVOY_SRCDIR=/c/source
read -ra BAZEL_STARTUP_OPTIONS <<< "${BAZEL_STARTUP_OPTIONS:-}"
diff --git a/configs/BUILD b/configs/BUILD
index 17b5cf99b5fd6..ab37531ddba9b 100644
--- a/configs/BUILD
+++ b/configs/BUILD
@@ -1,9 +1,9 @@
+load("@base_pip3//:requirements.bzl", "requirement")
+load("@rules_python//python:defs.bzl", "py_binary")
load(
"//bazel:envoy_build_system.bzl",
"envoy_package",
)
-load("@rules_python//python:defs.bzl", "py_binary")
-load("@base_pip3//:requirements.bzl", "requirement")
licenses(["notice"]) # Apache 2
@@ -43,8 +43,8 @@ filegroup(
"//bazel:disable_admin_functionality": [],
"//conditions:default": [
"envoy-demo.yaml",
- "freebind/freebind.yaml",
"envoy-tap-config.yaml",
+ "freebind/freebind.yaml",
],
}),
)
diff --git a/contrib/BUILD b/contrib/BUILD
index f6813770abcca..34a896d22e409 100644
--- a/contrib/BUILD
+++ b/contrib/BUILD
@@ -1,4 +1,4 @@
-load("@envoy_api//bazel:utils.bzl", "json_data")
+load("@envoy_toolshed//:macros.bzl", "json_data")
load(":contrib_build_config.bzl", "CONTRIB_EXTENSIONS")
licenses(["notice"]) # Apache 2
diff --git a/contrib/common/active_redirect/source/BUILD b/contrib/common/active_redirect/source/BUILD
new file mode 100644
index 0000000000000..8a770f9b26089
--- /dev/null
+++ b/contrib/common/active_redirect/source/BUILD
@@ -0,0 +1,34 @@
+load(
+ "//bazel:envoy_build_system.bzl",
+ "envoy_cc_library",
+ "envoy_contrib_package",
+)
+
+licenses(["notice"]) # Apache 2
+
+envoy_contrib_package()
+
+envoy_cc_library(
+ name = "active_redirect_policy_lib",
+ srcs = ["active_redirect_policy_impl.cc"],
+ hdrs = ["active_redirect_policy_impl.h"],
+ external_deps = ["abseil_optional"],
+ visibility = ["//visibility:public"],
+ deps = [
+ "//contrib/envoy/http:active_redirect_policy_interface",
+ "//envoy/http:header_map_interface",
+ "//envoy/router:router_interface",
+ "//source/common/common:empty_string",
+ "//source/common/common:utility_lib",
+ "//source/common/config:utility_lib",
+ "//source/common/http:header_utility_lib",
+ "//source/common/http:headers_lib",
+ "//source/common/http:path_utility_lib",
+ "//source/common/http:utility_lib",
+ "//source/common/protobuf:utility_lib",
+ "//source/common/router:header_parser_lib",
+ "@envoy_api//envoy/config/core/v3:pkg_cc_proto",
+ "@envoy_api//envoy/config/route/v3:pkg_cc_proto",
+ "@envoy_api//envoy/type/v3:pkg_cc_proto",
+ ],
+)
diff --git a/contrib/common/active_redirect/source/active_redirect_policy_impl.cc b/contrib/common/active_redirect/source/active_redirect_policy_impl.cc
new file mode 100644
index 0000000000000..03d9bdeab2ebd
--- /dev/null
+++ b/contrib/common/active_redirect/source/active_redirect_policy_impl.cc
@@ -0,0 +1,225 @@
+#include "contrib/common/active_redirect/source/active_redirect_policy_impl.h"
+
+#include <algorithm>
+#include <memory>
+
+#include "source/common/common/empty_string.h"
+#include "source/common/common/regex.h"
+#include "source/common/common/utility.h"
+#include "source/common/config/utility.h"
+#include "source/common/http/path_utility.h"
+
+namespace Envoy {
+namespace Router {
+
+InternalActiveRedirectPolicyImpl::InternalActiveRedirectPolicyImpl(
+ const envoy::config::route::v3::InternalActiveRedirectPolicy& policy_config,
+ ProtobufMessage::ValidationVisitor& validator, absl::string_view current_route_name)
+ : current_route_name_(current_route_name),
+ redirect_response_codes_(buildRedirectResponseCodes(policy_config)),
+ max_internal_redirects_(
+ PROTOBUF_GET_WRAPPED_OR_DEFAULT(policy_config, max_internal_redirects, 1)),
+ enabled_(true), allow_cross_scheme_redirect_(policy_config.allow_cross_scheme_redirect()),
+ redirect_url_(policy_config.redirect_url()),
+ request_headers_parser_(HeaderParser::configure(policy_config.request_headers_to_add())),
+ redirect_url_rewrite_regex_(
+ policy_config.has_redirect_url_rewrite_regex()
+ ? Regex::Utility::parseRegex(policy_config.redirect_url_rewrite_regex().pattern())
+ : nullptr),
+ redirect_url_rewrite_regex_substitution_(
+ policy_config.has_redirect_url_rewrite_regex()
+ ? policy_config.redirect_url_rewrite_regex().substitution()
+ : ""),
+ host_rewrite_(policy_config.host_rewrite_literal()),
+ forced_use_original_host_(policy_config.forced_use_original_host()),
+ forced_add_header_before_route_matcher_(policy_config.forced_add_header_before_route_matcher()) {
+ for (const auto& predicate : policy_config.predicates()) {
+ auto& factory =
+ Envoy::Config::Utility::getAndCheckFactory<InternalRedirectPredicateFactory>(predicate);
+ auto config = factory.createEmptyConfigProto();
+ Envoy::Config::Utility::translateOpaqueConfig(predicate.typed_config(), validator, *config);
+ predicate_factories_.emplace_back(&factory, std::move(config));
+ }
+}
+
+InternalActiveRedirectPolicyImpl::InternalActiveRedirectPolicyImpl(
+ const envoy::config::route::v3::InternalActiveRedirectPolicy::RedirectPolicy& policy_config,
+ ProtobufMessage::ValidationVisitor& validator, absl::string_view current_route_name)
+ : current_route_name_(current_route_name),
+ redirect_response_codes_(buildRedirectResponseCodes(policy_config)),
+ max_internal_redirects_(
+ PROTOBUF_GET_WRAPPED_OR_DEFAULT(policy_config, max_internal_redirects, 1)),
+ enabled_(true), allow_cross_scheme_redirect_(policy_config.allow_cross_scheme_redirect()),
+ redirect_url_(policy_config.redirect_url()),
+ request_headers_parser_(HeaderParser::configure(policy_config.request_headers_to_add())),
+ redirect_url_rewrite_regex_(
+ policy_config.has_redirect_url_rewrite_regex()
+ ? Regex::Utility::parseRegex(policy_config.redirect_url_rewrite_regex().pattern())
+ : nullptr),
+ redirect_url_rewrite_regex_substitution_(
+ policy_config.has_redirect_url_rewrite_regex()
+ ? policy_config.redirect_url_rewrite_regex().substitution()
+ : ""),
+ host_rewrite_(policy_config.host_rewrite_literal()) {
+ for (const auto& predicate : policy_config.predicates()) {
+ auto& factory =
+ Envoy::Config::Utility::getAndCheckFactory<InternalRedirectPredicateFactory>(predicate);
+ auto config = factory.createEmptyConfigProto();
+ Envoy::Config::Utility::translateOpaqueConfig(predicate.typed_config(), validator, *config);
+ predicate_factories_.emplace_back(&factory, std::move(config));
+ }
+}
+
+std::vector<InternalRedirectPredicateSharedPtr>
+InternalActiveRedirectPolicyImpl::predicates() const {
+ std::vector<InternalRedirectPredicateSharedPtr> predicates;
+ for (const auto& predicate_factory : predicate_factories_) {
+ predicates.emplace_back(predicate_factory.first->createInternalRedirectPredicate(
+ *predicate_factory.second, current_route_name_));
+ }
+ return predicates;
+}
+
+absl::flat_hash_set<Http::Code> InternalActiveRedirectPolicyImpl::buildRedirectResponseCodes(
+ const envoy::config::route::v3::InternalActiveRedirectPolicy& policy_config) const {
+ if (policy_config.redirect_response_codes_size() == 0) {
+ return absl::flat_hash_set<Http::Code>{};
+ }
+
+ absl::flat_hash_set<Http::Code> ret;
+ std::for_each(policy_config.redirect_response_codes().begin(),
+ policy_config.redirect_response_codes().end(), [&ret](uint32_t response_code) {
+ const absl::flat_hash_set<uint32_t> valid_redirect_response_code = {
+ 301, 302, 303, 307, 308, 200};
+ if (!valid_redirect_response_code.contains(response_code)) {
+ ret.insert(static_cast<Http::Code>(response_code));
+ }
+ });
+ return ret;
+}
+
+absl::flat_hash_set<Http::Code> InternalActiveRedirectPolicyImpl::buildRedirectResponseCodes(
+ const envoy::config::route::v3::InternalActiveRedirectPolicy::RedirectPolicy& policy_config)
+ const {
+ if (policy_config.redirect_response_codes_size() == 0) {
+ return absl::flat_hash_set<Http::Code>{};
+ }
+
+ absl::flat_hash_set<Http::Code> ret;
+ std::for_each(policy_config.redirect_response_codes().begin(),
+ policy_config.redirect_response_codes().end(), [&ret](uint32_t response_code) {
+ const absl::flat_hash_set<uint32_t> valid_redirect_response_code = {
+ 301, 302, 303, 307, 308, 200};
+ if (!valid_redirect_response_code.contains(response_code)) {
+ ret.insert(static_cast<Http::Code>(response_code));
+ }
+ });
+ return ret;
+}
+
+void InternalActiveRedirectPolicyImpl::evaluateHeaders(
+ Http::HeaderMap& headers, const StreamInfo::StreamInfo* stream_info) const {
+ request_headers_parser_->evaluateHeaders(headers, stream_info);
+ if (!host_rewrite_.empty()) {
+ Http::RequestHeaderMap& request_headers = dynamic_cast<Http::RequestHeaderMap&>(headers);
+ request_headers.setHost(host_rewrite_);
+ }
+}
+
+std::string
+InternalActiveRedirectPolicyImpl::redirectUrl(absl::optional<absl::string_view> current_path) const {
+ if (!redirect_url_.empty()) {
+ ENVOY_LOG(debug, "The redirect url: {}", redirect_url_);
+ return redirect_url_;
+ }
+
+ RELEASE_ASSERT(current_path.has_value(),
+ "The internal redirect address uses a regular expression, but does not pass in "
+ "the current path value");
+ auto just_path(Http::PathUtil::removeQueryAndFragment(current_path.value()));
+ return redirect_url_rewrite_regex_->replaceAll(just_path,
+ redirect_url_rewrite_regex_substitution_);
+}
+
+bool InternalActiveRedirectPolicyImpl::forcedUseOriginalHost() const {
+ return forced_use_original_host_;
+}
+
+InternalActiveRedirectPoliciesImpl::InternalActiveRedirectPoliciesImpl(
+ const envoy::config::route::v3::InternalActiveRedirectPolicy& policy_config,
+ ProtobufMessage::ValidationVisitor& validator, absl::string_view current_route_name) {
+ if (policy_config.policies().empty() && !policy_config.redirect_response_codes().empty()) {
+ ENVOY_LOG(warn, "Please configure the redirection policy using the Policies field, the old "
+ "configuration will be deprecated");
+ auto policy = std::make_unique<InternalActiveRedirectPolicyImpl>(policy_config, validator,
+ current_route_name);
+ policies_.emplace_back(std::move(policy));
+ }
+
+ for (const auto& policy : policy_config.policies()) {
+ auto policy_impl =
+ std::make_unique<InternalActiveRedirectPolicyImpl>(policy, validator, current_route_name);
+ policies_.emplace_back(std::move(policy_impl));
+ }
+
+ if (policies_.empty()) {
+ ENVOY_LOG(warn, "No redirection policy is currently configured. A default value is generated");
+ auto policy_impl = std::make_unique<InternalActiveRedirectPolicyImpl>();
+ policies_.emplace_back(std::move(policy_impl));
+ }
+}
+
+InternalActiveRedirectPoliciesImpl::InternalActiveRedirectPoliciesImpl() {
+ auto policy_impl = std::make_unique<InternalActiveRedirectPolicyImpl>();
+ policies_.emplace_back(std::move(policy_impl));
+}
+
+std::vector<InternalRedirectPredicateSharedPtr>
+InternalActiveRedirectPoliciesImpl::predicates() const {
+ return policies_.at(current_policy_index_)->predicates();
+}
+
+void InternalActiveRedirectPoliciesImpl::evaluateHeaders(
+ Http::HeaderMap& headers, const StreamInfo::StreamInfo* stream_info) const {
+ return policies_.at(current_policy_index_)->evaluateHeaders(headers, stream_info);
+}
+
+std::string
+InternalActiveRedirectPoliciesImpl::redirectUrl(absl::optional<absl::string_view> current_path) const {
+ return policies_.at(current_policy_index_)->redirectUrl(current_path);
+}
+
+bool InternalActiveRedirectPoliciesImpl::enabled() const {
+ return policies_.at(current_policy_index_)->enabled();
+}
+
+bool InternalActiveRedirectPoliciesImpl::shouldRedirectForResponseCode(
+ const Http::Code& response_code) const {
+ for (ActiveRedirectPolicies::size_type i = 0; i < policies_.size(); i++) {
+ if (policies_.at(i)->shouldRedirectForResponseCode(response_code)) {
+ current_policy_index_ = i;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+uint32_t InternalActiveRedirectPoliciesImpl::maxInternalRedirects() const {
+ return policies_.at(current_policy_index_)->maxInternalRedirects();
+}
+
+bool InternalActiveRedirectPoliciesImpl::isCrossSchemeRedirectAllowed() const {
+ return policies_.at(current_policy_index_)->isCrossSchemeRedirectAllowed();
+}
+
+bool InternalActiveRedirectPoliciesImpl::forcedUseOriginalHost() const {
+ return policies_.at(current_policy_index_)->forcedUseOriginalHost();
+}
+
+bool InternalActiveRedirectPoliciesImpl::forcedAddHeaderBeforeRouteMatcher() const {
+ return policies_.at(current_policy_index_)->forcedAddHeaderBeforeRouteMatcher();
+}
+
+} // namespace Router
+} // namespace Envoy
diff --git a/contrib/common/active_redirect/source/active_redirect_policy_impl.h b/contrib/common/active_redirect/source/active_redirect_policy_impl.h
new file mode 100644
index 0000000000000..1facdd3e8cb38
--- /dev/null
+++ b/contrib/common/active_redirect/source/active_redirect_policy_impl.h
@@ -0,0 +1,119 @@
+#pragma once
+
+#include <string>
+#include <vector>
+
+#include "envoy/config/core/v3/base.pb.h"
+#include "envoy/config/route/v3/route.pb.h"
+#include "envoy/config/route/v3/route_components.pb.h"
+#include "envoy/type/v3/percent.pb.h"
+
+#include "source/common/http/header_utility.h"
+#include "source/common/protobuf/protobuf.h"
+#include "source/common/protobuf/utility.h"
+#include "source/common/router/header_parser.h"
+
+#include "absl/container/node_hash_map.h"
+#include "absl/types/optional.h"
+#include "contrib/envoy/http/active_redirect_policy.h"
+
+namespace Envoy {
+namespace Router {
+
+/**
+ * Implementation of InternalActiveRedirectPolicyImpl that reads from the proto
+ * InternalActiveRedirectPolicyImpl of the RouteAction.
+ */
+class InternalActiveRedirectPolicyImpl : public InternalActiveRedirectPolicy,
+ Logger::Loggable<Logger::Id::router> {
+public:
+ // Constructor that enables internal redirect with policy_config controlling the configurable
+ // behaviors.
+ explicit InternalActiveRedirectPolicyImpl(
+ const envoy::config::route::v3::InternalActiveRedirectPolicy& policy_config,
+ ProtobufMessage::ValidationVisitor& validator, absl::string_view current_route_name);
+ explicit InternalActiveRedirectPolicyImpl(
+ const envoy::config::route::v3::InternalActiveRedirectPolicy::RedirectPolicy& policy_config,
+ ProtobufMessage::ValidationVisitor& validator, absl::string_view current_route_name);
+ // Default constructor that disables internal redirect.
+ InternalActiveRedirectPolicyImpl() = default;
+
+ bool enabled() const override { return enabled_; }
+
+ bool shouldRedirectForResponseCode(const Http::Code& response_code) const override {
+ return redirect_response_codes_.contains(response_code);
+ }
+
+ std::vector<InternalRedirectPredicateSharedPtr> predicates() const override;
+
+ uint32_t maxInternalRedirects() const override { return max_internal_redirects_; }
+
+ bool isCrossSchemeRedirectAllowed() const override { return allow_cross_scheme_redirect_; }
+
+ void evaluateHeaders(Http::HeaderMap& headers,
+ const StreamInfo::StreamInfo* stream_info) const override;
+
+ std::string redirectUrl(absl::optional