diff --git a/.DS_Store b/.DS_Store deleted file mode 100644 index 834b874..0000000 Binary files a/.DS_Store and /dev/null differ diff --git a/.envrc.recommended b/.envrc.recommended new file mode 100644 index 0000000..a7aaf82 --- /dev/null +++ b/.envrc.recommended @@ -0,0 +1,3 @@ +watch_file nix/devShells.nix + +use flake diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000..9cdd01d --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,4 @@ +* @tealbase/backend @tealbase/postgres +migrations/ @tealbase/dev-workflows @tealbase/postgres @tealbase/backend +docker/orioledb @tealbase/postgres @tealbase/backend +common.vars.pkr.hcl @tealbase/postgres @tealbase/backend diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index 5c8088a..4ead358 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1,6 +1,6 @@ # These are supported funding model platforms -github: [ supabase ] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] +github: [ tealbase ] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] patreon: # Replace with a single Patreon username open_collective: # Replace with a single Open Collective username ko_fi: # Replace with a single Ko-fi username diff --git a/.github/PULL_REQUEST_TEMPLATE/default.md b/.github/PULL_REQUEST_TEMPLATE/default.md new file mode 100644 index 0000000..15bf13f --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/default.md @@ -0,0 +1,15 @@ +## What kind of change does this PR introduce? + +Bug fix, feature, docs update, ... + +## What is the current behavior? + +Please link any relevant issues here. + +## What is the new behavior? + +Feel free to include screenshots if the change is visual. + +## Additional context + +Add any other context or screenshots. \ No newline at end of file diff --git a/.github/PULL_REQUEST_TEMPLATE/extension_upgrade.md b/.github/PULL_REQUEST_TEMPLATE/extension_upgrade.md new file mode 100644 index 0000000..9db2a06 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/extension_upgrade.md @@ -0,0 +1,49 @@ +## What kind of change does this PR introduce? + +- upgrade _extension_ from _v0.0.0_ to _v0.0.0_ + +## Additional context + +Add any other context or screenshots. + +## Action Items + +- [ ] **New extension releases** were checked for any breaking changes +- [ ] **Extensions compatibility** checked + * Proceed to [extensions compatibility testing](#extensions-compatibility-testing), mark as done after everything is completed +- [ ] **Backup and Restore** checked + * Proceed to [backup testing](#backup-testing) while extensions are enabled + - After every restore, re-run the tests specified in step [3.1](#extensions-compatibility-testing) + +### Extensions compatibility testing + +1. Enable every extension + 1. Check Postgres’ log output for any error messages while doing so + 1. This might unearth incompatibilities due to unsupported internal functions, missing libraries, or missing permissions +2. Disable every extension + 1. Check Postgres’ log output for any cleanup-related error messages +3. Re-enable each extension + 1. Run basic tests against the features they offer, e.g.: + 1. `pg_net` - execute HTTP requests + 2. `pg_graphql` - execute queries and mutations + 3. 
…to be filled in + +### Backup Testing + +Follow the testing steps for all the following cases: + +- Pause on new Postgres version, restore on new Postgres version +- Pause on older Postgres version, restore on new Postgres version +- Run a single-file backup, restore the backup + +#### Testing steps + +1. Generate dummy data + * the ‘Countries’ or ‘Slack clone’ SQL editor snippets are decent datasets to work with, albeit limited +2. Save a db stats snapshot file + * Do this by running `supa db-stats gather -p ` +3. Back up the database, by pausing the project or otherwise +4. Restore the backup, by unpausing the project or via the CLI +5. Check that the data has been recovered successfully + 1. Visual checks/navigating through the tables works + 2. Run `supa db-stats verify` against the project and the previously saved file \ No newline at end of file diff --git a/.github/actions/shared-checkout/action.yml b/.github/actions/shared-checkout/action.yml new file mode 100644 index 0000000..59a236d --- /dev/null +++ b/.github/actions/shared-checkout/action.yml @@ -0,0 +1,12 @@ +name: Checkout +description: Checkout repository for pull requests and branches +runs: + using: "composite" + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + repository: ${{ github.event.pull_request.head.repo.full_name || github.repository }} + ref: ${{ github.event.pull_request.head.sha || github.sha }} + fetch-depth: 0 + fetch-tags: true diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000..0d13d96 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,4 @@ +Please go to the `Preview` tab and select the appropriate sub-template: + +* [Default](?expand=1&template=default.md) +* [Extension Upgrade](?expand=1&template=extension_upgrade.md) \ No newline at end of file diff --git a/.github/workflows/ami-release-nix-single.yml b/.github/workflows/ami-release-nix-single.yml new file mode 100644 index 0000000..8117bd8 --- /dev/null +++ b/.github/workflows/ami-release-nix-single.yml @@ -0,0 +1,156 @@ +name: Release Single AMI Nix + +on: + workflow_dispatch: + inputs: + postgres_version: + description: 'PostgreSQL major version to build (e.g. 
15)' + required: true + type: string + branch: + description: 'Branch to run the workflow from' + required: true + type: string + default: 'main' + +permissions: + contents: write + id-token: write + +jobs: + build: + runs-on: large-linux-arm + timeout-minutes: 150 + + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + with: + ref: ${{ github.event.inputs.branch }} + - name: aws-creds + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "us-east-1" + output-credentials: true + role-duration-seconds: 7200 + + - name: Get current branch SHA + id: get_sha + run: | + echo "sha=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT + + - uses: DeterminateSystems/nix-installer-action@main + + - name: Set PostgreSQL version environment variable + run: echo "POSTGRES_MAJOR_VERSION=${{ github.event.inputs.postgres_version }}" >> $GITHUB_ENV + + - name: Generate common-nix.vars.pkr.hcl + run: | + PG_VERSION=$(nix run nixpkgs#yq -- '.postgres_release["postgres'${{ env.POSTGRES_MAJOR_VERSION }}'"]' ansible/vars.yml) + PG_VERSION=$(echo "$PG_VERSION" | tr -d '"') # Remove any surrounding quotes + echo 'postgres-version = "'$PG_VERSION'"' > common-nix.vars.pkr.hcl + # Ensure there's a newline at the end of the file + echo "" >> common-nix.vars.pkr.hcl + + - name: Build AMI stage 1 + env: + POSTGRES_MAJOR_VERSION: ${{ env.POSTGRES_MAJOR_VERSION }} + run: | + packer init amazon-arm64-nix.pkr.hcl + GIT_SHA=${{ steps.get_sha.outputs.sha }} + packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "ansible_arguments=-e postgresql_major=${POSTGRES_MAJOR_VERSION}" amazon-arm64-nix.pkr.hcl + + - name: Build AMI stage 2 + env: + POSTGRES_MAJOR_VERSION: ${{ env.POSTGRES_MAJOR_VERSION }} + run: | + packer init stage2-nix-psql.pkr.hcl + GIT_SHA=${{ steps.get_sha.outputs.sha }} + POSTGRES_MAJOR_VERSION=${{ env.POSTGRES_MAJOR_VERSION }} + packer build -var "git_sha=${GIT_SHA}" -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var "postgres_major_version=${POSTGRES_MAJOR_VERSION}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" stage2-nix-psql.pkr.hcl + + - name: Grab release version + id: process_release_version + run: | + VERSION=$(cat common-nix.vars.pkr.hcl | sed -e 's/postgres-version = "\(.*\)"/\1/g') + echo "version=$VERSION" >> $GITHUB_OUTPUT + + - name: Create nix flake revision tarball + run: | + GIT_SHA=${{ steps.get_sha.outputs.sha }} + MAJOR_VERSION=${{ env.POSTGRES_MAJOR_VERSION }} + + mkdir -p "/tmp/pg_upgrade_bin/${MAJOR_VERSION}" + echo "$GIT_SHA" >> "/tmp/pg_upgrade_bin/${MAJOR_VERSION}/nix_flake_version" + tar -czf "/tmp/pg_binaries.tar.gz" -C "/tmp/pg_upgrade_bin" . 
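+          # Annotation (ours, hedged): the tarball holds a single file, <major>/nix_flake_version, containing the git SHA of this build's flake revision; the pg_upgrade tooling downstream presumably reads it to pin which flake revision to fetch binaries from.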
+ + - name: configure aws credentials - staging + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload software manifest to s3 staging + run: | + cd ansible + ansible-playbook -i localhost \ + -e "ami_release_version=${{ steps.process_release_version.outputs.version }}" \ + -e "internal_artifacts_bucket=${{ secrets.ARTIFACTS_BUCKET }}" \ + -e "postgres_major_version=${{ env.POSTGRES_MAJOR_VERSION }}" \ + manifest-playbook.yml + + - name: Upload nix flake revision to s3 staging + run: | + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/24.04.tar.gz + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/upgrade_bundle.tar.gz + + - name: configure aws credentials - prod + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.PROD_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload software manifest to s3 prod + run: | + cd ansible + ansible-playbook -i localhost \ + -e "ami_release_version=${{ steps.process_release_version.outputs.version }}" \ + -e "internal_artifacts_bucket=${{ secrets.PROD_ARTIFACTS_BUCKET }}" \ + -e "postgres_major_version=${{ env.POSTGRES_MAJOR_VERSION }}" \ + manifest-playbook.yml + + - name: Upload nix flake revision to s3 prod + run: | + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/24.04.tar.gz + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/upgrade_bundle.tar.gz + + - name: Create release + uses: softprops/action-gh-release@v2 + with: + name: ${{ steps.process_release_version.outputs.version }} + tag_name: ${{ steps.process_release_version.outputs.version }} + target_commitish: ${{ steps.get_sha.outputs.sha }} + + - name: Slack Notification on Failure + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: 'gha-failures-notifier' + SLACK_COLOR: 'danger' + SLACK_MESSAGE: 'Building Postgres AMI failed' + SLACK_FOOTER: '' + + - name: Cleanup resources after build + if: ${{ always() }} + run: | + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids + + - name: Cleanup resources on build cancellation + if: ${{ cancelled() }} + run: | + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids + diff --git a/.github/workflows/ami-release-nix.yml b/.github/workflows/ami-release-nix.yml new file mode 100644 index 0000000..1256bda --- /dev/null +++ 
b/.github/workflows/ami-release-nix.yml @@ -0,0 +1,177 @@ +name: Release AMI Nix + +on: + push: + branches: + - develop + - release/* + paths: + - '.github/workflows/ami-release-nix.yml' + - 'common-nix.vars.pkr.hcl' + - 'ansible/vars.yml' + workflow_dispatch: + +permissions: + contents: write + id-token: write + +jobs: + prepare: + runs-on: large-linux-x86 + outputs: + postgres_versions: ${{ steps.set-versions.outputs.postgres_versions }} + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - uses: DeterminateSystems/nix-installer-action@main + + - name: Set PostgreSQL versions + id: set-versions + run: | + VERSIONS=$(nix run nixpkgs#yq -- '.postgres_major[]' ansible/vars.yml | nix run nixpkgs#jq -- -R -s -c 'split("\n")[:-1]') + echo "postgres_versions=$VERSIONS" >> $GITHUB_OUTPUT + + build: + needs: prepare + strategy: + matrix: + postgres_version: ${{ fromJson(needs.prepare.outputs.postgres_versions) }} + include: + - runner: large-linux-arm + runs-on: ${{ matrix.runner }} + timeout-minutes: 150 + + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - name: aws-creds + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "us-east-1" + output-credentials: true + role-duration-seconds: 7200 + - uses: DeterminateSystems/nix-installer-action@main + + - name: Run checks if triggered manually + if: ${{ github.event_name == 'workflow_dispatch' }} + run: | + SUFFIX=$(nix run nixpkgs#yq -- ".postgres_release[\"postgres${{ matrix.postgres_version }}\"]" ansible/vars.yml | sed -E 's/[0-9\.]+(.*)$/\1/') + if [[ -z "$SUFFIX" ]] ; then + echo "Version must include non-numeric characters if built manually." + exit 1 + fi + + - name: Set PostgreSQL version environment variable + run: echo "POSTGRES_MAJOR_VERSION=${{ matrix.postgres_version }}" >> $GITHUB_ENV + + - name: Generate common-nix.vars.pkr.hcl + run: | + PG_VERSION=$(nix run nixpkgs#yq -- '.postgres_release["postgres'${{ matrix.postgres_version }}'"]' ansible/vars.yml) + PG_VERSION=$(echo "$PG_VERSION" | tr -d '"') # Remove any surrounding quotes + echo 'postgres-version = "'$PG_VERSION'"' > common-nix.vars.pkr.hcl + # Ensure there's a newline at the end of the file + echo "" >> common-nix.vars.pkr.hcl + + - name: Build AMI stage 1 + env: + POSTGRES_MAJOR_VERSION: ${{ env.POSTGRES_MAJOR_VERSION }} + run: | + packer init amazon-arm64-nix.pkr.hcl + GIT_SHA=${{github.sha}} + # why is postgresql_major defined here instead of where the _three_ other postgresql_* variables are defined? 
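+          # best-guess answer (ours): stage 1 only runs the ansible playbook, which needs the major version, so it is forwarded via ansible_arguments here; the remaining postgresql_* variables are only consumed by the stage 2 packer template below.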
+ packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "ansible_arguments=-e postgresql_major=${POSTGRES_MAJOR_VERSION}" amazon-arm64-nix.pkr.hcl + + - name: Build AMI stage 2 + env: + POSTGRES_MAJOR_VERSION: ${{ env.POSTGRES_MAJOR_VERSION }} + run: | + packer init stage2-nix-psql.pkr.hcl + GIT_SHA=${{github.sha}} + POSTGRES_MAJOR_VERSION=${{ env.POSTGRES_MAJOR_VERSION }} + packer build -var "git_sha=${GIT_SHA}" -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var "postgres_major_version=${POSTGRES_MAJOR_VERSION}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" stage2-nix-psql.pkr.hcl + + - name: Grab release version + id: process_release_version + run: | + VERSION=$(cat common-nix.vars.pkr.hcl | sed -e 's/postgres-version = "\(.*\)"/\1/g') + echo "version=$VERSION" >> $GITHUB_OUTPUT + + - name: Create nix flake revision tarball + run: | + GIT_SHA=${{github.sha}} + MAJOR_VERSION=${{ env.POSTGRES_MAJOR_VERSION }} + + mkdir -p "/tmp/pg_upgrade_bin/${MAJOR_VERSION}" + echo "$GIT_SHA" >> "/tmp/pg_upgrade_bin/${MAJOR_VERSION}/nix_flake_version" + tar -czf "/tmp/pg_binaries.tar.gz" -C "/tmp/pg_upgrade_bin" . + + - name: configure aws credentials - staging + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload software manifest to s3 staging + run: | + cd ansible + ansible-playbook -i localhost \ + -e "ami_release_version=${{ steps.process_release_version.outputs.version }}" \ + -e "internal_artifacts_bucket=${{ secrets.ARTIFACTS_BUCKET }}" \ + -e "postgres_major_version=${{ env.POSTGRES_MAJOR_VERSION }}" \ + manifest-playbook.yml + + - name: Upload nix flake revision to s3 staging + run: | + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/24.04.tar.gz + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/upgrade_bundle.tar.gz + + - name: configure aws credentials - prod + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.PROD_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload software manifest to s3 prod + run: | + cd ansible + ansible-playbook -i localhost \ + -e "ami_release_version=${{ steps.process_release_version.outputs.version }}" \ + -e "internal_artifacts_bucket=${{ secrets.PROD_ARTIFACTS_BUCKET }}" \ + -e "postgres_major_version=${{ env.POSTGRES_MAJOR_VERSION }}" \ + manifest-playbook.yml + + - name: Upload nix flake revision to s3 prod + run: | + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/24.04.tar.gz + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/upgrade_bundle.tar.gz + + - 
name: Create release + uses: softprops/action-gh-release@v2 + with: + name: ${{ steps.process_release_version.outputs.version }} + tag_name: ${{ steps.process_release_version.outputs.version }} + target_commitish: ${{github.sha}} + + - name: Slack Notification on Failure + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: 'gha-failures-notifier' + SLACK_COLOR: 'danger' + SLACK_MESSAGE: 'Building Postgres AMI failed' + SLACK_FOOTER: '' + + - name: Cleanup resources after build + if: ${{ always() }} + run: | + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids + + - name: Cleanup resources on build cancellation + if: ${{ cancelled() }} + run: | + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids diff --git a/.github/workflows/check-shellscripts.yml b/.github/workflows/check-shellscripts.yml new file mode 100644 index 0000000..1eb15ee --- /dev/null +++ b/.github/workflows/check-shellscripts.yml @@ -0,0 +1,31 @@ +name: Check shell scripts + +on: + push: + branches: + - develop + pull_request: + workflow_dispatch: + +permissions: + contents: read + +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - name: Run ShellCheck + uses: ludeeus/action-shellcheck@master + env: + SHELLCHECK_OPTS: -e SC2001 -e SC2002 -e SC2143 + with: + scandir: './ansible/files/admin_api_scripts' + + - name: Run ShellCheck on pg_upgrade scripts + uses: ludeeus/action-shellcheck@master + env: + SHELLCHECK_OPTS: -e SC2001 -e SC2002 -e SC2143 + with: + scandir: './ansible/files/admin_api_scripts/pg_upgrade_scripts' diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..b6ac1cf --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,50 @@ +name: Check merge requirements + +on: + pull_request: + +permissions: + contents: read + +jobs: + check-release-version: + timeout-minutes: 5 + runs-on: ubuntu-latest + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - name: Load postgres_release values + id: load_postgres_release + uses: mikefarah/yq@master + with: + args: eval '.postgres_release' ansible/vars.yml + # The output will be available as steps.load_postgres_release.outputs.stdout + + - name: Run checks + run: | + POSTGRES_RELEASES="${{ steps.load_postgres_release.outputs.stdout }}" + + # Iterate through each release + for release in $(echo "$POSTGRES_RELEASES" | yq eval 'keys | .[]' -); do + VERSION=$(echo "$POSTGRES_RELEASES" | yq eval ".\"$release\"" -) + if [[ "$release" == "postgresorioledb-17" ]]; then + # Check for suffix after -orioledb + if [[ "$VERSION" =~ -orioledb(.*) ]]; then + SUFFIX="${BASH_REMATCH[1]}" + if [[ -n "$SUFFIX" ]]; then + echo "We no longer allow merging versions with suffixes after -orioledb." + exit 1 + fi + fi + else + # Check for suffix after version digits + if [[ "$VERSION" =~ ([0-9]+\.[0-9]+\.[0-9]+)(.*) ]]; then + SUFFIX="${BASH_REMATCH[2]}" + if [[ -n "$SUFFIX" ]]; then + echo "We no longer allow merging versions with suffixes after version $VERSION." 
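+              # illustrative example (ours): a release value like 15.1.1.78-rc1 yields SUFFIX="-rc1" and fails here, while a plain 15.1.1.78 passes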
+ exit 1 + fi + fi + fi + done diff --git a/.github/workflows/dockerhub-release-matrix.yml b/.github/workflows/dockerhub-release-matrix.yml new file mode 100644 index 0000000..e41d126 --- /dev/null +++ b/.github/workflows/dockerhub-release-matrix.yml @@ -0,0 +1,253 @@ +name: Release all major versions on Dockerhub + +on: + push: + branches: + - develop + - release/* + paths: + - ".github/workflows/dockerhub-release-matrix.yml" + - "ansible/vars.yml" + workflow_dispatch: + +permissions: + contents: read + id-token: write + +jobs: + prepare: + runs-on: large-linux-x86 + outputs: + matrix_config: ${{ steps.set-matrix.outputs.matrix_config }} + steps: + - uses: DeterminateSystems/nix-installer-action@main + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - name: Generate build matrix + id: set-matrix + run: | + nix run nixpkgs#nushell -- -c 'let versions = (open ansible/vars.yml | get postgres_major) + let matrix = ($versions | each { |ver| + let version = ($ver | str trim) + let dockerfile = $"Dockerfile-($version)" + if ($dockerfile | path exists) { + { + version: $version, + dockerfile: $dockerfile + } + } else { + null + } + } | compact) + + let matrix_config = { + include: $matrix + } + + $"matrix_config=($matrix_config | to json -r)" | save --append $env.GITHUB_OUTPUT' + build: + needs: prepare + strategy: + matrix: ${{ fromJson(needs.prepare.outputs.matrix_config) }} + runs-on: large-linux-x86 + outputs: + build_args: ${{ steps.args.outputs.result }} + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - uses: DeterminateSystems/nix-installer-action@main + - name: Set PostgreSQL version environment variable + run: echo "POSTGRES_MAJOR_VERSION=${{ matrix.version }}" >> $GITHUB_ENV + + - id: args + run: | + nix run nixpkgs#nushell -- -c ' + open ansible/vars.yml + | items { |key value| {name: $key, item: $value} } + | where { |it| ($it.item | describe) == "string" } + | each { |it| $"($it.name)=($it.item)" } + | str join "\n" + | save --append $env.GITHUB_OUTPUT + ' + build_release_image: + needs: [prepare, build] + strategy: + matrix: + postgres: ${{ fromJson(needs.prepare.outputs.matrix_config).include }} + arch: [amd64, arm64] + runs-on: ${{ matrix.arch == 'amd64' && 'large-linux-x86' || 'large-linux-arm' }} + timeout-minutes: 180 + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - uses: DeterminateSystems/nix-installer-action@main + - run: docker context create builders + - uses: docker/setup-buildx-action@v3 + with: + endpoint: builders + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - name: Get image tag + id: image + run: | + if [[ "${{ matrix.arch }}" == "arm64" ]]; then + pg_version=$(nix run nixpkgs#nushell -- -c ' + let version = "${{ matrix.postgres.version }}" + let release_key = if ($version | str contains "orioledb") { + $"postgresorioledb-17" + } else { + $"postgres($version)" + } + open ansible/vars.yml | get postgres_release | get $release_key | str trim + ') + echo "pg_version=tealbase/postgres:$pg_version" >> $GITHUB_OUTPUT + else + pg_version=$(nix run nixpkgs#nushell -- -c ' + let version = "${{ matrix.postgres.version }}" + let release_key = if ($version | str contains "orioledb") { + $"postgresorioledb-17" + } else { + $"postgres($version)" + } + open ansible/vars.yml | get postgres_release | get $release_key | str trim + ') + echo 
"pg_version=tealbase/postgres:$pg_version" >> $GITHUB_OUTPUT + fi + - id: build + uses: docker/build-push-action@v5 + with: + push: true + build-args: | + ${{ needs.build.outputs.build_args }} + target: production + tags: ${{ steps.image.outputs.pg_version }}_${{ matrix.arch }} + platforms: linux/${{ matrix.arch }} + cache-from: type=gha,scope=${{ github.ref_name }}-latest-${{ matrix.arch }} + cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-latest-${{ matrix.arch }} + file: ${{ matrix.postgres.dockerfile }} + merge_manifest: + needs: [prepare, build, build_release_image] + strategy: + matrix: + include: ${{ fromJson(needs.prepare.outputs.matrix_config).include }} + runs-on: large-linux-x86 + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - uses: DeterminateSystems/nix-installer-action@main + - uses: docker/setup-buildx-action@v3 + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - name: Get image tag + id: get_version + run: | + nix run nixpkgs#nushell -- -c ' + let version = "${{ matrix.version }}" + let release_key = if ($version | str contains "orioledb") { + $"postgresorioledb-17" + } else { + $"postgres($version)" + } + let pg_version = (open ansible/vars.yml | get postgres_release | get $release_key | str trim) + $"pg_version=tealbase/postgres:($pg_version)" | save --append $env.GITHUB_OUTPUT + ' + - name: Output version + id: output_version + run: | + echo "result=${{ steps.get_version.outputs.pg_version }}" >> $GITHUB_OUTPUT + - name: Collect versions + id: collect_versions + run: | + echo "${{ steps.output_version.outputs.result }}" >> results.txt # Append results + - name: Upload Results Artifact + uses: actions/upload-artifact@v4 + with: + name: merge_results-${{ matrix.version }} + path: results.txt + if-no-files-found: warn + - name: Merge multi-arch manifests + run: | + docker buildx imagetools create -t ${{ steps.get_version.outputs.pg_version }} \ + ${{ steps.get_version.outputs.pg_version }}_amd64 \ + ${{ steps.get_version.outputs.pg_version }}_arm64 + combine_results: + needs: [prepare, merge_manifest] + runs-on: large-linux-x86 + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - uses: DeterminateSystems/nix-installer-action@main + + - name: Debug Input from Prepare + run: | + echo "Raw matrix_config output:" + echo "${{ needs.prepare.outputs.matrix_config }}" + - name: Get Versions from Matrix Config + id: get_versions + run: | + nix run nixpkgs#nushell -- -c ' + # Parse the matrix configuration directly + let matrix_config = (${{ toJson(needs.prepare.outputs.matrix_config) }} | from json) + + # Get versions directly from include array + let versions = ($matrix_config.include | get version) + + echo "Versions: $versions" + + # Convert the versions to a comma-separated string + let versions_str = ($versions | str join ",") + $"versions=$versions_str" | save --append $env.GITHUB_ENV + ' + - name: Download Results Artifacts + uses: actions/download-artifact@v4 + with: + pattern: merge_results-* + - name: Combine Results + id: combine + run: | + nix run nixpkgs#nushell -- -c ' + # Get all results files and process them in one go + let files = (ls **/results.txt | get name) + echo $"Found files: ($files)" + + let matrix = { + include: ( + $files + | each { |file| open $file } # Open each file + | each { |content| $content | lines } # Split into lines + | flatten # Flatten the nested lists + | 
where { |line| $line != "" } # Filter empty lines + | each { |line| + # Extract just the version part after the last colon + let version = ($line | parse "tealbase/postgres:{version}" | get version.0) + {version: $version} + } + ) + } + + let json_output = ($matrix | to json -r) # -r for raw output + echo $"Debug output: ($json_output)" + + $"matrix=($json_output)" | save --append $env.GITHUB_OUTPUT + ' + - name: Debug Combined Results + run: | + echo "Combined Results: '${{ steps.combine.outputs.matrix }}'" + outputs: + matrix: ${{ steps.combine.outputs.matrix }} + publish: + needs: combine_results + permissions: + contents: read + packages: write + id-token: write + strategy: + matrix: ${{ fromJson(needs.combine_results.outputs.matrix) }} + uses: ./.github/workflows/mirror.yml + with: + version: ${{ matrix.version }} + secrets: inherit diff --git a/.github/workflows/manual-docker-release.yml b/.github/workflows/manual-docker-release.yml new file mode 100644 index 0000000..783b4d2 --- /dev/null +++ b/.github/workflows/manual-docker-release.yml @@ -0,0 +1,262 @@ +name: Manual Docker Artifacts Release + +on: + workflow_dispatch: + inputs: + postgresVersion: + description: 'Optional. Postgres version to publish against, i.e. 15.1.1.78' + required: false + +permissions: + id-token: write + contents: read + +jobs: + prepare: + runs-on: large-linux-x86 + outputs: + matrix_config: ${{ steps.set-matrix.outputs.matrix_config }} + steps: + - uses: DeterminateSystems/nix-installer-action@main + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - name: Generate build matrix + id: set-matrix + run: | + nix run nixpkgs#nushell -- -c 'let versions = (open ansible/vars.yml | get postgres_major) + let matrix = ($versions | each { |ver| + let version = ($ver | str trim) + let dockerfile = $"Dockerfile-($version)" + if ($dockerfile | path exists) { + { + version: $version, + dockerfile: $dockerfile + } + } else { + null + } + } | compact) + + let matrix_config = { + include: $matrix + } + + $"matrix_config=($matrix_config | to json -r)" | save --append $env.GITHUB_OUTPUT' + build: + needs: prepare + strategy: + matrix: ${{ fromJson(needs.prepare.outputs.matrix_config) }} + runs-on: large-linux-x86 + outputs: + build_args: ${{ steps.args.outputs.result }} + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - uses: DeterminateSystems/nix-installer-action@main + - name: Set PostgreSQL version environment variable + run: echo "POSTGRES_MAJOR_VERSION=${{ matrix.version }}" >> $GITHUB_ENV + + - id: args + run: | + nix run nixpkgs#nushell -- -c ' + open ansible/vars.yml + | items { |key value| {name: $key, item: $value} } + | where { |it| ($it.item | describe) == "string" } + | each { |it| $"($it.name)=($it.item)" } + | str join "\n" + | save --append $env.GITHUB_OUTPUT + ' + build_release_image: + needs: [prepare, build] + strategy: + matrix: + postgres: ${{ fromJson(needs.prepare.outputs.matrix_config).include }} + arch: [amd64, arm64] + runs-on: ${{ matrix.arch == 'amd64' && 'large-linux-x86' || 'large-linux-arm' }} + timeout-minutes: 180 + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - uses: DeterminateSystems/nix-installer-action@main + - run: docker context create builders + - uses: docker/setup-buildx-action@v3 + with: + endpoint: builders + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - name: 
Get image tag + id: image + run: | + if [[ "${{ matrix.arch }}" == "arm64" ]]; then + pg_version=$(nix run nixpkgs#nushell -- -c ' + let version = "${{ matrix.postgres.version }}" + let release_key = if ($version | str contains "orioledb") { + $"postgresorioledb-17" + } else { + $"postgres($version)" + } + let base_version = (open ansible/vars.yml | get postgres_release | get $release_key | str trim) + let final_version = if "${{ inputs.postgresVersion }}" != "" { + "${{ inputs.postgresVersion }}" + } else { + $base_version + } + $final_version | str trim + ') + echo "pg_version=tealbase/postgres:$pg_version" >> $GITHUB_OUTPUT + else + pg_version=$(nix run nixpkgs#nushell -- -c ' + let version = "${{ matrix.postgres.version }}" + let release_key = if ($version | str contains "orioledb") { + $"postgresorioledb-17" + } else { + $"postgres($version)" + } + let base_version = (open ansible/vars.yml | get postgres_release | get $release_key | str trim) + let final_version = if "${{ inputs.postgresVersion }}" != "" { + "${{ inputs.postgresVersion }}" + } else { + $base_version + } + $final_version | str trim + ') + echo "pg_version=tealbase/postgres:$pg_version" >> $GITHUB_OUTPUT + fi + - id: build + uses: docker/build-push-action@v5 + with: + push: true + build-args: | + ${{ needs.build.outputs.build_args }} + target: production + tags: ${{ steps.image.outputs.pg_version }}_${{ matrix.arch }} + platforms: linux/${{ matrix.arch }} + cache-from: type=gha,scope=${{ github.ref_name }}-latest-${{ matrix.arch }} + cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-latest-${{ matrix.arch }} + file: ${{ matrix.postgres.dockerfile }} + merge_manifest: + needs: [prepare, build, build_release_image] + strategy: + matrix: + include: ${{ fromJson(needs.prepare.outputs.matrix_config).include }} + runs-on: large-linux-x86 + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - uses: DeterminateSystems/nix-installer-action@main + - uses: docker/setup-buildx-action@v3 + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - name: Get image tag + id: get_version + run: | + nix run nixpkgs#nushell -- -c ' + let version = "${{ matrix.version }}" + let release_key = if ($version | str contains "orioledb") { + $"postgresorioledb-17" + } else { + $"postgres($version)" + } + let pg_version = (open ansible/vars.yml | get postgres_release | get $release_key | str trim) + $"pg_version=tealbase/postgres:($pg_version)" | save --append $env.GITHUB_OUTPUT + ' + - name: Output version + id: output_version + run: | + echo "result=${{ steps.get_version.outputs.pg_version }}" >> $GITHUB_OUTPUT + - name: Collect versions + id: collect_versions + run: | + echo "${{ steps.output_version.outputs.result }}" >> results.txt # Append results + - name: Upload Results Artifact + uses: actions/upload-artifact@v4 + with: + name: merge_results-${{ matrix.version }} + path: results.txt + if-no-files-found: warn + - name: Merge multi-arch manifests + run: | + docker buildx imagetools create -t ${{ steps.get_version.outputs.pg_version }} \ + ${{ steps.get_version.outputs.pg_version }}_amd64 \ + ${{ steps.get_version.outputs.pg_version }}_arm64 + combine_results: + needs: [prepare, merge_manifest] + runs-on: large-linux-x86 + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - uses: DeterminateSystems/nix-installer-action@main + + - name: Debug Input from Prepare + run: | + echo 
"Raw matrix_config output:" + echo "${{ needs.prepare.outputs.matrix_config }}" + - name: Get Versions from Matrix Config + id: get_versions + run: | + nix run nixpkgs#nushell -- -c ' + # Parse the matrix configuration directly + let matrix_config = (${{ toJson(needs.prepare.outputs.matrix_config) }} | from json) + + # Get versions directly from include array + let versions = ($matrix_config.include | get version) + + echo "Versions: $versions" + + # Convert the versions to a comma-separated string + let versions_str = ($versions | str join ",") + $"versions=$versions_str" | save --append $env.GITHUB_ENV + ' + - name: Download Results Artifacts + uses: actions/download-artifact@v4 + with: + pattern: merge_results-* + - name: Combine Results + id: combine + run: | + nix run nixpkgs#nushell -- -c ' + # Get all results files and process them in one go + let files = (ls **/results.txt | get name) + echo $"Found files: ($files)" + + let matrix = { + include: ( + $files + | each { |file| open $file } # Open each file + | each { |content| $content | lines } # Split into lines + | flatten # Flatten the nested lists + | where { |line| $line != "" } # Filter empty lines + | each { |line| + # Extract just the version part after the last colon + let version = ($line | parse "tealbase/postgres:{version}" | get version.0) + {version: $version} + } + ) + } + + let json_output = ($matrix | to json -r) # -r for raw output + echo $"Debug output: ($json_output)" + + $"matrix=($json_output)" | save --append $env.GITHUB_OUTPUT + ' + - name: Debug Combined Results + run: | + echo "Combined Results: '${{ steps.combine.outputs.matrix }}'" + outputs: + matrix: ${{ steps.combine.outputs.matrix }} + publish: + permissions: + contents: read + packages: write + id-token: write + needs: combine_results + strategy: + matrix: ${{ fromJson(needs.combine_results.outputs.matrix) }} + uses: ./.github/workflows/mirror.yml + with: + version: ${{ inputs.postgresVersion != '' && inputs.postgresVersion || matrix.version }} + secrets: inherit diff --git a/.github/workflows/mirror-postgrest.yml b/.github/workflows/mirror-postgrest.yml new file mode 100644 index 0000000..c0d9838 --- /dev/null +++ b/.github/workflows/mirror-postgrest.yml @@ -0,0 +1,37 @@ +name: Mirror PostgREST + +on: + push: + branches: + - develop + paths: + - ".github/workflows/mirror-postgrest.yml" + - "common.vars*" + +permissions: + contents: read + +jobs: + version: + runs-on: ubuntu-latest + outputs: + postgrest_release: ${{ steps.args.outputs.result }} + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - id: args + uses: mikefarah/yq@master + with: + cmd: yq '.postgrest_release' 'ansible/vars.yml' + + mirror: + needs: + - version + permissions: + contents: read + packages: write + id-token: write + uses: tealbase/cli/.github/workflows/mirror-image.yml@main + with: + image: postgrest/postgrest:v${{ needs.version.outputs.postgrest_release }} + secrets: inherit diff --git a/.github/workflows/mirror.yml b/.github/workflows/mirror.yml new file mode 100644 index 0000000..2411ab7 --- /dev/null +++ b/.github/workflows/mirror.yml @@ -0,0 +1,45 @@ +name: Mirror Image + +on: + workflow_call: + inputs: + version: + required: true + type: string + workflow_dispatch: + inputs: + version: + description: "Image tag" + required: true + type: string + +permissions: + contents: read + +jobs: + mirror: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + id-token: write + steps: + - name: configure aws 
credentials + uses: aws-actions/configure-aws-credentials@v2.2.0 + with: + role-to-assume: ${{ secrets.PROD_AWS_ROLE }} + aws-region: us-east-1 + - uses: docker/login-action@v2 + with: + registry: public.ecr.aws + - uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - uses: akhilerm/tag-push-action@v2.1.0 + with: + src: docker.io/tealbase/postgres:${{ inputs.version }} + dst: | + public.ecr.aws/tealbase/postgres:${{ inputs.version }} + ghcr.io/tealbase/postgres:${{ inputs.version }} diff --git a/.github/workflows/nix-build.yml b/.github/workflows/nix-build.yml new file mode 100644 index 0000000..3e092c9 --- /dev/null +++ b/.github/workflows/nix-build.yml @@ -0,0 +1,124 @@ +name: Nix CI + +on: + push: + branches: + - develop + - release/* + pull_request: + workflow_dispatch: + +permissions: + id-token: write + # required by testinfra-ami-build dependent workflows + contents: write + packages: write + +jobs: + build-run-image: + strategy: + fail-fast: false + matrix: + include: + - runner: large-linux-x86 + arch: amd64 + - runner: large-linux-arm + arch: arm64 + - runner: macos-latest-xlarge + arch: arm64 + runs-on: ${{ matrix.runner }} + timeout-minutes: 180 + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - name: aws-creds + uses: aws-actions/configure-aws-credentials@v4 + if: ${{ github.secret_source == 'Actions' }} + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "us-east-1" + output-credentials: true + role-duration-seconds: 7200 + - name: Setup AWS credentials for Nix + if: ${{ github.secret_source == 'Actions' }} + run: | + sudo -H aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID + sudo -H aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY + sudo -H aws configure set aws_session_token $AWS_SESSION_TOKEN + - name: write secret key + # use python so we don't interpolate the secret into the workflow logs, in case of bugs + run: | + sudo mkdir -p /etc/nix + sudo -E python -c "import os; file = open('/etc/nix/nix-secret-key', 'w'); file.write(os.environ['NIX_SIGN_SECRET_KEY']); file.close()" + env: + NIX_SIGN_SECRET_KEY: ${{ secrets.NIX_SIGN_SECRET_KEY }} + - name: Setup cache script + if: ${{ github.secret_source == 'Actions' }} + run: | + cat << 'EOF' | sudo tee /etc/nix/upload-to-cache.sh > /dev/null + #!/usr/bin/env bash + set -euf + export IFS=' ' + /nix/var/nix/profiles/default/bin/nix copy --to 's3://nix-postgres-artifacts?secret-key=/etc/nix/nix-secret-key' $OUT_PATHS + EOF + sudo chmod +x /etc/nix/upload-to-cache.sh + - name: Install nix + uses: cachix/install-nix-action@v27 + if: ${{ github.secret_source == 'Actions' }} + with: + install_url: https://releases.nixos.org/nix/nix-2.29.1/install + extra_nix_config: | + substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com + trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI= cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= + post-build-hook = /etc/nix/upload-to-cache.sh + - name: Install nix + uses: cachix/install-nix-action@v27 + if: ${{ github.secret_source == 'None' }} + with: + install_url: https://releases.nixos.org/nix/nix-2.29.1/install + extra_nix_config: | + substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com + trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI= 
cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= + - name: Aggressive disk cleanup for DuckDB build + if: matrix.runner == 'macos-latest-xlarge' + run: | + echo "=== BEFORE CLEANUP ===" + df -h + # Remove major space consumers + sudo rm -rf /usr/share/dotnet || true + sudo rm -rf /usr/local/lib/android || true + sudo rm -rf /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform || true + sudo rm -rf /Applications/Xcode.app/Contents/Developer/Platforms/watchOS.platform || true + sudo rm -rf /Applications/Xcode.app/Contents/Developer/Platforms/tvOS.platform || true + # Clean everything possible + sudo rm -rf /opt/ghc || true + sudo rm -rf /usr/local/share/boost || true + sudo rm -rf /opt/homebrew || true + sudo xcrun simctl delete all 2>/dev/null || true + # Aggressive cache cleanup + sudo rm -rf /System/Library/Caches/* 2>/dev/null || true + sudo rm -rf /Library/Caches/* 2>/dev/null || true + sudo rm -rf ~/Library/Caches/* 2>/dev/null || true + sudo rm -rf /private/var/log/* 2>/dev/null || true + sudo rm -rf /tmp/* 2>/dev/null || true + echo "=== AFTER CLEANUP ===" + df -h + - name: Build psql bundle + run: > + nix run "github:Mic92/nix-fast-build?rev=b1dae483ab7d4139a6297e02b6de9e5d30e43d48" + -- --skip-cached --no-nom ${{ matrix.runner == 'macos-latest-xlarge' && '--max-jobs 1' || '' }} + --flake ".#checks.$(nix eval --raw --impure --expr 'builtins.currentSystem')" + env: + AWS_ACCESS_KEY_ID: ${{ env.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ env.AWS_SECRET_ACCESS_KEY }} + AWS_SESSION_TOKEN: ${{ env.AWS_SESSION_TOKEN }} + + run-testinfra: + needs: build-run-image + if: ${{ success() }} + uses: ./.github/workflows/testinfra-ami-build.yml + + run-tests: + needs: build-run-image + if: ${{ success() }} + uses: ./.github/workflows/test.yml diff --git a/.github/workflows/provision.yml b/.github/workflows/provision.yml deleted file mode 100644 index 9e67c6a..0000000 --- a/.github/workflows/provision.yml +++ /dev/null @@ -1,59 +0,0 @@ -name: CD - -on: - push: - branches: [ master ] - -jobs: - provision: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - - name: Install essentials - run: | - sudo apt-get update -y - sudo apt-get install software-properties-common -y - sudo apt-get install unzip -y - sudo apt-get install git -y - - - name: Install Packer - run: | - export VER="1.5.5" - wget https://releases.hashicorp.com/packer/${VER}/packer_${VER}_linux_amd64.zip - sudo unzip packer_${VER}_linux_amd64.zip - sudo mv packer /usr/local/bin - - - name: Install Ansible - run: | - sudo apt-add-repository --yes ppa:ansible/ansible - sudo apt-get install ansible -y - ansible --version - - - name: Install Ansible role - run: | - sudo ansible-galaxy install anxs.postgresql -r ansible/install_roles.yml --force -vvv - ansible-galaxy list anxs.postgresql - - - name: Build Digital Ocean Snapshot - run: | - export REGION=sgp1 - export SNAPSHOT_REGIONS="nyc1,nyc3,ams3,sfo2,sfo3,sgp1,lon1,fra1,tor1" - sudo packer build \ - -var "do_token=${{ secrets.DO_TOKEN }}" \ - -var "region=$REGION" \ - -var "snapshot_regions=$SNAPSHOT_REGIONS" \ - digitalOcean.json - - # - name: Build Digital Ocean Snapshot for Marketplace - # run: | - # export REGION=sgp1 - # export IMAGE_NAME="supabase-supabasepostgres-18-04" - # export ARGS="--tags,update" - # sudo packer build \ - # -var "do_token=${{ secrets.DO_TOKEN }}" \ - # -var "region=$REGION" \ - # -var "image_name=$IMAGE_NAME" \ - # -var "ansible_arguments=$ARGS" \ - # digitalOcean.json diff --git 
a/.github/workflows/publish-migrations-prod.yml b/.github/workflows/publish-migrations-prod.yml new file mode 100644 index 0000000..cc13a24 --- /dev/null +++ b/.github/workflows/publish-migrations-prod.yml @@ -0,0 +1,41 @@ +name: Release Migrations - Prod + +on: + workflow_dispatch: + +jobs: + build: + runs-on: large-linux-arm + timeout-minutes: 15 + permissions: + id-token: write + contents: read + + steps: + - name: Guard + run: | + if [ $GITHUB_REF != 'refs/heads/develop' ]; then + echo "This action can only be run on the develop branch" + exit 1 + fi + env: + GITHUB_REF: ${{ github.ref }} + + - name: Checkout repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - name: Merging migration files + run: cat $(ls -1) > ../migration-output.sql + working-directory: ${{ github.workspace }}/migrations/db/migrations + + - name: configure aws credentials - prod + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ secrets.PROD_AWS_ROLE }} + aws-region: "ap-southeast-1" + + - name: Deploy to S3 prod + shell: bash + run: aws s3 sync migrations/db s3://$AWS_S3_BUCKET/migrations/db --delete + env: + AWS_S3_BUCKET: ${{ secrets.PG_INIT_SCRIPT_S3_BUCKET_PROD }} diff --git a/.github/workflows/publish-migrations-staging.yml b/.github/workflows/publish-migrations-staging.yml new file mode 100644 index 0000000..587bbce --- /dev/null +++ b/.github/workflows/publish-migrations-staging.yml @@ -0,0 +1,34 @@ +name: Release Migrations - Staging + +on: + push: + branches: + - develop + workflow_dispatch: + +jobs: + build: + runs-on: large-linux-arm + timeout-minutes: 15 + permissions: + id-token: write + contents: read + + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - name: Merging migration files + run: cat $(ls -1) > ../migration-output.sql + working-directory: ${{ github.workspace }}/migrations/db/migrations + + - name: configure aws credentials - staging + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "ap-southeast-1" + + - name: Deploy to S3 staging + shell: bash + run: aws s3 sync migrations/db s3://$AWS_S3_BUCKET/migrations/db --delete + env: + AWS_S3_BUCKET: ${{ secrets.PG_INIT_SCRIPT_S3_BUCKET_STAGING }} diff --git a/.github/workflows/publish-nix-pgupgrade-bin-flake-version.yml b/.github/workflows/publish-nix-pgupgrade-bin-flake-version.yml new file mode 100644 index 0000000..74e9679 --- /dev/null +++ b/.github/workflows/publish-nix-pgupgrade-bin-flake-version.yml @@ -0,0 +1,130 @@ +name: Publish nix pg_upgrade_bin flake version + +on: + workflow_dispatch: + inputs: + postgresVersion: + description: 'Optional. Postgres version to publish against, i.e. 
15.1.1.78' + required: false + +permissions: + id-token: write + +jobs: + prepare: + runs-on: large-linux-x86 + outputs: + postgres_versions: ${{ steps.set-versions.outputs.postgres_versions }} + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - uses: DeterminateSystems/nix-installer-action@main + + - name: Set PostgreSQL versions + id: set-versions + run: | + VERSIONS=$(nix run nixpkgs#yq -- '.postgres_major[]' ansible/vars.yml | nix run nixpkgs#jq -- -R -s -c 'split("\n")[:-1]') + echo "postgres_versions=$VERSIONS" >> $GITHUB_OUTPUT + + publish-staging: + needs: prepare + runs-on: large-linux-x86 + strategy: + matrix: + postgres_version: ${{ fromJson(needs.prepare.outputs.postgres_versions) }} + + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - uses: DeterminateSystems/nix-installer-action@main + + - name: Grab release version + id: process_release_version + run: | + VERSION=$(nix run nixpkgs#yq -- '.postgres_release["postgres'${{ matrix.postgres_version }}'"]' ansible/vars.yml) + VERSION=$(echo "$VERSION" | tr -d '"') # Remove any surrounding quotes + if [[ "${{ inputs.postgresVersion }}" != "" ]]; then + VERSION="${{ inputs.postgresVersion }}" + fi + echo "version=$VERSION" >> "$GITHUB_OUTPUT" + echo "major_version=$(echo $VERSION | cut -d'.' -f1)" >> "$GITHUB_OUTPUT" + + - name: Create a tarball containing the latest nix flake version + working-directory: /tmp/ + run: | + mkdir -p "${{ steps.process_release_version.outputs.major_version }}" + echo "$GITHUB_SHA" > "${{ steps.process_release_version.outputs.major_version }}/nix_flake_version" + tar -czvf pg_upgrade_bin.tar.gz "${{ steps.process_release_version.outputs.major_version }}" + + - name: configure aws credentials - staging + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload pg_upgrade scripts to s3 staging + run: | + aws s3 cp /tmp/pg_upgrade_bin.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz + aws s3 cp /tmp/pg_upgrade_bin.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/24.04.tar.gz + aws s3 cp /tmp/pg_upgrade_bin.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/upgrade_bundle.tar.gz + + - name: Slack Notification on Failure + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: 'gha-failures-notifier' + SLACK_COLOR: 'danger' + SLACK_MESSAGE: 'Publishing pg_upgrade binaries flake version failed' + SLACK_FOOTER: '' + + publish-prod: + runs-on: large-linux-x86 + if: github.ref_name == 'develop' || contains( github.ref, 'release' ) + needs: prepare + strategy: + matrix: + postgres_version: ${{ fromJson(needs.prepare.outputs.postgres_versions) }} + + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - name: Grab release version + id: process_release_version + run: | + VERSION=$(nix run nixpkgs#yq -- '.postgres_release["postgres'${{ matrix.postgres_version }}'"]' ansible/vars.yml) + VERSION=$(echo $VERSION | tr -d '"') # Remove any surrounding quotes + echo "version=$VERSION" >> "$GITHUB_OUTPUT" + echo "major_version=$(echo $VERSION | 
cut -d'.' -f1)" >> "$GITHUB_OUTPUT" + + - name: Create a tarball containing the latest nix flake version + working-directory: /tmp/ + run: | + mkdir -p "${{ steps.process_release_version.outputs.major_version }}" + echo "$GITHUB_SHA" > "${{ steps.process_release_version.outputs.major_version }}/nix_flake_version" + tar -czvf pg_upgrade_bin.tar.gz "${{ steps.process_release_version.outputs.major_version }}" + + - name: configure aws credentials - prod + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ secrets.PROD_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload pg_upgrade scripts to s3 prod + run: | + aws s3 cp /tmp/pg_upgrade_bin.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz + aws s3 cp /tmp/pg_upgrade_bin.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/24.04.tar.gz + aws s3 cp /tmp/pg_upgrade_bin.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/upgrade_bundle.tar.gz + + - name: Slack Notification on Failure + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: 'gha-failures-notifier' + SLACK_COLOR: 'danger' + SLACK_MESSAGE: 'Publishing pg_upgrade binaries flake version failed' + SLACK_FOOTER: '' diff --git a/.github/workflows/publish-nix-pgupgrade-scripts.yml b/.github/workflows/publish-nix-pgupgrade-scripts.yml new file mode 100644 index 0000000..e9792c7 --- /dev/null +++ b/.github/workflows/publish-nix-pgupgrade-scripts.yml @@ -0,0 +1,135 @@ +name: Publish nix pg_upgrade_scripts + +on: + push: + branches: + - develop + - release/* + paths: + - '.github/workflows/publish-nix-pgupgrade-scripts.yml' + - 'ansible/vars.yml' + workflow_dispatch: + inputs: + postgresVersion: + description: 'Optional. Postgres version to publish against, i.e. 
15.1.1.78' + required: false + +permissions: + id-token: write + +jobs: + prepare: + runs-on: large-linux-x86 + outputs: + postgres_versions: ${{ steps.set-versions.outputs.postgres_versions }} + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - uses: DeterminateSystems/nix-installer-action@main + + - name: Set PostgreSQL versions + id: set-versions + run: | + VERSIONS=$(nix run nixpkgs#yq -- '.postgres_major[]' ansible/vars.yml | nix run nixpkgs#jq -- -R -s -c 'split("\n")[:-1]') + echo "postgres_versions=$VERSIONS" >> $GITHUB_OUTPUT + + publish-staging: + needs: prepare + runs-on: large-linux-x86 + strategy: + matrix: + postgres_version: ${{ fromJson(needs.prepare.outputs.postgres_versions) }} + + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - uses: DeterminateSystems/nix-installer-action@main + + - name: Grab release version + id: process_release_version + run: | + VERSION=$(nix run nixpkgs#yq -- '.postgres_release["postgres'${{ matrix.postgres_version }}'"]' ansible/vars.yml) + VERSION=$(echo "$VERSION" | tr -d '"') # Remove any surrounding quotes + if [[ "${{ inputs.postgresVersion }}" != "" ]]; then + VERSION="${{ inputs.postgresVersion }}" + fi + echo "version=$VERSION" >> "$GITHUB_OUTPUT" + + - name: Create a tarball containing pg_upgrade scripts + run: | + mkdir -p /tmp/pg_upgrade_scripts + cp -r ansible/files/admin_api_scripts/pg_upgrade_scripts/* /tmp/pg_upgrade_scripts + tar -czvf /tmp/pg_upgrade_scripts.tar.gz -C /tmp/ pg_upgrade_scripts + + - name: configure aws credentials - staging + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload pg_upgrade scripts to s3 staging + run: | + aws s3 cp /tmp/pg_upgrade_scripts.tar.gz "s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/pg_upgrade_scripts.tar.gz" + + - name: Slack Notification on Failure + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: 'gha-failures-notifier' + SLACK_COLOR: 'danger' + SLACK_MESSAGE: 'Publishing pg_upgrade scripts failed' + SLACK_FOOTER: '' + publish-prod: + needs: prepare + runs-on: large-linux-x86 + if: github.ref_name == 'develop' || contains( github.ref, 'release' ) + + strategy: + matrix: + postgres_version: ${{ fromJson(needs.prepare.outputs.postgres_versions) }} + + + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - uses: DeterminateSystems/nix-installer-action@main + + - name: Grab release version + id: process_release_version + run: | + VERSION=$(nix run nixpkgs#yq -- '.postgres_release["postgres'${{ matrix.postgres_version }}'"]' ansible/vars.yml) + VERSION=$(echo "$VERSION" | tr -d '"') # Remove any surrounding quotes + if [[ "${{ inputs.postgresVersion }}" != "" ]]; then + VERSION="${{ inputs.postgresVersion }}" + fi + echo "version=$VERSION" >> "$GITHUB_OUTPUT" + + - name: Create a tarball containing pg_upgrade scripts + run: | + mkdir -p /tmp/pg_upgrade_scripts + cp -r ansible/files/admin_api_scripts/pg_upgrade_scripts/* /tmp/pg_upgrade_scripts + tar -czvf /tmp/pg_upgrade_scripts.tar.gz -C /tmp/ pg_upgrade_scripts + + - name: configure aws credentials - prod + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ secrets.PROD_AWS_ROLE }} + aws-region: 
"us-east-1" + + - name: Upload pg_upgrade scripts to s3 prod + run: | + aws s3 cp /tmp/pg_upgrade_scripts.tar.gz "s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/pg_upgrade_scripts.tar.gz" + + - name: Slack Notification on Failure + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: 'gha-failures-notifier' + SLACK_COLOR: 'danger' + SLACK_MESSAGE: 'Publishing pg_upgrade scripts failed' + SLACK_FOOTER: '' diff --git a/.github/workflows/qemu-image-build.yml b/.github/workflows/qemu-image-build.yml new file mode 100644 index 0000000..931543f --- /dev/null +++ b/.github/workflows/qemu-image-build.yml @@ -0,0 +1,155 @@ +name: Build QEMU image + +on: + push: + branches: + - develop + - release/* + paths: + - '.github/workflows/qemu-image-build.yml' + - 'qemu-arm64-nix.pkr.hcl' + - 'common-nix.vars.pkr.hcl' + - 'ansible/vars.yml' + - 'scripts/*' + workflow_dispatch: + +permissions: + contents: read + id-token: write + +jobs: + prepare: + runs-on: ubuntu-latest + outputs: + postgres_versions: ${{ steps.set-versions.outputs.postgres_versions }} + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - uses: DeterminateSystems/nix-installer-action@main + + - name: Set PostgreSQL versions - only builds pg17 atm + id: set-versions + run: | + VERSIONS=$(nix run nixpkgs#yq -- '.postgres_major[1]' ansible/vars.yml | nix run nixpkgs#jq -- -R -s -c 'split("\n")[:-1]') + echo "postgres_versions=$VERSIONS" >> $GITHUB_OUTPUT + + build: + needs: prepare + strategy: + matrix: + postgres_version: ${{ fromJson(needs.prepare.outputs.postgres_versions) }} + runs-on: arm-native-runner + timeout-minutes: 150 + permissions: + contents: write + packages: write + id-token: write + + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - uses: DeterminateSystems/nix-installer-action@main + + - name: Run checks if triggered manually + if: ${{ github.event_name == 'workflow_dispatch' }} + run: | + SUFFIX=$(sudo nix run nixpkgs#yq -- ".postgres_release[\"postgres${{ matrix.postgres_version }}\"]" ansible/vars.yml | sed -E 's/[0-9\.]+(.*)$/\1/') + if [[ -z $SUFFIX ]] ; then + echo "Version must include non-numeric characters if built manually." 
+ exit 1 + fi + + - name: enable KVM support + run: | + sudo chown runner /dev/kvm + sudo chmod 666 /dev/kvm + + - name: Set PostgreSQL version environment variable + run: echo "POSTGRES_MAJOR_VERSION=${{ matrix.postgres_version }}" >> $GITHUB_ENV + + - name: Generate common-nix.vars.pkr.hcl + run: | + curl -L https://github.com/mikefarah/yq/releases/download/v4.45.1/yq_linux_arm64 -o yq && chmod +x yq + PG_VERSION=$(./yq '.postgres_release["postgres'${{ matrix.postgres_version }}'"]' ansible/vars.yml) + PG_VERSION=$(echo "$PG_VERSION" | tr -d '"') # Remove any surrounding quotes + echo 'postgres-version = "'$PG_VERSION'"' > common-nix.vars.pkr.hcl + echo 'postgres-major-version = "'$POSTGRES_MAJOR_VERSION'"' >> common-nix.vars.pkr.hcl + # Ensure there's a newline at the end of the file + echo "" >> common-nix.vars.pkr.hcl + + # TODO (darora): not quite sure why I'm having to uninstall and re-install these deps, but the build fails w/o this + - name: Install dependencies + run: | + sudo apt-get update + sudo apt-get remove -y qemu-efi-aarch64 cloud-image-utils qemu-system-arm qemu-utils + sudo apt-get install -y qemu-efi-aarch64 cloud-image-utils qemu-system-arm qemu-utils + + - name: Build QEMU artifact + run: | + make init + GIT_SHA=${{github.sha}} + export PACKER_LOG=1 + packer build -var "git_sha=${GIT_SHA}" -var-file="common-nix.vars.pkr.hcl" qemu-arm64-nix.pkr.hcl + + - name: Grab release version + id: process_release_version + run: | + VERSION=$(cat common-nix.vars.pkr.hcl | sed -e 's/postgres-version = "\(.*\)"/\1/g') + echo "version=$VERSION" >> $GITHUB_OUTPUT + + - name: configure aws credentials - staging + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.CONTROL_PLANE_DEV_ROLE }} + aws-region: "us-east-1" + + - name: Login to Amazon ECR + id: login-ecr-private-dev + uses: aws-actions/amazon-ecr-login@v2 + + - name: Build image + env: + IMAGE_TAG: ${{ steps.process_release_version.outputs.version }} + run: | + docker build -f Dockerfile-kubernetes -t "postgres:$IMAGE_TAG" . + + - name: Push docker image to Amazon ECR + env: + REGISTRY: 812073016711.dkr.ecr.us-east-1.amazonaws.com + REPOSITORY: postgres-vm-image + IMAGE_TAG: ${{ steps.process_release_version.outputs.version }} + run: | + docker tag "postgres:$IMAGE_TAG" "$REGISTRY/$REPOSITORY:$IMAGE_TAG" + docker push "$REGISTRY/$REPOSITORY:$IMAGE_TAG" + + # TODO (darora): temporarily also push to prod account from here - add a guard to only publish proper tagged releases to prod? 
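# A minimal sketch of such a guard (hypothetical, not wired in by this diff): adding
#   if: startsWith(github.ref, 'refs/tags/')
# to the prod credential and prod push steps below would restrict the prod-registry
# push to tagged releases while leaving the dev-account push unconditional.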
+ - name: configure aws credentials - prod + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.CONTROL_PLANE_PROD_ROLE }} + aws-region: "us-east-1" + + - name: Login to Amazon ECR + id: login-ecr-private-prod + uses: aws-actions/amazon-ecr-login@v2 + + - name: Push docker image to Amazon ECR + env: + REGISTRY: 156470330064.dkr.ecr.us-east-1.amazonaws.com + REPOSITORY: postgres-vm-image + IMAGE_TAG: ${{ steps.process_release_version.outputs.version }} + run: | + docker tag "postgres:$IMAGE_TAG" "$REGISTRY/$REPOSITORY:$IMAGE_TAG" + docker push "$REGISTRY/$REPOSITORY:$IMAGE_TAG" + + - name: Cleanup resources after build + if: ${{ always() }} + run: | + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids + + - name: Cleanup resources on build cancellation + if: ${{ cancelled() }} + run: | + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000..293c366 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,79 @@ +name: Test Database +on: + workflow_dispatch: + workflow_call: + +permissions: + contents: read + id-token: write + +jobs: + prepare: + runs-on: large-linux-x86 + outputs: + postgres_versions: ${{ steps.set-versions.outputs.postgres_versions }} + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - uses: DeterminateSystems/nix-installer-action@main + with: + extra-conf: | + substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com + trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= + - name: Set PostgreSQL versions + id: set-versions + run: | + VERSIONS=$(nix run nixpkgs#yq -- '.postgres_major[]' ansible/vars.yml | nix run nixpkgs#jq -- -R -s -c "split(\"\n\")[:-1]") + echo "postgres_versions=$VERSIONS" >> $GITHUB_OUTPUT + build: + needs: prepare + strategy: + matrix: + postgres_version: ${{ fromJson(needs.prepare.outputs.postgres_versions) }} + include: + - runner: large-linux-x86 + arch: amd64 + - runner: large-linux-arm + arch: arm64 + runs-on: ${{ matrix.runner }} + timeout-minutes: 180 + env: + POSTGRES_PORT: 5478 + POSTGRES_PASSWORD: password + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - uses: DeterminateSystems/nix-installer-action@main + with: + extra-conf: | + substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com + trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= + - name: Set PostgreSQL version environment variable + run: echo "POSTGRES_MAJOR_VERSION=${{ matrix.postgres_version }}" >> $GITHUB_ENV + - name: Strip quotes from pg major and set env var + run: | + stripped_version=$(echo "${{ matrix.postgres_version }}" | sed 's/^"\(.*\)"$/\1/') + echo "PGMAJOR=$stripped_version" >> $GITHUB_ENV + - name: Generate common-nix.vars.pkr.hcl + run: | + PG_VERSION=$(nix run nixpkgs#yq -- '.postgres_release["postgres'${{ matrix.postgres_version }}'"]' ansible/vars.yml) + 
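# (illustration, assuming ansible/vars.yml holds an entry such as
#   postgres_release:
#     postgres17: "17.4.1.005"
# the yq lookup above would then print "17.4.1.005" with its JSON quotes attached,
# which is why they are stripped on the next line)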
PG_VERSION=$(echo $PG_VERSION | tr -d '"') # Remove any surrounding quotes + echo 'postgres-version = "'$PG_VERSION'"' > common-nix.vars.pkr.hcl + echo "" >> common-nix.vars.pkr.hcl + - id: settings + run: sed -r 's/(\s|\")+//g' common-nix.vars.pkr.hcl >> $GITHUB_OUTPUT + - name: Generate args + id: args + run: | + ARGS=$(nix run nixpkgs#yq -- 'to_entries | map(select(.value|type == "!!str")) | map(.key + "=" + .value) | join("\n")' ansible/vars.yml) + echo "result<<EOF" >> $GITHUB_OUTPUT + echo "$ARGS" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + - name: verify schema.sql is committed + run: | + nix run github:tealbase/postgres/${{ github.sha }}#dbmate-tool -- --version ${{ env.PGMAJOR }} --flake-url github:tealbase/postgres/${{ github.sha }} + if ! git diff --exit-code --quiet migrations/schema-${{ env.PGMAJOR }}.sql; then + echo "Detected changes in schema.sql:" + git diff migrations/schema-${{ env.PGMAJOR }}.sql + exit 1 + fi diff --git a/.github/workflows/testinfra-ami-build.yml b/.github/workflows/testinfra-ami-build.yml new file mode 100644 index 0000000..8d12223 --- /dev/null +++ b/.github/workflows/testinfra-ami-build.yml @@ -0,0 +1,127 @@ +name: Testinfra Integration Tests Nix + +on: + workflow_dispatch: + workflow_call: + +permissions: + contents: read + id-token: write + +jobs: + prepare: + runs-on: large-linux-x86 + outputs: + postgres_versions: ${{ steps.set-versions.outputs.postgres_versions }} + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - uses: DeterminateSystems/nix-installer-action@main + + - name: Set PostgreSQL versions + id: set-versions + run: | + VERSIONS=$(nix run nixpkgs#yq -- '.postgres_major[]' ansible/vars.yml | nix run nixpkgs#jq -- -R -s -c 'split("\n")[:-1]') + echo "postgres_versions=$VERSIONS" >> $GITHUB_OUTPUT + + test-ami-nix: + needs: prepare + strategy: + fail-fast: false + matrix: + postgres_version: ${{ fromJson(needs.prepare.outputs.postgres_versions) }} + include: + - runner: arm-runner + arch: arm64 + ubuntu_release: noble + ubuntu_version: 24.04 + mcpu: neoverse-n1 + runs-on: ${{ matrix.runner }} + timeout-minutes: 150 + permissions: + contents: write + packages: write + id-token: write + + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - id: args + uses: mikefarah/yq@master + with: + cmd: yq 'to_entries | map(select(.value|type == "!!str")) | map(.key + "=" + .value) | join("\n")' 'ansible/vars.yml' + + - run: docker context create builders + + - uses: docker/setup-buildx-action@v3 + with: + endpoint: builders + + - name: Generate random string + id: random + run: echo "random_string=$(openssl rand -hex 8)" >> $GITHUB_OUTPUT + + - name: Set PostgreSQL version environment variable + run: echo "POSTGRES_MAJOR_VERSION=${{ matrix.postgres_version }}" >> $GITHUB_ENV + + - name: Generate common-nix.vars.pkr.hcl + run: | + PG_VERSION=$(sudo nix run nixpkgs#yq -- '.postgres_release["postgres'${{ matrix.postgres_version }}'"]' ansible/vars.yml) + PG_VERSION=$(echo "$PG_VERSION" | tr -d '"') # Remove any surrounding quotes + echo 'postgres-version = "'$PG_VERSION'"' > common-nix.vars.pkr.hcl + # Ensure there's a newline at the end of the file + echo "" >> common-nix.vars.pkr.hcl + + - name: Build AMI stage 1 + run: | + packer init amazon-arm64-nix.pkr.hcl + GIT_SHA=${{github.sha}} + packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" 
-var "ansible_arguments=" -var "postgres-version=${{ steps.random.outputs.random_string }}" -var "region=ap-southeast-1" -var 'ami_regions=["ap-southeast-1"]' -var "force-deregister=true" -var "ansible_arguments=-e postgresql_major=${POSTGRES_MAJOR_VERSION}" amazon-arm64-nix.pkr.hcl + + - name: Build AMI stage 2 + run: | + packer init stage2-nix-psql.pkr.hcl + GIT_SHA=${{github.sha}} + packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var "postgres_major_version=${POSTGRES_MAJOR_VERSION}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "postgres-version=${{ steps.random.outputs.random_string }}" -var "region=ap-southeast-1" -var 'ami_regions=["ap-southeast-1"]' -var "force-deregister=true" -var "git_sha=${GITHUB_SHA}" stage2-nix-psql.pkr.hcl + + - name: Run tests + timeout-minutes: 10 + env: + AMI_NAME: "tealbase-postgres-${{ steps.random.outputs.random_string }}" + run: | + # TODO: use poetry for pkg mgmt + pip3 install boto3 boto3-stubs[essential] docker ec2instanceconnectcli pytest pytest-testinfra[paramiko,docker] requests + pytest -vv -s testinfra/test_ami_nix.py + + - name: Cleanup resources on build cancellation + if: ${{ cancelled() }} + run: | + aws ec2 --region ap-southeast-1 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --region ap-southeast-1 --instance-ids + + - name: Cleanup resources after build + if: ${{ always() }} + run: | + aws ec2 --region ap-southeast-1 describe-instances --filters "Name=tag:testinfra-run-id,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --region ap-southeast-1 --instance-ids || true + + - name: Cleanup AMIs + if: always() + run: | + # Define AMI name patterns + STAGE1_AMI_NAME="tealbase-postgres-ci-ami-test-stage-1" + STAGE2_AMI_NAME="${{ steps.random.outputs.random_string }}" + + # Function to deregister AMIs by name pattern + deregister_ami_by_name() { + local ami_name_pattern=$1 + local ami_ids=$(aws ec2 describe-images --region ap-southeast-1 --owners self --filters "Name=name,Values=${ami_name_pattern}" --query 'Images[*].ImageId' --output text) + for ami_id in $ami_ids; do + echo "Deregistering AMI: $ami_id" + aws ec2 deregister-image --region ap-southeast-1 --image-id $ami_id + done + } + + # Deregister AMIs + deregister_ami_by_name "$STAGE1_AMI_NAME" + deregister_ami_by_name "$STAGE2_AMI_NAME" diff --git a/.gitignore b/.gitignore index e69de29..5372bfd 100644 --- a/.gitignore +++ b/.gitignore @@ -0,0 +1,32 @@ +.DS_Store +.python-version +.mise.toml +venv/ +*.swp +docker/cache/ + +ansible/image-manifest*.json +testinfra-aio-container-logs.log + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +#nix related +result* +.env-local +.history +.envrc +.direnv + + +#IDE +.idea/ +.vscode/ + +db/schema.sql +common-nix.vars.pkr.hcl + +# pre-commit config is managed in nix +.pre-commit-config.yaml diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..7c25e5a --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,46 @@ +# Welcome to tealbase Postgres contributing guide + +## Adding a new extension + +Extensions can either be built from source or installed through a debian package. In general, you want to add the installation commands for your extension to the [Dockerfile](Dockerfile) following the steps below. + +1. 
Create a [build stage](Dockerfile#L777) named after your extension. +2. Add build args that specify the extension's [release version](Dockerfile#L37). +3. If your extension is published as a package, download it to `/tmp/<extension>.deb` using the [ADD command](Dockerfile#L705). +4. If you need to build the extension from source, use [checkinstall](Dockerfile#L791) to create a `/tmp/<extension>.deb` package. +5. Copy your extension's package from its build stage to the [extensions stage](Dockerfile#L851). + +Here's a minimal example: + +```dockerfile +ARG pg_graphql_release=1.1.0 + +#################### +# 19-pg_graphql.yml +#################### +FROM base as pg_graphql +# Download package archive +ARG pg_graphql_release +ADD "https://github.com/tealbase/pg_graphql/releases/download/v${pg_graphql_release}/pg_graphql-v${pg_graphql_release}-pg${postgresql_major}-${TARGETARCH}-linux-gnu.deb" \ + /tmp/pg_graphql.deb + +#################### +# Collect extension packages +#################### +FROM scratch as extensions +COPY --from=pg_graphql /tmp/*.deb /tmp/ +``` + +Using this process maximises the effectiveness of Docker layer caching, which significantly speeds up our CI builds. + +## Testing an extension + +Extensions can be tested automatically using pgTAP. Start by creating a new file in [migrations/tests/extensions](migrations/tests/extensions). For example: + +```sql +BEGIN; +create extension if not exists wrappers with schema "extensions"; +ROLLBACK; +``` + +This test will be run as part of CI to check that your extension can be enabled successfully from the final Docker image. diff --git a/Dockerfile-15 b/Dockerfile-15 new file mode 100644 index 0000000..6e12cdc --- /dev/null +++ b/Dockerfile-15 @@ -0,0 +1,223 @@ +# syntax=docker/dockerfile:1.6 +ARG postgresql_major=15 +ARG postgresql_release=${postgresql_major}.1 + +# Bump default build arg to build a package from source +# Bump vars.yml to specify runtime package version +ARG sfcgal_release=1.3.10 +ARG postgis_release=3.3.2 +ARG pgrouting_release=3.4.1 +ARG pgtap_release=1.2.0 +ARG pg_cron_release=1.6.2 +ARG pgaudit_release=1.7.0 +ARG pgsql_http_release=1.5.0 +ARG plpgsql_check_release=2.2.5 +ARG pg_safeupdate_release=1.4 +ARG timescaledb_release=2.9.1 +ARG wal2json_release=2_5 +ARG pljava_release=1.6.4 +ARG plv8_release=3.1.5 +ARG pg_plan_filter_release=5081a7b5cb890876e67d8e7486b6a64c38c9a492 +ARG pg_net_release=0.7.1 +ARG rum_release=1.3.13 +ARG pg_hashids_release=cd0e1b31d52b394a0df64079406a14a4f7387cd6 +ARG libsodium_release=1.0.18 +ARG pgsodium_release=3.1.6 +ARG pg_graphql_release=1.5.11 +ARG pg_stat_monitor_release=1.1.1 +ARG pg_jsonschema_release=0.1.4 +ARG pg_repack_release=1.4.8 +ARG vault_release=0.2.8 +ARG groonga_release=12.0.8 +ARG pgroonga_release=2.4.0 +ARG wrappers_release=0.5.0 +ARG hypopg_release=1.3.1 +ARG pgvector_release=0.4.0 +ARG pg_tle_release=1.3.2 +ARG index_advisor_release=0.2.0 +ARG supautils_release=2.2.0 +ARG wal_g_release=2.0.1 + +FROM ubuntu:noble as base + +RUN apt update -y && apt install -y \ + curl \ + gnupg \ + lsb-release \ + software-properties-common \ + wget \ + sudo \ + && apt clean + + +RUN adduser --system --home /var/lib/postgresql --no-create-home --shell /bin/bash --group --gecos "PostgreSQL administrator" postgres +RUN adduser --system --no-create-home --shell /bin/bash --group wal-g +RUN curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install linux \ +--init none \ +--no-confirm \ +--extra-conf "substituters = https://cache.nixos.org 
https://nix-postgres-artifacts.s3.amazonaws.com" \ +--extra-conf "trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" + +ENV PATH="${PATH}:/nix/var/nix/profiles/default/bin" + +COPY . /nixpg + +WORKDIR /nixpg + +RUN nix profile install .#psql_15/bin + +RUN nix store gc + + +WORKDIR / + + +RUN mkdir -p /usr/lib/postgresql/bin \ + /usr/lib/postgresql/share/postgresql \ + /usr/share/postgresql \ + /var/lib/postgresql \ + && chown -R postgres:postgres /usr/lib/postgresql \ + && chown -R postgres:postgres /var/lib/postgresql \ + && chown -R postgres:postgres /usr/share/postgresql + +# Create symbolic links +RUN ln -s /nix/var/nix/profiles/default/bin/* /usr/lib/postgresql/bin/ \ + && ln -s /nix/var/nix/profiles/default/bin/* /usr/bin/ \ + && chown -R postgres:postgres /usr/bin + +# Create symbolic links for PostgreSQL shares +RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/lib/postgresql/share/postgresql/ +RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/share/postgresql/ +RUN chown -R postgres:postgres /usr/lib/postgresql/share/postgresql/ +RUN chown -R postgres:postgres /usr/share/postgresql/ +# Create symbolic links for contrib directory +RUN mkdir -p /usr/lib/postgresql/share/postgresql/contrib \ + && find /nix/var/nix/profiles/default/share/postgresql/contrib/ -mindepth 1 -type d -exec sh -c 'for dir do ln -s "$dir" "/usr/lib/postgresql/share/postgresql/contrib/$(basename "$dir")"; done' sh {} + \ + && chown -R postgres:postgres /usr/lib/postgresql/share/postgresql/contrib/ + +RUN chown -R postgres:postgres /usr/lib/postgresql + +RUN ln -sf /usr/lib/postgresql/share/postgresql/timezonesets /usr/share/postgresql/timezonesets + + +RUN apt-get update && \ + apt-get install -y --no-install-recommends tzdata + +RUN ln -fs /usr/share/zoneinfo/Etc/UTC /etc/localtime && \ + dpkg-reconfigure --frontend noninteractive tzdata + +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + build-essential \ + checkinstall \ + cmake + +ENV PGDATA=/var/lib/postgresql/data + +#################### +# setup-wal-g.yml +#################### +FROM base as walg +ARG wal_g_release +WORKDIR /nixpg + +RUN nix profile install .#wal-g-3 && \ + ln -s /nix/var/nix/profiles/default/bin/wal-g-3 /tmp/wal-g + +RUN nix store gc + +WORKDIR / +# #################### +# # Download gosu for easy step-down from root +# #################### +FROM base as gosu +ARG TARGETARCH +# Install dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + gnupg \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* +# Download binary +ARG GOSU_VERSION=1.16 +ARG GOSU_GPG_KEY=B42F6819007F00F88E364FD4036A9C25BF357DD4 +ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH \ + /usr/local/bin/gosu +ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH.asc \ + /usr/local/bin/gosu.asc +# Verify checksum +RUN gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys $GOSU_GPG_KEY && \ + gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu && \ + gpgconf --kill all && \ + chmod +x /usr/local/bin/gosu + +# #################### +# # Build final image +# #################### +FROM gosu as production +RUN id postgres || (echo "postgres user does not exist" && exit 1) +# # Setup extensions +COPY --from=walg /tmp/wal-g /usr/local/bin/ + +# # Initialise configs +COPY --chown=postgres:postgres 
ansible/files/postgresql_config/postgresql.conf.j2 /etc/postgresql/postgresql.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_hba.conf.j2 /etc/postgresql/pg_hba.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_ident.conf.j2 /etc/postgresql/pg_ident.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql-stdout-log.conf /etc/postgresql/logging.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/supautils.conf.j2 /etc/postgresql-custom/supautils.conf +COPY --chown=postgres:postgres ansible/files/postgresql_extension_custom_scripts /etc/postgresql-custom/extension-custom-scripts +COPY --chown=postgres:postgres ansible/files/pgsodium_getkey_urandom.sh.j2 /usr/lib/postgresql/bin/pgsodium_getkey.sh +COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_read_replica.conf.j2 /etc/postgresql-custom/read-replica.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_walg.conf.j2 /etc/postgresql-custom/wal-g.conf +COPY --chown=postgres:postgres ansible/files/walg_helper_scripts/wal_fetch.sh /home/postgres/wal_fetch.sh +COPY ansible/files/walg_helper_scripts/wal_change_ownership.sh /root/wal_change_ownership.sh + +RUN sed -i \ + -e "s|#unix_socket_directories = '/tmp'|unix_socket_directories = '/var/run/postgresql'|g" \ + -e "s|#session_preload_libraries = ''|session_preload_libraries = 'supautils'|g" \ + -e "s|#include = '/etc/postgresql-custom/supautils.conf'|include = '/etc/postgresql-custom/supautils.conf'|g" \ + -e "s|#include = '/etc/postgresql-custom/wal-g.conf'|include = '/etc/postgresql-custom/wal-g.conf'|g" /etc/postgresql/postgresql.conf && \ + echo "cron.database_name = 'postgres'" >> /etc/postgresql/postgresql.conf && \ + #echo "pljava.libjvm_location = '/usr/lib/jvm/java-11-openjdk-${TARGETARCH}/lib/server/libjvm.so'" >> /etc/postgresql/postgresql.conf && \ + echo "pgsodium.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ + echo "vault.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ + echo 'auto_explain.log_min_duration = 10s' >> /etc/postgresql/postgresql.conf && \ + usermod -aG postgres wal-g && \ + mkdir -p /etc/postgresql-custom && \ + chown postgres:postgres /etc/postgresql-custom + +# # Include schema migrations +COPY migrations/db /docker-entrypoint-initdb.d/ +COPY ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql /docker-entrypoint-initdb.d/init-scripts/00-schema.sql +COPY ansible/files/stat_extension.sql /docker-entrypoint-initdb.d/migrations/00-extension.sql + +# # Add upstream entrypoint script +COPY --from=gosu /usr/local/bin/gosu /usr/local/bin/gosu +ADD --chmod=0755 \ + https://github.com/docker-library/postgres/raw/master/15/bullseye/docker-entrypoint.sh \ + /usr/local/bin/docker-entrypoint.sh + +RUN mkdir -p /var/run/postgresql && chown postgres:postgres /var/run/postgresql + +ENTRYPOINT ["docker-entrypoint.sh"] + +HEALTHCHECK --interval=2s --timeout=2s --retries=10 CMD pg_isready -U postgres -h localhost +STOPSIGNAL SIGINT +EXPOSE 5432 + +ENV POSTGRES_HOST=/var/run/postgresql +ENV POSTGRES_USER=tealbase_admin +ENV POSTGRES_DB=postgres +RUN apt-get update && apt-get install -y --no-install-recommends \ + locales \ + && rm -rf /var/lib/apt/lists/* && \ + localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 \ + && localedef -i C -c -f UTF-8 -A /usr/share/locale/locale.alias C.UTF-8 +RUN echo "C.UTF-8 UTF-8" > 
/etc/locale.gen && echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen && locale-gen +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US:en +ENV LC_ALL en_US.UTF-8 +ENV LOCALE_ARCHIVE /usr/lib/locale/locale-archive +RUN mkdir -p /usr/share/postgresql/extension/ && \ + ln -s /usr/lib/postgresql/bin/pgsodium_getkey.sh /usr/share/postgresql/extension/pgsodium_getkey && \ + chmod +x /usr/lib/postgresql/bin/pgsodium_getkey.sh +CMD ["postgres", "-D", "/etc/postgresql"] diff --git a/Dockerfile-17 b/Dockerfile-17 new file mode 100644 index 0000000..806828f --- /dev/null +++ b/Dockerfile-17 @@ -0,0 +1,232 @@ +# syntax=docker/dockerfile:1.6 +ARG postgresql_major=17-orioledb +ARG postgresql_release=${postgresql_major}.1 + +# Bump default build arg to build a package from source +# Bump vars.yml to specify runtime package version +ARG sfcgal_release=1.3.10 +ARG postgis_release=3.3.2 +ARG pgrouting_release=3.4.1 +ARG pgtap_release=1.2.0 +ARG pg_cron_release=1.6.2 +ARG pgaudit_release=1.7.0 +ARG pgjwt_release=9742dab1b2f297ad3811120db7b21451bca2d3c9 +ARG pgsql_http_release=1.5.0 +ARG plpgsql_check_release=2.2.5 +ARG pg_safeupdate_release=1.4 +ARG timescaledb_release=2.9.1 +ARG wal2json_release=2_5 +ARG pljava_release=1.6.4 +ARG plv8_release=3.1.5 +ARG pg_plan_filter_release=5081a7b5cb890876e67d8e7486b6a64c38c9a492 +ARG pg_net_release=0.7.1 +ARG rum_release=1.3.13 +ARG pg_hashids_release=cd0e1b31d52b394a0df64079406a14a4f7387cd6 +ARG libsodium_release=1.0.18 +ARG pgsodium_release=3.1.6 +ARG pg_graphql_release=1.5.11 +ARG pg_stat_monitor_release=1.1.1 +ARG pg_jsonschema_release=0.1.4 +ARG pg_repack_release=1.4.8 +ARG vault_release=0.2.8 +ARG groonga_release=12.0.8 +ARG pgroonga_release=2.4.0 +ARG wrappers_release=0.5.0 +ARG hypopg_release=1.3.1 +ARG pgvector_release=0.4.0 +ARG pg_tle_release=1.3.2 +ARG index_advisor_release=0.2.0 +ARG supautils_release=2.2.0 +ARG wal_g_release=3.0.5 + +FROM ubuntu:noble as base + +RUN apt update -y && apt install -y \ + curl \ + gnupg \ + lsb-release \ + software-properties-common \ + wget \ + sudo \ + tree \ + && apt clean + + +RUN adduser --system --home /var/lib/postgresql --no-create-home --shell /bin/bash --group --gecos "PostgreSQL administrator" postgres +RUN adduser --system --no-create-home --shell /bin/bash --group wal-g +RUN curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install linux \ +--init none \ +--no-confirm \ +--extra-conf "substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com" \ +--extra-conf "trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" + +ENV PATH="${PATH}:/nix/var/nix/profiles/default/bin" + +COPY . 
/nixpg + +WORKDIR /nixpg + +RUN nix profile install .#psql_17/bin + +RUN nix store gc + +WORKDIR / + + +RUN mkdir -p /usr/lib/postgresql/bin \ + /usr/lib/postgresql/share/postgresql \ + /usr/share/postgresql \ + /var/lib/postgresql \ + && chown -R postgres:postgres /usr/lib/postgresql \ + && chown -R postgres:postgres /var/lib/postgresql \ + && chown -R postgres:postgres /usr/share/postgresql + +# Create symbolic links +RUN ln -s /nix/var/nix/profiles/default/bin/* /usr/lib/postgresql/bin/ \ + && ln -s /nix/var/nix/profiles/default/bin/* /usr/bin/ \ + && chown -R postgres:postgres /usr/bin + +# Create symbolic links for PostgreSQL shares +RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/lib/postgresql/share/postgresql/ +RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/share/postgresql/ +RUN chown -R postgres:postgres /usr/lib/postgresql/share/postgresql/ +RUN chown -R postgres:postgres /usr/share/postgresql/ +# Create symbolic links for contrib directory +RUN tree /nix > /tmp/tree.txt && cat /tmp/tree.txt && cat /tmp/tree.txt >&2 + +RUN chown -R postgres:postgres /usr/lib/postgresql + +RUN ln -sf /usr/lib/postgresql/share/postgresql/timezonesets /usr/share/postgresql/timezonesets + + +RUN apt-get update && \ + apt-get install -y --no-install-recommends tzdata + +RUN ln -fs /usr/share/zoneinfo/Etc/UTC /etc/localtime && \ + dpkg-reconfigure --frontend noninteractive tzdata + +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + build-essential \ + checkinstall \ + cmake + +ENV PGDATA=/var/lib/postgresql/data + +#################### +# setup-wal-g.yml +#################### +FROM base as walg +ARG wal_g_release + +WORKDIR /nixpg + +RUN nix profile install .#wal-g-3 && \ + ln -s /nix/var/nix/profiles/default/bin/wal-g-3 /tmp/wal-g + +RUN nix store gc + +WORKDIR / +# #################### +# # Download gosu for easy step-down from root +# #################### +FROM base as gosu +ARG TARGETARCH +# Install dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + gnupg \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* +# Download binary +ARG GOSU_VERSION=1.16 +ARG GOSU_GPG_KEY=B42F6819007F00F88E364FD4036A9C25BF357DD4 +ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH \ + /usr/local/bin/gosu +ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH.asc \ + /usr/local/bin/gosu.asc +# Verify checksum +RUN gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys $GOSU_GPG_KEY && \ + gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu && \ + gpgconf --kill all && \ + chmod +x /usr/local/bin/gosu + +# #################### +# # Build final image +# #################### +FROM gosu as production +RUN id postgres || (echo "postgres user does not exist" && exit 1) +# # Setup extensions +COPY --from=walg /tmp/wal-g /usr/local/bin/ + +# # Initialise configs +COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql.conf.j2 /etc/postgresql/postgresql.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_hba.conf.j2 /etc/postgresql/pg_hba.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_ident.conf.j2 /etc/postgresql/pg_ident.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql-stdout-log.conf /etc/postgresql/logging.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/supautils.conf.j2 /etc/postgresql-custom/supautils.conf +COPY --chown=postgres:postgres 
ansible/files/postgresql_extension_custom_scripts /etc/postgresql-custom/extension-custom-scripts +COPY --chown=postgres:postgres ansible/files/pgsodium_getkey_urandom.sh.j2 /usr/lib/postgresql/bin/pgsodium_getkey.sh +COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_read_replica.conf.j2 /etc/postgresql-custom/read-replica.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_walg.conf.j2 /etc/postgresql-custom/wal-g.conf +COPY --chown=postgres:postgres ansible/files/walg_helper_scripts/wal_fetch.sh /home/postgres/wal_fetch.sh +COPY ansible/files/walg_helper_scripts/wal_change_ownership.sh /root/wal_change_ownership.sh + +RUN sed -i \ + -e "s|#unix_socket_directories = '/tmp'|unix_socket_directories = '/var/run/postgresql'|g" \ + -e "s|#session_preload_libraries = ''|session_preload_libraries = 'supautils'|g" \ + -e "s|#include = '/etc/postgresql-custom/supautils.conf'|include = '/etc/postgresql-custom/supautils.conf'|g" \ + -e "s|#include = '/etc/postgresql-custom/wal-g.conf'|include = '/etc/postgresql-custom/wal-g.conf'|g" /etc/postgresql/postgresql.conf && \ + echo "cron.database_name = 'postgres'" >> /etc/postgresql/postgresql.conf && \ + #echo "pljava.libjvm_location = '/usr/lib/jvm/java-11-openjdk-${TARGETARCH}/lib/server/libjvm.so'" >> /etc/postgresql/postgresql.conf && \ + echo "pgsodium.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ + echo "vault.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ + echo 'auto_explain.log_min_duration = 10s' >> /etc/postgresql/postgresql.conf && \ + usermod -aG postgres wal-g && \ + mkdir -p /etc/postgresql-custom && \ + chown postgres:postgres /etc/postgresql-custom + + # Remove items from postgresql.conf +RUN sed -i 's/ timescaledb,//g;' "/etc/postgresql/postgresql.conf" + #as of pg 16.4 + this db_user_namespace totally deprecated and will break the server if setting is present +RUN sed -i 's/db_user_namespace = off/#db_user_namespace = off/g;' "/etc/postgresql/postgresql.conf" +RUN sed -i 's/ timescaledb,//g; s/ plv8,//g' "/etc/postgresql-custom/supautils.conf" + + + +# # Include schema migrations +COPY migrations/db /docker-entrypoint-initdb.d/ +COPY ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql /docker-entrypoint-initdb.d/init-scripts/00-schema.sql +COPY ansible/files/stat_extension.sql /docker-entrypoint-initdb.d/migrations/00-extension.sql + +# # Add upstream entrypoint script +COPY --from=gosu /usr/local/bin/gosu /usr/local/bin/gosu +ADD --chmod=0755 \ + https://github.com/docker-library/postgres/raw/master/17/bullseye/docker-entrypoint.sh \ + /usr/local/bin/docker-entrypoint.sh + +RUN mkdir -p /var/run/postgresql && chown postgres:postgres /var/run/postgresql + +ENTRYPOINT ["docker-entrypoint.sh"] + +HEALTHCHECK --interval=2s --timeout=2s --retries=10 CMD pg_isready -U postgres -h localhost +STOPSIGNAL SIGINT +EXPOSE 5432 + +ENV POSTGRES_HOST=/var/run/postgresql +ENV POSTGRES_USER=tealbase_admin +ENV POSTGRES_DB=postgres +ENV POSTGRES_INITDB_ARGS="--allow-group-access --locale-provider=icu --encoding=UTF-8 --icu-locale=en_US.UTF-8" +RUN apt-get update && apt-get install -y --no-install-recommends \ + locales \ + && rm -rf /var/lib/apt/lists/* && \ + localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 \ + && localedef -i C -c -f UTF-8 -A /usr/share/locale/locale.alias C.UTF-8 +RUN echo "C.UTF-8 UTF-8" > /etc/locale.gen && echo "en_US.UTF-8 UTF-8" >> 
/etc/locale.gen && locale-gen +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US:en +ENV LC_ALL en_US.UTF-8 +ENV LOCALE_ARCHIVE /usr/lib/locale/locale-archive +RUN mkdir -p /usr/share/postgresql/extension/ && \ + ln -s /usr/lib/postgresql/bin/pgsodium_getkey.sh /usr/share/postgresql/extension/pgsodium_getkey && \ + chmod +x /usr/lib/postgresql/bin/pgsodium_getkey.sh +CMD ["postgres", "-D", "/etc/postgresql"] diff --git a/Dockerfile-kubernetes b/Dockerfile-kubernetes new file mode 100644 index 0000000..e27ec2c --- /dev/null +++ b/Dockerfile-kubernetes @@ -0,0 +1,12 @@ +FROM alpine:3.22 + +ADD ./output-cloudimg/packer-cloudimg /disk/image.qcow2 + +RUN apk add --no-cache qemu-system-aarch64 qemu-img openssh-client aavmf virtiofsd \ + && truncate -s 64M /root/varstore.img \ + && truncate -s 64M /root/efi.img \ + && dd if=/usr/share/AAVMF/QEMU_EFI.fd of=/root/efi.img conv=notrunc \ + && qemu-img create -f qcow2 /tmp/disk.qcow2 -b /disk/image.qcow2 -F qcow2 \ + && apk del --no-cache aavmf qemu-img + +CMD exec /bin/sh -c "trap : TERM INT; sleep 9999999999d & wait" diff --git a/Dockerfile-orioledb-17 b/Dockerfile-orioledb-17 new file mode 100644 index 0000000..1f92da2 --- /dev/null +++ b/Dockerfile-orioledb-17 @@ -0,0 +1,238 @@ +# syntax=docker/dockerfile:1.6 +ARG postgresql_major=17-orioledb +ARG postgresql_release=${postgresql_major}.1 + +# Bump default build arg to build a package from source +# Bump vars.yml to specify runtime package version +ARG sfcgal_release=1.3.10 +ARG postgis_release=3.3.2 +ARG pgrouting_release=3.4.1 +ARG pgtap_release=1.2.0 +ARG pg_cron_release=1.6.2 +ARG pgaudit_release=1.7.0 +ARG pgjwt_release=9742dab1b2f297ad3811120db7b21451bca2d3c9 +ARG pgsql_http_release=1.5.0 +ARG plpgsql_check_release=2.2.5 +ARG pg_safeupdate_release=1.4 +ARG timescaledb_release=2.9.1 +ARG wal2json_release=2_5 +ARG pljava_release=1.6.4 +ARG plv8_release=3.1.5 +ARG pg_plan_filter_release=5081a7b5cb890876e67d8e7486b6a64c38c9a492 +ARG pg_net_release=0.7.1 +ARG rum_release=1.3.13 +ARG pg_hashids_release=cd0e1b31d52b394a0df64079406a14a4f7387cd6 +ARG libsodium_release=1.0.18 +ARG pgsodium_release=3.1.6 +ARG pg_graphql_release=1.5.11 +ARG pg_stat_monitor_release=1.1.1 +ARG pg_jsonschema_release=0.1.4 +ARG pg_repack_release=1.4.8 +ARG vault_release=0.2.8 +ARG groonga_release=12.0.8 +ARG pgroonga_release=2.4.0 +ARG wrappers_release=0.5.0 +ARG hypopg_release=1.3.1 +ARG pgvector_release=0.4.0 +ARG pg_tle_release=1.3.2 +ARG index_advisor_release=0.2.0 +ARG supautils_release=2.2.0 +ARG wal_g_release=3.0.5 + +FROM ubuntu:noble as base + +RUN apt update -y && apt install -y \ + curl \ + gnupg \ + lsb-release \ + software-properties-common \ + wget \ + sudo \ + tree \ + && apt clean + + +RUN adduser --system --home /var/lib/postgresql --no-create-home --shell /bin/bash --group --gecos "PostgreSQL administrator" postgres +RUN adduser --system --no-create-home --shell /bin/bash --group wal-g +RUN curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install linux \ +--init none \ +--no-confirm \ +--extra-conf "substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com" \ +--extra-conf "trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" + +ENV PATH="${PATH}:/nix/var/nix/profiles/default/bin" + +COPY . 
/nixpg + +WORKDIR /nixpg + +RUN nix profile install .#psql_orioledb-17/bin + +RUN nix store gc + +WORKDIR / + + +RUN mkdir -p /usr/lib/postgresql/bin \ + /usr/lib/postgresql/share/postgresql \ + /usr/share/postgresql \ + /var/lib/postgresql \ + && chown -R postgres:postgres /usr/lib/postgresql \ + && chown -R postgres:postgres /var/lib/postgresql \ + && chown -R postgres:postgres /usr/share/postgresql + +# Create symbolic links +RUN ln -s /nix/var/nix/profiles/default/bin/* /usr/lib/postgresql/bin/ \ + && ln -s /nix/var/nix/profiles/default/bin/* /usr/bin/ \ + && chown -R postgres:postgres /usr/bin + +# Create symbolic links for PostgreSQL shares +RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/lib/postgresql/share/postgresql/ +RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/share/postgresql/ +RUN chown -R postgres:postgres /usr/lib/postgresql/share/postgresql/ +RUN chown -R postgres:postgres /usr/share/postgresql/ +# Create symbolic links for contrib directory +RUN tree /nix > /tmp/tree.txt && cat /tmp/tree.txt && cat /tmp/tree.txt >&2 + +RUN chown -R postgres:postgres /usr/lib/postgresql + +RUN ln -sf /usr/lib/postgresql/share/postgresql/timezonesets /usr/share/postgresql/timezonesets + + +RUN apt-get update && \ + apt-get install -y --no-install-recommends tzdata + +RUN ln -fs /usr/share/zoneinfo/Etc/UTC /etc/localtime && \ + dpkg-reconfigure --frontend noninteractive tzdata + +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + build-essential \ + checkinstall \ + cmake + +ENV PGDATA=/var/lib/postgresql/data + +#################### +# setup-wal-g.yml +#################### +FROM base as walg +ARG wal_g_release + +WORKDIR /nixpg + +RUN nix profile install .#wal-g-3 && \ + ln -s /nix/var/nix/profiles/default/bin/wal-g-3 /tmp/wal-g + +RUN nix store gc + +WORKDIR / +# #################### +# # Download gosu for easy step-down from root +# #################### +FROM base as gosu +ARG TARGETARCH +# Install dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + gnupg \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* +# Download binary +ARG GOSU_VERSION=1.16 +ARG GOSU_GPG_KEY=B42F6819007F00F88E364FD4036A9C25BF357DD4 +ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH \ + /usr/local/bin/gosu +ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH.asc \ + /usr/local/bin/gosu.asc +# Verify checksum +RUN gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys $GOSU_GPG_KEY && \ + gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu && \ + gpgconf --kill all && \ + chmod +x /usr/local/bin/gosu + +# #################### +# # Build final image +# #################### +FROM gosu as production +RUN id postgres || (echo "postgres user does not exist" && exit 1) +# # Setup extensions +COPY --from=walg /tmp/wal-g /usr/local/bin/ + +# # Initialise configs +COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql.conf.j2 /etc/postgresql/postgresql.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_hba.conf.j2 /etc/postgresql/pg_hba.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_ident.conf.j2 /etc/postgresql/pg_ident.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql-stdout-log.conf /etc/postgresql/logging.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/supautils.conf.j2 /etc/postgresql-custom/supautils.conf +COPY 
--chown=postgres:postgres ansible/files/postgresql_extension_custom_scripts /etc/postgresql-custom/extension-custom-scripts +COPY --chown=postgres:postgres ansible/files/pgsodium_getkey_urandom.sh.j2 /usr/lib/postgresql/bin/pgsodium_getkey.sh +COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_read_replica.conf.j2 /etc/postgresql-custom/read-replica.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_walg.conf.j2 /etc/postgresql-custom/wal-g.conf +COPY --chown=postgres:postgres ansible/files/walg_helper_scripts/wal_fetch.sh /home/postgres/wal_fetch.sh +COPY ansible/files/walg_helper_scripts/wal_change_ownership.sh /root/wal_change_ownership.sh + +RUN sed -i \ + -e "s|#unix_socket_directories = '/tmp'|unix_socket_directories = '/var/run/postgresql'|g" \ + -e "s|#session_preload_libraries = ''|session_preload_libraries = 'supautils'|g" \ + -e "s|#include = '/etc/postgresql-custom/supautils.conf'|include = '/etc/postgresql-custom/supautils.conf'|g" \ + -e "s|#include = '/etc/postgresql-custom/wal-g.conf'|include = '/etc/postgresql-custom/wal-g.conf'|g" /etc/postgresql/postgresql.conf && \ + echo "cron.database_name = 'postgres'" >> /etc/postgresql/postgresql.conf && \ + #echo "pljava.libjvm_location = '/usr/lib/jvm/java-11-openjdk-${TARGETARCH}/lib/server/libjvm.so'" >> /etc/postgresql/postgresql.conf && \ + echo "pgsodium.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ + echo "vault.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ + echo 'auto_explain.log_min_duration = 10s' >> /etc/postgresql/postgresql.conf && \ + usermod -aG postgres wal-g && \ + mkdir -p /etc/postgresql-custom && \ + chown postgres:postgres /etc/postgresql-custom + + # Remove items from postgresql.conf +RUN sed -i 's/ timescaledb,//g;' "/etc/postgresql/postgresql.conf" + #as of pg 16.4 + this db_user_namespace totally deprecated and will break the server if setting is present +RUN sed -i 's/db_user_namespace = off/#db_user_namespace = off/g;' "/etc/postgresql/postgresql.conf" +RUN sed -i 's/ timescaledb,//g; s/ plv8,//g; s/ postgis,//g; s/ pgrouting,//g' "/etc/postgresql-custom/supautils.conf" +RUN sed -i 's/\(shared_preload_libraries.*\)'\''\(.*\)$/\1, orioledb'\''\2/' "/etc/postgresql/postgresql.conf" +RUN echo "default_table_access_method = 'orioledb'" >> "/etc/postgresql/postgresql.conf" + + + +# # Include schema migrations +COPY migrations/db /docker-entrypoint-initdb.d/ +COPY ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql /docker-entrypoint-initdb.d/init-scripts/00-schema.sql +COPY ansible/files/stat_extension.sql /docker-entrypoint-initdb.d/migrations/00-extension.sql +# Enable orioledb extension first +RUN echo "CREATE EXTENSION orioledb;" > /docker-entrypoint-initdb.d/init-scripts/00-pre-init.sql && \ + chown postgres:postgres /docker-entrypoint-initdb.d/init-scripts/00-pre-init.sql + +# # Add upstream entrypoint script +COPY --from=gosu /usr/local/bin/gosu /usr/local/bin/gosu +ADD --chmod=0755 \ + https://github.com/docker-library/postgres/raw/master/17/bullseye/docker-entrypoint.sh \ + /usr/local/bin/docker-entrypoint.sh + +RUN mkdir -p /var/run/postgresql && chown postgres:postgres /var/run/postgresql + +ENTRYPOINT ["docker-entrypoint.sh"] + +HEALTHCHECK --interval=2s --timeout=2s --retries=10 CMD pg_isready -U postgres -h localhost +STOPSIGNAL SIGINT +EXPOSE 5432 + +ENV POSTGRES_HOST=/var/run/postgresql +ENV POSTGRES_USER=tealbase_admin +ENV 
POSTGRES_DB=postgres +ENV POSTGRES_INITDB_ARGS="--allow-group-access --locale-provider=icu --encoding=UTF-8 --icu-locale=en_US.UTF-8" +RUN apt-get update && apt-get install -y --no-install-recommends \ + locales \ + && rm -rf /var/lib/apt/lists/* && \ + localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 \ + && localedef -i C -c -f UTF-8 -A /usr/share/locale/locale.alias C.UTF-8 +RUN echo "C.UTF-8 UTF-8" > /etc/locale.gen && echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen && locale-gen +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US:en +ENV LC_ALL en_US.UTF-8 +ENV LOCALE_ARCHIVE /usr/lib/locale/locale-archive +RUN mkdir -p /usr/share/postgresql/extension/ && \ + ln -s /usr/lib/postgresql/bin/pgsodium_getkey.sh /usr/share/postgresql/extension/pgsodium_getkey && \ + chmod +x /usr/lib/postgresql/bin/pgsodium_getkey.sh + +CMD ["postgres", "-D", "/etc/postgresql"] diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..610322b --- /dev/null +++ b/Makefile @@ -0,0 +1,16 @@ +UPSTREAM_NIX_GIT_SHA := $(shell git rev-parse HEAD) +GIT_SHA := $(shell git describe --tags --always --dirty) + +init: qemu-arm64-nix.pkr.hcl + packer init qemu-arm64-nix.pkr.hcl + +output-cloudimg/packer-cloudimg: ansible qemu-arm64-nix.pkr.hcl + packer build -var "git_sha=$(UPSTREAM_NIX_GIT_SHA)" qemu-arm64-nix.pkr.hcl + +alpine-image: output-cloudimg/packer-cloudimg + sudo nerdctl build . -t tealbase-postgres-test:$(GIT_SHA) -f ./Dockerfile-kubernetes + +clean: + rm -rf output-cloudimg + +.PHONY: alpine-image init clean diff --git a/README.md b/README.md index d89caee..413d08e 100644 --- a/README.md +++ b/README.md @@ -2,44 +2,161 @@ Unmodified Postgres with some useful plugins. Our goal with this repo is not to modify Postgres, but to provide some of the most common extensions with a one-click install. -## Features - -- ✅ Postgres [12](https://www.postgresql.org/about/news/1976/). Includes [generated columns](https://www.postgresql.org/docs/12/ddl-generated-columns.html) and [JSON path](https://www.postgresql.org/docs/12/functions-json.html#FUNCTIONS-SQLJSON-PATH) support. -- ✅ Ubuntu 18.04 (Bionic). -- ✅ [pg-contrib-12](https://www.postgresql.org/docs/12/contrib.html). Because everyone should enable `pg_stat_statements`. +## Primary Features +- ✅ Postgres [postgresql-15.8](https://www.postgresql.org/docs/15/index.html) +- ✅ Postgres [postgresql-17.4](https://www.postgresql.org/docs/17/index.html) +- ✅ Postgres [orioledb-postgresql-17_6](https://github.com/orioledb/orioledb) +- ✅ Ubuntu 24.04 (Noble Numbat). - ✅ [wal_level](https://www.postgresql.org/docs/current/runtime-config-wal.html) = logical and [max_replication_slots](https://www.postgresql.org/docs/current/runtime-config-replication.html) = 5. Ready for replication. -- ✅ [PostGIS](https://postgis.net/). Postgres' most popular extension - support for geographic objects. -- ✅ [pgTAP](https://pgtap.org/). Unit Testing for Postgres. -- ✅ [pgAudit](https://www.pgaudit.org/). Generate highly compliant audit logs. -- ✅ [pgjwt](https://github.com/michelp/pgjwt). Generate JSON Web Tokens (JWT) in Postgres. -- ✅ [pgsql-http](https://github.com/pramsey/pgsql-http). HTTP client for Postgres. -- ✅ [plpgsql_check](https://github.com/okbob/plpgsql_check). Linter tool for PL/pgSQL. -- ✅ [plv8](https://github.com/plv8/plv8). Write in Javascript functions in Postgres. -- ✅ [plpython3u](https://www.postgresql.org/docs/current/plpython-python23.html). Python3 enabled by default. Write in Python functions in Postgres. 
-- ✅ [PL/Java](https://github.com/tada/pljaval). Write in Java functions in Postgres. +- ✅ [Large Systems Extensions](https://github.com/aws/aws-graviton-getting-started#building-for-graviton-and-graviton2). Enabled for ARM images. +## Extensions + +### PostgreSQL 15 Extensions +| Extension | Version | Description | +| ------------- | :-------------: | ------------- | +| [hypopg](https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.1.tar.gz) | [1.4.1](https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.1.tar.gz) | Hypothetical Indexes for PostgreSQL | +| [index_advisor](https://github.com/olirice/index_advisor/archive/v0.2.0.tar.gz) | [0.2.0](https://github.com/olirice/index_advisor/archive/v0.2.0.tar.gz) | Recommend indexes to improve query performance in PostgreSQL | +| [pg-safeupdate](https://github.com/eradman/pg-safeupdate/archive/1.4.tar.gz) | [1.4](https://github.com/eradman/pg-safeupdate/archive/1.4.tar.gz) | A simple extension to PostgreSQL that requires criteria for UPDATE and DELETE | +| [pg_cron](https://github.com/citusdata/pg_cron/archive/v1.6.4.tar.gz) | [1.6.4](https://github.com/citusdata/pg_cron/archive/v1.6.4.tar.gz) | Run Cron jobs through PostgreSQL | +| [pg_graphql](https://github.com/tealbase/pg_graphql/archive/v1.5.11.tar.gz) | [1.5.11](https://github.com/tealbase/pg_graphql/archive/v1.5.11.tar.gz) | GraphQL support for PostreSQL | +| [pg_hashids](https://github.com/iCyberon/pg_hashids/archive/cd0e1b31d52b394a0df64079406a14a4f7387cd6.tar.gz) | [cd0e1b31d52b394a0df64079406a14a4f7387cd6](https://github.com/iCyberon/pg_hashids/archive/cd0e1b31d52b394a0df64079406a14a4f7387cd6.tar.gz) | Generate short unique IDs in PostgreSQL | +| [pg_jsonschema](https://github.com/tealbase/pg_jsonschema/archive/v0.3.3.tar.gz) | [0.3.3](https://github.com/tealbase/pg_jsonschema/archive/v0.3.3.tar.gz) | JSON Schema Validation for PostgreSQL | +| [pg_net](https://github.com/tealbase/pg_net/archive/refs/tags/v0.14.0.tar.gz) | [0.14.0](https://github.com/tealbase/pg_net/archive/refs/tags/v0.14.0.tar.gz) | Async networking for Postgres | +| [pg_plan_filter](https://github.com/pgexperts/pg_plan_filter/archive/5081a7b5cb890876e67d8e7486b6a64c38c9a492.tar.gz) | [5081a7b5cb890876e67d8e7486b6a64c38c9a492](https://github.com/pgexperts/pg_plan_filter/archive/5081a7b5cb890876e67d8e7486b6a64c38c9a492.tar.gz) | Filter PostgreSQL statements by execution plans | +| [pg_repack](https://github.com/reorg/pg_repack/archive/ver_1.5.2.tar.gz) | [1.5.2](https://github.com/reorg/pg_repack/archive/ver_1.5.2.tar.gz) | Reorganize tables in PostgreSQL databases with minimal locks | +| [pg_stat_monitor](https://github.com/percona/pg_stat_monitor/archive/refs/tags/2.1.0.tar.gz) | [2.1.0](https://github.com/percona/pg_stat_monitor/archive/refs/tags/2.1.0.tar.gz) | Query Performance Monitoring Tool for PostgreSQL | +| [pg_tle](https://github.com/aws/pg_tle/archive/refs/tags/v1.4.0.tar.gz) | [1.4.0](https://github.com/aws/pg_tle/archive/refs/tags/v1.4.0.tar.gz) | Framework for 'Trusted Language Extensions' in PostgreSQL | +| [pgaudit](https://github.com/pgaudit/pgaudit/archive/1.7.0.tar.gz) | [1.7.0](https://github.com/pgaudit/pgaudit/archive/1.7.0.tar.gz) | Open Source PostgreSQL Audit Logging | +| [pgjwt](https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz) | [9742dab1b2f297ad3811120db7b21451bca2d3c9](https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz) | PostgreSQL implementation of JSON Web Tokens | +| 
[pgmq](https://github.com/tembo-io/pgmq/archive/v1.4.4.tar.gz) | [1.4.4](https://github.com/tembo-io/pgmq/archive/v1.4.4.tar.gz) | A lightweight message queue. Like AWS SQS and RSMQ but on Postgres. | +| [pgroonga](https://packages.groonga.org/source/pgroonga/pgroonga-3.2.5.tar.gz) | [3.2.5](https://packages.groonga.org/source/pgroonga/pgroonga-3.2.5.tar.gz) | A PostgreSQL extension to use Groonga as the index | +| [pgrouting](https://github.com/pgRouting/pgrouting/archive/v3.4.1.tar.gz) | [3.4.1](https://github.com/pgRouting/pgrouting/archive/v3.4.1.tar.gz) | A PostgreSQL/PostGIS extension that provides geospatial routing functionality | +| [pgsodium](https://github.com/michelp/pgsodium/archive/refs/tags/v3.1.8.tar.gz) | [3.1.8](https://github.com/michelp/pgsodium/archive/refs/tags/v3.1.8.tar.gz) | Modern cryptography for PostgreSQL | +| [pgsql-http](https://github.com/pramsey/pgsql-http/archive/refs/tags/v1.6.1.tar.gz) | [1.6.1](https://github.com/pramsey/pgsql-http/archive/refs/tags/v1.6.1.tar.gz) | HTTP client for Postgres | +| [pgtap](https://github.com/theory/pgtap/archive/v1.2.0.tar.gz) | [1.2.0](https://github.com/theory/pgtap/archive/v1.2.0.tar.gz) | A unit testing framework for PostgreSQL | +| [pgvector](https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz) | [0.8.0](https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz) | Open-source vector similarity search for Postgres | +| [plpgsql-check](https://github.com/okbob/plpgsql_check/archive/v2.7.11.tar.gz) | [2.7.11](https://github.com/okbob/plpgsql_check/archive/v2.7.11.tar.gz) | Linter tool for language PL/pgSQL | +| [plv8](https://github.com/plv8/plv8/archive/v3.1.10.tar.gz) | [3.1.10](https://github.com/plv8/plv8/archive/v3.1.10.tar.gz) | V8 Engine Javascript Procedural Language add-on for PostgreSQL | +| [postgis](https://download.osgeo.org/postgis/source/postgis-3.3.7.tar.gz) | [3.3.7](https://download.osgeo.org/postgis/source/postgis-3.3.7.tar.gz) | Geographic Objects for PostgreSQL | +| [rum](https://github.com/postgrespro/rum/archive/1.3.14.tar.gz) | [1.3.14](https://github.com/postgrespro/rum/archive/1.3.14.tar.gz) | Full text search index method for PostgreSQL | +| [tealbase-wrappers](https://github.com/tealbase/wrappers/archive/v0.5.0.tar.gz) | [0.5.0](https://github.com/tealbase/wrappers/archive/v0.5.0.tar.gz) | Various Foreign Data Wrappers (FDWs) for PostreSQL | +| [supautils](https://github.com/tealbase/supautils/archive/refs/tags/v2.9.4.tar.gz) | [2.9.4](https://github.com/tealbase/supautils/archive/refs/tags/v2.9.4.tar.gz) | PostgreSQL extension for enhanced security | +| [timescaledb-apache](https://github.com/timescale/timescaledb/archive/2.16.1.tar.gz) | [2.16.1](https://github.com/timescale/timescaledb/archive/2.16.1.tar.gz) | Scales PostgreSQL for time-series data via automatic partitioning across time and space | +| [vault](https://github.com/tealbase/vault/archive/refs/tags/v0.3.1.tar.gz) | [0.3.1](https://github.com/tealbase/vault/archive/refs/tags/v0.3.1.tar.gz) | Store encrypted secrets in PostgreSQL | +| [wal2json](https://github.com/eulerto/wal2json/archive/wal2json_2_6.tar.gz) | [2_6](https://github.com/eulerto/wal2json/archive/wal2json_2_6.tar.gz) | PostgreSQL JSON output plugin for changeset extraction | + +### PostgreSQL 17 Extensions +| Extension | Version | Description | +| ------------- | :-------------: | ------------- | +| [hypopg](https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.1.tar.gz) | 
[1.4.1](https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.1.tar.gz) | Hypothetical Indexes for PostgreSQL | +| [index_advisor](https://github.com/olirice/index_advisor/archive/v0.2.0.tar.gz) | [0.2.0](https://github.com/olirice/index_advisor/archive/v0.2.0.tar.gz) | Recommend indexes to improve query performance in PostgreSQL | +| [pg-safeupdate](https://github.com/eradman/pg-safeupdate/archive/1.4.tar.gz) | [1.4](https://github.com/eradman/pg-safeupdate/archive/1.4.tar.gz) | A simple extension to PostgreSQL that requires criteria for UPDATE and DELETE | +| [pg_cron](https://github.com/citusdata/pg_cron/archive/v1.6.4.tar.gz) | [1.6.4](https://github.com/citusdata/pg_cron/archive/v1.6.4.tar.gz) | Run Cron jobs through PostgreSQL | +| [pg_graphql](https://github.com/tealbase/pg_graphql/archive/v1.5.11.tar.gz) | [1.5.11](https://github.com/tealbase/pg_graphql/archive/v1.5.11.tar.gz) | GraphQL support for PostgreSQL | +| [pg_hashids](https://github.com/iCyberon/pg_hashids/archive/cd0e1b31d52b394a0df64079406a14a4f7387cd6.tar.gz) | [cd0e1b31d52b394a0df64079406a14a4f7387cd6](https://github.com/iCyberon/pg_hashids/archive/cd0e1b31d52b394a0df64079406a14a4f7387cd6.tar.gz) | Generate short unique IDs in PostgreSQL | +| [pg_jsonschema](https://github.com/tealbase/pg_jsonschema/archive/v0.3.3.tar.gz) | [0.3.3](https://github.com/tealbase/pg_jsonschema/archive/v0.3.3.tar.gz) | JSON Schema Validation for PostgreSQL | +| [pg_net](https://github.com/tealbase/pg_net/archive/refs/tags/v0.14.0.tar.gz) | [0.14.0](https://github.com/tealbase/pg_net/archive/refs/tags/v0.14.0.tar.gz) | Async networking for Postgres | +| [pg_plan_filter](https://github.com/pgexperts/pg_plan_filter/archive/5081a7b5cb890876e67d8e7486b6a64c38c9a492.tar.gz) | [5081a7b5cb890876e67d8e7486b6a64c38c9a492](https://github.com/pgexperts/pg_plan_filter/archive/5081a7b5cb890876e67d8e7486b6a64c38c9a492.tar.gz) | Filter PostgreSQL statements by execution plans | +| [pg_repack](https://github.com/reorg/pg_repack/archive/ver_1.5.2.tar.gz) | [1.5.2](https://github.com/reorg/pg_repack/archive/ver_1.5.2.tar.gz) | Reorganize tables in PostgreSQL databases with minimal locks | +| [pg_stat_monitor](https://github.com/percona/pg_stat_monitor/archive/refs/tags/2.1.0.tar.gz) | [2.1.0](https://github.com/percona/pg_stat_monitor/archive/refs/tags/2.1.0.tar.gz) | Query Performance Monitoring Tool for PostgreSQL | +| [pg_tle](https://github.com/aws/pg_tle/archive/refs/tags/v1.4.0.tar.gz) | [1.4.0](https://github.com/aws/pg_tle/archive/refs/tags/v1.4.0.tar.gz) | Framework for 'Trusted Language Extensions' in PostgreSQL | +| [pgaudit](https://github.com/pgaudit/pgaudit/archive/17.0.tar.gz) | [17.0](https://github.com/pgaudit/pgaudit/archive/17.0.tar.gz) | Open Source PostgreSQL Audit Logging | +| [pgjwt](https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz) | [9742dab1b2f297ad3811120db7b21451bca2d3c9](https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz) | PostgreSQL implementation of JSON Web Tokens | +| [pgmq](https://github.com/tembo-io/pgmq/archive/v1.4.4.tar.gz) | [1.4.4](https://github.com/tembo-io/pgmq/archive/v1.4.4.tar.gz) | A lightweight message queue. Like AWS SQS and RSMQ but on Postgres. 
| +| [pgroonga](https://packages.groonga.org/source/pgroonga/pgroonga-3.2.5.tar.gz) | [3.2.5](https://packages.groonga.org/source/pgroonga/pgroonga-3.2.5.tar.gz) | A PostgreSQL extension to use Groonga as the index | +| [pgrouting](https://github.com/pgRouting/pgrouting/archive/v3.4.1.tar.gz) | [3.4.1](https://github.com/pgRouting/pgrouting/archive/v3.4.1.tar.gz) | A PostgreSQL/PostGIS extension that provides geospatial routing functionality | +| [pgsodium](https://github.com/michelp/pgsodium/archive/refs/tags/v3.1.8.tar.gz) | [3.1.8](https://github.com/michelp/pgsodium/archive/refs/tags/v3.1.8.tar.gz) | Modern cryptography for PostgreSQL | +| [pgsql-http](https://github.com/pramsey/pgsql-http/archive/refs/tags/v1.6.1.tar.gz) | [1.6.1](https://github.com/pramsey/pgsql-http/archive/refs/tags/v1.6.1.tar.gz) | HTTP client for Postgres | +| [pgtap](https://github.com/theory/pgtap/archive/v1.2.0.tar.gz) | [1.2.0](https://github.com/theory/pgtap/archive/v1.2.0.tar.gz) | A unit testing framework for PostgreSQL | +| [pgvector](https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz) | [0.8.0](https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz) | Open-source vector similarity search for Postgres | +| [plpgsql-check](https://github.com/okbob/plpgsql_check/archive/v2.7.11.tar.gz) | [2.7.11](https://github.com/okbob/plpgsql_check/archive/v2.7.11.tar.gz) | Linter tool for the PL/pgSQL language | +| [postgis](https://download.osgeo.org/postgis/source/postgis-3.3.7.tar.gz) | [3.3.7](https://download.osgeo.org/postgis/source/postgis-3.3.7.tar.gz) | Geographic Objects for PostgreSQL | +| [rum](https://github.com/postgrespro/rum/archive/1.3.14.tar.gz) | [1.3.14](https://github.com/postgrespro/rum/archive/1.3.14.tar.gz) | Full text search index method for PostgreSQL | +| [tealbase-wrappers](https://github.com/tealbase/wrappers/archive/v0.5.0.tar.gz) | [0.5.0](https://github.com/tealbase/wrappers/archive/v0.5.0.tar.gz) | Various Foreign Data Wrappers (FDWs) for PostgreSQL | +| [supautils](https://github.com/tealbase/supautils/archive/refs/tags/v2.9.4.tar.gz) | [2.9.4](https://github.com/tealbase/supautils/archive/refs/tags/v2.9.4.tar.gz) | PostgreSQL extension for enhanced security | +| [vault](https://github.com/tealbase/vault/archive/refs/tags/v0.3.1.tar.gz) | [0.3.1](https://github.com/tealbase/vault/archive/refs/tags/v0.3.1.tar.gz) | Store encrypted secrets in PostgreSQL | +| [wal2json](https://github.com/eulerto/wal2json/archive/wal2json_2_6.tar.gz) | [2_6](https://github.com/eulerto/wal2json/archive/wal2json_2_6.tar.gz) | PostgreSQL JSON output plugin for changeset extraction | + +### PostgreSQL orioledb-17 Extensions +| Extension | Version | Description | +| ------------- | :-------------: | ------------- | +| [hypopg](https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.1.tar.gz) | [1.4.1](https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.1.tar.gz) | Hypothetical Indexes for PostgreSQL | +| [index_advisor](https://github.com/olirice/index_advisor/archive/v0.2.0.tar.gz) | [0.2.0](https://github.com/olirice/index_advisor/archive/v0.2.0.tar.gz) | Recommend indexes to improve query performance in PostgreSQL | +| [orioledb](https://github.com/orioledb/orioledb/archive/beta10.tar.gz) | [beta10](https://github.com/orioledb/orioledb/archive/beta10.tar.gz) | OrioleDB, a cloud-native storage engine for PostgreSQL | +| [pg-safeupdate](https://github.com/eradman/pg-safeupdate/archive/1.4.tar.gz) | [1.4](https://github.com/eradman/pg-safeupdate/archive/1.4.tar.gz) | A simple extension to PostgreSQL that requires criteria for 
UPDATE and DELETE | +| [pg_cron](https://github.com/citusdata/pg_cron/archive/v1.6.4.tar.gz) | [1.6.4](https://github.com/citusdata/pg_cron/archive/v1.6.4.tar.gz) | Run Cron jobs through PostgreSQL | +| [pg_graphql](https://github.com/tealbase/pg_graphql/archive/v1.5.11.tar.gz) | [1.5.11](https://github.com/tealbase/pg_graphql/archive/v1.5.11.tar.gz) | GraphQL support for PostgreSQL | +| [pg_hashids](https://github.com/iCyberon/pg_hashids/archive/cd0e1b31d52b394a0df64079406a14a4f7387cd6.tar.gz) | [cd0e1b31d52b394a0df64079406a14a4f7387cd6](https://github.com/iCyberon/pg_hashids/archive/cd0e1b31d52b394a0df64079406a14a4f7387cd6.tar.gz) | Generate short unique IDs in PostgreSQL | +| [pg_jsonschema](https://github.com/tealbase/pg_jsonschema/archive/v0.3.3.tar.gz) | [0.3.3](https://github.com/tealbase/pg_jsonschema/archive/v0.3.3.tar.gz) | JSON Schema Validation for PostgreSQL | +| [pg_net](https://github.com/tealbase/pg_net/archive/refs/tags/v0.14.0.tar.gz) | [0.14.0](https://github.com/tealbase/pg_net/archive/refs/tags/v0.14.0.tar.gz) | Async networking for Postgres | +| [pg_plan_filter](https://github.com/pgexperts/pg_plan_filter/archive/5081a7b5cb890876e67d8e7486b6a64c38c9a492.tar.gz) | [5081a7b5cb890876e67d8e7486b6a64c38c9a492](https://github.com/pgexperts/pg_plan_filter/archive/5081a7b5cb890876e67d8e7486b6a64c38c9a492.tar.gz) | Filter PostgreSQL statements by execution plans | +| [pg_repack](https://github.com/reorg/pg_repack/archive/ver_1.5.2.tar.gz) | [1.5.2](https://github.com/reorg/pg_repack/archive/ver_1.5.2.tar.gz) | Reorganize tables in PostgreSQL databases with minimal locks | +| [pg_stat_monitor](https://github.com/percona/pg_stat_monitor/archive/refs/tags/2.1.0.tar.gz) | [2.1.0](https://github.com/percona/pg_stat_monitor/archive/refs/tags/2.1.0.tar.gz) | Query Performance Monitoring Tool for PostgreSQL | +| [pg_tle](https://github.com/aws/pg_tle/archive/refs/tags/v1.4.0.tar.gz) | [1.4.0](https://github.com/aws/pg_tle/archive/refs/tags/v1.4.0.tar.gz) | Framework for 'Trusted Language Extensions' in PostgreSQL | +| [pgaudit](https://github.com/pgaudit/pgaudit/archive/17.0.tar.gz) | [17.0](https://github.com/pgaudit/pgaudit/archive/17.0.tar.gz) | Open Source PostgreSQL Audit Logging | +| [pgjwt](https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz) | [9742dab1b2f297ad3811120db7b21451bca2d3c9](https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz) | PostgreSQL implementation of JSON Web Tokens | +| [pgmq](https://github.com/tembo-io/pgmq/archive/v1.4.4.tar.gz) | [1.4.4](https://github.com/tembo-io/pgmq/archive/v1.4.4.tar.gz) | A lightweight message queue. Like AWS SQS and RSMQ but on Postgres. 
| +| [pgroonga](https://packages.groonga.org/source/pgroonga/pgroonga-3.2.5.tar.gz) | [3.2.5](https://packages.groonga.org/source/pgroonga/pgroonga-3.2.5.tar.gz) | A PostgreSQL extension to use Groonga as the index | +| [pgrouting](https://github.com/pgRouting/pgrouting/archive/v3.4.1.tar.gz) | [3.4.1](https://github.com/pgRouting/pgrouting/archive/v3.4.1.tar.gz) | A PostgreSQL/PostGIS extension that provides geospatial routing functionality | +| [pgsodium](https://github.com/michelp/pgsodium/archive/refs/tags/v3.1.8.tar.gz) | [3.1.8](https://github.com/michelp/pgsodium/archive/refs/tags/v3.1.8.tar.gz) | Modern cryptography for PostgreSQL | +| [pgsql-http](https://github.com/pramsey/pgsql-http/archive/refs/tags/v1.6.1.tar.gz) | [1.6.1](https://github.com/pramsey/pgsql-http/archive/refs/tags/v1.6.1.tar.gz) | HTTP client for Postgres | +| [pgtap](https://github.com/theory/pgtap/archive/v1.2.0.tar.gz) | [1.2.0](https://github.com/theory/pgtap/archive/v1.2.0.tar.gz) | A unit testing framework for PostgreSQL | +| [pgvector](https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz) | [0.8.0](https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz) | Open-source vector similarity search for Postgres | +| [plpgsql-check](https://github.com/okbob/plpgsql_check/archive/v2.7.11.tar.gz) | [2.7.11](https://github.com/okbob/plpgsql_check/archive/v2.7.11.tar.gz) | Linter tool for the PL/pgSQL language | +| [postgis](https://download.osgeo.org/postgis/source/postgis-3.3.7.tar.gz) | [3.3.7](https://download.osgeo.org/postgis/source/postgis-3.3.7.tar.gz) | Geographic Objects for PostgreSQL | +| [rum](https://github.com/postgrespro/rum/archive/1.3.14.tar.gz) | [1.3.14](https://github.com/postgrespro/rum/archive/1.3.14.tar.gz) | Full text search index method for PostgreSQL | +| [tealbase-wrappers](https://github.com/tealbase/wrappers/archive/v0.5.0.tar.gz) | [0.5.0](https://github.com/tealbase/wrappers/archive/v0.5.0.tar.gz) | Various Foreign Data Wrappers (FDWs) for PostgreSQL | +| [supautils](https://github.com/tealbase/supautils/archive/refs/tags/v2.9.4.tar.gz) | [2.9.4](https://github.com/tealbase/supautils/archive/refs/tags/v2.9.4.tar.gz) | PostgreSQL extension for enhanced security | +| [vault](https://github.com/tealbase/vault/archive/refs/tags/v0.3.1.tar.gz) | [0.3.1](https://github.com/tealbase/vault/archive/refs/tags/v0.3.1.tar.gz) | Store encrypted secrets in PostgreSQL | +| [wal2json](https://github.com/eulerto/wal2json/archive/wal2json_2_6.tar.gz) | [2_6](https://github.com/eulerto/wal2json/archive/wal2json_2_6.tar.gz) | PostgreSQL JSON output plugin for changeset extraction | +## Additional Goodies +*This is only available for our AWS EC2 images* + +| Goodie | Version | Description | +| ------------- | :-------------: | ------------- | +| [PgBouncer](https://www.pgbouncer.org/) | [1.19.0](http://www.pgbouncer.org/changelog.html#pgbouncer-119x) | Set up Connection Pooling. | +| [PostgREST](https://postgrest.org/en/stable/) | [v13.0.4](https://github.com/PostgREST/postgrest/releases/tag/v13.0.4) | Instantly transform your database into a RESTful API. | +| [WAL-G](https://github.com/wal-g/wal-g#wal-g) | [v2.0.1](https://github.com/wal-g/wal-g/releases/tag/v2.0.1) | Tool for physical database backup and recovery. | --> ## Install See all installation instructions in the [repo wiki](https://github.com/tealbase/postgres/wiki). 
-[![Docker](https://github.com/tealbase/postgres/blob/master/docs/img/docker.png)](https://github.com/tealbase/postgres/wiki/Docker) -[![Digital Ocean](https://github.com/tealbase/postgres/blob/master/docs/img/digital-ocean.png)](https://github.com/tealbase/postgres/wiki/Digital-Ocean) -[![AWS](https://github.com/tealbase/postgres/blob/master/docs/img/aws.png)](https://github.com/tealbase/postgres/wiki/AWS-EC2) +[![Docker](https://github.com/tealbase/postgres/blob/develop/docs/img/docker.png)](https://github.com/tealbase/postgres/wiki/Docker) +[![AWS](https://github.com/tealbase/postgres/blob/develop/docs/img/aws.png)](https://github.com/tealbase/postgres/wiki/AWS-EC2) -## Motivation + -Finally, this is the same build we offer at [tealbase](https://tealbase.io), and everything we do is opensource. This repo makes it easy to *install* Postgres, tealbase makes it easy to *use* Postgres. - -## Roadmap +## Motivation -- [Support for more images](https://github.com/tealbase/postgres/issues/4) -- [Vote for more plugins/extensions](https://github.com/tealbase/postgres/issues/5) +- Make it fast and simple to get started with Postgres. +- Show off a few of Postgres' most exciting features. +- This is the same build we offer at [tealbase](https://tealbase.io). - Open a github issue if you have a feature request ## License [The PostgreSQL License](https://opensource.org/licenses/postgresql). We realize that licensing is tricky since we are bundling all the various plugins. If we have infringed on any license, let us know and we will make the necessary changes (or remove that extension from this repo). + +## Sponsors + +We are building the features of Firebase using enterprise-grade, open source products. We support existing communities wherever possible, and if the products don’t exist we build them and open source them ourselves. 
+ +[![New Sponsor](https://user-images.githubusercontent.com/10214025/90518111-e74bbb00-e198-11ea-8f88-c9e3c1aa4b5b.png)](https://github.com/sponsors/tealbase) \ No newline at end of file diff --git a/amazon-arm64-nix.pkr.hcl b/amazon-arm64-nix.pkr.hcl new file mode 100644 index 0000000..9644b04 --- /dev/null +++ b/amazon-arm64-nix.pkr.hcl @@ -0,0 +1,272 @@ +variable "ami" { + type = string + default = "ubuntu/images/hvm-ssd-gp3/ubuntu-noble-24.04-arm64-server-*" +} + +variable "profile" { + type = string + default = "${env("AWS_PROFILE")}" +} + +variable "ami_name" { + type = string + default = "tealbase-postgres" +} + +variable "ami_regions" { + type = list(string) + default = ["ap-southeast-2"] +} + +variable "ansible_arguments" { + type = string + default = "--skip-tags install-postgrest,install-pgbouncer,install-tealbase-internal" +} + +variable "aws_access_key" { + type = string + default = "" +} + +variable "aws_secret_key" { + type = string + default = "" +} + +variable "environment" { + type = string + default = "prod" +} + +variable "region" { + type = string +} + +variable "build-vol" { + type = string + default = "xvdc" +} + +# ccache docker image details +variable "docker_user" { + type = string + default = "" +} + +variable "docker_passwd" { + type = string + default = "" +} + +variable "docker_image" { + type = string + default = "" +} + +variable "docker_image_tag" { + type = string + default = "latest" +} + +locals { + creator = "packer" +} + +variable "postgres-version" { + type = string + default = "" +} + +variable "git-head-version" { + type = string + default = "unknown" +} + +variable "packer-execution-id" { + type = string + default = "unknown" +} + +variable "force-deregister" { + type = bool + default = false +} + +packer { + required_plugins { + amazon = { + source = "github.com/hashicorp/amazon" + version = "~> 1" + } + } +} + +# source block +source "amazon-ebssurrogate" "source" { + profile = "${var.profile}" + #access_key = "${var.aws_access_key}" + #ami_name = "${var.ami_name}-arm64-${formatdate("YYYY-MM-DD-hhmm", timestamp())}" + ami_name = "${var.ami_name}-${var.postgres-version}-stage-1" + ami_virtualization_type = "hvm" + ami_architecture = "arm64" + ami_regions = "${var.ami_regions}" + instance_type = "c6g.4xlarge" + region = "${var.region}" + #secret_key = "${var.aws_secret_key}" + force_deregister = var.force-deregister + + # Use latest official ubuntu noble ami owned by Canonical. 
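+  # (owner 099720109477 is Canonical's AWS account ID)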
+ source_ami_filter { + filters = { + virtualization-type = "hvm" + name = "${var.ami}" + root-device-type = "ebs" + } + owners = [ "099720109477" ] + most_recent = true + } + ena_support = true + launch_block_device_mappings { + device_name = "/dev/xvdf" + delete_on_termination = true + volume_size = 10 + volume_type = "gp3" + } + + launch_block_device_mappings { + device_name = "/dev/xvdh" + delete_on_termination = true + volume_size = 8 + volume_type = "gp3" + } + + launch_block_device_mappings { + device_name = "/dev/${var.build-vol}" + delete_on_termination = true + volume_size = 16 + volume_type = "gp2" + omit_from_artifact = true + } + + run_tags = { + creator = "packer" + appType = "postgres" + packerExecutionId = "${var.packer-execution-id}" + } + run_volume_tags = { + creator = "packer" + appType = "postgres" + } + snapshot_tags = { + creator = "packer" + appType = "postgres" + } + tags = { + creator = "packer" + appType = "postgres" + postgresVersion = "${var.postgres-version}-stage1" + sourceSha = "${var.git-head-version}" + } + + communicator = "ssh" + ssh_pty = true + ssh_username = "ubuntu" + ssh_timeout = "5m" + + ami_root_device { + source_device_name = "/dev/xvdf" + device_name = "/dev/xvda" + delete_on_termination = true + volume_size = 10 + volume_type = "gp2" + } + + associate_public_ip_address = true +} + +# a build block invokes sources and runs provisioning steps on them. +build { + sources = ["source.amazon-ebssurrogate.source"] + + provisioner "file" { + source = "ebssurrogate/files/sources-arm64.cfg" + destination = "/tmp/sources.list" + } + + provisioner "file" { + source = "ebssurrogate/files/ebsnvme-id" + destination = "/tmp/ebsnvme-id" + } + + provisioner "file" { + source = "ebssurrogate/files/70-ec2-nvme-devices.rules" + destination = "/tmp/70-ec2-nvme-devices.rules" + } + + provisioner "file" { + source = "ebssurrogate/scripts/chroot-bootstrap-nix.sh" + destination = "/tmp/chroot-bootstrap-nix.sh" + } + + provisioner "file" { + source = "ebssurrogate/files/cloud.cfg" + destination = "/tmp/cloud.cfg" + } + + provisioner "file" { + source = "ebssurrogate/files/vector.timer" + destination = "/tmp/vector.timer" + } + + provisioner "file" { + source = "ebssurrogate/files/apparmor_profiles" + destination = "/tmp" + } + + provisioner "file" { + source = "migrations" + destination = "/tmp" + } + + # Copy ansible playbook + provisioner "shell" { + inline = ["mkdir /tmp/ansible-playbook"] + } + + provisioner "file" { + source = "ansible" + destination = "/tmp/ansible-playbook" + } + + provisioner "file" { + source = "scripts" + destination = "/tmp/ansible-playbook" + } + + provisioner "file" { + source = "ansible/vars.yml" + destination = "/tmp/ansible-playbook/vars.yml" + } + + provisioner "shell" { + environment_vars = [ + "ARGS=${var.ansible_arguments}", + "DOCKER_USER=${var.docker_user}", + "DOCKER_PASSWD=${var.docker_passwd}", + "DOCKER_IMAGE=${var.docker_image}", + "DOCKER_IMAGE_TAG=${var.docker_image_tag}", + "POSTGRES_tealbase_VERSION=${var.postgres-version}" + ] + use_env_var_file = true + script = "ebssurrogate/scripts/surrogate-bootstrap-nix.sh" + execute_command = "sudo -S sh -c '. 
{{.EnvVarFile}} && cd /tmp/ansible-playbook && {{.Path}}'" + start_retry_timeout = "5m" + skip_clean = true + } + + provisioner "file" { + source = "/tmp/ansible.log" + destination = "/tmp/ansible.log" + direction = "download" + } +} diff --git a/amazon.json deleted file mode 100644 index 17d9d43..0000000 --- a/amazon.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "variables": { - "aws_access_key": "", - "aws_secret_key": "", - "region": "ap-southeast-1", - "ami_regions": "ap-southeast-1", - "ami": "ami-0f7719e8b7ba25c61", - "ansible_arguments": "--skip-tags,update-only" - }, - "builders": [{ - "type": "amazon-ebs", - "access_key": "{{user `aws_access_key`}}", - "secret_key": "{{user `aws_secret_key`}}", - "region": "{{user `region`}}", - "ami_regions": "{{user `ami_regions`}}", - "source_ami": "{{user `ami`}}", - "instance_type": "m5.2xlarge", - "ssh_username": "ubuntu", - "ami_name": "tealbase-postgres-0.13.0", - "launch_block_device_mappings": [ - { - "device_name": "/dev/sda1", - "volume_size": 16, - "volume_type": "gp2", - "delete_on_termination": true - } - ] - }], - "provisioners": [ - { - "type": "ansible", - "user": "ubuntu", - "playbook_file": "ansible/playbook.yml", - "extra_arguments": "{{user `ansible_arguments`}}" - }, - { - "execute_command": "echo 'packer' | sudo -S sh -c '{{ .Vars }} {{ .Path }}'", - "type": "shell", - "scripts": [ - "scripts/01-test", - "scripts/02-credentials_cleanup.sh", - "scripts/90-cleanup.sh", - "scripts/91-log_cleanup.sh", - "scripts/99-img_check.sh" - ] - } - ] -} diff --git a/ansible.cfg new file mode 100644 index 0000000..5410ed8 --- /dev/null +++ b/ansible.cfg @@ -0,0 +1,3 @@ +[defaults] + +callback_whitelist = profile_tasks diff --git a/ansible/files/admin_api_scripts/grow_fs.sh new file mode 100644 index 0000000..c8c1489 --- /dev/null +++ b/ansible/files/admin_api_scripts/grow_fs.sh @@ -0,0 +1,49 @@ +#! /usr/bin/env bash + +set -euo pipefail + +VOLUME_TYPE=${1:-data} + +if pgrep resize2fs; then + echo "resize2fs is already running" + exit 1 +fi + +# Parses the output of lsblk to get the root partition number +# Example output: +# NAME MOUNTPOINT +# nvme0n1 +# ├─nvme0n1p1 /boot +# └─nvme0n1p3 / +# nvme1n1 /data +# +# Resulting in: +# └─nvme0n1p3 / -> nvme0n1p3 -> 3 +ROOT_PARTITION_NUMBER=$(lsblk -no NAME,MOUNTPOINT | grep ' /$' | awk '{print $1;}' | sed 's/.*nvme[0-9]n[0-9]p//g') + +if ! [[ "$ROOT_PARTITION_NUMBER" =~ ^[0-9]+$ ]]; then + echo "Error: ROOT_PARTITION_NUMBER is not a valid number: $ROOT_PARTITION_NUMBER" + exit 1 +fi + +if [ -b /dev/nvme1n1 ] ; then + if [[ "${VOLUME_TYPE}" == "data" ]]; then + resize2fs /dev/nvme1n1 + + elif [[ "${VOLUME_TYPE}" == "root" ]] ; then + PLACEHOLDER_FL=/home/ubuntu/50M_PLACEHOLDER + rm -f "${PLACEHOLDER_FL}" || true + growpart /dev/nvme0n1 "${ROOT_PARTITION_NUMBER}" + resize2fs "/dev/nvme0n1p${ROOT_PARTITION_NUMBER}" + if [[ ! -f "${PLACEHOLDER_FL}" ]] ; then + fallocate -l50M "${PLACEHOLDER_FL}" + fi + else + echo "Invalid disk specified: ${VOLUME_TYPE}" + exit 1 + fi +else + growpart /dev/nvme0n1 "${ROOT_PARTITION_NUMBER}" + resize2fs "/dev/nvme0n1p${ROOT_PARTITION_NUMBER}" +fi +echo "Done resizing disk" diff --git a/ansible/files/admin_api_scripts/manage_readonly_mode.sh new file mode 100644 index 0000000..aba5456 --- /dev/null +++ b/ansible/files/admin_api_scripts/manage_readonly_mode.sh @@ -0,0 +1,45 @@ +#! 
/usr/bin/env bash + +set -euo pipefail + +SUBCOMMAND=$1 + +function set_mode { + MODE=$1 + psql -h localhost -U tealbase_admin -d postgres -c "ALTER SYSTEM SET default_transaction_read_only to ${MODE};" + psql -h localhost -U tealbase_admin -d postgres -c "SELECT pg_reload_conf();" +} + +function check_override { + COMMAND=$(cat < 220.235.16.223.62599: Flags [S.], cksum 0x5de3 (incorrect -> 0x63da), seq 2314200657, ack 2071735457, win 62643, options [mss 8961,sackOK,TS val 3358598837 ecr 1277499190,nop,wscale 7], length 0 +# 1674013833.989257 IP (tos 0x0, ttl 64, id 24975, offset 0, flags [DF], proto TCP (6), length 52) +# 10.112.101.122.5432 > 220.235.16.223.62599: Flags [.], cksum 0x5ddb (incorrect -> 0xa25b), seq 1, ack 9, win 490, options [nop,nop,TS val 3358598885 ecr 1277499232], length 0 +# +# Sample IPv6 input lines: +# +# 1706483718.836526 IP6 (flowlabel 0x0bf27, hlim 64, next-header TCP (6) payload length: 125) 2406:da18:4fd:9b00:959:c52:ce68:10c8.5432 > 2406:da12:d78:f501:1273:296c:2482:c7a7.50530: Flags [P.], seq 25:118, ack 125, win 488, options [nop,nop,TS val 1026340732 ecr 1935666426], length 93 +# 1706483718.912083 IP6 (flowlabel 0x0bf27, hlim 64, next-header TCP (6) payload length: 501) 2406:da18:4fd:9b00:959:c52:ce68:10c8.5432 > 2406:da12:d78:f501:1273:296c:2482:c7a7.50530: Flags [P.], seq 118:587, ack 234, win 488, options [nop,nop,TS val 1026340807 ecr 1935666497], length 469 +# 1706483718.984001 IP6 (flowlabel 0x0bf27, hlim 64, next-header TCP (6) payload length: 151) 2406:da18:4fd:9b00:959:c52:ce68:10c8.5432 > 2406:da12:d78:f501:1273:296c:2482:c7a7.50530: Flags [P.], seq 587:706, ack 448, win 487, options [nop,nop,TS val 1026340879 ecr 1935666569], length 119 +sub extract_packet_length { + my ($line) = @_; + + #print("debug: >> " . $line); + + if ($line =~ /^.*, length (\d+)$/) { + # extract tcp packet length and add it up + my $len = $1; + $captured_len += $len; + } +} + +# write total length to file +sub write_file { + my ($output) = @_; + + my $now = strftime "%F %T", localtime time; + print "[$now] write captured len $captured_len to $output\n"; + + open(my $fh, "+>", $output) or die "Could not open file '$output' $!"; + print $fh "$captured_len"; + close($fh) or die "Could not write file '$output' $!"; +} + +# main +sub main { + # get arguments + GetOptions( + "interval:i" => \(my $interval = 60), + "output:s" => \(my $output = "/tmp/pg_egress_collect.txt"), + "help" => sub { HelpMessage(0) }, + ) or HelpMessage(1); + + my $loop = IO::Async::Loop->new; + + # tcpdump extractor + my $extractor = IO::Async::Stream->new_for_stdin( + on_read => sub { + my ($self, $buffref, $eof) = @_; + + while($$buffref =~ s/^(.*\n)//) { + my $line = $1; + extract_packet_length($line); + } + + return 0; + }, + ); + + # schedule file writer per minute + my $writer = IO::Async::Timer::Periodic->new( + interval => $interval, + on_tick => sub { + write_file($output); + + # reset total captured length + $captured_len = 0; + }, + ); + $writer->start; + + print "pg_egress_collect started, egress data will be saved to $output at interval $interval seconds.\n"; + + $loop->add($extractor); + $loop->add($writer); + $loop->run; +} + +main(); + +__END__ + +=head1 NAME + +pg_egress_collect.pl - collect egress from tcpdump output, extract TCP packet length, aggregate in specified interval and write to output file. 
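+
+For example, the collector is typically fed from a tcpdump pipe (a sketch, not
+a supported interface: the NIC name and port filter are assumptions for a
+typical primary):
+
+    # -tt epoch timestamps, -n no name resolution, -v verbose headers, -l line-buffered
+    tcpdump -i eth0 -tt -n -v -l 'tcp and src port 5432' \
+        | perl pg_egress_collect.pl --interval 60 --output /tmp/pg_egress_collect.txt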
+ +=head1 SYNOPSIS + +pg_egress_collect.pl [-i interval] [-o output] + +Options: + + -i, --interval interval + output file write interval, in seconds, default is 60 seconds + + -o, --output output + output file path, default is /tmp/pg_egress_collect.txt + + -h, --help + print this help message + +=cut diff --git a/ansible/files/admin_api_scripts/pg_upgrade_scripts/check.sh b/ansible/files/admin_api_scripts/pg_upgrade_scripts/check.sh new file mode 100755 index 0000000..f85e957 --- /dev/null +++ b/ansible/files/admin_api_scripts/pg_upgrade_scripts/check.sh @@ -0,0 +1,16 @@ +#! /usr/bin/env bash +## This script provides a method to check the status of the database upgrade +## process, which is updated in /tmp/pg-upgrade-status by initiate.sh +## This runs on the old (source) instance. + +set -euo pipefail + +STATUS_FILE="/tmp/pg-upgrade-status" + +if [ -f "${STATUS_FILE}" ]; then + STATUS=$(cat "${STATUS_FILE}") + echo -n "${STATUS}" +else + echo -n "unknown" +fi + diff --git a/ansible/files/admin_api_scripts/pg_upgrade_scripts/common.sh b/ansible/files/admin_api_scripts/pg_upgrade_scripts/common.sh new file mode 100755 index 0000000..db4e371 --- /dev/null +++ b/ansible/files/admin_api_scripts/pg_upgrade_scripts/common.sh @@ -0,0 +1,561 @@ +#! /usr/bin/env bash + +# Common functions and variables used by initiate.sh and complete.sh + +REPORTING_PROJECT_REF="ihmaxnjpcccasmrbkpvo" +REPORTING_CREDENTIALS_FILE="/root/upgrade-reporting-credentials" + +REPORTING_ANON_KEY="" +if [ -f "$REPORTING_CREDENTIALS_FILE" ]; then + REPORTING_ANON_KEY=$(cat "$REPORTING_CREDENTIALS_FILE") +fi + +# shellcheck disable=SC2120 +# Arguments are passed in other files +function run_sql { + psql -h localhost -U tealbase_admin -d postgres "$@" +} + +function ship_logs { + LOG_FILE=$1 + + if [ -z "$REPORTING_ANON_KEY" ]; then + echo "No reporting key found. Skipping log upload." + return 0 + fi + + if [ ! -f "$LOG_FILE" ]; then + echo "No log file found. Skipping log upload." + return 0 + fi + + if [ ! -s "$LOG_FILE" ]; then + echo "Log file is empty. Skipping log upload." + return 0 + fi + + HOSTNAME=$(hostname) + DERIVED_REF="${HOSTNAME##*-}" + + printf -v BODY '{ "ref": "%s", "step": "%s", "content": %s }' "$DERIVED_REF" "completion" "$(cat "$LOG_FILE" | jq -Rs '.')" + curl -sf -X POST "https://$REPORTING_PROJECT_REF.tealbase.co/rest/v1/error_logs" \ + -H "apikey: ${REPORTING_ANON_KEY}" \ + -H 'Content-type: application/json' \ + -d "$BODY" +} + +function retry { + local retries=$1 + shift + + local count=0 + until "$@"; do + exit=$? + wait=$((2 ** (count + 1))) + count=$((count + 1)) + if [ $count -lt "$retries" ]; then + echo "Command $* exited with code $exit, retrying..." + sleep $wait + else + echo "Command $* exited with code $exit, no more retries left." 
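+      # out of retries: propagate the last failing exit code to the caller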
+ return $exit + fi + done + return 0 +} + +CI_stop_postgres() { + BINDIR=$(pg_config --bindir) + ARG=${1:-""} + + if [ "$ARG" = "--new-bin" ]; then + BINDIR="/tmp/pg_upgrade_bin/$PG_MAJOR_VERSION/bin" + fi + + su postgres -c "$BINDIR/pg_ctl stop -o '-c config_file=/etc/postgresql/postgresql.conf' -l /tmp/postgres.log" +} + +CI_start_postgres() { + BINDIR=$(pg_config --bindir) + ARG=${1:-""} + + if [ "$ARG" = "--new-bin" ]; then + BINDIR="/tmp/pg_upgrade_bin/$PG_MAJOR_VERSION/bin" + fi + + su postgres -c "$BINDIR/pg_ctl start -o '-c config_file=/etc/postgresql/postgresql.conf' -l /tmp/postgres.log" +} + +swap_postgres_and_tealbase_admin() { + run_sql <<'EOSQL' +alter database postgres connection limit 0; +select pg_terminate_backend(pid) from pg_stat_activity where backend_type = 'client backend' and pid != pg_backend_pid(); +EOSQL + + if [ -z "$IS_CI" ]; then + retry 5 systemctl restart postgresql + else + CI_start_postgres "" + fi + + retry 8 pg_isready -h localhost -U tealbase_admin + + run_sql <<'EOSQL' +set statement_timeout = '600s'; +begin; +create role tealbase_tmp superuser; +set session authorization tealbase_tmp; + +-- to handle snowflakes that happened in the past +revoke tealbase_admin from authenticator; + +do $$ +begin + if exists (select from pg_extension where extname = 'timescaledb') then + execute(format('select %s.timescaledb_pre_restore()', (select pronamespace::regnamespace from pg_proc where proname = 'timescaledb_pre_restore'))); + end if; +end +$$; + +do $$ +declare + postgres_rolpassword text := (select rolpassword from pg_authid where rolname = 'postgres'); + tealbase_admin_rolpassword text := (select rolpassword from pg_authid where rolname = 'tealbase_admin'); + role_settings jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('database', d.datname, 'role', a.rolname, 'configs', s.setconfig)), '{}') + from pg_db_role_setting s + left join pg_database d on d.oid = s.setdatabase + join pg_authid a on a.oid = s.setrole + where a.rolname in ('postgres', 'tealbase_admin') + ); + event_triggers jsonb[] := (select coalesce(array_agg(jsonb_build_object('name', evtname)), '{}') from pg_event_trigger where evtowner = 'postgres'::regrole); + user_mappings jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('oid', um.oid, 'role', a.rolname, 'server', s.srvname, 'options', um.umoptions)), '{}') + from pg_user_mapping um + join pg_authid a on a.oid = um.umuser + join pg_foreign_server s on s.oid = um.umserver + where a.rolname in ('postgres', 'tealbase_admin') + ); + -- Objects can have initial privileges either by having those privileges set + -- when the system is initialized (by initdb) or when the object is created + -- during a CREATE EXTENSION and the extension script sets initial + -- privileges using the GRANT system. (https://www.postgresql.org/docs/current/catalog-pg-init-privs.html) + -- We only care about swapping init_privs for extensions. 
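+  -- (privtype = 'e' below selects privileges granted by extension scripts;
+  -- 'i' would be initdb-time defaults)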
+ init_privs jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('objoid', objoid, 'classoid', classoid, 'initprivs', initprivs::text)), '{}') + from pg_init_privs + where privtype = 'e' + ); + default_acls jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('oid', d.oid, 'role', a.rolname, 'schema', n.nspname, 'objtype', d.defaclobjtype, 'acl', defaclacl::text)), '{}') + from pg_default_acl d + join pg_authid a on a.oid = d.defaclrole + left join pg_namespace n on n.oid = d.defaclnamespace + ); + schemas jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('oid', n.oid, 'owner', a.rolname, 'acl', nspacl::text)), '{}') + from pg_namespace n + join pg_authid a on a.oid = n.nspowner + where true + and n.nspname != 'information_schema' + and not starts_with(n.nspname, 'pg_') + ); + types jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('oid', t.oid, 'owner', a.rolname, 'acl', t.typacl::text)), '{}') + from pg_type t + join pg_namespace n on n.oid = t.typnamespace + join pg_authid a on a.oid = t.typowner + where true + and n.nspname != 'information_schema' + and not starts_with(n.nspname, 'pg_') + and ( + t.typrelid = 0 + or ( + select + c.relkind = 'c' + from + pg_class c + where + c.oid = t.typrelid + ) + ) + and not exists ( + select + from + pg_type el + where + el.oid = t.typelem + and el.typarray = t.oid + ) + ); + functions jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('oid', p.oid, 'owner', a.rolname, 'kind', p.prokind, 'acl', p.proacl::text)), '{}') + from pg_proc p + join pg_namespace n on n.oid = p.pronamespace + join pg_authid a on a.oid = p.proowner + where true + and n.nspname != 'information_schema' + and not starts_with(n.nspname, 'pg_') + ); + relations jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('oid', c.oid, 'owner', a.rolname, 'acl', c.relacl::text)), '{}') + from ( + -- Sequences must appear after tables, so we order by relkind + select * from pg_class order by relkind desc + ) c + join pg_namespace n on n.oid = c.relnamespace + join pg_authid a on a.oid = c.relowner + where true + and n.nspname != 'information_schema' + and not starts_with(n.nspname, 'pg_') + and c.relkind not in ('c', 'i', 'I') + ); + rec record; + obj jsonb; +begin + set local search_path = ''; + + if exists (select from pg_event_trigger where evtname = 'pgsodium_trg_mask_update') then + alter event trigger pgsodium_trg_mask_update disable; + end if; + + alter role postgres rename to tealbase_admin_; + alter role tealbase_admin rename to postgres; + alter role tealbase_admin_ rename to tealbase_admin; + + -- role grants + for rec in + select * from pg_auth_members + loop + execute(format('revoke %s from %s;', rec.roleid::regrole, rec.member::regrole)); + execute(format( + 'grant %s to %s %s granted by %s;', + case + when rec.roleid = 'postgres'::regrole then 'tealbase_admin' + when rec.roleid = 'tealbase_admin'::regrole then 'postgres' + else rec.roleid::regrole + end, + case + when rec.member = 'postgres'::regrole then 'tealbase_admin' + when rec.member = 'tealbase_admin'::regrole then 'postgres' + else rec.member::regrole + end, + case + when rec.admin_option then 'with admin option' + else '' + end, + case + when rec.grantor = 'postgres'::regrole then 'tealbase_admin' + when rec.grantor = 'tealbase_admin'::regrole then 'postgres' + else rec.grantor::regrole + end + )); + end loop; + + -- role passwords + execute(format('alter role postgres password %L;', postgres_rolpassword)); + execute(format('alter role tealbase_admin password %L;', 
tealbase_admin_rolpassword)); + + -- role settings + foreach obj in array role_settings + loop + execute(format('alter role %I %s reset all', + case when obj->>'role' = 'postgres' then 'tealbase_admin' else 'postgres' end, + case when obj->>'database' is null then '' else format('in database %I', obj->>'database') end + )); + end loop; + foreach obj in array role_settings + loop + for rec in + select split_part(value, '=', 1) as key, substr(value, strpos(value, '=') + 1) as value + from jsonb_array_elements_text(obj->'configs') + loop + execute(format('alter role %I %s set %I to %s', + obj->>'role', + case when obj->>'database' is null then '' else format('in database %I', obj->>'database') end, + rec.key, + -- https://github.com/postgres/postgres/blob/70d1c664f4376fd3499e3b0c6888cf39b65d722b/src/bin/pg_dump/dumputils.c#L861 + case + when rec.key in ('local_preload_libraries', 'search_path', 'session_preload_libraries', 'shared_preload_libraries', 'temp_tablespaces', 'unix_socket_directories') + then rec.value + else quote_literal(rec.value) + end + )); + end loop; + end loop; + + reassign owned by postgres to tealbase_admin; + + -- databases + for rec in + select * from pg_database where datname not in ('template0') + loop + execute(format('alter database %I owner to postgres;', rec.datname)); + end loop; + + -- event triggers + foreach obj in array event_triggers + loop + execute(format('alter event trigger %I owner to postgres;', obj->>'name')); + end loop; + + -- publications + for rec in + select * from pg_publication + loop + execute(format('alter publication %I owner to postgres;', rec.pubname)); + end loop; + + -- FDWs + for rec in + select * from pg_foreign_data_wrapper + loop + execute(format('alter foreign data wrapper %I owner to postgres;', rec.fdwname)); + end loop; + + -- foreign servers + for rec in + select * from pg_foreign_server + loop + execute(format('alter server %I owner to postgres;', rec.srvname)); + end loop; + + -- user mappings + foreach obj in array user_mappings + loop + execute(format('drop user mapping for %I server %I', case when obj->>'role' = 'postgres' then 'tealbase_admin' else 'postgres' end, obj->>'server')); + end loop; + foreach obj in array user_mappings + loop + execute(format('create user mapping for %I server %I', obj->>'role', obj->>'server')); + for rec in + select split_part(value, '=', 1) as key, substr(value, strpos(value, '=') + 1) as value + from jsonb_array_elements_text(obj->'options') + loop + execute(format('alter user mapping for %I server %I options (%I %L)', obj->>'role', obj->>'server', rec.key, rec.value)); + end loop; + end loop; + + -- init privs + foreach obj in array init_privs + loop + -- We need to modify system catalog directly here because there's no ALTER INIT PRIVILEGES. 
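+    -- (a direct catalog update like this requires superuser; we run as the
+    -- temporary tealbase_tmp superuser created above)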
+ update pg_init_privs set initprivs = (obj->>'initprivs')::aclitem[] where objoid = (obj->>'objoid')::oid and classoid = (obj->>'classoid')::oid; + end loop; + + -- default acls + foreach obj in array default_acls + loop + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + loop + if obj->>'role' in ('postgres', 'tealbase_admin') or rec.grantee::regrole in ('postgres', 'tealbase_admin') then + execute(format('alter default privileges for role %I %s revoke %s on %s from %s' + , case when obj->>'role' = 'postgres' then 'tealbase_admin' + when obj->>'role' = 'tealbase_admin' then 'postgres' + else obj->>'role' + end + , case when obj->>'schema' is null then '' + else format('in schema %I', obj->>'schema') + end + , rec.privilege_type + , case when obj->>'objtype' = 'r' then 'tables' + when obj->>'objtype' = 'S' then 'sequences' + when obj->>'objtype' = 'f' then 'functions' + when obj->>'objtype' = 'T' then 'types' + when obj->>'objtype' = 'n' then 'schemas' + end + , case when rec.grantee = 'postgres'::regrole then 'tealbase_admin' + when rec.grantee = 'tealbase_admin'::regrole then 'postgres' + when rec.grantee = 0 then 'public' + else rec.grantee::regrole::text + end + )); + end if; + end loop; + end loop; + + foreach obj in array default_acls + loop + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + loop + if obj->>'role' in ('postgres', 'tealbase_admin') or rec.grantee::regrole in ('postgres', 'tealbase_admin') then + execute(format('alter default privileges for role %I %s grant %s on %s to %s %s' + , obj->>'role' + , case when obj->>'schema' is null then '' + else format('in schema %I', obj->>'schema') + end + , rec.privilege_type + , case when obj->>'objtype' = 'r' then 'tables' + when obj->>'objtype' = 'S' then 'sequences' + when obj->>'objtype' = 'f' then 'functions' + when obj->>'objtype' = 'T' then 'types' + when obj->>'objtype' = 'n' then 'schemas' + end + , case when rec.grantee = 0 then 'public' else rec.grantee::regrole::text end + , case when rec.is_grantable then 'with grant option' else '' end + )); + end if; + end loop; + end loop; + + -- schemas + foreach obj in array schemas + loop + if obj->>'owner' = 'postgres' then + execute(format('alter schema %s owner to postgres;', (obj->>'oid')::regnamespace)); + end if; + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'tealbase_admin') + loop + execute(format('revoke %s on schema %s from %I', rec.privilege_type, (obj->>'oid')::regnamespace, case when rec.grantee = 'postgres'::regrole then 'tealbase_admin' else 'postgres' end)); + end loop; + end loop; + foreach obj in array schemas + loop + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'tealbase_admin') + loop + execute(format('grant %s on schema %s to %s %s', rec.privilege_type, (obj->>'oid')::regnamespace, rec.grantee::regrole, case when rec.is_grantable then 'with grant option' else '' end)); + end loop; + end loop; + + -- types + foreach obj in array types + loop + if obj->>'owner' = 'postgres' then + execute(format('alter type %s owner to postgres;', (obj->>'oid')::regtype)); + end if; + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 
'tealbase_admin') + loop + execute(format('revoke %s on type %s from %I', rec.privilege_type, (obj->>'oid')::regtype, case when rec.grantee = 'postgres'::regrole then 'tealbase_admin' else 'postgres' end)); + end loop; + end loop; + foreach obj in array types + loop + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'tealbase_admin') + loop + execute(format('grant %s on type %s to %s %s', rec.privilege_type, (obj->>'oid')::regtype, rec.grantee::regrole, case when rec.is_grantable then 'with grant option' else '' end)); + end loop; + end loop; + + -- functions + foreach obj in array functions + loop + if obj->>'owner' = 'postgres' then + execute(format('alter %s %s(%s) owner to postgres;' + , case when obj->>'kind' = 'p' then 'procedure' else 'function' end + , (obj->>'oid')::regproc + , pg_get_function_identity_arguments((obj->>'oid')::regproc))); + end if; + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'tealbase_admin') + loop + execute(format('revoke %s on %s %s(%s) from %I' + , rec.privilege_type + , case + when obj->>'kind' = 'p' then 'procedure' + else 'function' + end + , (obj->>'oid')::regproc + , pg_get_function_identity_arguments((obj->>'oid')::regproc) + , case when rec.grantee = 'postgres'::regrole then 'tealbase_admin' else 'postgres' end + )); + end loop; + end loop; + foreach obj in array functions + loop + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'tealbase_admin') + loop + execute(format('grant %s on %s %s(%s) to %s %s' + , rec.privilege_type + , case + when obj->>'kind' = 'p' then 'procedure' + else 'function' + end + , (obj->>'oid')::regproc + , pg_get_function_identity_arguments((obj->>'oid')::regproc) + , rec.grantee::regrole + , case when rec.is_grantable then 'with grant option' else '' end + )); + end loop; + end loop; + + -- relations + foreach obj in array relations + loop + -- obj->>'oid' (text) needs to be casted to oid first for some reason + + if obj->>'owner' = 'postgres' then + execute(format('alter table %s owner to postgres;', (obj->>'oid')::oid::regclass)); + end if; + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'tealbase_admin') + loop + execute(format('revoke %s on table %s from %I', rec.privilege_type, (obj->>'oid')::oid::regclass, case when rec.grantee = 'postgres'::regrole then 'tealbase_admin' else 'postgres' end)); + end loop; + end loop; + foreach obj in array relations + loop + -- obj->>'oid' (text) needs to be casted to oid first for some reason + + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'tealbase_admin') + loop + execute(format('grant %s on table %s to %s %s', rec.privilege_type, (obj->>'oid')::oid::regclass, rec.grantee::regrole, case when rec.is_grantable then 'with grant option' else '' end)); + end loop; + end loop; + + if exists (select from pg_event_trigger where evtname = 'pgsodium_trg_mask_update') then + alter event trigger pgsodium_trg_mask_update enable; + end if; +end +$$; + +do $$ +begin + if exists (select from pg_extension where extname = 'timescaledb') then + execute(format('select 
%s.timescaledb_post_restore()', (select pronamespace::regnamespace from pg_proc where proname = 'timescaledb_post_restore'))); + end if; +end +$$; + +alter database postgres connection limit -1; + +-- #incident-2024-09-12-project-upgrades-are-temporarily-disabled +do $$ +begin + if exists (select from pg_authid where rolname = 'pg_read_all_data') then + execute('grant pg_read_all_data to postgres'); + end if; +end +$$; +grant pg_signal_backend to postgres; + +set session authorization tealbase_admin; +drop role tealbase_tmp; +commit; +EOSQL +} diff --git a/ansible/files/admin_api_scripts/pg_upgrade_scripts/complete.sh b/ansible/files/admin_api_scripts/pg_upgrade_scripts/complete.sh new file mode 100755 index 0000000..160f713 --- /dev/null +++ b/ansible/files/admin_api_scripts/pg_upgrade_scripts/complete.sh @@ -0,0 +1,340 @@ +#! /usr/bin/env bash + +## This script is run on the newly launched instance which is to be promoted to +## become the primary database instance once the upgrade successfully completes. +## The following commands copy custom PG configs and enable previously disabled +## extensions, containing regtypes referencing system OIDs. + +set -eEuo pipefail + +SCRIPT_DIR=$(dirname -- "$0";) +# shellcheck disable=SC1091 +source "$SCRIPT_DIR/common.sh" + +IS_CI=${IS_CI:-} +LOG_FILE="/var/log/pg-upgrade-complete.log" + +function cleanup { + UPGRADE_STATUS=${1:-"failed"} + EXIT_CODE=${?:-0} + + echo "$UPGRADE_STATUS" > /tmp/pg-upgrade-status + + ship_logs "$LOG_FILE" || true + + exit "$EXIT_CODE" +} + +function execute_extension_upgrade_patches { + if [ -f "/var/lib/postgresql/extension/wrappers--0.3.1--0.4.1.sql" ] && [ ! -f "/usr/share/postgresql/15/extension/wrappers--0.3.0--0.4.1.sql" ]; then + cp /var/lib/postgresql/extension/wrappers--0.3.1--0.4.1.sql /var/lib/postgresql/extension/wrappers--0.3.0--0.4.1.sql + ln -s /var/lib/postgresql/extension/wrappers--0.3.0--0.4.1.sql /usr/share/postgresql/15/extension/wrappers--0.3.0--0.4.1.sql + fi +} + +function execute_wrappers_patch { + # If upgrading to pgsodium-less Vault, Wrappers need to be updated so that + # foreign servers use `vault.secrets.id` instead of `vault.secrets.key_id` + UPDATE_WRAPPERS_SERVER_OPTIONS_QUERY=$(cat < 0 from pg_extension where extname = 'pg_net';") + + if [ "$PG_NET_ENABLED" = "t" ]; then + PG_NET_GRANT_QUERY=$(cat < 0 from pg_extension where extname = 'pg_cron' and extowner::regrole::text = 'postgres';") + + if [ "$HAS_PG_CRON_OWNED_BY_POSTGRES" = "t" ]; then + RECREATE_PG_CRON_QUERY=$(cat < 0 from pg_extension where extname = 'pgmq';") + if [ "$HAS_PGMQ" = "t" ]; then + run_sql -c "update pg_extension set extowner = 'postgres'::regrole where extname = 'pgmq';" + fi + + # Patch to handle upgrading to pgsodium-less Vault + REENCRYPT_VAULT_SECRETS_QUERY=$(cat <= 16 THEN + GRANT pg_create_subscription TO postgres; + GRANT anon, authenticated, service_role, authenticator, pg_monitor, pg_read_all_data, pg_signal_backend TO postgres WITH ADMIN OPTION; + END IF; + GRANT pg_monitor, pg_read_all_data, pg_signal_backend TO postgres; + END + \$\$; +EOF + ) + run_sql -c "$GRANT_PREDEFINED_ROLES_TO_POSTGRES_QUERY" +} + +function complete_pg_upgrade { + if [ -f /tmp/pg-upgrade-status ]; then + echo "Upgrade job already started. Bailing." + exit 0 + fi + + echo "running" > /tmp/pg-upgrade-status + + echo "1. Mounting data disk" + if [ -z "$IS_CI" ]; then + retry 8 mount -a -v + else + echo "Skipping mount -a -v" + fi + + # copying custom configurations + echo "2. 
Copying custom configurations" + retry 3 copy_configs + + echo "3. Starting postgresql" + if [ -z "$IS_CI" ]; then + retry 3 service postgresql start + else + CI_start_postgres --new-bin + fi + + execute_extension_upgrade_patches || true + + # For this to work we need `vault.secrets` from the old project to be + # preserved, but `run_generated_sql` includes `ALTER EXTENSION + # tealbase_vault UPDATE` which modifies that. So we need to run it + # beforehand. + echo "3.1. Patch Wrappers server options" + execute_wrappers_patch + + echo "4. Running generated SQL files" + retry 3 run_generated_sql + + echo "4.1. Applying patches" + execute_patches || true + + run_sql -c "ALTER USER postgres WITH NOSUPERUSER;" + + echo "4.2. Applying authentication scheme updates" + retry 3 apply_auth_scheme_updates + + sleep 5 + + echo "5. Restarting postgresql" + if [ -z "$IS_CI" ]; then + retry 3 service postgresql restart + + echo "5.1. Restarting gotrue and postgrest" + retry 3 service gotrue restart + retry 3 service postgrest restart + + else + retry 3 CI_stop_postgres || true + retry 3 CI_start_postgres + fi + + echo "6. Starting vacuum analyze" + retry 3 start_vacuum_analyze +} + +function copy_configs { + cp -R /data/conf/* /etc/postgresql-custom/ + chown -R postgres:postgres /var/lib/postgresql/data + chown -R postgres:postgres /data/pgdata + chmod -R 0750 /data/pgdata +} + +function run_generated_sql { + if [ -d /data/sql ]; then + for FILE in /data/sql/*.sql; do + if [ -f "$FILE" ]; then + run_sql -f "$FILE" || true + fi + done + fi +} + +# Projects which had their passwords hashed using md5 need to have their passwords reset +# Passwords for managed roles are already present in /etc/postgresql.schema.sql +function apply_auth_scheme_updates { + PASSWORD_ENCRYPTION_SETTING=$(run_sql -A -t -c "SHOW password_encryption;") + if [ "$PASSWORD_ENCRYPTION_SETTING" = "md5" ]; then + run_sql -c "ALTER SYSTEM SET password_encryption TO 'scram-sha-256';" + run_sql -c "SELECT pg_reload_conf();" + + if [ -z "$IS_CI" ]; then + run_sql -f /etc/postgresql.schema.sql + fi + fi +} + +function start_vacuum_analyze { + echo "complete" > /tmp/pg-upgrade-status + + # shellcheck disable=SC1091 + if [ -f "/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" ]; then + # shellcheck disable=SC1091 + source "/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" + fi + vacuumdb --all --analyze-in-stages -U tealbase_admin -h localhost -p 5432 + echo "Upgrade job completed" +} + +trap cleanup ERR + +echo "C.UTF-8 UTF-8" > /etc/locale.gen +echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen +locale-gen + +if [ -z "$IS_CI" ]; then + complete_pg_upgrade >> $LOG_FILE 2>&1 & +else + CI_stop_postgres || true + + rm -f /tmp/pg-upgrade-status + mv /data_migration /data + + rm -rf /var/lib/postgresql/data + ln -s /data/pgdata /var/lib/postgresql/data + + complete_pg_upgrade +fi diff --git a/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh b/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh new file mode 100755 index 0000000..4ae2bbf --- /dev/null +++ b/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh @@ -0,0 +1,518 @@ +#! /usr/bin/env bash + +## This script is run on the old (source) instance, mounting the data disk +## of the newly launched instance, disabling extensions containing regtypes, +## and running pg_upgrade. +## It reports the current status of the upgrade process to /tmp/pg-upgrade-status, +## which can then be subsequently checked through check.sh. 
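+#
+# To poll progress from a shell (a sketch; the script location is an
+# assumption, matching this repo's layout):
+#
+#   watch -n 10 'bash pg_upgrade_scripts/check.sh'
+#
+# check.sh prints one of: running | complete | failed | unknown.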
+ +# Extensions to disable before running pg_upgrade. +# Running an upgrade with these extensions enabled will result in errors due to +# them depending on regtypes referencing system OIDs or outdated library files. +EXTENSIONS_TO_DISABLE=( + "pg_graphql" + "pg_stat_monitor" + "pg_backtrace" +) + +PG14_EXTENSIONS_TO_DISABLE=( + "wrappers" + "pgrouting" +) + +PG13_EXTENSIONS_TO_DISABLE=( + "pgrouting" +) + +set -eEuo pipefail + +SCRIPT_DIR=$(dirname -- "$0";) +# shellcheck disable=SC1091 +source "$SCRIPT_DIR/common.sh" + +IS_CI=${IS_CI:-} +IS_LOCAL_UPGRADE=${IS_LOCAL_UPGRADE:-} +IS_NIX_UPGRADE=${IS_NIX_UPGRADE:-} +IS_NIX_BASED_SYSTEM="false" + +PGVERSION=$1 +MOUNT_POINT="/data_migration" +LOG_FILE="/var/log/pg-upgrade-initiate.log" + +POST_UPGRADE_EXTENSION_SCRIPT="/tmp/pg_upgrade/pg_upgrade_extensions.sql" +POST_UPGRADE_POSTGRES_PERMS_SCRIPT="/tmp/pg_upgrade/pg_upgrade_postgres_perms.sql" +OLD_PGVERSION=$(run_sql -A -t -c "SHOW server_version;") + +# Skip locale settings if both versions are PostgreSQL 16+ +if ! [[ "${OLD_PGVERSION%%.*}" -ge 16 && "${PGVERSION%%.*}" -ge 16 ]]; then + SERVER_LC_COLLATE=$(run_sql -A -t -c "SHOW lc_collate;") + SERVER_LC_CTYPE=$(run_sql -A -t -c "SHOW lc_ctype;") +fi + +SERVER_ENCODING=$(run_sql -A -t -c "SHOW server_encoding;") + +POSTGRES_CONFIG_PATH="/etc/postgresql/postgresql.conf" +PGBINOLD="/usr/lib/postgresql/bin" + +PG_UPGRADE_BIN_DIR="/tmp/pg_upgrade_bin/$PGVERSION" +NIX_INSTALLER_PATH="/tmp/persistent/nix-installer" +NIX_INSTALLER_PACKAGE_PATH="$NIX_INSTALLER_PATH.tar.gz" + +if [ -L "$PGBINOLD/pg_upgrade" ]; then + BINARY_PATH=$(readlink -f "$PGBINOLD/pg_upgrade") + if [[ "$BINARY_PATH" == *"nix"* ]]; then + IS_NIX_BASED_SYSTEM="true" + fi +fi + +# If upgrading from older major PG versions, disable specific extensions +if [[ "$OLD_PGVERSION" =~ ^14.* ]]; then + EXTENSIONS_TO_DISABLE+=("${PG14_EXTENSIONS_TO_DISABLE[@]}") +elif [[ "$OLD_PGVERSION" =~ ^13.* ]]; then + EXTENSIONS_TO_DISABLE+=("${PG13_EXTENSIONS_TO_DISABLE[@]}") +elif [[ "$OLD_PGVERSION" =~ ^12.* ]]; then + POSTGRES_CONFIG_PATH="/etc/postgresql/12/main/postgresql.conf" + PGBINOLD="/usr/lib/postgresql/12/bin" +fi + +if [ -n "$IS_CI" ]; then + PGBINOLD="$(pg_config --bindir)" + echo "Running in CI mode; using pg_config bindir: $PGBINOLD" + echo "PGVERSION: $PGVERSION" +fi + +OLD_BOOTSTRAP_USER=$(run_sql -A -t -c "select rolname from pg_authid where oid = 10;") + +cleanup() { + UPGRADE_STATUS=${1:-"failed"} + EXIT_CODE=${?:-0} + + if [ "$UPGRADE_STATUS" = "failed" ]; then + EXIT_CODE=1 + fi + + if [ "$UPGRADE_STATUS" = "failed" ]; then + echo "Upgrade job failed. Cleaning up and exiting." 
+ fi + + if [ -d "${MOUNT_POINT}/pgdata/pg_upgrade_output.d/" ]; then + echo "Copying pg_upgrade output to /var/log" + cp -R "${MOUNT_POINT}/pgdata/pg_upgrade_output.d/" /var/log/ || true + chown -R postgres:postgres /var/log/pg_upgrade_output.d/ + chmod -R 0750 /var/log/pg_upgrade_output.d/ + ship_logs "$LOG_FILE" || true + tail -n +1 /var/log/pg_upgrade_output.d/*/* > /var/log/pg_upgrade_output.d/pg_upgrade.log || true + ship_logs "/var/log/pg_upgrade_output.d/pg_upgrade.log" || true + fi + + if [ -L "/usr/share/postgresql/${PGVERSION}" ]; then + rm "/usr/share/postgresql/${PGVERSION}" + + if [ -f "/usr/share/postgresql/${PGVERSION}.bak" ]; then + mv "/usr/share/postgresql/${PGVERSION}.bak" "/usr/share/postgresql/${PGVERSION}" + fi + + if [ -d "/usr/share/postgresql/${PGVERSION}.bak" ]; then + mv "/usr/share/postgresql/${PGVERSION}.bak" "/usr/share/postgresql/${PGVERSION}" + fi + fi + + echo "Restarting postgresql" + if [ -z "$IS_CI" ]; then + systemctl enable postgresql + retry 5 systemctl restart postgresql + else + CI_start_postgres + fi + + retry 8 pg_isready -h localhost -U tealbase_admin + + echo "Re-enabling extensions" + if [ -f $POST_UPGRADE_EXTENSION_SCRIPT ]; then + retry 5 run_sql -f $POST_UPGRADE_EXTENSION_SCRIPT + fi + + echo "Removing SUPERUSER grant from postgres" + retry 5 run_sql -c "ALTER USER postgres WITH NOSUPERUSER;" + + echo "Resetting postgres database connection limit" + retry 5 run_sql -c "ALTER DATABASE postgres CONNECTION LIMIT -1;" + + echo "Making sure postgres still has access to pg_shadow" + cat << EOF >> $POST_UPGRADE_POSTGRES_PERMS_SCRIPT +DO \$\$ +begin + if exists (select from pg_authid where rolname = 'pg_read_all_data') then + execute('grant pg_read_all_data to postgres'); + end if; +end +\$\$; +grant pg_signal_backend to postgres; +EOF + + if [ -f $POST_UPGRADE_POSTGRES_PERMS_SCRIPT ]; then + retry 5 run_sql -f $POST_UPGRADE_POSTGRES_PERMS_SCRIPT + fi + + if [ -z "$IS_CI" ] && [ -z "$IS_LOCAL_UPGRADE" ]; then + echo "Unmounting data disk from ${MOUNT_POINT}" + retry 3 umount $MOUNT_POINT + fi + echo "$UPGRADE_STATUS" > /tmp/pg-upgrade-status + + if [ -z "$IS_CI" ]; then + exit "$EXIT_CODE" + else + echo "CI run complete with code ${EXIT_CODE}. Exiting." 
+ exit "$EXIT_CODE" + fi +} + +function handle_extensions { + if [ -z "$IS_CI" ]; then + retry 5 systemctl restart postgresql + else + CI_start_postgres + fi + + retry 8 pg_isready -h localhost -U tealbase_admin + + rm -f $POST_UPGRADE_EXTENSION_SCRIPT + touch $POST_UPGRADE_EXTENSION_SCRIPT + + PASSWORD_ENCRYPTION_SETTING=$(run_sql -A -t -c "SHOW password_encryption;") + if [ "$PASSWORD_ENCRYPTION_SETTING" = "md5" ]; then + echo "ALTER SYSTEM SET password_encryption = 'md5';" >> $POST_UPGRADE_EXTENSION_SCRIPT + fi + + cat << EOF >> $POST_UPGRADE_EXTENSION_SCRIPT +ALTER SYSTEM SET jit = off; +SELECT pg_reload_conf(); +EOF + + # Disable extensions if they're enabled + # Generate SQL script to re-enable them after upgrade + for EXTENSION in "${EXTENSIONS_TO_DISABLE[@]}"; do + EXTENSION_ENABLED=$(run_sql -A -t -c "SELECT EXISTS(SELECT 1 FROM pg_extension WHERE extname = '${EXTENSION}');") + if [ "$EXTENSION_ENABLED" = "t" ]; then + echo "Disabling extension ${EXTENSION}" + run_sql -c "DROP EXTENSION IF EXISTS ${EXTENSION} CASCADE;" + cat << EOF >> $POST_UPGRADE_EXTENSION_SCRIPT +DO \$\$ +BEGIN + IF EXISTS (SELECT 1 FROM pg_available_extensions WHERE name = '${EXTENSION}') THEN + CREATE EXTENSION IF NOT EXISTS ${EXTENSION} CASCADE; + END IF; +END; +\$\$; +EOF + fi + done +} + +function initiate_upgrade { + mkdir -p "$MOUNT_POINT" + SHARED_PRELOAD_LIBRARIES=$(cat "$POSTGRES_CONFIG_PATH" | grep shared_preload_libraries | sed "s/shared_preload_libraries =\s\{0,1\}'\(.*\)'.*/\1/") + + # Wrappers officially launched in PG15; PG14 version is incompatible + if [[ "$OLD_PGVERSION" =~ 14* ]]; then + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/wrappers//" | xargs) + fi + + # Timescale is no longer supported for PG17+ upgrades + if [[ "$PGVERSION" != "15" ]]; then + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/timescaledb//" | xargs) + fi + + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/pg_cron//" | xargs) + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/pg_net//" | xargs) + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/check_role_membership//" | xargs) + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/safeupdate//" | xargs) + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/pg_backtrace//" | xargs) + + # Exclude empty-string entries, as well as leading/trailing commas and spaces resulting from the above lib exclusions + # i.e. " , pg_stat_statements, , pgsodium, " -> "pg_stat_statements, pgsodium" + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | tr ',' ' ' | tr -s ' ' | tr ' ' ', ') + + # Account for trailing comma + # eg. "...,auto_explain,pg_tle,plan_filter," -> "...,auto_explain,pg_tle,plan_filter" + if [[ "${SHARED_PRELOAD_LIBRARIES: -1}" = "," ]]; then + # clean up trailing comma + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/.$//" | xargs) + fi + + PGDATAOLD=$(cat "$POSTGRES_CONFIG_PATH" | grep data_directory | sed "s/data_directory = '\(.*\)'.*/\1/") + + PGDATANEW="$MOUNT_POINT/pgdata" + + # running upgrade using at least 1 cpu core + WORKERS=$(nproc | awk '{ print ($1 == 1 ? 
+
+ # To make nix-based upgrades work for testing, create a pg binaries tarball with the following contents:
+ # - nix_flake_version - a7189a68ed4ea78c1e73991b5f271043636cf074
+ # Where the value is the commit hash of the nix flake that contains the binaries
+
+ if [ -n "$IS_LOCAL_UPGRADE" ]; then
+ mkdir -p "$PG_UPGRADE_BIN_DIR"
+ mkdir -p /tmp/persistent/
+ if [ -n "${NIX_FLAKE_VERSION:-}" ]; then
+ echo "$NIX_FLAKE_VERSION" > "$PG_UPGRADE_BIN_DIR/nix_flake_version"
+ else
+ echo "a7189a68ed4ea78c1e73991b5f271043636cf074" > "$PG_UPGRADE_BIN_DIR/nix_flake_version"
+ fi
+
+ tar -czf "/tmp/persistent/pg_upgrade_bin.tar.gz" -C "/tmp/pg_upgrade_bin" .
+ rm -rf /tmp/pg_upgrade_bin/
+ fi
+
+ echo "1. Extracting pg_upgrade binaries"
+ mkdir -p "/tmp/pg_upgrade_bin"
+ tar zxf "/tmp/persistent/pg_upgrade_bin.tar.gz" -C "/tmp/pg_upgrade_bin"
+
+ PGSHARENEW="$PG_UPGRADE_BIN_DIR/share"
+
+ if [ -f "$PG_UPGRADE_BIN_DIR/nix_flake_version" ]; then
+ IS_NIX_UPGRADE="true"
+ NIX_FLAKE_VERSION=$(cat "$PG_UPGRADE_BIN_DIR/nix_flake_version")
+
+ if [ "$IS_NIX_BASED_SYSTEM" = "false" ]; then
+ if [ ! -f /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh ]; then
+ if ! command -v nix > /dev/null; then
+ echo "1.1. Nix is not installed; installing."
+
+ if [ -f "$NIX_INSTALLER_PACKAGE_PATH" ]; then
+ echo "1.1.1. Installing Nix using the provided installer"
+ tar -xzf "$NIX_INSTALLER_PACKAGE_PATH" -C /tmp/persistent/
+ chmod +x "$NIX_INSTALLER_PATH"
+ "$NIX_INSTALLER_PATH" install --no-confirm \
+ --extra-conf "substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com" \
+ --extra-conf "trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="
+ else
+ echo "1.1.1. Installing Nix using the official installer"
+
+ curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install --no-confirm \
+ --extra-conf "substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com" \
+ --extra-conf "trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="
+ fi
+ else
+ echo "1.1. Nix is installed; moving on."
+ fi
+ fi
+ fi
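+
+ # Sanity check (illustrative only, not executed by this script): whichever
+ # install path ran above, the daemon profile sourced in step 1.2 below is
+ # what puts `nix` on PATH, e.g.:
+ #   source /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh
+ #   nix --version   # prints a version string if the install succeeded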
+
+ echo "1.2. Installing flake revision: $NIX_FLAKE_VERSION"
+ # shellcheck disable=SC1091
+ source /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh
+ nix-collect-garbage -d > /tmp/pg_upgrade-nix-gc.log 2>&1 || true
+ PG_UPGRADE_BIN_DIR=$(nix build "github:tealbase/postgres/${NIX_FLAKE_VERSION}#psql_${PGVERSION}/bin" --no-link --print-out-paths --extra-experimental-features nix-command --extra-experimental-features flakes)
+ PGSHARENEW="$PG_UPGRADE_BIN_DIR/share/postgresql"
+ fi
+
+ PGBINNEW="$PG_UPGRADE_BIN_DIR/bin"
+ PGLIBNEW="$PG_UPGRADE_BIN_DIR/lib"
+
+ # copy upgrade-specific pgsodium_getkey script into the share dir
+ chmod +x "$SCRIPT_DIR/pgsodium_getkey.sh"
+ mkdir -p "$PGSHARENEW/extension"
+ cp "$SCRIPT_DIR/pgsodium_getkey.sh" "$PGSHARENEW/extension/pgsodium_getkey"
+ if [ -d "/var/lib/postgresql/extension/" ]; then
+ cp "$SCRIPT_DIR/pgsodium_getkey.sh" "/var/lib/postgresql/extension/pgsodium_getkey"
+ chown postgres:postgres "/var/lib/postgresql/extension/pgsodium_getkey"
+ fi
+
+ chown -R postgres:postgres "/tmp/pg_upgrade_bin/$PGVERSION"
+
+ # upgrade job outputs a log in the cwd; needs write permissions
+ mkdir -p /tmp/pg_upgrade/
+ chown -R postgres:postgres /tmp/pg_upgrade/
+ cd /tmp/pg_upgrade/
+
+ # Fix errors generated by previous dpkg executions (package upgrades and the like)
+ echo "2. Fixing potential errors generated by dpkg"
+ DEBIAN_FRONTEND=noninteractive dpkg --configure -a --force-confold || true # handle errors generated by dpkg
+
+ # Needed for PostGIS, since it's compiled with Protobuf-C support now
+ echo "3. Installing libprotobuf-c1 and libicu66 if missing"
+ if [[ ! "$(apt list --installed libprotobuf-c1 | grep "installed")" ]]; then
+ apt-get update -y
+ apt --fix-broken install -y libprotobuf-c1 libicu66 || true
+ fi
+
+ echo "4. Setting up locale if required"
+ if ! grep -q "^en_US.UTF-8" /etc/locale.gen ; then
+ echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen
+ fi
+ if ! grep -q "^C.UTF-8" /etc/locale.gen ; then
+ echo "C.UTF-8 UTF-8" >> /etc/locale.gen
+ fi
+ locale-gen
+
+ if [ -z "$IS_CI" ] && [ -z "$IS_LOCAL_UPGRADE" ]; then
+ # awk NF==3 prints lines with exactly 3 fields, which are the block devices currently not mounted anywhere
+ # excluding nvme0 since it is the root disk
+ echo "5. Determining block device to mount"
+ BLOCK_DEVICE=$(lsblk -dprno name,size,mountpoint,type | grep "disk" | grep -v "nvme0" | awk 'NF==3 { print $1; }')
+ echo "Block device found: $BLOCK_DEVICE"
+
+ mkdir -p "$MOUNT_POINT"
+ echo "6. Mounting block device"
+
+ sleep 5
+ e2fsck -pf "$BLOCK_DEVICE"
+
+ sleep 1
+ mount "$BLOCK_DEVICE" "$MOUNT_POINT"
+
+ sleep 1
+ resize2fs "$BLOCK_DEVICE"
+ else
+ mkdir -p "$MOUNT_POINT"
+ fi
+
+ if [ -f "$MOUNT_POINT/pgsodium_root.key" ]; then
+ cp "$MOUNT_POINT/pgsodium_root.key" /etc/postgresql-custom/pgsodium_root.key
+ chown postgres:postgres /etc/postgresql-custom/pgsodium_root.key
+ chmod 600 /etc/postgresql-custom/pgsodium_root.key
+ fi
+
+ echo "7. Disabling extensions and generating post-upgrade script"
+ handle_extensions
+
+ echo "8.1. Granting SUPERUSER to postgres user"
+ run_sql -c "ALTER USER postgres WITH SUPERUSER;"
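+
+ # Hypothetical verification (not part of this script): the grant can be
+ # confirmed with the same run_sql helper used throughout, e.g.:
+ #   run_sql -A -t -c "SELECT rolsuper FROM pg_roles WHERE rolname = 'postgres';"
+ # which should print "t" before the upgrade proceeds.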
+
+ if [ "$OLD_BOOTSTRAP_USER" = "postgres" ]; then
+ echo "8.2. Swapping postgres & tealbase_admin roles as we're upgrading a project with postgres as bootstrap user"
+ swap_postgres_and_tealbase_admin
+ fi
+
+ if [ -z "$IS_NIX_UPGRADE" ]; then
+ if [ -d "/usr/share/postgresql/${PGVERSION}" ]; then
+ mv "/usr/share/postgresql/${PGVERSION}" "/usr/share/postgresql/${PGVERSION}.bak"
+ fi
+
+ ln -s "$PGSHARENEW" "/usr/share/postgresql/${PGVERSION}"
+ cp --remove-destination "$PGLIBNEW"/*.control "$PGSHARENEW/extension/"
+ cp --remove-destination "$PGLIBNEW"/*.sql "$PGSHARENEW/extension/"
+
+ export LD_LIBRARY_PATH="${PGLIBNEW}"
+ fi
+
+ echo "9. Creating new data directory, initializing database"
+ chown -R postgres:postgres "$MOUNT_POINT/"
+ rm -rf "${PGDATANEW:?}/"
+
+ if [ "$IS_NIX_UPGRADE" = "true" ]; then
+ if [[ "${PGVERSION%%.*}" -ge 16 ]]; then
+ LC_ALL=en_US.UTF-8 LC_CTYPE=en_US.UTF-8 LC_COLLATE=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LANG=en_US.UTF-8 LOCALE_ARCHIVE=/usr/lib/locale/locale-archive su -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && $PGBINNEW/initdb --encoding=$SERVER_ENCODING --locale-provider=icu --icu-locale=en_US.UTF-8 -L $PGSHARENEW -D $PGDATANEW/ --username=tealbase_admin" -s "$SHELL" postgres
+ else
+ LC_ALL=en_US.UTF-8 LC_CTYPE=$SERVER_LC_CTYPE LC_COLLATE=$SERVER_LC_COLLATE LANGUAGE=en_US.UTF-8 LANG=en_US.UTF-8 LOCALE_ARCHIVE=/usr/lib/locale/locale-archive su -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && $PGBINNEW/initdb --encoding=$SERVER_ENCODING --lc-collate=$SERVER_LC_COLLATE --lc-ctype=$SERVER_LC_CTYPE -L $PGSHARENEW -D $PGDATANEW/ --username=tealbase_admin" -s "$SHELL" postgres
+ fi
+ else
+ su -c "$PGBINNEW/initdb -L $PGSHARENEW -D $PGDATANEW/ --username=tealbase_admin" -s "$SHELL" postgres
+ fi
+
+ # This pg_hba line avoids the need to supply the tealbase_admin password on
+ # the old instance, since pg_upgrade connects to the db as tealbase_admin over
+ # unix sockets, which are gated behind scram-sha-256 per pg_hba.conf.j2. The
+ # new instance is unaffected.
+ if ! grep -q "local all tealbase_admin trust" /etc/postgresql/pg_hba.conf; then
+ echo "local all tealbase_admin trust
+$(cat /etc/postgresql/pg_hba.conf)" > /etc/postgresql/pg_hba.conf
+ run_sql -c "select pg_reload_conf();"
+ fi
+
+ TMP_CONFIG="/tmp/pg_upgrade/postgresql.conf"
+ cp "$POSTGRES_CONFIG_PATH" "$TMP_CONFIG"
+
+ # Append max_slot_wal_keep_size = -1; appended last, it takes precedence over
+ # any value already present in the copied config
+ echo "max_slot_wal_keep_size = -1" >> "$TMP_CONFIG"
+
+ # Remove db_user_namespace if upgrading from PG15 or lower to PG16+
+ if [[ "${OLD_PGVERSION%%.*}" -le 15 && "${PGVERSION%%.*}" -ge 16 ]]; then
+ sed -i '/^db_user_namespace/d' "$TMP_CONFIG"
+ fi
+
+ chown postgres:postgres "$TMP_CONFIG"
+
+ UPGRADE_COMMAND=$(cat < /tmp/pg-upgrade-status
+if [ -z "$IS_CI" ] && [ -z "$IS_LOCAL_UPGRADE" ]; then
+ initiate_upgrade >> "$LOG_FILE" 2>&1 &
+ echo "Upgrade initiate job completed"
+else
+ rm -f /tmp/pg-upgrade-status
+ initiate_upgrade
+fi
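+
+# Note: the UPGRADE_COMMAND assembled above is truncated in this diff. Purely as
+# a hypothetical sketch (flags taken from stock pg_upgrade, not recovered from
+# this repo), the variables collected earlier would feed an invocation shaped
+# roughly like:
+#   "$PGBINNEW/pg_upgrade" \
+#     --old-bindir="$PGBINOLD" --new-bindir="$PGBINNEW" \
+#     --old-datadir="$PGDATAOLD" --new-datadir="$PGDATANEW" \
+#     --jobs="$WORKERS"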
-f "${KEY_FILE}" ]]; then + head -c 32 /dev/urandom | od -A n -t x1 | tr -d ' \n' > $KEY_FILE +fi + +cat $KEY_FILE diff --git a/ansible/files/admin_api_scripts/pg_upgrade_scripts/prepare.sh b/ansible/files/admin_api_scripts/pg_upgrade_scripts/prepare.sh new file mode 100755 index 0000000..7d7eb98 --- /dev/null +++ b/ansible/files/admin_api_scripts/pg_upgrade_scripts/prepare.sh @@ -0,0 +1,15 @@ +#! /usr/bin/env bash +## This script is runs in advance of the database version upgrade, on the newly +## launched instance which will eventually be promoted to become the primary +## database instance once the upgrade successfully completes, terminating the +## previous (source) instance. +## The following commands safely stop the Postgres service and unmount +## the data disk off the newly launched instance, to be re-attached to the +## source instance and run the upgrade there. + +set -euo pipefail + +systemctl stop postgresql + +cp /etc/postgresql-custom/pgsodium_root.key /data/pgsodium_root.key +umount /data diff --git a/ansible/files/adminapi.service.j2 b/ansible/files/adminapi.service.j2 new file mode 100644 index 0000000..8c7ef32 --- /dev/null +++ b/ansible/files/adminapi.service.j2 @@ -0,0 +1,25 @@ +[Unit] +Description=AdminAPI +Requires=network-online.target +After=network-online.target + +# Move this to the Service section if on systemd >=250 +StartLimitIntervalSec=60 +StartLimitBurst=10 + +[Service] +Type=simple +ExecStart=/opt/tealbase-admin-api +User=adminapi +Restart=always +RestartSec=3 +TimeoutStopSec=10 +Environment="AWS_USE_DUALSTACK_ENDPOINT=true" +{% if qemu_mode is defined and qemu_mode %} +Environment="AWS_SDK_LOAD_CONFIG=true" +{% endif %} +StandardOutput=journal +StandardError=journal + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/adminapi.sudoers.conf b/ansible/files/adminapi.sudoers.conf new file mode 100644 index 0000000..52f060b --- /dev/null +++ b/ansible/files/adminapi.sudoers.conf @@ -0,0 +1,31 @@ +Cmnd_Alias ENVOY = /bin/systemctl start envoy.service, /bin/systemctl stop envoy.service, /bin/systemctl restart envoy.service, /bin/systemctl disable envoy.service, /bin/systemctl enable envoy.service, /bin/systemctl reload envoy.service, /bin/systemctl try-restart envoy.service +Cmnd_Alias KONG = /bin/systemctl start kong.service, /bin/systemctl stop kong.service, /bin/systemctl restart kong.service, /bin/systemctl disable kong.service, /bin/systemctl enable kong.service, /bin/systemctl reload kong.service, /bin/systemctl try-restart kong.service +Cmnd_Alias POSTGREST = /bin/systemctl start postgrest.service, /bin/systemctl stop postgrest.service, /bin/systemctl restart postgrest.service, /bin/systemctl disable postgrest.service, /bin/systemctl enable postgrest.service, /bin/systemctl try-restart postgrest.service +Cmnd_Alias GOTRUE = /bin/systemctl start gotrue.service, /bin/systemctl stop gotrue.service, /bin/systemctl restart gotrue.service, /bin/systemctl disable gotrue.service, /bin/systemctl enable gotrue.service, /bin/systemctl try-restart gotrue.service +Cmnd_Alias PGBOUNCER = /bin/systemctl start pgbouncer.service, /bin/systemctl stop pgbouncer.service, /bin/systemctl restart pgbouncer.service, /bin/systemctl disable pgbouncer.service, /bin/systemctl enable pgbouncer.service, /bin/systemctl reload pgbouncer.service, /bin/systemctl try-restart pgbouncer.service + +%adminapi ALL= NOPASSWD: /root/grow_fs.sh +%adminapi ALL= NOPASSWD: /root/manage_readonly_mode.sh +%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/prepare.sh +%adminapi 
+%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/complete.sh
+%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/check.sh
+%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/common.sh
+%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/pgsodium_getkey.sh
+%adminapi ALL= NOPASSWD: /usr/bin/systemctl daemon-reload
+%adminapi ALL= NOPASSWD: /usr/bin/systemctl reload postgresql.service
+%adminapi ALL= NOPASSWD: /usr/bin/systemctl restart postgresql.service
+%adminapi ALL= NOPASSWD: /usr/bin/systemctl show -p NRestarts postgresql.service
+%adminapi ALL= NOPASSWD: /usr/bin/systemctl restart adminapi.service
+%adminapi ALL= NOPASSWD: /usr/bin/systemctl is-active commence-backup.service
+%adminapi ALL= NOPASSWD: /usr/bin/systemctl start commence-backup.service
+%adminapi ALL= NOPASSWD: /bin/systemctl daemon-reload
+%adminapi ALL= NOPASSWD: /bin/systemctl restart services.slice
+%adminapi ALL= NOPASSWD: /usr/sbin/nft -f /etc/nftables/tealbase_managed.conf
+%adminapi ALL= NOPASSWD: /usr/bin/admin-mgr
+%adminapi ALL= NOPASSWD: /usr/sbin/netplan apply
+%adminapi ALL= NOPASSWD: ENVOY
+%adminapi ALL= NOPASSWD: KONG
+%adminapi ALL= NOPASSWD: POSTGREST
+%adminapi ALL= NOPASSWD: GOTRUE
+%adminapi ALL= NOPASSWD: PGBOUNCER
diff --git a/ansible/files/ansible-pull.service b/ansible/files/ansible-pull.service
new file mode 100644
index 0000000..3e061b3
--- /dev/null
+++ b/ansible/files/ansible-pull.service
@@ -0,0 +1,20 @@
+[Unit]
+Description=Ansible pull
+
+[Service]
+Type=simple
+User=ubuntu
+
+ExecStart=/usr/bin/ansible-pull --private-key "$SSH_READ_KEY_FILE" -U "$REPO" --accept-host-key -t "$REGION,db-all" -i localhost --clean --full "$PLAYBOOK" -v -o -C "$REPO_BRANCH"
+
+# --verify-commit
+# temporarily disable commit verification, while we figure out how we want to balance commit signatures
+# and PR reviews; the --ff-only merge option would have allowed us to use this pretty nicely
+
+MemoryAccounting=true
+MemoryMax=30%
+
+StandardOutput=append:/var/log/ansible-pull.stdout
+StandardError=append:/var/log/ansible-pull.error
+
+TimeoutStopSec=600
diff --git a/ansible/files/ansible-pull.timer b/ansible/files/ansible-pull.timer
new file mode 100644
index 0000000..27ce24b
--- /dev/null
+++ b/ansible/files/ansible-pull.timer
@@ -0,0 +1,11 @@
+[Unit]
+Description=Run ansible roughly every 3 hours
+
+[Timer]
+OnBootSec=1h
+OnUnitActiveSec=3h
+RandomizedDelaySec=1h
+Persistent=true
+
+[Install]
+WantedBy=timers.target
diff --git a/ansible/files/commence-backup.service.j2 b/ansible/files/commence-backup.service.j2
new file mode 100644
index 0000000..9d4ad0c
--- /dev/null
+++ b/ansible/files/commence-backup.service.j2
@@ -0,0 +1,12 @@
+[Unit]
+Description=Async commence physical backup
+
+[Service]
+Type=simple
+User=adminapi
+ExecStart=/usr/bin/admin-mgr commence-backup --run-as-service true
+Restart=no
+OOMScoreAdjust=-1000
+
+[Install]
+WantedBy=multi-user.target
diff --git a/ansible/files/cron.deny b/ansible/files/cron.deny
new file mode 100644
index 0000000..3b5199b
--- /dev/null
+++ b/ansible/files/cron.deny
@@ -0,0 +1,2 @@
+ubuntu
+postgres
diff --git a/ansible/files/database-optimizations.service.j2 b/ansible/files/database-optimizations.service.j2
new file mode 100644
index 0000000..599a17d
--- /dev/null
+++ b/ansible/files/database-optimizations.service.j2
@@ -0,0 +1,12 @@
+[Unit]
+Description=Postgresql optimizations
+
+[Service]
+Type=oneshot
+# we do not want failures from these commands to cause downstream
service startup to fail +ExecStart=-/opt/tealbase-admin-api optimize db --destination-config-file-path /etc/postgresql-custom/generated-optimizations.conf +ExecStart=-/opt/tealbase-admin-api optimize pgbouncer --destination-config-file-path /etc/pgbouncer-custom/generated-optimizations.ini +User=adminapi + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/default.sysstat b/ansible/files/default.sysstat new file mode 100644 index 0000000..1b029ba --- /dev/null +++ b/ansible/files/default.sysstat @@ -0,0 +1,9 @@ +# +# Default settings for /etc/init.d/sysstat, /etc/cron.d/sysstat +# and /etc/cron.daily/sysstat files +# + +# Should sadc collect system activity informations? Valid values +# are "true" and "false". Please do not put other values, they +# will be overwritten by debconf! +ENABLED="true" diff --git a/ansible/files/envoy.service b/ansible/files/envoy.service new file mode 100644 index 0000000..d739ffd --- /dev/null +++ b/ansible/files/envoy.service @@ -0,0 +1,31 @@ +[Unit] +Description=Envoy +After=postgrest.service gotrue.service adminapi.service +Wants=postgrest.service gotrue.service adminapi.service +Conflicts=kong.service + +[Service] +Type=simple + +ExecStartPre=sh -c 'if ss -lnt | grep -Eq ":(80|443) "; then echo "Port 80 or 443 already in use"; exit 1; fi' + +# Need to run via a restarter script to support hot restart when using a process +# manager, see: +# https://www.envoyproxy.io/docs/envoy/latest/operations/hot_restarter +ExecStart=/opt/envoy-hot-restarter.py /opt/start-envoy.sh + +ExecReload=/bin/kill -HUP $MAINPID +ExecStop=/bin/kill -TERM $MAINPID +User=envoy +Slice=services.slice +Restart=always +RestartSec=3 +LimitNOFILE=100000 + +# The envoy user is unprivileged and thus not permitted to bind on ports < 1024 +# Via systemd we grant the process a set of privileges to bind to 80/443 +# See http://archive.vn/36zJU +AmbientCapabilities=CAP_NET_BIND_SERVICE + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/envoy_config/cds.yaml b/ansible/files/envoy_config/cds.yaml new file mode 100644 index 0000000..48fd1b9 --- /dev/null +++ b/ansible/files/envoy_config/cds.yaml @@ -0,0 +1,86 @@ +resources: + - '@type': type.googleapis.com/envoy.config.cluster.v3.Cluster + name: admin_api + load_assignment: + cluster_name: admin_api + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8085 + circuit_breakers: + thresholds: + - priority: DEFAULT + max_connections: 10000 + max_pending_requests: 10000 + max_requests: 10000 + retry_budget: + budget_percent: + value: 100 + min_retry_concurrency: 100 + - '@type': type.googleapis.com/envoy.config.cluster.v3.Cluster + name: gotrue + load_assignment: + cluster_name: gotrue + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 9999 + circuit_breakers: + thresholds: + - priority: DEFAULT + max_connections: 10000 + max_pending_requests: 10000 + max_requests: 10000 + retry_budget: + budget_percent: + value: 100 + min_retry_concurrency: 100 + - '@type': type.googleapis.com/envoy.config.cluster.v3.Cluster + name: postgrest + load_assignment: + cluster_name: postgrest + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 3000 + circuit_breakers: + thresholds: + - priority: DEFAULT + max_connections: 10000 + max_pending_requests: 10000 + max_requests: 10000 + retry_budget: + budget_percent: + value: 100 + min_retry_concurrency: 100 + - '@type': 
type.googleapis.com/envoy.config.cluster.v3.Cluster + name: postgrest_admin + load_assignment: + cluster_name: postgrest_admin + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 3001 + circuit_breakers: + thresholds: + - priority: DEFAULT + max_connections: 10000 + max_pending_requests: 10000 + max_requests: 10000 + retry_budget: + budget_percent: + value: 100 + min_retry_concurrency: 100 + diff --git a/ansible/files/envoy_config/envoy.yaml b/ansible/files/envoy_config/envoy.yaml new file mode 100644 index 0000000..3d25c13 --- /dev/null +++ b/ansible/files/envoy_config/envoy.yaml @@ -0,0 +1,23 @@ +dynamic_resources: + cds_config: + path_config_source: + path: /etc/envoy/cds.yaml + resource_api_version: V3 + lds_config: + path_config_source: + path: /etc/envoy/lds.yaml + resource_api_version: V3 +node: + cluster: cluster_0 + id: node_0 +overload_manager: + resource_monitors: + - name: envoy.resource_monitors.global_downstream_max_connections + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.resource_monitors.downstream_connections.v3.DownstreamConnectionsConfig + max_active_downstream_connections: 30000 +stats_config: + stats_matcher: + reject_all: true + diff --git a/ansible/files/envoy_config/lds.tealbase.yaml b/ansible/files/envoy_config/lds.tealbase.yaml new file mode 100644 index 0000000..f8e4ee9 --- /dev/null +++ b/ansible/files/envoy_config/lds.tealbase.yaml @@ -0,0 +1,396 @@ +resources: + - '@type': type.googleapis.com/envoy.config.listener.v3.Listener + name: http_listener + address: + socket_address: + address: '::' + port_value: 80 + ipv4_compat: true + filter_chains: + - filters: &ref_1 + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + - name: envoy.access_loggers.stdout + filter: + status_code_filter: + comparison: + op: GE + value: + default_value: 400 + runtime_key: unused + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.access_loggers.stream.v3.StdoutAccessLog + generate_request_id: false + http_filters: + - name: envoy.filters.http.cors + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.cors.v3.Cors + - name: envoy.filters.http.rbac + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC + rules: + action: DENY + policies: + origin_protection_key_missing: + permissions: + - any: true + principals: + - not_id: + header: + name: sb-opk + present_match: true + origin_protection_key_not_valid: + permissions: + - any: true + principals: + - not_id: + or_ids: + ids: + - header: + name: sb-opk + string_match: + exact: tealbase_origin_protection_key + - name: envoy.filters.http.lua + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + source_codes: + remove_apikey_and_empty_key_query_parameters: + inline_string: |- + function envoy_on_request(request_handle) + local path = request_handle:headers():get(":path") + request_handle + :headers() + :replace(":path", path:gsub("&=[^&]*", ""):gsub("?=[^&]*$", ""):gsub("?=[^&]*&", "?"):gsub("&apikey=[^&]*", ""):gsub("?apikey=[^&]*$", ""):gsub("?apikey=[^&]*&", "?")) + end + remove_empty_key_query_parameters: + inline_string: |- + function envoy_on_request(request_handle) + local path = request_handle:headers():get(":path") + request_handle + :headers() + :replace(":path", 
path:gsub("&=[^&]*", ""):gsub("?=[^&]*$", ""):gsub("?=[^&]*&", "?")) + end + - name: envoy.filters.http.compressor.brotli + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + response_direction_config: + common_config: + min_content_length: 100 + content_type: + - application/vnd.pgrst.object+json + - application/vnd.pgrst.array+json + - application/openapi+json + - application/geo+json + - text/csv + - application/vnd.pgrst.plan + - application/vnd.pgrst.object + - application/vnd.pgrst.array + - application/javascript + - application/json + - application/xhtml+xml + - image/svg+xml + - text/css + - text/html + - text/plain + - text/xml + disable_on_etag_header: true + request_direction_config: + common_config: + enabled: + default_value: false + runtime_key: request_compressor_enabled + compressor_library: + name: text_optimized + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.compression.brotli.compressor.v3.Brotli + - name: envoy.filters.http.compressor.gzip + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + response_direction_config: + common_config: + min_content_length: 100 + content_type: + - application/vnd.pgrst.object+json + - application/vnd.pgrst.array+json + - application/openapi+json + - application/geo+json + - text/csv + - application/vnd.pgrst.plan + - application/vnd.pgrst.object + - application/vnd.pgrst.array + - application/javascript + - application/json + - application/xhtml+xml + - image/svg+xml + - text/css + - text/html + - text/plain + - text/xml + disable_on_etag_header: true + request_direction_config: + common_config: + enabled: + default_value: false + runtime_key: request_compressor_enabled + compressor_library: + name: text_optimized + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip + - name: envoy.filters.http.router + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + dynamic_stats: false + local_reply_config: + mappers: + - filter: + and_filter: + filters: + - status_code_filter: + comparison: + value: + default_value: 403 + runtime_key: unused + - header_filter: + header: + name: ':path' + string_match: + prefix: /customer/v1/privileged/ + status_code: 401 + body: + inline_string: Unauthorized + headers_to_add: + - header: + key: WWW-Authenticate + value: Basic realm="Unknown" + - filter: + and_filter: + filters: + - status_code_filter: + comparison: + value: + default_value: 403 + runtime_key: unused + - header_filter: + header: + name: ':path' + string_match: + prefix: /metrics/aggregated + invert_match: true + status_code: 401 + headers_to_add: + - header: + key: x-sb-error-code + value: '%RESPONSE_CODE_DETAILS%' + body_format_override: + json_format: + message: >- + `apikey` request header or query parameter is either + missing or invalid. Double check your tealbase `anon` + or `service_role` API key. 
+ hint: '%RESPONSE_CODE_DETAILS%' + json_format_options: + sort_properties: false + merge_slashes: true + route_config: + name: route_config_0 + virtual_hosts: + - name: virtual_host_0 + domains: + - '*' + typed_per_filter_config: + envoy.filters.http.cors: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.cors.v3.CorsPolicy + allow_origin_string_match: + - safe_regex: + regex: \* + allow_methods: GET,HEAD,PUT,PATCH,POST,DELETE,OPTIONS,TRACE,CONNECT + allow_headers: apikey,authorization,x-client-info + max_age: '3600' + routes: + - match: + path: /health + direct_response: + status: 200 + body: + inline_string: Healthy + typed_per_filter_config: &ref_0 + envoy.filters.http.rbac: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute + - match: + safe_regex: + google_re2: + max_program_size: 150 + regex: >- + /auth/v1/(verify|callback|authorize|sso/saml/(acs|metadata|slo)|\.well-known/(openid-configuration|jwks\.json)) + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: gotrue + regex_rewrite: + pattern: + regex: ^/auth/v1 + substitution: '' + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 35s + typed_per_filter_config: *ref_0 + - match: + prefix: /auth/v1/ + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: gotrue + prefix_rewrite: / + timeout: 35s + - match: + prefix: /rest/v1/ + query_parameters: + - name: apikey + present_match: true + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest + prefix_rewrite: / + timeout: 125s + typed_per_filter_config: + envoy.filters.http.lua: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: remove_apikey_and_empty_key_query_parameters + - match: + prefix: /rest/v1/ + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest + prefix_rewrite: / + timeout: 125s + typed_per_filter_config: + envoy.filters.http.lua: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: remove_empty_key_query_parameters + - match: + prefix: /rest-admin/v1/ + query_parameters: + - name: apikey + present_match: true + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest_admin + prefix_rewrite: / + typed_per_filter_config: + envoy.filters.http.lua: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: remove_apikey_and_empty_key_query_parameters + - match: + prefix: /rest-admin/v1/ + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest_admin + prefix_rewrite: / + - match: + path: /graphql/v1 + request_headers_to_add: + header: + key: Content-Profile + value: graphql_public + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest + prefix_rewrite: /rpc/graphql + timeout: 125s + - match: + prefix: /admin/v1/ + request_headers_to_remove: + - sb-opk + route: + cluster: admin_api + prefix_rewrite: / + timeout: 600s + - match: + prefix: /customer/v1/privileged/ + request_headers_to_remove: + - sb-opk + route: + cluster: admin_api + prefix_rewrite: /privileged/ + - match: + prefix: /metrics/aggregated + request_headers_to_remove: + - sb-opk + route: + cluster: admin_api + prefix_rewrite: /tealbase-internal/metrics + typed_per_filter_config: + envoy.filters.http.rbac: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute + rbac: + rules: + action: DENY + policies: + not_private_ip: + permissions: + - any: true + 
principals: + - not_id: + direct_remote_ip: + address_prefix: 10.0.0.0 + prefix_len: 8 + include_attempt_count_in_response: true + retry_policy: + num_retries: 5 + retry_back_off: + base_interval: 0.1s + max_interval: 1s + retry_on: gateway-error + stat_prefix: ingress_http + - '@type': type.googleapis.com/envoy.config.listener.v3.Listener + name: https_listener + address: + socket_address: + address: '::' + port_value: 443 + ipv4_compat: true + filter_chains: + - filters: *ref_1 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: + filename: /etc/envoy/fullChain.pem + private_key: + filename: /etc/envoy/privKey.pem + diff --git a/ansible/files/envoy_config/lds.yaml b/ansible/files/envoy_config/lds.yaml new file mode 100644 index 0000000..6cc1e8a --- /dev/null +++ b/ansible/files/envoy_config/lds.yaml @@ -0,0 +1,440 @@ +resources: + - '@type': type.googleapis.com/envoy.config.listener.v3.Listener + name: http_listener + address: + socket_address: + address: '::' + port_value: 80 + ipv4_compat: true + filter_chains: + - filters: &ref_1 + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + - name: envoy.access_loggers.stdout + filter: + status_code_filter: + comparison: + op: GE + value: + default_value: 400 + runtime_key: unused + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.access_loggers.stream.v3.StdoutAccessLog + generate_request_id: false + http_filters: + - name: envoy.filters.http.cors + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.cors.v3.Cors + - name: envoy.filters.http.rbac + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC + rules: + action: DENY + policies: + api_key_missing: + permissions: + - any: true + principals: + - not_id: + or_ids: + ids: + - header: + name: apikey + present_match: true + - header: + name: ':path' + string_match: + contains: apikey= + api_key_not_valid: + permissions: + - any: true + principals: + - not_id: + or_ids: + ids: + - header: + name: apikey + string_match: + exact: anon_key + - header: + name: apikey + string_match: + exact: service_key + - header: + name: apikey + string_match: + exact: tealbase_admin_key + - header: + name: ':path' + string_match: + contains: apikey=anon_key + - header: + name: ':path' + string_match: + contains: apikey=service_key + - header: + name: ':path' + string_match: + contains: apikey=tealbase_admin_key + - name: envoy.filters.http.lua + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + source_codes: + remove_apikey_and_empty_key_query_parameters: + inline_string: |- + function envoy_on_request(request_handle) + local path = request_handle:headers():get(":path") + request_handle + :headers() + :replace(":path", path:gsub("&=[^&]*", ""):gsub("?=[^&]*$", ""):gsub("?=[^&]*&", "?"):gsub("&apikey=[^&]*", ""):gsub("?apikey=[^&]*$", ""):gsub("?apikey=[^&]*&", "?")) + end + remove_empty_key_query_parameters: + inline_string: |- + function envoy_on_request(request_handle) + local path = request_handle:headers():get(":path") + request_handle + :headers() + :replace(":path", path:gsub("&=[^&]*", ""):gsub("?=[^&]*$", ""):gsub("?=[^&]*&", "?")) + end + - 
name: envoy.filters.http.compressor.brotli + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + response_direction_config: + common_config: + min_content_length: 100 + content_type: + - application/vnd.pgrst.object+json + - application/vnd.pgrst.array+json + - application/openapi+json + - application/geo+json + - text/csv + - application/vnd.pgrst.plan + - application/vnd.pgrst.object + - application/vnd.pgrst.array + - application/javascript + - application/json + - application/xhtml+xml + - image/svg+xml + - text/css + - text/html + - text/plain + - text/xml + disable_on_etag_header: true + request_direction_config: + common_config: + enabled: + default_value: false + runtime_key: request_compressor_enabled + compressor_library: + name: text_optimized + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.compression.brotli.compressor.v3.Brotli + - name: envoy.filters.http.compressor.gzip + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + response_direction_config: + common_config: + min_content_length: 100 + content_type: + - application/vnd.pgrst.object+json + - application/vnd.pgrst.array+json + - application/openapi+json + - application/geo+json + - text/csv + - application/vnd.pgrst.plan + - application/vnd.pgrst.object + - application/vnd.pgrst.array + - application/javascript + - application/json + - application/xhtml+xml + - image/svg+xml + - text/css + - text/html + - text/plain + - text/xml + disable_on_etag_header: true + request_direction_config: + common_config: + enabled: + default_value: false + runtime_key: request_compressor_enabled + compressor_library: + name: text_optimized + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip + - name: envoy.filters.http.router + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + dynamic_stats: false + local_reply_config: + mappers: + - filter: + and_filter: + filters: + - status_code_filter: + comparison: + value: + default_value: 403 + runtime_key: unused + - header_filter: + header: + name: ':path' + string_match: + prefix: /customer/v1/privileged/ + status_code: 401 + body: + inline_string: Unauthorized + headers_to_add: + - header: + key: WWW-Authenticate + value: Basic realm="Unknown" + - filter: + and_filter: + filters: + - status_code_filter: + comparison: + value: + default_value: 403 + runtime_key: unused + - header_filter: + header: + name: ':path' + string_match: + prefix: /metrics/aggregated + invert_match: true + status_code: 401 + headers_to_add: + - header: + key: x-sb-error-code + value: '%RESPONSE_CODE_DETAILS%' + body_format_override: + json_format: + message: >- + `apikey` request header or query parameter is either + missing or invalid. Double check your tealbase `anon` + or `service_role` API key. 
+ hint: '%RESPONSE_CODE_DETAILS%' + json_format_options: + sort_properties: false + merge_slashes: true + route_config: + name: route_config_0 + virtual_hosts: + - name: virtual_host_0 + domains: + - '*' + typed_per_filter_config: + envoy.filters.http.cors: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.cors.v3.CorsPolicy + allow_origin_string_match: + - safe_regex: + regex: \* + allow_methods: GET,HEAD,PUT,PATCH,POST,DELETE,OPTIONS,TRACE,CONNECT + allow_headers: apikey,authorization,x-client-info + max_age: '3600' + routes: + - match: + path: /health + direct_response: + status: 200 + body: + inline_string: Healthy + typed_per_filter_config: &ref_0 + envoy.filters.http.rbac: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute + - match: + safe_regex: + google_re2: + max_program_size: 150 + regex: >- + /auth/v1/(verify|callback|authorize|sso/saml/(acs|metadata|slo)|\.well-known/(openid-configuration|jwks\.json)) + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: gotrue + regex_rewrite: + pattern: + regex: ^/auth/v1 + substitution: '' + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 35s + typed_per_filter_config: *ref_0 + - match: + prefix: /auth/v1/ + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: gotrue + prefix_rewrite: / + timeout: 35s + - match: + prefix: /rest/v1/ + query_parameters: + - name: apikey + present_match: true + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest + prefix_rewrite: / + timeout: 125s + typed_per_filter_config: + envoy.filters.http.lua: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: remove_apikey_and_empty_key_query_parameters + - match: + prefix: /rest/v1/ + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest + prefix_rewrite: / + timeout: 125s + typed_per_filter_config: + envoy.filters.http.lua: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: remove_empty_key_query_parameters + - match: + prefix: /rest-admin/v1/ + query_parameters: + - name: apikey + present_match: true + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest_admin + prefix_rewrite: / + typed_per_filter_config: + envoy.filters.http.lua: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: remove_apikey_and_empty_key_query_parameters + - match: + prefix: /rest-admin/v1/ + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest_admin + prefix_rewrite: / + - match: + path: /graphql/v1 + request_headers_to_add: + header: + key: Content-Profile + value: graphql_public + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest + prefix_rewrite: /rpc/graphql + timeout: 125s + - match: + prefix: /admin/v1/ + request_headers_to_remove: + - sb-opk + route: + cluster: admin_api + prefix_rewrite: / + timeout: 600s + - match: + prefix: /customer/v1/privileged/ + request_headers_to_remove: + - sb-opk + route: + cluster: admin_api + prefix_rewrite: /privileged/ + typed_per_filter_config: + envoy.filters.http.rbac: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute + rbac: + rules: + action: DENY + policies: + basic_auth: + permissions: + - any: true + principals: + - header: + name: authorization + invert_match: true + string_match: + exact: Basic c2VydmljZV9yb2xlOnNlcnZpY2Vfa2V5 + treat_missing_header_as_empty: 
true + - match: + prefix: /metrics/aggregated + request_headers_to_remove: + - sb-opk + route: + cluster: admin_api + prefix_rewrite: /tealbase-internal/metrics + typed_per_filter_config: + envoy.filters.http.rbac: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute + rbac: + rules: + action: DENY + policies: + not_private_ip: + permissions: + - any: true + principals: + - not_id: + direct_remote_ip: + address_prefix: 10.0.0.0 + prefix_len: 8 + include_attempt_count_in_response: true + retry_policy: + num_retries: 5 + retry_back_off: + base_interval: 0.1s + max_interval: 1s + retry_on: gateway-error + stat_prefix: ingress_http + - '@type': type.googleapis.com/envoy.config.listener.v3.Listener + name: https_listener + address: + socket_address: + address: '::' + port_value: 443 + ipv4_compat: true + filter_chains: + - filters: *ref_1 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: + filename: /etc/envoy/fullChain.pem + private_key: + filename: /etc/envoy/privKey.pem + diff --git a/ansible/files/fail2ban_config/fail2ban.service.conf b/ansible/files/fail2ban_config/fail2ban.service.conf new file mode 100644 index 0000000..431d1db --- /dev/null +++ b/ansible/files/fail2ban_config/fail2ban.service.conf @@ -0,0 +1,6 @@ +[Unit] +After=nftables.service +Wants=nftables.service + +[Service] +ExecStartPost=/bin/bash -c "sleep 5 && chmod g+w /var/run/fail2ban/fail2ban.sock" diff --git a/ansible/files/fail2ban_config/filter-pgbouncer.conf.j2 b/ansible/files/fail2ban_config/filter-pgbouncer.conf.j2 new file mode 100644 index 0000000..3a3a52e --- /dev/null +++ b/ansible/files/fail2ban_config/filter-pgbouncer.conf.j2 @@ -0,0 +1,3 @@ +[Definition] +failregex = ^.+@:.+password authentication failed$ +journalmatch = _SYSTEMD_UNIT=pgbouncer.service diff --git a/ansible/files/fail2ban_config/filter-postgresql.conf.j2 b/ansible/files/fail2ban_config/filter-postgresql.conf.j2 new file mode 100644 index 0000000..fd0895a --- /dev/null +++ b/ansible/files/fail2ban_config/filter-postgresql.conf.j2 @@ -0,0 +1,3 @@ +[Definition] +failregex = ^.*,.*,.*,.*,":.*password authentication failed for user.*$ +ignoreregex = ^.*,.*,.*,.*,"127\.0\.0\.1.*password authentication failed for user.*$ \ No newline at end of file diff --git a/ansible/files/fail2ban_config/jail-pgbouncer.conf.j2 b/ansible/files/fail2ban_config/jail-pgbouncer.conf.j2 new file mode 100644 index 0000000..60a9eb3 --- /dev/null +++ b/ansible/files/fail2ban_config/jail-pgbouncer.conf.j2 @@ -0,0 +1,7 @@ +[pgbouncer] +enabled = true +port = 6543 +protocol = tcp +filter = pgbouncer +backend = systemd[journalflags=1] +maxretry = 3 diff --git a/ansible/files/fail2ban_config/jail-postgresql.conf.j2 b/ansible/files/fail2ban_config/jail-postgresql.conf.j2 new file mode 100644 index 0000000..a021035 --- /dev/null +++ b/ansible/files/fail2ban_config/jail-postgresql.conf.j2 @@ -0,0 +1,8 @@ +[postgresql] +enabled = true +port = 5432 +protocol = tcp +filter = postgresql +logpath = /var/log/postgresql/auth-failures.csv +maxretry = 3 +ignoreip = 192.168.0.0/16 172.17.1.0/20 diff --git a/ansible/files/fail2ban_config/jail-ssh.conf b/ansible/files/fail2ban_config/jail-ssh.conf new file mode 100644 index 0000000..5476c30 --- /dev/null +++ b/ansible/files/fail2ban_config/jail-ssh.conf @@ -0,0 +1,4 @@ +[sshd] + +backend = systemd +mode = aggressive diff --git 
a/ansible/files/fail2ban_config/jail.local b/ansible/files/fail2ban_config/jail.local
new file mode 100644
index 0000000..44e8210
--- /dev/null
+++ b/ansible/files/fail2ban_config/jail.local
@@ -0,0 +1,4 @@
+[DEFAULT]
+
+banaction = nftables-multiport
+banaction_allports = nftables-allports
diff --git a/ansible/files/gotrue-optimizations.service.j2 b/ansible/files/gotrue-optimizations.service.j2
new file mode 100644
index 0000000..4cd8256
--- /dev/null
+++ b/ansible/files/gotrue-optimizations.service.j2
@@ -0,0 +1,12 @@
+[Unit]
+Description=GoTrue (Auth) optimizations
+
+[Service]
+Type=oneshot
+# we don't want failures from this command to cause PG startup to fail
+ExecStart=/bin/bash -c "/opt/tealbase-admin-api optimize auth --destination-config-file-path /etc/gotrue/gotrue.generated.env ; exit 0"
+ExecStartPost=/bin/bash -c "cp -a /etc/gotrue/gotrue.generated.env /etc/auth.d/20_generated.env ; exit 0"
+User=postgrest
+
+[Install]
+WantedBy=multi-user.target
diff --git a/ansible/files/gotrue.service.j2 b/ansible/files/gotrue.service.j2
new file mode 100644
index 0000000..2478e99
--- /dev/null
+++ b/ansible/files/gotrue.service.j2
@@ -0,0 +1,27 @@
+[Unit]
+Description=Gotrue
+
+[Service]
+Type=simple
+WorkingDirectory=/opt/gotrue
+{% if qemu_mode is defined and qemu_mode %}
+ExecStart=/opt/gotrue/gotrue
+{% else %}
+ExecStart=/opt/gotrue/gotrue --config-dir /etc/auth.d
+{% endif %}
+
+User=gotrue
+Restart=always
+RestartSec=3
+
+MemoryAccounting=true
+MemoryMax=50%
+
+EnvironmentFile=-/etc/gotrue.generated.env
+EnvironmentFile=/etc/gotrue.env
+EnvironmentFile=-/etc/gotrue.overrides.env
+
+Slice=services.slice
+
+[Install]
+WantedBy=multi-user.target
diff --git a/ansible/files/journald.conf b/ansible/files/journald.conf
new file mode 100644
index 0000000..2eb89f9
--- /dev/null
+++ b/ansible/files/journald.conf
@@ -0,0 +1,6 @@
+[Journal]
+Storage=persistent
+SystemMaxUse=3G
+SystemKeepFree=3G
+SystemMaxFileSize=200M
+ForwardToSyslog=no
diff --git a/ansible/files/kong_config/kong.conf.j2 b/ansible/files/kong_config/kong.conf.j2
new file mode 100644
index 0000000..3906757
--- /dev/null
+++ b/ansible/files/kong_config/kong.conf.j2
@@ -0,0 +1,7 @@
+database = off
+declarative_config = /etc/kong/kong.yml
+
+# plugins defined in the dockerfile
+plugins = request-transformer,cors,key-auth,http-log
+
+proxy_listen = 0.0.0.0:80 reuseport backlog=16384, 0.0.0.0:443 http2 ssl reuseport backlog=16384, [::]:80 reuseport backlog=16384, [::]:443 http2 ssl reuseport backlog=16384
diff --git a/ansible/files/kong_config/kong.env.j2 b/ansible/files/kong_config/kong.env.j2
new file mode 100644
index 0000000..57613fd
--- /dev/null
+++ b/ansible/files/kong_config/kong.env.j2
@@ -0,0 +1,8 @@
+KONG_NGINX_HTTP_GZIP=on
+KONG_NGINX_HTTP_GZIP_COMP_LEVEL=6
+KONG_NGINX_HTTP_GZIP_MIN_LENGTH=256
+KONG_NGINX_HTTP_GZIP_PROXIED=any
+KONG_NGINX_HTTP_GZIP_VARY=on
+KONG_NGINX_HTTP_GZIP_TYPES=text/plain application/xml application/openapi+json application/json
+KONG_PROXY_ERROR_LOG=syslog:server=unix:/dev/log
+KONG_ADMIN_ERROR_LOG=syslog:server=unix:/dev/log
diff --git a/ansible/files/kong_config/kong.service.j2 b/ansible/files/kong_config/kong.service.j2
new file mode 100644
index 0000000..6a36520
--- /dev/null
+++ b/ansible/files/kong_config/kong.service.j2
@@ -0,0 +1,28 @@
+[Unit]
+Description=Kong server
+After=postgrest.service gotrue.service adminapi.service
+Wants=postgrest.service gotrue.service adminapi.service
+Conflicts=envoy.service
+
+# Ensures that the Kong service is stopped before the Envoy service is started
+Before=envoy.service + +[Service] +Type=forking +ExecStart=/usr/local/bin/kong start -c /etc/kong/kong.conf +ExecReload=/usr/local/bin/kong reload -c /etc/kong/kong.conf +ExecStop=/usr/local/bin/kong quit +User=kong +EnvironmentFile=/etc/kong/kong.env +Slice=services.slice +Restart=always +RestartSec=3 +LimitNOFILE=100000 + +# The kong user is unprivileged and thus not permitted to bind on ports < 1024 +# Via systemd we grant the process a set of privileges to bind to 80/443 +# See http://archive.vn/36zJU +AmbientCapabilities=CAP_NET_BIND_SERVICE + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/logind.conf b/ansible/files/logind.conf new file mode 100644 index 0000000..732900f --- /dev/null +++ b/ansible/files/logind.conf @@ -0,0 +1,2 @@ +[Login] +RemoveIPC=no diff --git a/ansible/files/logrotate_config/logrotate-postgres-auth.conf b/ansible/files/logrotate_config/logrotate-postgres-auth.conf new file mode 100644 index 0000000..050210e --- /dev/null +++ b/ansible/files/logrotate_config/logrotate-postgres-auth.conf @@ -0,0 +1,8 @@ +/var/log/postgresql/auth-failures.csv { + size 10M + rotate 5 + compress + delaycompress + notifempty + missingok +} diff --git a/ansible/files/logrotate_config/logrotate-postgres-csv.conf b/ansible/files/logrotate_config/logrotate-postgres-csv.conf new file mode 100644 index 0000000..e5418e8 --- /dev/null +++ b/ansible/files/logrotate_config/logrotate-postgres-csv.conf @@ -0,0 +1,11 @@ +/var/log/postgresql/postgresql.csv { + size 50M + rotate 9 + compress + delaycompress + notifempty + missingok + postrotate + sudo -u postgres /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data logrotate + endscript +} diff --git a/ansible/files/logrotate_config/logrotate-postgres.conf b/ansible/files/logrotate_config/logrotate-postgres.conf new file mode 100644 index 0000000..c802320 --- /dev/null +++ b/ansible/files/logrotate_config/logrotate-postgres.conf @@ -0,0 +1,9 @@ +/var/log/postgresql/postgresql.log { + size 50M + rotate 3 + copytruncate + delaycompress + compress + notifempty + missingok +} diff --git a/ansible/files/logrotate_config/logrotate-walg.conf b/ansible/files/logrotate_config/logrotate-walg.conf new file mode 100644 index 0000000..49eeb59 --- /dev/null +++ b/ansible/files/logrotate_config/logrotate-walg.conf @@ -0,0 +1,9 @@ +/var/log/wal-g/*.log { + size 50M + rotate 3 + copytruncate + delaycompress + compress + notifempty + missingok +} diff --git a/ansible/files/manifest.json b/ansible/files/manifest.json new file mode 100644 index 0000000..3a20e76 --- /dev/null +++ b/ansible/files/manifest.json @@ -0,0 +1 @@ +{{ vars | to_json }} diff --git a/ansible/files/nginx.service.j2 b/ansible/files/nginx.service.j2 new file mode 100644 index 0000000..872e334 --- /dev/null +++ b/ansible/files/nginx.service.j2 @@ -0,0 +1,22 @@ +[Unit] +Description=nginx server +After=postgrest.service gotrue.service adminapi.service +Wants=postgrest.service gotrue.service adminapi.service + +[Service] +Type=forking +ExecStart=/usr/local/nginx/sbin/nginx -c /etc/nginx/nginx.conf +ExecReload=/usr/local/nginx/sbin/nginx -s reload -c /etc/nginx/nginx.conf +ExecStop=/usr/local/nginx/sbin/nginx -s quit +User=nginx +Slice=services.slice +Restart=always +RestartSec=3 +LimitNOFILE=100000 + +# Via systemd we grant the process a set of privileges to bind to 80/443 +# See http://archive.vn/36zJU +AmbientCapabilities=CAP_NET_BIND_SERVICE + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/permission_check.py b/ansible/files/permission_check.py new file 
mode 100644 index 0000000..a1153d1 --- /dev/null +++ b/ansible/files/permission_check.py @@ -0,0 +1,263 @@ +import subprocess +import json +import sys +import argparse + + +# Expected groups for each user +expected_results = { + "postgres": [ + {"groupname": "postgres", "username": "postgres"}, + {"groupname": "ssl-cert", "username": "postgres"}, + ], + "ubuntu": [ + {"groupname": "adm", "username": "ubuntu"}, + {"groupname": "audio", "username": "ubuntu"}, + {"groupname": "cdrom", "username": "ubuntu"}, + {"groupname": "dialout", "username": "ubuntu"}, + {"groupname": "dip", "username": "ubuntu"}, + {"groupname": "floppy", "username": "ubuntu"}, + {"groupname": "lxd", "username": "ubuntu"}, + {"groupname": "netdev", "username": "ubuntu"}, + {"groupname": "plugdev", "username": "ubuntu"}, + {"groupname": "sudo", "username": "ubuntu"}, + {"groupname": "ubuntu", "username": "ubuntu"}, + {"groupname": "video", "username": "ubuntu"}, + ], + "root": [{"groupname": "root", "username": "root"}], + "daemon": [{"groupname": "daemon", "username": "daemon"}], + "bin": [{"groupname": "bin", "username": "bin"}], + "sys": [{"groupname": "sys", "username": "sys"}], + "sync": [{"groupname": "nogroup", "username": "sync"}], + "games": [{"groupname": "games", "username": "games"}], + "man": [{"groupname": "man", "username": "man"}], + "lp": [{"groupname": "lp", "username": "lp"}], + "mail": [{"groupname": "mail", "username": "mail"}], + "news": [{"groupname": "news", "username": "news"}], + "uucp": [{"groupname": "uucp", "username": "uucp"}], + "proxy": [{"groupname": "proxy", "username": "proxy"}], + "www-data": [{"groupname": "www-data", "username": "www-data"}], + "backup": [{"groupname": "backup", "username": "backup"}], + "list": [{"groupname": "list", "username": "list"}], + "irc": [{"groupname": "irc", "username": "irc"}], + "nobody": [{"groupname": "nogroup", "username": "nobody"}], + "systemd-network": [ + {"groupname": "systemd-network", "username": "systemd-network"} + ], + "systemd-resolve": [ + {"groupname": "systemd-resolve", "username": "systemd-resolve"} + ], + "systemd-timesync": [ + {"groupname": "systemd-timesync", "username": "systemd-timesync"} + ], + "messagebus": [{"groupname": "messagebus", "username": "messagebus"}], + "ec2-instance-connect": [ + {"groupname": "nogroup", "username": "ec2-instance-connect"} + ], + "sshd": [{"groupname": "nogroup", "username": "sshd"}], + "wal-g": [ + {"groupname": "postgres", "username": "wal-g"}, + {"groupname": "wal-g", "username": "wal-g"}, + ], + "pgbouncer": [ + {"groupname": "pgbouncer", "username": "pgbouncer"}, + {"groupname": "postgres", "username": "pgbouncer"}, + {"groupname": "ssl-cert", "username": "pgbouncer"}, + ], + "gotrue": [{"groupname": "gotrue", "username": "gotrue"}], + "envoy": [{"groupname": "envoy", "username": "envoy"}], + "kong": [{"groupname": "kong", "username": "kong"}], + "nginx": [{"groupname": "nginx", "username": "nginx"}], + "vector": [ + {"groupname": "adm", "username": "vector"}, + {"groupname": "postgres", "username": "vector"}, + {"groupname": "systemd-journal", "username": "vector"}, + {"groupname": "vector", "username": "vector"}, + ], + "adminapi": [ + {"groupname": "admin", "username": "adminapi"}, + {"groupname": "adminapi", "username": "adminapi"}, + {"groupname": "envoy", "username": "adminapi"}, + {"groupname": "gotrue", "username": "adminapi"}, + {"groupname": "kong", "username": "adminapi"}, + {"groupname": "pgbouncer", "username": "adminapi"}, + {"groupname": "postgres", "username": "adminapi"}, + 
{"groupname": "postgrest", "username": "adminapi"}, + {"groupname": "root", "username": "adminapi"}, + {"groupname": "systemd-journal", "username": "adminapi"}, + {"groupname": "vector", "username": "adminapi"}, + {"groupname": "wal-g", "username": "adminapi"}, + ], + "postgrest": [{"groupname": "postgrest", "username": "postgrest"}], + "tcpdump": [{"groupname": "tcpdump", "username": "tcpdump"}], + "systemd-coredump": [ + {"groupname": "systemd-coredump", "username": "systemd-coredump"} + ], + "tealbase-admin-agent": [ + {"groupname": "tealbase-admin-agent", "username": "tealbase-admin-agent"}, + {"groupname": "admin", "username": "tealbase-admin-agent"}, + {"groupname": "salt", "username": "tealbase-admin-agent"}, + ], +} + +# postgresql.service is expected to mount /etc as read-only +expected_mount = "/etc ro" + + +# This program depends on osquery being installed on the system +# Function to run osquery +def run_osquery(query): + process = subprocess.Popen( + ["osqueryi", "--json", query], stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) + output, error = process.communicate() + return output.decode("utf-8") + + +def parse_json(json_str): + try: + return json.loads(json_str) + except json.JSONDecodeError as e: + print("Error decoding JSON:", e) + sys.exit(1) + + +def compare_results(username, query_result): + expected_result = expected_results.get(username) + if expected_result is None: + print(f"No expected result defined for user '{username}'") + sys.exit(1) + + if query_result == expected_result: + print(f"The query result for user '{username}' matches the expected result.") + else: + print( + f"The query result for user '{username}' does not match the expected result." + ) + print("Expected:", expected_result) + print("Got:", query_result) + sys.exit(1) + + +def check_nixbld_users(): + query = """ + SELECT u.username, g.groupname + FROM users u + JOIN user_groups ug ON u.uid = ug.uid + JOIN groups g ON ug.gid = g.gid + WHERE u.username LIKE 'nixbld%'; + """ + query_result = run_osquery(query) + parsed_result = parse_json(query_result) + + for user in parsed_result: + if user["groupname"] != "nixbld": + print( + f"User '{user['username']}' is in group '{user['groupname']}' instead of 'nixbld'." 
+            )
+            sys.exit(1)
+
+    print("All nixbld users are in the 'nixbld' group.")
+
+
+def check_postgresql_mount():
+    # The processes table has the nix .postgres-wrapped path as the
+    # binary path, rather than /usr/lib/postgresql/bin/postgres which
+    # is a symlink to /var/lib/postgresql/.nix-profile/bin/postgres, a script
+    # that ultimately calls /nix/store/...-postgresql-and-plugins-15.8/bin/.postgres-wrapped
+    query = """
+    SELECT pid
+    FROM processes
+    WHERE path LIKE '%.postgres-wrapped%'
+      AND cmdline LIKE '%-D /etc/postgresql%';
+    """
+    query_result = run_osquery(query)
+    parsed_result = parse_json(query_result)
+
+    if not parsed_result:
+        print("No running postgres process matched the expected wrapper path.")
+        sys.exit(1)
+
+    pid = parsed_result[0].get("pid")
+
+    # get the mounts for the process; exactly one read-only /etc mount is expected
+    with open(f"/proc/{pid}/mounts", "r") as o:
+        lines = [line for line in o if "/etc" in line and "ro," in line]
+        if len(lines) != 1:
+            print(f"Expected exactly 1 match, got {len(lines)}: {';'.join(lines)}")
+            sys.exit(1)
+
+    print("postgresql.service mounts /etc as read-only.")
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        prog="tealbase Postgres Artifact Permissions Checker",
+        description="Checks the Postgres Artifact for the appropriate users and group memberships",
+    )
+    parser.add_argument(
+        "-q",
+        "--qemu",
+        action="store_true",
+        help="Whether we are checking a QEMU artifact",
+    )
+    args = parser.parse_args()
+    # store_true already yields a boolean, no further normalization needed
+    qemu_artifact = args.qemu
+
+    # Define usernames for which you want to compare results
+    usernames = [
+        "postgres",
+        "ubuntu",
+        "root",
+        "daemon",
+        "bin",
+        "sys",
+        "sync",
+        "games",
+        "man",
+        "lp",
+        "mail",
+        "news",
+        "uucp",
+        "proxy",
+        "www-data",
+        "backup",
+        "list",
+        "irc",
+        "nobody",
+        "systemd-network",
+        "systemd-resolve",
+        "systemd-timesync",
+        "messagebus",
+        "sshd",
+        "wal-g",
+        "pgbouncer",
+        "gotrue",
+        "envoy",
+        "kong",
+        "nginx",
+        "vector",
+        "adminapi",
+        "postgrest",
+        "tcpdump",
+        "systemd-coredump",
+        "tealbase-admin-agent",
+    ]
+    if not qemu_artifact:
+        usernames.append("ec2-instance-connect")
+
+    # Iterate over usernames, run the query, and compare results
+    for username in usernames:
+        query = f"SELECT u.username, g.groupname FROM users u JOIN user_groups ug ON u.uid = ug.uid JOIN groups g ON ug.gid = g.gid WHERE u.username = '{username}' ORDER BY g.groupname;"
+        query_result = run_osquery(query)
+        parsed_result = parse_json(query_result)
+        compare_results(username, parsed_result)
+
+    # Check if all nixbld users are in the nixbld group
+    check_nixbld_users()
+
+    # Check if postgresql.service is using a read-only mount for /etc
+    check_postgresql_mount()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible/files/pg_egress_collect.service.j2 b/ansible/files/pg_egress_collect.service.j2
new file mode 100644
index 0000000..7ac04f4
--- /dev/null
+++ b/ansible/files/pg_egress_collect.service.j2
@@ -0,0 +1,13 @@
+[Unit]
+Description=Postgres Egress Collector
+
+[Service]
+Type=simple
+ExecStart=/bin/bash -c "tcpdump -s 128 -Q out -nn -tt -vv -p -l 'tcp and (port 5432 or port 6543)' | perl /root/pg_egress_collect.pl"
+User=root
+Slice=services.slice
+Restart=always
+RestartSec=3
+
+[Install]
+WantedBy=multi-user.target
diff --git a/ansible/files/pgbouncer_config/pgbouncer.ini.j2 b/ansible/files/pgbouncer_config/pgbouncer.ini.j2
new file mode 100644
index 0000000..e4518c0
--- /dev/null
+++ b/ansible/files/pgbouncer_config/pgbouncer.ini.j2
@@ -0,0 +1,364 @@
+;;;
+;;; PgBouncer configuration file
+;;;
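+
+;; Editor's note (illustrative, not part of the generated template): once deployed,
+;; effective settings and pool state can be inspected from the PgBouncer admin
+;; console, using the listen_port and admin_users values configured below, e.g.:
+;;   psql -h localhost -p 6543 -U pgbouncer pgbouncer -c 'SHOW CONFIG;'
+;;   psql -h localhost -p 6543 -U pgbouncer pgbouncer -c 'SHOW POOLS;'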
+
+;; database name = connect string
+;;
+;; connect string params:
+;;   dbname= host= port= user= password= auth_user=
+;;   client_encoding= datestyle= timezone=
+;;   pool_size= reserve_pool= max_db_connections=
+;;   pool_mode= connect_query= application_name=
+[databases]
+* = host=localhost auth_user=pgbouncer
+
+;; foodb over Unix socket
+;foodb =
+
+;; redirect bardb to bazdb on localhost
+;bardb = host=localhost dbname=bazdb
+
+;; access to the dest database will go with a single user
+;forcedb = host=localhost port=300 user=baz password=foo client_encoding=UNICODE datestyle=ISO connect_query='SELECT 1'
+
+;; use custom pool sizes
+;nondefaultdb = pool_size=50 reserve_pool=10
+
+;; use auth_user with auth_query if user not present in auth_file
+;; auth_user must exist in auth_file
+; foodb = auth_user=bar
+
+;; fallback connect string
+;* = host=testserver
+
+;; User-specific configuration
+[users]
+
+;user1 = pool_mode=transaction max_user_connections=10
+
+;; Configuration section
+[pgbouncer]
+
+;;;
+;;; Administrative settings
+;;;
+
+;logfile = /var/log/pgbouncer.log
+pidfile = /var/run/pgbouncer/pgbouncer.pid
+
+;;;
+;;; Where to wait for clients
+;;;
+
+;; IP address or * which means all IPs
+listen_addr = *
+listen_port = 6543
+
+;; Unix socket is also used for -R.
+;; On Debian it should be /var/run/postgresql
+unix_socket_dir = /tmp
+;unix_socket_mode = 0777
+;unix_socket_group =
+
+;;;
+;;; TLS settings for accepting clients
+;;;
+
+;; disable, allow, require, verify-ca, verify-full
+;client_tls_sslmode = disable
+
+;; Path to file that contains trusted CA certs
+;client_tls_ca_file =
+
+;; Private key and cert to present to clients.
+;; Required for accepting TLS connections from clients.
+;client_tls_key_file =
+;client_tls_cert_file =
+
+;; fast, normal, secure, legacy,
+;client_tls_ciphers = fast
+
+;; all, secure, tlsv1.0, tlsv1.1, tlsv1.2, tlsv1.3
+;client_tls_protocols = secure
+
+;; none, auto, legacy
+;client_tls_dheparams = auto
+
+;; none, auto,
+;client_tls_ecdhcurve = auto
+
+;;;
+;;; TLS settings for connecting to backend databases
+;;;
+
+;; disable, allow, require, verify-ca, verify-full
+;server_tls_sslmode = disable
+
+;; Path to file that contains trusted CA certs
+;server_tls_ca_file =
+
+;; Private key and cert to present to backend.
+;; Needed only if the backend server requires a client cert.
+;server_tls_key_file =
+;server_tls_cert_file =
+
+;; all, secure, tlsv1.0, tlsv1.1, tlsv1.2, tlsv1.3
+;server_tls_protocols = secure
+
+;; fast, normal, secure, legacy,
+;server_tls_ciphers = fast
+
+;;;
+;;; Authentication settings
+;;;
+
+;; any, trust, plain, md5, cert, hba, pam
+auth_type = scram-sha-256
+auth_file = /etc/pgbouncer/userlist.txt
+
+;; Path to HBA-style auth config
+;auth_hba_file =
+
+;; Query to use to fetch password from database.  Result
+;; must have 2 columns - username and password hash.
+auth_query = SELECT * FROM pgbouncer.get_auth($1)
+
+;;;
+;;; Users allowed into database 'pgbouncer'
+;;;
+
+;; comma-separated list of users who are allowed to change settings
+admin_users = pgbouncer
+
+;; comma-separated list of users who are just allowed to use SHOW command
+stats_users = pgbouncer
+
+;;;
+;;; Pooler personality questions
+;;;
+
+;; When server connection is released back to pool:
+;;   session      - after client disconnects (default)
+;;   transaction  - after transaction finishes
+;;   statement    - after statement finishes
+pool_mode = transaction
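+
+;; Editor's note: with pool_mode = transaction, a server connection can be handed
+;; to a different client after every transaction, so session-scoped state (SET
+;; without LOCAL, LISTEN/NOTIFY, session-level advisory locks, named prepared
+;; statements) is not reliably preserved between a client's transactions.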
+
+;; Query for cleaning connection immediately after releasing from
+;; client.  No need to put ROLLBACK here, pgbouncer does not reuse
+;; connections where a transaction is left open.
+;server_reset_query = DISCARD ALL
+
+;; Whether server_reset_query should run in all pooling modes.  If it
+;; is off, server_reset_query is used only for session-pooling.
+;server_reset_query_always = 0
+
+;; Comma-separated list of parameters to ignore when given in startup
+;; packet.  Newer JDBC versions require the extra_float_digits here.
+ignore_startup_parameters = extra_float_digits
+
+;; When taking an idle server into use, this query is run first.
+;server_check_query = select 1
+
+;; If the server was used more recently than this many seconds ago,
+;; skip the check query.  A value of 0 means the check query is always run.
+;server_check_delay = 30
+
+;; Close servers in session pooling mode after a RECONNECT, RELOAD,
+;; etc. when they are idle instead of at the end of the session.
+;server_fast_close = 0
+
+;; Append the client host to the application_name set on the server.
+;application_name_add_host = 0
+
+;; Period for updating aggregated stats.
+;stats_period = 60
+
+;;;
+;;; Connection limits
+;;;
+
+;; Total number of clients that can connect
+;max_client_conn = 100
+
+;; Default pool size.  20 is a good number when transaction pooling
+;; is in use; in session pooling it needs to be the number of
+;; max clients you want to handle at any moment
+default_pool_size = 15
+
+;; Minimum number of server connections to keep in pool.
+;min_pool_size = 0
+
+;; how many additional connections to allow in case of trouble
+;reserve_pool_size = 0
+
+;; If a client needs to wait more than this many seconds, use the reserve
+;; pool.
+;reserve_pool_timeout = 5
+
+;; Maximum number of server connections for a database
+;max_db_connections = 0
+
+;; Maximum number of server connections for a user
+;max_user_connections = 0
+
+;; If off, then server connections are reused in LIFO manner
+;server_round_robin = 0
+
+;;;
+;;; Logging
+;;;
+
+;; Syslog settings
+;syslog = 0
+;syslog_facility = daemon
+;syslog_ident = pgbouncer
+
+;; log if client connects or server connection is made
+;log_connections = 1
+
+;; log if and why connection was closed
+;log_disconnections = 1
+
+;; log error messages pooler sends to clients
+;log_pooler_errors = 1
+
+;; write aggregated stats into log
+;log_stats = 1
+
+;; Logging verbosity.  Same as -v switch on command line.
+;verbose = 0
+
+;;;
+;;; Timeouts
+;;;
+
+;; Close a server connection if it has been connected longer than this.
+;server_lifetime = 3600
+
+;; Close a server connection if it has not been used in this time.
+;; Allows cleaning unnecessary connections from the pool after a peak.
+;server_idle_timeout = 600
+
+;; Cancel the connection attempt if connecting to the server takes longer than this.
+;server_connect_timeout = 15
+
+;; If server login failed (server_connect_timeout or auth failure),
+;; then wait this many seconds.
+;server_login_retry = 15
+
+;; Dangerous.  Server connection is closed if query does not return in
+;; this time.  Should be used to survive network problems, _not_ as
+;; statement_timeout. (default: 0)
+;query_timeout = 0
+
+;; Dangerous.  Client connection is closed if the query is not
+;; assigned to a server in this time.  Should be used to limit the
+;; number of queued queries in case of a database or network
+;; failure. (default: 120)
+;query_wait_timeout = 120
+
+;; Dangerous.  Client connection is closed if no activity in this
+;; time.  Should be used to survive network problems. (default: 0)
+;client_idle_timeout = 0
+
+;; Disconnect clients who have not managed to log in after connecting
+;; in this many seconds.
+;client_login_timeout = 60
+
+;; Clean automatically created database entries (via "*") if they stay
+;; unused in this many seconds.
+;autodb_idle_timeout = 3600
+
+;; Close connections which are in "IDLE in transaction" state longer
+;; than this many seconds.
+;idle_transaction_timeout = 0
+
+;; How long SUSPEND/-R waits for buffer flush before closing the
+;; connection.
+;suspend_timeout = 10
+
+;;;
+;;; Low-level tuning options
+;;;
+
+;; buffer for streaming packets
+;pkt_buf = 4096
+
+;; man 2 listen
+;listen_backlog = 128
+
+;; Max number of pkt_buf buffers to process in one event loop.
+;sbuf_loopcnt = 5
+
+;; Maximum PostgreSQL protocol packet size.
+;max_packet_size = 2147483647
+
+;; Set SO_REUSEPORT socket option
+;so_reuseport = 0
+
+;; networking options, for info: man 7 tcp
+
+;; Linux: Notify program about new connection only if there is also
+;; data received.  (Seconds to wait.)  On Linux the default is 45, on
+;; other OSes 0.
+;tcp_defer_accept = 0
+
+;; In-kernel buffer size (Linux default: 4096)
+;tcp_socket_buffer = 0
+
+;; whether tcp keepalive should be turned on (0/1)
+;tcp_keepalive = 1
+
+;; The following options are Linux-specific.  They also require
+;; tcp_keepalive=1.
+
+;; Count of keepalive packets
+;tcp_keepcnt = 0
+
+;; How long the connection can be idle before sending keepalive
+;; packets
+;tcp_keepidle = 0
+
+;; The time between individual keepalive probes
+;tcp_keepintvl = 0
+
+;; How long transmitted data may remain unacknowledged before the TCP
+;; connection is closed (in milliseconds)
+;tcp_user_timeout = 0
+
+;; DNS lookup caching time
+;dns_max_ttl = 15
+
+;; DNS zone SOA lookup period
+;dns_zone_check_period = 0
+
+;; DNS negative result caching time
+;dns_nxdomain_ttl = 15
+
+;; Custom resolv.conf file, to set custom DNS servers or other options
+;; (default: empty = use OS settings)
+;resolv_conf = /etc/pgbouncer/resolv.conf
+
+;;;
+;;; Random stuff
+;;;
+
+;; Hackish security feature.  Helps against SQL injection: when PQexec
+;; is disabled, multi-statement queries cannot be made.
+;disable_pqexec = 0
+
+;; Config file to use for next RELOAD/SIGHUP
+;; By default contains config file from command line.
+;conffile
+
+;; Windows service name to register as.  job_name is an alias for
+;; service_name, used by some Skytools scripts.
+;service_name = pgbouncer
+;job_name = pgbouncer
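+
+;; Editor's note (assumption about include semantics): the three tealbase-specific
+;; %include files below are read in order, and a key set more than once takes the
+;; value read last, so custom-overrides.ini can override generated-optimizations.ini,
+;; and ssl-config.ini is applied on top of both.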
+
+;; Read additional config from other file
+;%include /etc/pgbouncer/pgbouncer-other.ini
+
+%include /etc/pgbouncer-custom/generated-optimizations.ini
+%include /etc/pgbouncer-custom/custom-overrides.ini
+%include /etc/pgbouncer-custom/ssl-config.ini
diff --git a/ansible/files/pgbouncer_config/pgbouncer.service.j2 b/ansible/files/pgbouncer_config/pgbouncer.service.j2
new file mode 100644
index 0000000..c696255
--- /dev/null
+++ b/ansible/files/pgbouncer_config/pgbouncer.service.j2
@@ -0,0 +1,22 @@
+[Unit]
+Description=connection pooler for PostgreSQL
+Documentation=man:pgbouncer(1)
+Documentation=https://www.pgbouncer.org/
+After=network.target
+{% if tealbase_internal is defined %}
+Requires=database-optimizations.service
+After=database-optimizations.service
+{% endif %}
+
+[Service]
+Type=notify
+User=pgbouncer
+ExecStart=/usr/local/bin/pgbouncer /etc/pgbouncer/pgbouncer.ini
+ExecReload=/bin/kill -HUP $MAINPID
+KillSignal=SIGINT
+LimitNOFILE=65536
+Restart=always
+RestartSec=5
+
+[Install]
+WantedBy=multi-user.target
diff --git a/ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql b/ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql
new file mode 100644
index 0000000..c10ce44
--- /dev/null
+++ b/ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql
@@ -0,0 +1,20 @@
+CREATE USER pgbouncer;
+
+REVOKE ALL PRIVILEGES ON SCHEMA public FROM pgbouncer;
+
+CREATE SCHEMA pgbouncer AUTHORIZATION pgbouncer;
+
+CREATE OR REPLACE FUNCTION pgbouncer.get_auth(p_usename TEXT)
+RETURNS TABLE(username TEXT, password TEXT) AS
+$$
+BEGIN
+    RAISE WARNING 'PgBouncer auth request: %', p_usename;
+
+    RETURN QUERY
+    SELECT usename::TEXT, passwd::TEXT FROM pg_catalog.pg_shadow
+    WHERE usename = p_usename;
+END;
+$$ LANGUAGE plpgsql SECURITY DEFINER;
+
+REVOKE ALL ON FUNCTION pgbouncer.get_auth(p_usename TEXT) FROM PUBLIC;
+GRANT EXECUTE ON FUNCTION pgbouncer.get_auth(p_usename TEXT) TO pgbouncer;
diff --git a/ansible/files/pgbouncer_config/tmpfiles.d-pgbouncer.conf.j2 b/ansible/files/pgbouncer_config/tmpfiles.d-pgbouncer.conf.j2
new file mode 100644
index 0000000..d5d2cd4
--- /dev/null
+++ b/ansible/files/pgbouncer_config/tmpfiles.d-pgbouncer.conf.j2
@@ -0,0 +1,2 @@
+# Directory for PgBouncer sockets, lockfiles and stats tempfiles
+d /run/pgbouncer 2775 pgbouncer postgres - -
\ No newline at end of file
diff --git a/ansible/files/pgsodium_getkey_readonly.sh.j2 b/ansible/files/pgsodium_getkey_readonly.sh.j2
new file mode 100644
index 0000000..e0a7273
--- /dev/null
+++ b/ansible/files/pgsodium_getkey_readonly.sh.j2
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+set -euo pipefail
+
+KEY_FILE=/etc/postgresql-custom/pgsodium_root.key
+
+# On the hosted platform, the root key is generated and managed for each project.
+# If for some reason the key is missing, we want to fail loudly,
+# rather than generating a new one.
+if [[ ! -f "${KEY_FILE}" ]]; then
+  echo "Key file ${KEY_FILE} does not exist." >&2
+  exit 1
+fi
+cat "${KEY_FILE}"
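Editor's note (illustrative): both getkey scripts print the pgsodium root key as a
single line of hex on stdout. The urandom variant in the next file derives a
32-byte key, which should come out as exactly 64 hex characters; that pipeline can
be sanity-checked locally with:

    head -c 32 /dev/urandom | od -A n -t x1 | tr -d ' \n' | wc -c    # prints 64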
-f "${KEY_FILE}" ]]; then + head -c 32 /dev/urandom | od -A n -t x1 | tr -d ' \n' > "${KEY_FILE}" +fi +cat $KEY_FILE diff --git a/ansible/files/postgres_exporter.service.j2 b/ansible/files/postgres_exporter.service.j2 new file mode 100644 index 0000000..2af6a45 --- /dev/null +++ b/ansible/files/postgres_exporter.service.j2 @@ -0,0 +1,15 @@ +[Unit] +Description=Postgres Exporter + +[Service] +Type=simple +ExecStart=/opt/postgres_exporter/postgres_exporter --disable-settings-metrics --extend.query-path="/opt/postgres_exporter/queries.yml" --disable-default-metrics --no-collector.locks --no-collector.replication --no-collector.replication_slot --no-collector.stat_bgwriter --no-collector.stat_database --no-collector.stat_user_tables --no-collector.statio_user_tables --no-collector.wal {% if qemu_mode is defined and qemu_mode %}--no-collector.database {% endif %} + +User=postgres +Group=postgres +Restart=always +RestartSec=3 +Environment="DATA_SOURCE_NAME=host=localhost dbname=postgres sslmode=disable user=tealbase_admin pg_stat_statements.track=none application_name=postgres_exporter" + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/postgres_prestart.sh.j2 b/ansible/files/postgres_prestart.sh.j2 new file mode 100644 index 0000000..3ffe54c --- /dev/null +++ b/ansible/files/postgres_prestart.sh.j2 @@ -0,0 +1,49 @@ +#!/bin/bash + +check_orioledb_enabled() { + local pg_conf="/etc/postgresql/postgresql.conf" + if [ ! -f "$pg_conf" ]; then + return 0 + fi + grep "^shared_preload_libraries" "$pg_conf" | grep -c "orioledb" || return 0 +} + +get_shared_buffers() { + local opt_conf="/etc/postgresql-custom/generated-optimizations.conf" + if [ ! -f "$opt_conf" ]; then + return 0 + fi + grep "^shared_buffers = " "$opt_conf" | cut -d "=" -f2 | tr -d ' ' || return 0 +} + +update_orioledb_buffers() { + local pg_conf="/etc/postgresql/postgresql.conf" + local value="$1" + if grep -q "^orioledb.main_buffers = " "$pg_conf"; then + sed -i "s/^orioledb.main_buffers = .*/orioledb.main_buffers = $value/" "$pg_conf" + else + echo "orioledb.main_buffers = $value" >> "$pg_conf" + fi +} + +main() { + local has_orioledb=$(check_orioledb_enabled) + if [ "$has_orioledb" -lt 1 ]; then + return 0 + fi + local shared_buffers_value=$(get_shared_buffers) + if [ ! 
-z "$shared_buffers_value" ]; then + update_orioledb_buffers "$shared_buffers_value" + fi +} + +# Initial locale setup +if [ $(cat /etc/locale.gen | grep -c en_US.UTF-8) -eq 0 ]; then + echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen +fi + +if [ $(locale -a | grep -c en_US.utf8) -eq 0 ]; then + locale-gen +fi + +main diff --git a/ansible/files/postgresql_config/custom_read_replica.conf.j2 b/ansible/files/postgresql_config/custom_read_replica.conf.j2 new file mode 100644 index 0000000..7d52f92 --- /dev/null +++ b/ansible/files/postgresql_config/custom_read_replica.conf.j2 @@ -0,0 +1,5 @@ +# hot_standby = on +# restore_command = '/usr/bin/admin-mgr wal-fetch %f %p >> /var/log/wal-g/wal-fetch.log 2>&1' +# recovery_target_timeline = 'latest' + +# primary_conninfo = 'host=localhost port=6543 user=replication' diff --git a/ansible/files/postgresql_config/custom_walg.conf.j2 b/ansible/files/postgresql_config/custom_walg.conf.j2 new file mode 100644 index 0000000..7ef7256 --- /dev/null +++ b/ansible/files/postgresql_config/custom_walg.conf.j2 @@ -0,0 +1,21 @@ +# - Archiving - + +#archive_mode = on +#archive_command = '/usr/bin/admin-mgr wal-push %p >> /var/log/wal-g/wal-push.log 2>&1' +#archive_timeout = 120 + + +# - Archive Recovery - + +#restore_command = '/usr/bin/admin-mgr wal-fetch %f %p >> /var/log/wal-g/wal-fetch.log 2>&1' + +# - Recovery Target - + +#recovery_target_lsn = '' +#recovery_target_time = '' +#recovery_target_action = 'promote' +#recovery_target_timeline = 'current' +#recovery_target_inclusive = off + +# - Hot Standby - +hot_standby = off diff --git a/ansible/files/postgresql_config/pg_hba.conf.j2 b/ansible/files/postgresql_config/pg_hba.conf.j2 new file mode 100755 index 0000000..76bd2f0 --- /dev/null +++ b/ansible/files/postgresql_config/pg_hba.conf.j2 @@ -0,0 +1,94 @@ +# PostgreSQL Client Authentication Configuration File +# =================================================== +# +# Refer to the "Client Authentication" section in the PostgreSQL +# documentation for a complete description of this file. A short +# synopsis follows. +# +# This file controls: which hosts are allowed to connect, how clients +# are authenticated, which PostgreSQL user names they can use, which +# databases they can access. Records take one of these forms: +# +# local DATABASE USER METHOD [OPTIONS] +# host DATABASE USER ADDRESS METHOD [OPTIONS] +# hostssl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostgssenc DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnogssenc DATABASE USER ADDRESS METHOD [OPTIONS] +# +# (The uppercase items must be replaced by actual values.) +# +# The first field is the connection type: "local" is a Unix-domain +# socket, "host" is either a plain or SSL-encrypted TCP/IP socket, +# "hostssl" is an SSL-encrypted TCP/IP socket, and "hostnossl" is a +# non-SSL TCP/IP socket. Similarly, "hostgssenc" uses a +# GSSAPI-encrypted TCP/IP socket, while "hostnogssenc" uses a +# non-GSSAPI socket. +# +# DATABASE can be "all", "sameuser", "samerole", "replication", a +# database name, or a comma-separated list thereof. The "all" +# keyword does not match "replication". Access to replication +# must be enabled in a separate record (see example below). +# +# USER can be "all", a user name, a group name prefixed with "+", or a +# comma-separated list thereof. In both the DATABASE and USER fields +# you can also write a file name prefixed with "@" to include names +# from a separate file. +# +# ADDRESS specifies the set of hosts the record matches. 
+# host name, or it is made up of an IP address and a CIDR mask that is
+# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that
+# specifies the number of significant bits in the mask.  A host name
+# that starts with a dot (.) matches a suffix of the actual host name.
+# Alternatively, you can write an IP address and netmask in separate
+# columns to specify the set of hosts.  Instead of a CIDR-address, you
+# can write "samehost" to match any of the server's own IP addresses,
+# or "samenet" to match any address in any subnet that the server is
+# directly connected to.
+#
+# METHOD can be "trust", "reject", "md5", "password", "scram-sha-256",
+# "gss", "sspi", "ident", "peer", "pam", "ldap", "radius" or "cert".
+# Note that "password" sends passwords in clear text; "md5" or
+# "scram-sha-256" are preferred since they send encrypted passwords.
+#
+# OPTIONS are a set of options for the authentication in the format
+# NAME=VALUE.  The available options depend on the different
+# authentication methods -- refer to the "Client Authentication"
+# section in the documentation for a list of which options are
+# available for which authentication methods.
+#
+# Database and user names containing spaces, commas, quotes and other
+# special characters must be quoted.  Quoting one of the keywords
+# "all", "sameuser", "samerole" or "replication" makes the name lose
+# its special character, and just match a database or username with
+# that name.
+#
+# This file is read on server startup and when the server receives a
+# SIGHUP signal.  If you edit the file on a running system, you have to
+# SIGHUP the server for the changes to take effect, run "pg_ctl reload",
+# or execute "SELECT pg_reload_conf()".
+#
+# Put your actual configuration here
+# ----------------------------------
+#
+# If you want to allow non-local connections, you need to add more
+# "host" records.  In that case you will also need to make PostgreSQL
+# listen on a non-local interface via the listen_addresses
+# configuration parameter, or via the -i or -h command line switches.
+
+# TYPE  DATABASE        USER            ADDRESS                 METHOD
+
+# local connections
+local   all             tealbase_admin                          scram-sha-256
+local   all             all                                     peer map=tealbase_map
+host    all             all             127.0.0.1/32            trust
+host    all             all             ::1/128                 trust
+
+# IPv4 external connections
+host    all             all             10.0.0.0/8              scram-sha-256
+host    all             all             172.16.0.0/12           scram-sha-256
+host    all             all             192.168.0.0/16          scram-sha-256
+host    all             all             0.0.0.0/0               scram-sha-256
+
+# IPv6 external connections
+host    all             all             ::0/0                   scram-sha-256
diff --git a/ansible/files/postgresql_config/pg_ident.conf.j2 b/ansible/files/postgresql_config/pg_ident.conf.j2
new file mode 100755
index 0000000..1430bc8
--- /dev/null
+++ b/ansible/files/postgresql_config/pg_ident.conf.j2
@@ -0,0 +1,50 @@
+# PostgreSQL User Name Maps
+# =========================
+#
+# Refer to the PostgreSQL documentation, chapter "Client
+# Authentication" for a complete description.  A short synopsis
+# follows.
+#
+# This file controls PostgreSQL user name mapping.  It maps external
+# user names to their corresponding PostgreSQL user names.  Records
+# are of the form:
+#
+# MAPNAME  SYSTEM-USERNAME  PG-USERNAME
+#
+# (The uppercase quantities must be replaced by actual values.)
+#
+# MAPNAME is the (otherwise freely chosen) map name that was used in
+# pg_hba.conf.  SYSTEM-USERNAME is the detected user name of the
+# client.  PG-USERNAME is the requested PostgreSQL user name.  The
+# existence of a record specifies that SYSTEM-USERNAME may connect as
+# PG-USERNAME.
+#
+# If SYSTEM-USERNAME starts with a slash (/), it will be treated as a
+# regular expression.  Optionally this can contain a capture (a
+# parenthesized subexpression).  The substring matching the capture
+# will be substituted for \1 (backslash-one) if present in
+# PG-USERNAME.
+#
+# Multiple maps may be specified in this file and used by pg_hba.conf.
+#
+# No map names are defined in the default configuration.  If all
+# system user names and PostgreSQL user names are the same, you don't
+# need anything in this file.
+#
+# This file is read on server startup and when the postmaster receives
+# a SIGHUP signal.  If you edit the file on a running system, you have
+# to SIGHUP the postmaster for the changes to take effect.  You can
+# use "pg_ctl reload" to do that.
+
+# Put your actual configuration here
+# ----------------------------------
+
+# MAPNAME       SYSTEM-USERNAME         PG-USERNAME
+tealbase_map    postgres                postgres
+tealbase_map    root                    postgres
+tealbase_map    ubuntu                  postgres
+
+# tealbase-specific users
+tealbase_map    gotrue                  tealbase_auth_admin
+tealbase_map    postgrest               authenticator
+tealbase_map    adminapi                postgres
diff --git a/ansible/files/postgresql_config/postgresql-csvlog.conf b/ansible/files/postgresql_config/postgresql-csvlog.conf
new file mode 100644
index 0000000..b8d64da
--- /dev/null
+++ b/ansible/files/postgresql_config/postgresql-csvlog.conf
@@ -0,0 +1,33 @@
+# - Where to Log -
+
+log_destination = 'csvlog'              # Valid values are combinations of
+                                        # stderr, csvlog, syslog, and eventlog,
+                                        # depending on platform.  csvlog
+                                        # requires logging_collector to be on.
+
+# This is used when logging to stderr:
+logging_collector = on                  # Enable capturing of stderr and csvlog
+                                        # into log files.  Required to be on for
+                                        # csvlogs.
+                                        # (change requires restart)
+
+# These are only used if logging_collector is on:
+log_directory = '/var/log/postgresql'   # directory where log files are written,
+                                        # can be absolute or relative to PGDATA
+log_filename = 'postgresql.log'         # log file name pattern,
+                                        # can include strftime() escapes
+log_file_mode = 0640                    # creation mode for log files,
+                                        # begin with 0 to use octal notation
+log_rotation_age = 0                    # Automatic rotation of logfiles will
+                                        # happen after that time.  0 disables.
+log_rotation_size = 0                   # Automatic rotation of logfiles will
+                                        # happen after that much log output.
+                                        # 0 disables.
+#log_truncate_on_rotation = off         # If on, an existing log file with the
+                                        # same name as the new log file will be
+                                        # truncated rather than appended to.
+                                        # But such truncation only occurs on
+                                        # time-driven rotation, not on restarts
+                                        # or size-driven rotation.  Default is
+                                        # off, meaning append to existing files
+                                        # in all cases.
diff --git a/ansible/files/postgresql_config/postgresql-stdout-log.conf b/ansible/files/postgresql_config/postgresql-stdout-log.conf
new file mode 100644
index 0000000..6ae4ff4
--- /dev/null
+++ b/ansible/files/postgresql_config/postgresql-stdout-log.conf
@@ -0,0 +1,4 @@
+logging_collector = off                 # Enable capturing of stderr and csvlog
+                                        # into log files.  Required to be on for
+                                        # csvlogs.
+                                        # (change requires restart)
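Editor's note (illustrative): postgresql-csvlog.conf and postgresql-stdout-log.conf
are alternative logging profiles; postgresql.conf.j2 below includes
/etc/postgresql/logging.conf, which presumably gets populated from one of these two
files by the provisioning playbooks. On a running instance the active mode can be
checked with, e.g.:

    psql -h localhost -U tealbase_admin -d postgres -c 'SHOW logging_collector;'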
diff --git a/ansible/files/postgresql_config/postgresql.conf.j2 b/ansible/files/postgresql_config/postgresql.conf.j2
new file mode 100644
index 0000000..f133be8
--- /dev/null
+++ b/ansible/files/postgresql_config/postgresql.conf.j2
@@ -0,0 +1,778 @@
+# -----------------------------
+# PostgreSQL configuration file
+# -----------------------------
+#
+# This file consists of lines of the form:
+#
+#   name = value
+#
+# (The "=" is optional.)  Whitespace may be used.  Comments are introduced with
+# "#" anywhere on a line.  The complete list of parameter names and allowed
+# values can be found in the PostgreSQL documentation.
+#
+# The commented-out settings shown in this file represent the default values.
+# Re-commenting a setting is NOT sufficient to revert it to the default value;
+# you need to reload the server.
+#
+# This file is read on server startup and when the server receives a SIGHUP
+# signal.  If you edit the file on a running system, you have to SIGHUP the
+# server for the changes to take effect, run "pg_ctl reload", or execute
+# "SELECT pg_reload_conf()".  Some parameters, which are marked below,
+# require a server shutdown and restart to take effect.
+#
+# Any parameter can also be given as a command-line option to the server, e.g.,
+# "postgres -c log_connections=on".  Some parameters can be changed at run time
+# with the "SET" SQL command.
+#
+# Memory units:  B  = bytes            Time units:  us  = microseconds
+#                kB = kilobytes                     ms  = milliseconds
+#                MB = megabytes                     s   = seconds
+#                GB = gigabytes                     min = minutes
+#                TB = terabytes                     h   = hours
+#                                                   d   = days
+
+
+#------------------------------------------------------------------------------
+# FILE LOCATIONS
+#------------------------------------------------------------------------------
+
+# The default values of these variables are driven from the -D command-line
+# option or PGDATA environment variable, represented here as ConfigDir.
+
+data_directory = '/var/lib/postgresql/data'     # use data in another directory
+                                                # (change requires restart)
+hba_file = '/etc/postgresql/pg_hba.conf'        # host-based authentication file
+                                                # (change requires restart)
+ident_file = '/etc/postgresql/pg_ident.conf'    # ident configuration file
+                                                # (change requires restart)
+
+# If external_pid_file is not explicitly set, no extra PID file is written.
+#external_pid_file = '' # write an extra PID file + # (change requires restart) + + +#------------------------------------------------------------------------------ +# CONNECTIONS AND AUTHENTICATION +#------------------------------------------------------------------------------ + +# - Connection Settings - + +listen_addresses = '*' # what IP address(es) to listen on; + # comma-separated list of addresses; + # defaults to 'localhost'; use '*' for all + # (change requires restart) +#port = 5432 # (change requires restart) +#max_connections = 100 # (change requires restart) +#superuser_reserved_connections = 3 # (change requires restart) +#unix_socket_directories = '/tmp' # comma-separated list of directories + # (change requires restart) +#unix_socket_group = '' # (change requires restart) +#unix_socket_permissions = 0777 # begin with 0 to use octal notation + # (change requires restart) +#bonjour = off # advertise server via Bonjour + # (change requires restart) +#bonjour_name = '' # defaults to the computer name + # (change requires restart) + +# - TCP settings - +# see "man tcp" for details + +#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; + # 0 selects the system default +#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; + # 0 selects the system default +#tcp_keepalives_count = 0 # TCP_KEEPCNT; + # 0 selects the system default +#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds; + # 0 selects the system default + +#client_connection_check_interval = 0 # time between checks for client + # disconnection while running queries; + # 0 for never + +# - Authentication - + +authentication_timeout = 1min # 1s-600s +password_encryption = scram-sha-256 # scram-sha-256 or md5 +db_user_namespace = off + +# GSSAPI using Kerberos +#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab' +#krb_caseins_users = off + +# - SSL - + +ssl = off +ssl_ca_file = '' +ssl_cert_file = '' +ssl_crl_file = '' +ssl_crl_dir = '' +ssl_key_file = '' +ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers +ssl_prefer_server_ciphers = on +ssl_ecdh_curve = 'prime256v1' +ssl_min_protocol_version = 'TLSv1.2' +ssl_max_protocol_version = '' +ssl_dh_params_file = '' +ssl_passphrase_command = '' +ssl_passphrase_command_supports_reload = off + + +#------------------------------------------------------------------------------ +# RESOURCE USAGE (except WAL) +#------------------------------------------------------------------------------ + +# - Memory - + +shared_buffers = 128MB # min 128kB + # (change requires restart) +#huge_pages = try # on, off, or try + # (change requires restart) +#huge_page_size = 0 # zero for system default + # (change requires restart) +#temp_buffers = 8MB # min 800kB +#max_prepared_transactions = 0 # zero disables the feature + # (change requires restart) +# Caution: it is not advisable to set max_prepared_transactions nonzero unless +# you actively intend to use prepared transactions. 
+#work_mem = 4MB # min 64kB +#hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem +#maintenance_work_mem = 64MB # min 1MB +#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem +#logical_decoding_work_mem = 64MB # min 64kB +#max_stack_depth = 2MB # min 100kB +#shared_memory_type = mmap # the default is the first option + # supported by the operating system: + # mmap + # sysv + # windows + # (change requires restart) +#dynamic_shared_memory_type = posix # the default is the first option + # supported by the operating system: + # posix + # sysv + # windows + # mmap + # (change requires restart) +#min_dynamic_shared_memory = 0MB # (change requires restart) + +# - Disk - + +#temp_file_limit = -1 # limits per-process temp file space + # in kilobytes, or -1 for no limit + +# - Kernel Resources - + +#max_files_per_process = 1000 # min 64 + # (change requires restart) + +# - Cost-Based Vacuum Delay - + +#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables) +#vacuum_cost_page_hit = 1 # 0-10000 credits +#vacuum_cost_page_miss = 2 # 0-10000 credits +#vacuum_cost_page_dirty = 20 # 0-10000 credits +#vacuum_cost_limit = 200 # 1-10000 credits + +# - Background Writer - + +#bgwriter_delay = 200ms # 10-10000ms between rounds +#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables +#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round +#bgwriter_flush_after = 0 # measured in pages, 0 disables + +# - Asynchronous Behavior - + +#backend_flush_after = 0 # measured in pages, 0 disables +#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching +#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching +#max_worker_processes = 8 # (change requires restart) +#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers +#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers +#max_parallel_workers = 8 # maximum number of max_worker_processes that + # can be used in parallel operations +#parallel_leader_participation = on +#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate + # (change requires restart) + + +#------------------------------------------------------------------------------ +# WRITE-AHEAD LOG +#------------------------------------------------------------------------------ + +# - Settings - + +wal_level = logical # minimal, replica, or logical + # (change requires restart) +#fsync = on # flush data to disk for crash safety + # (turning this off can cause + # unrecoverable data corruption) +#synchronous_commit = on # synchronization level; + # off, local, remote_write, remote_apply, or on +#wal_sync_method = fsync # the default is the first option + # supported by the operating system: + # open_datasync + # fdatasync (default on Linux and FreeBSD) + # fsync + # fsync_writethrough + # open_sync +#full_page_writes = on # recover from partial page writes +#wal_log_hints = off # also do full page writes of non-critical updates + # (change requires restart) +#wal_compression = off # enable compression of full-page writes +#wal_init_zero = on # zero-fill new WAL files +#wal_recycle = on # recycle WAL files +#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers + # (change requires restart) +#wal_writer_delay = 200ms # 1-10000 milliseconds +#wal_writer_flush_after = 1MB # measured in pages, 0 disables +#wal_skip_threshold = 2MB + +#commit_delay = 0 # range 0-100000, in microseconds +#commit_siblings = 5 # range 1-1000 + +# - Checkpoints - + +#checkpoint_timeout = 5min # 
range 30s-1d +checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0 +checkpoint_flush_after = 256kB # measured in pages, 0 disables +#checkpoint_warning = 30s # 0 disables +#max_wal_size = 1GB +#min_wal_size = 80MB + +# - Archiving - + +#archive_mode = off # enables archiving; off, on, or always + # (change requires restart) +#archive_command = '' # command to use to archive a logfile segment + # placeholders: %p = path of file to archive + # %f = file name only + # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' +#archive_timeout = 0 # force a logfile segment switch after this + # number of seconds; 0 disables + +# - Archive Recovery - + +# These are only used in recovery mode. + +#restore_command = '' # command to use to restore an archived logfile segment + # placeholders: %p = path of file to restore + # %f = file name only + # e.g. 'cp /mnt/server/archivedir/%f %p' +#archive_cleanup_command = '' # command to execute at every restartpoint +#recovery_end_command = '' # command to execute at completion of recovery + +# - Recovery Target - + +# Set these only when performing a targeted recovery. + +#recovery_target = '' # 'immediate' to end recovery as soon as a + # consistent state is reached + # (change requires restart) +#recovery_target_name = '' # the named restore point to which recovery will proceed + # (change requires restart) +#recovery_target_time = '' # the time stamp up to which recovery will proceed + # (change requires restart) +#recovery_target_xid = '' # the transaction ID up to which recovery will proceed + # (change requires restart) +#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed + # (change requires restart) +#recovery_target_inclusive = on # Specifies whether to stop: + # just after the specified recovery target (on) + # just before the recovery target (off) + # (change requires restart) +#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID + # (change requires restart) +#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown' + # (change requires restart) + + +#------------------------------------------------------------------------------ +# REPLICATION +#------------------------------------------------------------------------------ + +# - Sending Servers - + +# Set these on the primary and on any standby that will send replication data. + +max_wal_senders = 10 # max number of walsender processes + # (change requires restart) +max_replication_slots = 5 # max number of replication slots + # (change requires restart) +#wal_keep_size = 0 # in megabytes; 0 disables +max_slot_wal_keep_size = 4096 # in megabytes; -1 disables +#wal_sender_timeout = 60s # in milliseconds; 0 disables +#track_commit_timestamp = off # collect timestamp of transaction commit + # (change requires restart) + +# - Primary Server - + +# These settings are ignored on a standby server. + +#synchronous_standby_names = '' # standby servers that provide sync rep + # method to choose sync standbys, number of sync standbys, + # and comma-separated list of application_name + # from standby(s); '*' = all +#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed + +# - Standby Servers - + +# These settings are ignored on a primary server. 
+ +#primary_conninfo = '' # connection string to sending server +#primary_slot_name = '' # replication slot on sending server +#promote_trigger_file = '' # file name whose presence ends recovery +#hot_standby = on # "off" disallows queries during recovery + # (change requires restart) +#max_standby_archive_delay = 30s # max delay before canceling queries + # when reading WAL from archive; + # -1 allows indefinite delay +#max_standby_streaming_delay = 30s # max delay before canceling queries + # when reading streaming WAL; + # -1 allows indefinite delay +#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name + # is not set +#wal_receiver_status_interval = 10s # send replies at least this often + # 0 disables +#hot_standby_feedback = off # send info from standby to prevent + # query conflicts +#wal_receiver_timeout = 60s # time that receiver waits for + # communication from primary + # in milliseconds; 0 disables +#wal_retrieve_retry_interval = 5s # time to wait before retrying to + # retrieve WAL after a failed attempt +#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery + +# - Subscribers - + +# These settings are ignored on a publisher. + +#max_logical_replication_workers = 4 # taken from max_worker_processes + # (change requires restart) +#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers + + +#------------------------------------------------------------------------------ +# QUERY TUNING +#------------------------------------------------------------------------------ + +# - Planner Method Configuration - + +#enable_async_append = on +#enable_bitmapscan = on +#enable_gathermerge = on +#enable_hashagg = on +#enable_hashjoin = on +#enable_incremental_sort = on +#enable_indexscan = on +#enable_indexonlyscan = on +#enable_material = on +#enable_resultcache = on +#enable_mergejoin = on +#enable_nestloop = on +#enable_parallel_append = on +#enable_parallel_hash = on +#enable_partition_pruning = on +#enable_partitionwise_join = off +#enable_partitionwise_aggregate = off +#enable_seqscan = on +#enable_sort = on +#enable_tidscan = on + +# - Planner Cost Constants - + +#seq_page_cost = 1.0 # measured on an arbitrary scale +#random_page_cost = 4.0 # same scale as above +#cpu_tuple_cost = 0.01 # same scale as above +#cpu_index_tuple_cost = 0.005 # same scale as above +#cpu_operator_cost = 0.0025 # same scale as above +#parallel_setup_cost = 1000.0 # same scale as above +#parallel_tuple_cost = 0.1 # same scale as above +#min_parallel_table_scan_size = 8MB +#min_parallel_index_scan_size = 512kB +effective_cache_size = 128MB + +#jit_above_cost = 100000 # perform JIT compilation if available + # and query more expensive than this; + # -1 disables +#jit_inline_above_cost = 500000 # inline small functions if query is + # more expensive than this; -1 disables +#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if + # query is more expensive than this; + # -1 disables + +# - Genetic Query Optimizer - + +#geqo = on +#geqo_threshold = 12 +#geqo_effort = 5 # range 1-10 +#geqo_pool_size = 0 # selects default based on effort +#geqo_generations = 0 # selects default based on effort +#geqo_selection_bias = 2.0 # range 1.5-2.0 +#geqo_seed = 0.0 # range 0.0-1.0 + +# - Other Planner Options - + +#default_statistics_target = 100 # range 1-10000 +#constraint_exclusion = partition # on, off, or partition +#cursor_tuple_fraction = 0.1 # range 0.0-1.0 +#from_collapse_limit = 8 +#jit = on # allow JIT compilation 
+#join_collapse_limit = 8 # 1 disables collapsing of explicit + # JOIN clauses +#plan_cache_mode = auto # auto, force_generic_plan or + # force_custom_plan + + +#------------------------------------------------------------------------------ +# REPORTING AND LOGGING +#------------------------------------------------------------------------------ + +include = '/etc/postgresql/logging.conf' + +# These are relevant when logging to syslog: +#syslog_facility = 'LOCAL0' +#syslog_ident = 'postgres' +#syslog_sequence_numbers = on +#syslog_split_messages = on + +# This is only relevant when logging to eventlog (Windows): +# (change requires restart) +#event_source = 'PostgreSQL' + +# - When to Log - + +#log_min_messages = warning # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic + +#log_min_error_statement = error # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic (effectively off) + +#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements + # and their durations, > 0 logs only + # statements running at least this number + # of milliseconds + +#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements + # and their durations, > 0 logs only a sample of + # statements running at least this number + # of milliseconds; + # sample fraction is determined by log_statement_sample_rate + +#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding + # log_min_duration_sample to be logged; + # 1.0 logs all such statements, 0.0 never logs + + +#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements + # are logged regardless of their duration; 1.0 logs all + # statements from all transactions, 0.0 never logs + +# - What to Log - + +#debug_print_parse = off +#debug_print_rewritten = off +#debug_print_plan = off +#debug_pretty_print = on +#log_autovacuum_min_duration = -1 # log autovacuum activity; + # -1 disables, 0 logs all actions and + # their durations, > 0 logs only + # actions running at least this number + # of milliseconds. +#log_checkpoints = off +#log_connections = off +#log_disconnections = off +#log_duration = off +#log_error_verbosity = default # terse, default, or verbose messages +#log_hostname = off +log_line_prefix = '%h %m [%p] %q%u@%d ' # special values: + # %a = application name + # %u = user name + # %d = database name + # %r = remote host and port + # %h = remote host + # %b = backend type + # %p = process ID + # %P = process ID of parallel group leader + # %t = timestamp without milliseconds + # %m = timestamp with milliseconds + # %n = timestamp with milliseconds (as a Unix epoch) + # %Q = query ID (0 if none or not computed) + # %i = command tag + # %e = SQL state + # %c = session ID + # %l = session line number + # %s = session start timestamp + # %v = virtual transaction ID + # %x = transaction ID (0 if none) + # %q = stop here in non-session + # processes + # %% = '%' + # e.g. 
'<%u%%%d> ' +#log_lock_waits = off # log lock waits >= deadlock_timeout +#log_recovery_conflict_waits = off # log standby recovery conflict waits + # >= deadlock_timeout +#log_parameter_max_length = -1 # when logging statements, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +#log_parameter_max_length_on_error = 0 # when logging an error, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +log_statement = 'ddl' # none, ddl, mod, all +#log_replication_commands = off +#log_temp_files = -1 # log temporary files equal or larger + # than the specified size in kilobytes; + # -1 disables, 0 logs all temp files +log_timezone = 'UTC' + +#------------------------------------------------------------------------------ +# PROCESS TITLE +#------------------------------------------------------------------------------ + +cluster_name = 'main' # added to process titles if nonempty + # (change requires restart) +#update_process_title = on + + +#------------------------------------------------------------------------------ +# STATISTICS +#------------------------------------------------------------------------------ + +# - Query and Index Statistics Collector - + +#track_activities = on +#track_activity_query_size = 1024 # (change requires restart) +#track_counts = on +#track_io_timing = off +#track_wal_io_timing = off +#track_functions = none # none, pl, all +#stats_temp_directory = 'pg_stat_tmp' + + +# - Monitoring - + +#compute_query_id = auto +#log_statement_stats = off +#log_parser_stats = off +#log_planner_stats = off +#log_executor_stats = off + + +#------------------------------------------------------------------------------ +# AUTOVACUUM +#------------------------------------------------------------------------------ + +#autovacuum = on # Enable autovacuum subprocess? 'on' + # requires track_counts to also be on. 
+#autovacuum_max_workers = 3 # max number of autovacuum subprocesses + # (change requires restart) +#autovacuum_naptime = 1min # time between autovacuum runs +#autovacuum_vacuum_threshold = 50 # min number of row updates before + # vacuum +#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts + # before vacuum; -1 disables insert + # vacuums +#autovacuum_analyze_threshold = 50 # min number of row updates before + # analyze +#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum +#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table + # size before insert vacuum +#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze +#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum + # (change requires restart) +#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age + # before forced vacuum + # (change requires restart) +#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for + # autovacuum, in milliseconds; + # -1 means use vacuum_cost_delay +#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for + # autovacuum, -1 means use + # vacuum_cost_limit + + +#------------------------------------------------------------------------------ +# CLIENT CONNECTION DEFAULTS +#------------------------------------------------------------------------------ + +# - Statement Behavior - + +#client_min_messages = notice # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # log + # notice + # warning + # error +#search_path = '"$user", public' # schema names +row_security = on +#default_table_access_method = 'heap' +#default_tablespace = '' # a tablespace name, '' uses the default +#default_toast_compression = 'pglz' # 'pglz' or 'lz4' +#temp_tablespaces = '' # a list of tablespace names, '' uses + # only default tablespace +#check_function_bodies = on +#default_transaction_isolation = 'read committed' +#default_transaction_read_only = off +#default_transaction_deferrable = off +#session_replication_role = 'origin' +#statement_timeout = 0 # in milliseconds, 0 is disabled +#lock_timeout = 0 # in milliseconds, 0 is disabled +#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled +#idle_session_timeout = 0 # in milliseconds, 0 is disabled +#vacuum_freeze_table_age = 150000000 +#vacuum_freeze_min_age = 50000000 +#vacuum_failsafe_age = 1600000000 +#vacuum_multixact_freeze_table_age = 150000000 +#vacuum_multixact_freeze_min_age = 5000000 +#vacuum_multixact_failsafe_age = 1600000000 +#bytea_output = 'hex' # hex, escape +#xmlbinary = 'base64' +#xmloption = 'content' +#gin_pending_list_limit = 4MB + +# - Locale and Formatting - + +#datestyle = 'iso, mdy' +#intervalstyle = 'postgres' +timezone = 'UTC' +#timezone_abbreviations = 'Default' # Select the set of available time zone + # abbreviations. Currently, there are + # Default + # Australia (historical usage) + # India + # You can create your own file in + # share/timezonesets/. +extra_float_digits = 0 # min -15, max 3; any value >0 actually + # selects precise output mode +#client_encoding = sql_ascii # actually, defaults to database + # encoding + +# These settings are initialized by initdb, but they can be changed. 
+lc_messages = 'en_US.UTF-8' # locale for system error message + # strings +lc_monetary = 'en_US.UTF-8' # locale for monetary formatting +lc_numeric = 'en_US.UTF-8' # locale for number formatting +lc_time = 'en_US.UTF-8' # locale for time formatting + +# default configuration for text search +default_text_search_config = 'pg_catalog.english' + +# - Shared Library Preloading - + +#local_preload_libraries = '' +#session_preload_libraries = '' + +shared_preload_libraries = 'pg_stat_statements, pgaudit, plpgsql, plpgsql_check, pg_cron, pg_net, pgsodium, timescaledb, auto_explain, pg_tle, plan_filter, tealbase_vault' # (change requires restart) +jit_provider = 'llvmjit' # JIT library to use + +# - Other Defaults - + +#dynamic_library_path = '$libdir' +#gin_fuzzy_search_limit = 0 + +#------------------------------------------------------------------------------ +# LOCK MANAGEMENT +#------------------------------------------------------------------------------ + +#deadlock_timeout = 1s +#max_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_relation = -2 # negative values mean + # (max_pred_locks_per_transaction + # / -max_pred_locks_per_relation) - 1 +#max_pred_locks_per_page = 2 # min 0 + + +#------------------------------------------------------------------------------ +# VERSION AND PLATFORM COMPATIBILITY +#------------------------------------------------------------------------------ + +# - Previous PostgreSQL Versions - + +#array_nulls = on +#backslash_quote = safe_encoding # on, off, or safe_encoding +#escape_string_warning = on +#lo_compat_privileges = off +#quote_all_identifiers = off +#standard_conforming_strings = on +#synchronize_seqscans = on + +# - Other Platforms and Clients - + +#transform_null_equals = off + + +#------------------------------------------------------------------------------ +# ERROR HANDLING +#------------------------------------------------------------------------------ + +#exit_on_error = off # terminate session on any error? +#restart_after_crash = on # reinitialize after backend crash? +#data_sync_retry = off # retry or panic on failure to fsync + # data? + # (change requires restart) +#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+) + + +#------------------------------------------------------------------------------ +# CONFIG FILE INCLUDES +#------------------------------------------------------------------------------ + +# These options allow settings to be loaded from files other than the +# default postgresql.conf. Note that these are directives, not variable +# assignments, so they can usefully be given more than once. + +#include_dir = '...' # include files ending in '.conf' from + # a directory, e.g., 'conf.d' +#include_if_exists = '...' # include file only if it exists +#include = '...' 
# include file + +# Automatically generated optimizations +#include = '/etc/postgresql-custom/generated-optimizations.conf' +# User-supplied custom parameters, override any automatically generated ones +#include = '/etc/postgresql-custom/custom-overrides.conf' + +# WAL-G specific configurations +#include = '/etc/postgresql-custom/wal-g.conf' + +# read replica specific configurations +include = '/etc/postgresql-custom/read-replica.conf' + +# supautils specific configurations +#include = '/etc/postgresql-custom/supautils.conf' + +#------------------------------------------------------------------------------ +# CUSTOMIZED OPTIONS +#------------------------------------------------------------------------------ + +# Add settings for extensions here +auto_explain.log_min_duration = 10s +cron.database_name = 'postgres' diff --git a/ansible/files/postgresql_config/postgresql.service.j2 b/ansible/files/postgresql_config/postgresql.service.j2 new file mode 100644 index 0000000..c09d38b --- /dev/null +++ b/ansible/files/postgresql_config/postgresql.service.j2 @@ -0,0 +1,28 @@ +[Unit] +Description=PostgreSQL database server +Documentation=man:postgres(1) +{% if tealbase_internal is defined %} +Requires=database-optimizations.service +After=database-optimizations.service +{% endif %} + +[Service] +Type=notify +User=postgres +ExecStart=/usr/lib/postgresql/bin/postgres -D /etc/postgresql +ExecStartPre=+/usr/local/bin/postgres_prestart.sh +ExecReload=/bin/kill -HUP $MAINPID +KillMode=mixed +KillSignal=SIGINT +TimeoutStopSec=90 +TimeoutStartSec=86400 +Restart=always +RestartSec=5 +OOMScoreAdjust=-1000 +EnvironmentFile=-/etc/environment.d/postgresql.env +LimitNOFILE=16384 +{% if tealbase_internal is defined %} +ReadOnlyPaths=/etc +{% endif %} +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/postgresql_config/supautils.conf.j2 b/ansible/files/postgresql_config/supautils.conf.j2 new file mode 100644 index 0000000..9e54450 --- /dev/null +++ b/ansible/files/postgresql_config/supautils.conf.j2 @@ -0,0 +1,15 @@ +supautils.extensions_parameter_overrides = '{"pg_cron":{"schema":"pg_catalog"}}' +supautils.policy_grants = '{"postgres":["auth.audit_log_entries","auth.identities","auth.mfa_factors","auth.refresh_tokens","auth.sessions","auth.users","realtime.messages","storage.buckets","storage.migrations","storage.objects","storage.s3_multipart_uploads","storage.s3_multipart_uploads_parts"]}' +supautils.drop_trigger_grants = '{"postgres":["auth.audit_log_entries","auth.identities","auth.mfa_factors","auth.refresh_tokens","auth.sessions","auth.users","realtime.messages","storage.buckets","storage.migrations","storage.objects","storage.s3_multipart_uploads","storage.s3_multipart_uploads_parts"]}' +# full list: address_standardizer, address_standardizer_data_us, adminpack, amcheck, autoinc, bloom, btree_gin, btree_gist, citext, cube, dblink, dict_int, dict_xsyn, earthdistance, file_fdw, fuzzystrmatch, hstore, http, hypopg, index_advisor, insert_username, intagg, intarray, isn, lo, ltree, moddatetime, old_snapshot, orioledb, pageinspect, pg_buffercache, pg_cron, pg_freespacemap, pg_graphql, pg_hashids, pg_jsonschema, pg_net, pg_prewarm, pg_repack, pg_stat_monitor, pg_stat_statements, pg_surgery, pg_tle, pg_trgm, pg_visibility, pg_walinspect, pgaudit, pgcrypto, pgjwt, pgmq, pgroonga, pgroonga_database, pgrouting, pgrowlocks, pgsodium, pgstattuple, pgtap, plcoffee, pljava, plls, plpgsql, plpgsql_check, plv8, postgis, postgis_raster, postgis_sfcgal, postgis_tiger_geocoder, postgis_topology, postgres_fdw, 
refint, rum, seg, sslinfo, tealbase_vault, supautils, tablefunc, tcn, timescaledb, tsm_system_rows, tsm_system_time, unaccent, uuid-ossp, vector, wrappers, xml2 +# omitted because may be unsafe: adminpack, amcheck, file_fdw, lo, old_snapshot, pageinspect, pg_freespacemap, pg_surgery, pg_visibility +# omitted because deprecated: intagg, xml2 +# omitted because doesn't require superuser: pgmq +# omitted because protected: plpgsql +supautils.privileged_extensions = 'address_standardizer, address_standardizer_data_us, autoinc, bloom, btree_gin, btree_gist, citext, cube, dblink, dict_int, dict_xsyn, earthdistance, fuzzystrmatch, hstore, http, hypopg, index_advisor, insert_username, intarray, isn, ltree, moddatetime, orioledb, pg_buffercache, pg_cron, pg_graphql, pg_hashids, pg_jsonschema, pg_net, pg_prewarm, pg_repack, pg_stat_monitor, pg_stat_statements, pg_tle, pg_trgm, pg_walinspect, pgaudit, pgcrypto, pgjwt, pgroonga, pgroonga_database, pgrouting, pgrowlocks, pgsodium, pgstattuple, pgtap, plcoffee, pljava, plls, plpgsql_check, plv8, postgis, postgis_raster, postgis_sfcgal, postgis_tiger_geocoder, postgis_topology, postgres_fdw, refint, rum, seg, sslinfo, tealbase_vault, supautils, tablefunc, tcn, timescaledb, tsm_system_rows, tsm_system_time, unaccent, uuid-ossp, vector, wrappers' +supautils.extension_custom_scripts_path = '/etc/postgresql-custom/extension-custom-scripts' +supautils.privileged_extensions_superuser = 'tealbase_admin' +supautils.privileged_role = 'postgres' +supautils.privileged_role_allowed_configs = 'auto_explain.*, log_lock_waits, log_min_duration_statement, log_min_messages, log_replication_commands, log_statement, log_temp_files, pg_net.batch_size, pg_net.ttl, pg_stat_statements.*, pgaudit.log, pgaudit.log_catalog, pgaudit.log_client, pgaudit.log_level, pgaudit.log_relation, pgaudit.log_rows, pgaudit.log_statement, pgaudit.log_statement_once, pgaudit.role, pgrst.*, plan_filter.*, safeupdate.enabled, session_replication_role, track_io_timing, wal_compression' +supautils.reserved_memberships = 'pg_read_server_files, pg_write_server_files, pg_execute_server_program, tealbase_admin, tealbase_auth_admin, tealbase_storage_admin, tealbase_read_only_user, tealbase_realtime_admin, tealbase_replication_admin, dashboard_user, pgbouncer, authenticator' +supautils.reserved_roles = 'tealbase_admin, tealbase_auth_admin, tealbase_storage_admin, tealbase_read_only_user, tealbase_realtime_admin, tealbase_replication_admin, dashboard_user, pgbouncer, service_role*, authenticator*, authenticated*, anon*' diff --git a/ansible/files/postgresql_config/tmpfiles.postgresql.conf b/ansible/files/postgresql_config/tmpfiles.postgresql.conf new file mode 100644 index 0000000..b5ea549 --- /dev/null +++ b/ansible/files/postgresql_config/tmpfiles.postgresql.conf @@ -0,0 +1,5 @@ +# unchanged from upstream package +d /run/postgresql 2775 postgres postgres - - +# Log directory - ensure that our logging setup gets preserved +# and that vector can keep writing to a file here as well +d /var/log/postgresql 1775 postgres postgres - - diff --git a/ansible/files/postgresql_extension_custom_scripts/before-create.sql b/ansible/files/postgresql_extension_custom_scripts/before-create.sql new file mode 100644 index 0000000..f2f2386 --- /dev/null +++ b/ansible/files/postgresql_extension_custom_scripts/before-create.sql @@ -0,0 +1,84 @@ +-- If the following are true: +-- * the extension to be created is a TLE +-- * the extension is created with `cascade` +-- +-- then we pre-`create` all nested extension dependencies 
which are part of +-- `supautils.privileged_extensions`. This is because supautils can't intercept +-- the extension creation for dependencies - it can only intercept the `create +-- extension` statement. +do $$ +declare + _extname text := @extname@; + _extschema text := @extschema@; + _extversion text := @extversion@; + _extcascade bool := @extcascade@; + _r record; +begin + if not _extcascade then + return; + end if; + + if not exists (select from pg_extension where extname = 'pg_tle') then + return; + end if; + + if not exists (select from pgtle.available_extensions() where name = _extname) then + return; + end if; + + if _extversion is null then + select default_version + from pgtle.available_extensions() + where name = _extname + into _extversion; + end if; + + if _extschema is null then + select schema + from pgtle.available_extension_versions() + where name = _extname and version = _extversion + into _extschema; + end if; + + for _r in ( + with recursive available_extensions(name, default_version) as ( + select name, default_version + from pg_available_extensions + union + select name, default_version + from pgtle.available_extensions() + ) + , available_extension_versions(name, version, requires) as ( + select name, version, requires + from pg_available_extension_versions + union + select name, version, requires + from pgtle.available_extension_versions() + ) + , all_dependencies(name, dependency) as ( + select e.name, unnest(ev.requires) as dependency + from available_extensions e + join available_extension_versions ev on ev.name = e.name and ev.version = e.default_version + ) + , dependencies(name) AS ( + select unnest(requires) + from available_extension_versions + where name = _extname and version = _extversion + union + select all_dependencies.dependency + from all_dependencies + join dependencies d on d.name = all_dependencies.name + ) + select name + from dependencies + intersect + select name + from regexp_split_to_table(current_setting('supautils.privileged_extensions', true), '\s*,\s*') as t(name) + ) loop + if _extschema is null then + execute(format('create extension if not exists %I cascade', _r.name)); + else + execute(format('create extension if not exists %I schema %I cascade', _r.name, _extschema)); + end if; + end loop; +end $$; diff --git a/ansible/files/postgresql_extension_custom_scripts/dblink/after-create.sql b/ansible/files/postgresql_extension_custom_scripts/dblink/after-create.sql new file mode 100644 index 0000000..c2e5269 --- /dev/null +++ b/ansible/files/postgresql_extension_custom_scripts/dblink/after-create.sql @@ -0,0 +1,14 @@ +do $$ +declare + r record; +begin + for r in (select oid, (aclexplode(proacl)).grantee from pg_proc where proname = 'dblink_connect_u') loop + continue when r.grantee = 'tealbase_admin'::regrole; + execute( + format( + 'revoke all on function %s(%s) from %s;', r.oid::regproc, pg_get_function_identity_arguments(r.oid), r.grantee::regrole + ) + ); + end loop; +end +$$; diff --git a/ansible/files/postgresql_extension_custom_scripts/pg_cron/after-create.sql b/ansible/files/postgresql_extension_custom_scripts/pg_cron/after-create.sql new file mode 100644 index 0000000..e89ca56 --- /dev/null +++ b/ansible/files/postgresql_extension_custom_scripts/pg_cron/after-create.sql @@ -0,0 +1,13 @@ +grant usage on schema cron to postgres with grant option; +grant all on all functions in schema cron to postgres with grant option; + +alter default privileges for user tealbase_admin in schema cron grant all + on sequences to postgres with grant 
option; +alter default privileges for user tealbase_admin in schema cron grant all + on tables to postgres with grant option; +alter default privileges for user tealbase_admin in schema cron grant all + on functions to postgres with grant option; + +grant all privileges on all tables in schema cron to postgres with grant option; +revoke all on table cron.job from postgres; +grant select on table cron.job to postgres with grant option; diff --git a/ansible/files/postgresql_extension_custom_scripts/pg_repack/after-create.sql b/ansible/files/postgresql_extension_custom_scripts/pg_repack/after-create.sql new file mode 100644 index 0000000..b0ec306 --- /dev/null +++ b/ansible/files/postgresql_extension_custom_scripts/pg_repack/after-create.sql @@ -0,0 +1,4 @@ +grant all on all tables in schema repack to postgres; +grant all on schema repack to postgres; +alter default privileges in schema repack grant all on tables to postgres; +alter default privileges in schema repack grant all on sequences to postgres; diff --git a/ansible/files/postgresql_extension_custom_scripts/pg_tle/after-create.sql b/ansible/files/postgresql_extension_custom_scripts/pg_tle/after-create.sql new file mode 100644 index 0000000..eb8aeff --- /dev/null +++ b/ansible/files/postgresql_extension_custom_scripts/pg_tle/after-create.sql @@ -0,0 +1 @@ +grant pgtle_admin to postgres; diff --git a/ansible/files/postgresql_extension_custom_scripts/pgmq/after-create.sql b/ansible/files/postgresql_extension_custom_scripts/pgmq/after-create.sql new file mode 100644 index 0000000..050e07d --- /dev/null +++ b/ansible/files/postgresql_extension_custom_scripts/pgmq/after-create.sql @@ -0,0 +1,173 @@ +do $$ +declare + extoid oid := (select oid from pg_extension where extname = 'pgmq'); + r record; + cls pg_class%rowtype; +begin + + set local search_path = ''; + +/* + Override the pgmq.drop_queue to check if relevant tables are owned + by the pgmq extension before attempting to run + `alter extension pgmq drop table ...` + this is necessary because, to enable nightly logical backups to include user queues, + we automatically detach them from pgmq. + + this update is backwards compatible with version 1.4.4 but should be removed once we're on + physical backups everywhere +*/ +-- Detach and delete the official function +alter extension pgmq drop function pgmq.drop_queue; +drop function pgmq.drop_queue; + +-- Create and reattach the patched function +CREATE FUNCTION pgmq.drop_queue(queue_name TEXT) +RETURNS BOOLEAN AS $func$ +DECLARE + qtable TEXT := pgmq.format_table_name(queue_name, 'q'); + qtable_seq TEXT := qtable || '_msg_id_seq'; + fq_qtable TEXT := 'pgmq.' || qtable; + atable TEXT := pgmq.format_table_name(queue_name, 'a'); + fq_atable TEXT := 'pgmq.' 
|| atable; + partitioned BOOLEAN; +BEGIN + EXECUTE FORMAT( + $QUERY$ + SELECT is_partitioned FROM pgmq.meta WHERE queue_name = %L + $QUERY$, + queue_name + ) INTO partitioned; + + -- NEW CONDITIONAL CHECK + if exists ( + select 1 + from pg_class c + join pg_depend d on c.oid = d.objid + join pg_extension e on d.refobjid = e.oid + where c.relname = qtable and e.extname = 'pgmq' + ) then + + EXECUTE FORMAT( + $QUERY$ + ALTER EXTENSION pgmq DROP TABLE pgmq.%I + $QUERY$, + qtable + ); + + end if; + + -- NEW CONDITIONAL CHECK + if exists ( + select 1 + from pg_class c + join pg_depend d on c.oid = d.objid + join pg_extension e on d.refobjid = e.oid + where c.relname = qtable_seq and e.extname = 'pgmq' + ) then + EXECUTE FORMAT( + $QUERY$ + ALTER EXTENSION pgmq DROP SEQUENCE pgmq.%I + $QUERY$, + qtable_seq + ); + + end if; + + -- NEW CONDITIONAL CHECK + if exists ( + select 1 + from pg_class c + join pg_depend d on c.oid = d.objid + join pg_extension e on d.refobjid = e.oid + where c.relname = atable and e.extname = 'pgmq' + ) then + + EXECUTE FORMAT( + $QUERY$ + ALTER EXTENSION pgmq DROP TABLE pgmq.%I + $QUERY$, + atable + ); + + end if; + + -- NO CHANGES PAST THIS POINT + + EXECUTE FORMAT( + $QUERY$ + DROP TABLE IF EXISTS pgmq.%I + $QUERY$, + qtable + ); + + EXECUTE FORMAT( + $QUERY$ + DROP TABLE IF EXISTS pgmq.%I + $QUERY$, + atable + ); + + IF EXISTS ( + SELECT 1 + FROM information_schema.tables + WHERE table_name = 'meta' and table_schema = 'pgmq' + ) THEN + EXECUTE FORMAT( + $QUERY$ + DELETE FROM pgmq.meta WHERE queue_name = %L + $QUERY$, + queue_name + ); + END IF; + + IF partitioned THEN + EXECUTE FORMAT( + $QUERY$ + DELETE FROM %I.part_config where parent_table in (%L, %L) + $QUERY$, + pgmq._get_pg_partman_schema(), fq_qtable, fq_atable + ); + END IF; + + RETURN TRUE; +END; +$func$ LANGUAGE plpgsql; + +alter extension pgmq add function pgmq.drop_queue; + + + update pg_extension set extowner = 'postgres'::regrole where extname = 'pgmq'; + + for r in (select * from pg_depend where refobjid = extoid) loop + + + if r.classid = 'pg_type'::regclass then + + -- store the type's relkind + select * into cls from pg_class c where c.reltype = r.objid; + + if r.objid::regtype::text like '%[]' then + -- do nothing (skipping array type) + + elsif cls.relkind in ('r', 'p', 'f', 'm') then + -- table-like objects (regular table, partitioned, foreign, materialized view) + execute format('alter table pgmq.%I owner to postgres;', cls.relname); + + else + execute(format('alter type %s owner to postgres;', r.objid::regtype)); + + end if; + + elsif r.classid = 'pg_proc'::regclass then + execute(format('alter function %s(%s) owner to postgres;', r.objid::regproc, pg_get_function_identity_arguments(r.objid))); + + elsif r.classid = 'pg_class'::regclass then + execute(format('alter table %s owner to postgres;', r.objid::regclass)); + + else + raise exception 'error on pgmq after-create script: unexpected object type %', r.classid; + + end if; + end loop; +end $$; diff --git a/ansible/files/postgresql_extension_custom_scripts/pgsodium/after-create.sql b/ansible/files/postgresql_extension_custom_scripts/pgsodium/after-create.sql new file mode 100644 index 0000000..38242ab --- /dev/null +++ b/ansible/files/postgresql_extension_custom_scripts/pgsodium/after-create.sql @@ -0,0 +1,26 @@ +grant execute on function pgsodium.crypto_aead_det_decrypt(bytea, bytea, uuid, bytea) to service_role; +grant execute on function pgsodium.crypto_aead_det_encrypt(bytea, bytea, uuid, bytea) to service_role; +grant execute on function 
pgsodium.crypto_aead_det_keygen to service_role; + +CREATE OR REPLACE FUNCTION pgsodium.mask_role(masked_role regrole, source_name text, view_name text) +RETURNS void +LANGUAGE plpgsql +SECURITY DEFINER +SET search_path TO '' +AS $function$ +BEGIN + EXECUTE format( + 'GRANT SELECT ON pgsodium.key TO %s', + masked_role); + + EXECUTE format( + 'GRANT pgsodium_keyiduser, pgsodium_keyholder TO %s', + masked_role); + + EXECUTE format( + 'GRANT ALL ON %I TO %s', + view_name, + masked_role); + RETURN; +END +$function$; diff --git a/ansible/files/postgresql_extension_custom_scripts/pgsodium/before-create.sql b/ansible/files/postgresql_extension_custom_scripts/pgsodium/before-create.sql new file mode 100644 index 0000000..fb82a46 --- /dev/null +++ b/ansible/files/postgresql_extension_custom_scripts/pgsodium/before-create.sql @@ -0,0 +1,9 @@ +do $$ +declare + _extversion text := @extversion@; + _r record; +begin + if _extversion is not null and _extversion != '3.1.8' then + raise exception 'only pgsodium 3.1.8 is supported'; + end if; +end $$; diff --git a/ansible/files/postgresql_extension_custom_scripts/postgis_tiger_geocoder/after-create.sql b/ansible/files/postgresql_extension_custom_scripts/postgis_tiger_geocoder/after-create.sql new file mode 100644 index 0000000..0bf02d4 --- /dev/null +++ b/ansible/files/postgresql_extension_custom_scripts/postgis_tiger_geocoder/after-create.sql @@ -0,0 +1,10 @@ +-- These schemas are created by the extension to house all tiger-related functions, and are owned by tealbase_admin +grant usage on schema tiger, tiger_data to postgres with grant option; +-- Give postgres permissions on all existing entities; this also allows postgres to grant access to other roles +grant all on all tables in schema tiger, tiger_data to postgres with grant option; +grant all on all routines in schema tiger, tiger_data to postgres with grant option; +grant all on all sequences in schema tiger, tiger_data to postgres with grant option; +-- Update default privileges so that new entities are also accessible by postgres +alter default privileges in schema tiger, tiger_data grant all on tables to postgres with grant option; +alter default privileges in schema tiger, tiger_data grant all on routines to postgres with grant option; +alter default privileges in schema tiger, tiger_data grant all on sequences to postgres with grant option; diff --git a/ansible/files/postgresql_extension_custom_scripts/postgres_fdw/after-create.sql b/ansible/files/postgresql_extension_custom_scripts/postgres_fdw/after-create.sql new file mode 100644 index 0000000..1e83ee9 --- /dev/null +++ b/ansible/files/postgresql_extension_custom_scripts/postgres_fdw/after-create.sql @@ -0,0 +1,21 @@ +do $$ +declare + is_super boolean; +begin + is_super = ( + select usesuper + from pg_user + where usename = 'postgres' + ); + + -- Need to be superuser to own FDWs, so we temporarily make postgres superuser. 
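+ -- (Per the note above, only superusers may own a foreign-data wrapper; the
+ -- block below briefly promotes postgres when needed. An illustrative
+ -- post-check: select fdwname, fdwowner::regrole from pg_foreign_data_wrapper;
+ -- should then list postgres as the owner of postgres_fdw.)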
+ if not is_super then + alter role postgres superuser; + end if; + + alter foreign data wrapper postgres_fdw owner to postgres; + + if not is_super then + alter role postgres nosuperuser; + end if; +end $$; diff --git a/ansible/files/postgresql_extension_custom_scripts/supabase_vault/after-create.sql b/ansible/files/postgresql_extension_custom_scripts/supabase_vault/after-create.sql new file mode 100644 index 0000000..f5c7284 --- /dev/null +++ b/ansible/files/postgresql_extension_custom_scripts/supabase_vault/after-create.sql @@ -0,0 +1,8 @@ +grant usage on schema vault to postgres with grant option; +grant select, delete, truncate, references on vault.secrets, vault.decrypted_secrets to postgres with grant option; +grant execute on function vault.create_secret, vault.update_secret, vault._crypto_aead_det_decrypt to postgres with grant option; + +-- service_role used to be able to manage secrets in Vault <=0.2.8 because it had privileges to pgsodium functions +grant usage on schema vault to service_role; +grant select, delete on vault.secrets, vault.decrypted_secrets to service_role; +grant execute on function vault.create_secret, vault.update_secret, vault._crypto_aead_det_decrypt to service_role; diff --git a/ansible/files/postgrest-optimizations.service.j2 b/ansible/files/postgrest-optimizations.service.j2 new file mode 100644 index 0000000..c671e0d --- /dev/null +++ b/ansible/files/postgrest-optimizations.service.j2 @@ -0,0 +1,11 @@ +[Unit] +Description=Postgrest optimizations + +[Service] +Type=oneshot +# we don't want failures from this command to cause PG startup to fail +ExecStart=/bin/bash -c "/opt/tealbase-admin-api optimize postgrest --destination-config-file-path /etc/postgrest/generated.conf ; exit 0" +User=postgrest + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/postgrest.service.j2 b/ansible/files/postgrest.service.j2 new file mode 100644 index 0000000..290f077 --- /dev/null +++ b/ansible/files/postgrest.service.j2 @@ -0,0 +1,18 @@ +[Unit] +Description=PostgREST +Requires=postgrest-optimizations.service +After=postgrest-optimizations.service + +[Service] +Type=simple +# We allow the base config (sent from the worker) to override the generated config +ExecStartPre=/etc/postgrest/merge.sh /etc/postgrest/generated.conf /etc/postgrest/base.conf +ExecStart=/opt/postgrest /etc/postgrest/merged.conf +User=postgrest +Slice=services.slice +Restart=always +RestartSec=3 +LimitNOFILE=100000 + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/start-envoy.sh b/ansible/files/start-envoy.sh new file mode 100644 index 0000000..edd6fe0 --- /dev/null +++ b/ansible/files/start-envoy.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +set -eou pipefail + +if [[ $(cat /sys/module/ipv6/parameters/disable) = 1 ]]; then + sed -i -e "s/address: '::'/address: '0.0.0.0'/" -e 's/ipv4_compat: true/ipv4_compat: false/' /etc/envoy/lds.yaml +else + sed -i -e "s/address: '0.0.0.0'/address: '::'/" -e 's/ipv4_compat: false/ipv4_compat: true/' /etc/envoy/lds.yaml +fi + +# Workaround using `tee` to get `/dev/stdout` access logging to work, see: +# https://github.com/envoyproxy/envoy/issues/8297#issuecomment-620659781 +exec /opt/envoy --config-path /etc/envoy/envoy.yaml --restart-epoch "${RESTART_EPOCH}" 2>&1 | tee diff --git a/ansible/files/stat_extension.sql b/ansible/files/stat_extension.sql new file mode 100644 index 0000000..9378340 --- /dev/null +++ b/ansible/files/stat_extension.sql @@ -0,0 +1,2 @@ +CREATE SCHEMA IF NOT exists extensions; +CREATE EXTENSION IF NOT EXISTS 
pg_stat_statements with schema extensions; diff --git a/ansible/files/supabase_admin_agent_config/tealbase-admin-agent.sudoers.conf b/ansible/files/supabase_admin_agent_config/tealbase-admin-agent.sudoers.conf new file mode 100644 index 0000000..ce18a39 --- /dev/null +++ b/ansible/files/supabase_admin_agent_config/tealbase-admin-agent.sudoers.conf @@ -0,0 +1,2 @@ +%tealbase-admin-agent ALL= NOPASSWD: /usr/bin/salt-call +%tealbase-admin-agent ALL= NOPASSWD: /usr/bin/gpg --homedir /etc/salt/gpgkeys --import, /usr/bin/gpg --homedir /etc/salt/gpgkeys --list-secret-keys * diff --git a/ansible/files/supabase_admin_agent_config/tealbase-admin-agent_salt.service b/ansible/files/supabase_admin_agent_config/tealbase-admin-agent_salt.service new file mode 100644 index 0000000..335d231 --- /dev/null +++ b/ansible/files/supabase_admin_agent_config/tealbase-admin-agent_salt.service @@ -0,0 +1,19 @@ +[Unit] +Description=Configuration management via tealbase-admin-agent salt +After=network.target + +[Service] +Type=oneshot +ExecStart=/opt/tealbase-admin-agent/tealbase-admin-agent --config /opt/tealbase-admin-agent/config.yaml salt --apply --store-result +User=tealbase-admin-agent +Group=tealbase-admin-agent +StandardOutput=journal +StandardError=journal +StateDirectory=tealbase-admin-agent +CacheDirectory=tealbase-admin-agent + +# Security hardening +PrivateTmp=true + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/supabase_admin_agent_config/tealbase-admin-agent_salt.timer.j2 b/ansible/files/supabase_admin_agent_config/tealbase-admin-agent_salt.timer.j2 new file mode 100644 index 0000000..cc1cd71 --- /dev/null +++ b/ansible/files/supabase_admin_agent_config/tealbase-admin-agent_salt.timer.j2 @@ -0,0 +1,13 @@ +[Unit] +Description=Run tealbase tealbase-admin-agent salt on a schedule +Requires=tealbase-admin-agent_salt.service + +[Timer] +OnCalendar=*:0/10 +# Random delay up to {{ tealbase_admin_agent_splay }} seconds splay +RandomizedDelaySec={{ tealbase_admin_agent_splay }} +AccuracySec=1s +Persistent=true + +[Install] +WantedBy=timers.target diff --git a/ansible/files/sysstat.sysstat b/ansible/files/sysstat.sysstat new file mode 100644 index 0000000..52b7d07 --- /dev/null +++ b/ansible/files/sysstat.sysstat @@ -0,0 +1,36 @@ +# How long to keep log files (in days). +# Used by sa2(8) script +# If value is greater than 28, then use sadc's option -D to prevent older +# data files from being overwritten. See sadc(8) and sysstat(5) manual pages. +HISTORY=7 + +# Compress (using xz, gzip or bzip2) sa and sar files older than (in days): +COMPRESSAFTER=10 + +# Parameters for the system activity data collector (see sadc(8) manual page) +# which are used for the generation of log files. +# By default contains the `-S DISK' option responsible for generating disk +# statistics. Use `-S XALL' to collect all available statistics. +SADC_OPTIONS="-S DISK" + +# Directory where sa and sar files are saved. The directory must exist. +SA_DIR=/var/log/sysstat + +# Compression program to use. +ZIP="xz" + +# By default sa2 script generates yesterday's summary, since the cron job +# usually runs right after midnight. If you want sa2 to generate the summary +# of the same day (for example when cron job runs at 23:53) set this variable. +#YESTERDAY=no + +# By default sa2 script generates report files (the so-called sarDD files). +# Set this variable to false to disable reports generation. 
+#REPORTS=false + +# The sa1 and sa2 scripts generate system activity data and report files in +# the /var/log/sysstat directory. By default the files are created with umask 0022 +# and are therefore readable for all users. Change this variable to restrict +# the permissions on the files (e.g. use 0027 to adhere to more strict +# security standards). +UMASK=0022 diff --git a/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.service b/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.service new file mode 100644 index 0000000..939edc9 --- /dev/null +++ b/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.service @@ -0,0 +1,12 @@ +[Unit] +Description=Check if systemd-networkd has broken NDisc routes and fix +# Remove Requisite to prevent being killed when restarting networkd +Requisite=systemd-networkd.service +After=systemd-networkd.service + +[Service] +Type=oneshot +# This needs to be root for the service restart to work +User=root +Group=root +ExecStart=/usr/local/bin/systemd-networkd-check-and-fix.sh diff --git a/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.sh b/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.sh new file mode 100644 index 0000000..af00b41 --- /dev/null +++ b/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +# Check for occurrences of an NDisc log error +# NOTE: the --since window must match the cadence of the systemd timer unit. Risk of repeat matches and restart loop +journalctl --no-pager --unit systemd-networkd --since "1 minutes ago" --grep "Could not set NDisc route" >/dev/null +NDISC_ERROR=$? + +if systemctl is-active --quiet systemd-networkd.service && [ "${NDISC_ERROR}" == 0 ]; then + echo "$(date) systemd-networkd running but NDisc routes are broken. Restarting systemd-networkd.service" + /usr/bin/systemctl restart systemd-networkd.service + exit # no need to check further +fi + +# check for routes +ROUTES=$(ip -6 route list) + +if ! echo "${ROUTES}" | grep default >/dev/null || ! echo "${ROUTES}" | grep "::1 dev lo">/dev/null; then + echo "IPv6 routing table messed up. Restarting systemd-networkd.service" + /usr/bin/systemctl restart systemd-networkd.service +fi diff --git a/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.timer b/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.timer new file mode 100644 index 0000000..93c0836 --- /dev/null +++ b/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.timer @@ -0,0 +1,9 @@ +[Unit] +Description=Check if systemd-networkd has broken NDisc routes and fix + +[Timer] +# NOTE: cadence must match that of the journalctl search (--since). Risk of repeat matches and restart loop +OnCalendar=minutely + +[Install] +WantedBy=timers.target diff --git a/ansible/files/systemd-resolved.conf b/ansible/files/systemd-resolved.conf new file mode 100644 index 0000000..9280d88 --- /dev/null +++ b/ansible/files/systemd-resolved.conf @@ -0,0 +1,8 @@ +# the default is RestartSec=0. If the service fails to start because +# of a systemic issue (e.g. rare case when disk is full) it will +# quickly hit the burst limit (default of 5 failures within 10secs) +# and thereafter be placed in a failed state. By increasing the +# restart interval, we avoid that, and ensure that the service will be +# started back up once any underlying issues are resolved. 
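+# (Context for the numbers above: the relevant systemd defaults are
+# DefaultStartLimitIntervalSec=10s and DefaultStartLimitBurst=5. With
+# RestartSec=3, five restart attempts take roughly 15 seconds, so the unit
+# stays below that failure rate and keeps being retried.)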
+[Service] +RestartSec=3 diff --git a/ansible/files/tealbase_facts.ini b/ansible/files/tealbase_facts.ini new file mode 100644 index 0000000..44e01b4 --- /dev/null +++ b/ansible/files/tealbase_facts.ini @@ -0,0 +1,2 @@ +[general] +postgres_version=15 diff --git a/ansible/files/ufw.service.conf b/ansible/files/ufw.service.conf new file mode 100644 index 0000000..83b82ef --- /dev/null +++ b/ansible/files/ufw.service.conf @@ -0,0 +1,4 @@ +[Unit] +After=nftables.service +Requires=nftables.service +PartOf=nftables.service diff --git a/ansible/files/vector.service.j2 b/ansible/files/vector.service.j2 new file mode 100644 index 0000000..1c88baa --- /dev/null +++ b/ansible/files/vector.service.j2 @@ -0,0 +1,20 @@ +[Unit] +Description=Vector +Documentation=https://vector.dev +After=network-online.target +Requires=network-online.target + +[Service] +User=vector +Group=vector +ExecStartPre=/usr/bin/vector validate --config-yaml /etc/vector/vector.yaml +ExecStart=/usr/bin/vector --config-yaml /etc/vector/vector.yaml +ExecReload=/usr/bin/vector validate --config-yaml /etc/vector/vector.yaml +ExecReload=/bin/kill -HUP $MAINPID +Restart=always +RestartSec=3 +AmbientCapabilities=CAP_NET_BIND_SERVICE +EnvironmentFile=-/etc/default/vector + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/walg_helper_scripts/wal_change_ownership.sh b/ansible/files/walg_helper_scripts/wal_change_ownership.sh new file mode 100644 index 0000000..3f0112d --- /dev/null +++ b/ansible/files/walg_helper_scripts/wal_change_ownership.sh @@ -0,0 +1,42 @@ +#! /usr/bin/env bash + +set -euo pipefail + +filename=${1:-} # default to empty when no argument is given, so the explicit check below runs instead of tripping set -u + +if [[ -z "$filename" ]]; then + echo "Nothing supplied. Exiting." + exit 1 +fi + +full_path=/tmp/wal_fetch_dir/$filename + +num_paths=$(readlink -f "$full_path" | wc -l) + +# Checks if supplied filename string contains multiple paths +# For example, "correct/path /var/lib/injected/path /var/lib/etc" +if [[ "$num_paths" -gt 1 ]]; then + echo "Multiple paths supplied. Exiting." + exit 1 +fi + +base_dir=$(readlink -f "$full_path" | cut -d'/' -f2) + +# Checks if the directory/file to be manipulated +# is indeed within the /tmp directory +# For example, "/tmp/../var/lib/postgresql/..." +# will return "var" as the value for $base_dir +if [[ "$base_dir" != "tmp" ]]; then + echo "Attempt to manipulate a file not in /tmp. Exiting." + exit 1 +fi + +# Checks if change of ownership will be applied to a file +# If not, exit +if [[ ! -f $full_path ]]; then + echo "Either file does not exist or is a directory. Exiting." + exit 1 +fi + +# once valid, proceed to change ownership +chown postgres:postgres "$full_path" diff --git a/ansible/files/walg_helper_scripts/wal_fetch.sh b/ansible/files/walg_helper_scripts/wal_fetch.sh new file mode 100644 index 0000000..33448ac --- /dev/null +++ b/ansible/files/walg_helper_scripts/wal_fetch.sh @@ -0,0 +1,12 @@ +#! 
/usr/bin/env bash + +set -euo pipefail + +# Fetch the WAL file and temporarily store them in /tmp +sudo -u wal-g wal-g wal-fetch "$1" /tmp/wal_fetch_dir/"$1" --config /etc/wal-g/config.json + +# Ensure WAL file is owned by the postgres Linux user +sudo -u root /root/wal_change_ownership.sh "$1" + +# Move file to its final destination +mv /tmp/wal_fetch_dir/"$1" /var/lib/postgresql/data/"$2" diff --git a/ansible/install_roles.yml b/ansible/install_roles.yml deleted file mode 100644 index b8fbce9..0000000 --- a/ansible/install_roles.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- - -- name: anxs.postgresql - src: https://github.com/anxs/postgresql - version: v1.12.0 \ No newline at end of file diff --git a/ansible/manifest-playbook.yml b/ansible/manifest-playbook.yml new file mode 100644 index 0000000..6de56e6 --- /dev/null +++ b/ansible/manifest-playbook.yml @@ -0,0 +1,93 @@ +- hosts: localhost + gather_facts: no + + vars_files: + - ./vars.yml + + tasks: + - name: Write out image manifest + action: template src=files/manifest.json dest=./image-manifest-{{ ami_release_version }}.json + + - name: Upload image manifest + shell: | + aws s3 cp ./image-manifest-{{ ami_release_version }}.json s3://{{ internal_artifacts_bucket }}/manifests/postgres-{{ ami_release_version }}/software-manifest.json + + # upload software artifacts of interest + # Generally - download, extract, repack as xz archive, upload + # currently, we upload gotrue, adminapi, postgrest + - name: gotrue - download commit archive + get_url: + url: "https://github.com/tealbase/gotrue/releases/download/v{{ gotrue_release }}/auth-v{{ gotrue_release }}-arm64.tar.gz" + dest: /tmp/auth-v{{ gotrue_release }}-arm64.tar.gz + checksum: "{{ gotrue_release_checksum }}" + timeout: 60 + + - name: PostgREST - download ubuntu binary archive (arm) + get_url: + url: "https://github.com/PostgREST/postgrest/releases/download/v{{ postgrest_release }}/postgrest-v{{ postgrest_release }}-ubuntu-aarch64.tar.xz" + dest: /tmp/postgrest-{{ postgrest_release }}-arm64.tar.xz + checksum: "{{ postgrest_arm_release_checksum }}" + timeout: 60 + + - name: Download adminapi archive + get_url: + url: "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/tealbase-admin-api/v{{ adminapi_release }}/tealbase-admin-api_{{ adminapi_release }}_linux_arm64.tar.gz" + dest: "/tmp/adminapi.tar.gz" + timeout: 90 + + - name: adminapi - unpack archive in /tmp + unarchive: + remote_src: yes + src: /tmp/adminapi.tar.gz + dest: /tmp + + - name: adminapi - pack archive + shell: | + cd /tmp && tar -cJf tealbase-admin-api-{{ adminapi_release }}-arm64.tar.xz tealbase-admin-api + + - name: Download admin-mgr archive + get_url: + url: "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/admin-mgr/v{{ adminmgr_release }}/admin-mgr_{{ adminmgr_release }}_linux_arm64.tar.gz" + dest: "/tmp/admin-mgr.tar.gz" + timeout: 90 + + - name: admin-mgr - unpack archive in /tmp + unarchive: + remote_src: yes + src: /tmp/admin-mgr.tar.gz + dest: /tmp + + - name: admin-mgr - pack archive + shell: | + cd /tmp && tar -cJf admin-mgr-{{ adminmgr_release }}-arm64.tar.xz admin-mgr + + - name: Download tealbase-admin-agent archive + get_url: + url: "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/tealbase-admin-agent/v{{ tealbase_admin_agent_release }}/tealbase-admin-agent-{{ tealbase_admin_agent_release }}-linux-arm64.tar.gz" + dest: "/tmp/tealbase-admin-agent.tar.gz" + timeout: 90 + + - name: tealbase-admin-agent - unpack archive in /tmp + unarchive: + remote_src: yes + src: 
/tmp/tealbase-admin-agent.tar.gz + dest: /tmp + + - name: tealbase-admin-agent - pack archive + shell: | + cd /tmp && tar -cJf tealbase-admin-agent-{{ tealbase_admin_agent_release }}-arm64.tar.xz tealbase-admin-agent-{{ tealbase_admin_agent_release }}-linux-arm64 + + - name: upload archives + shell: | + aws s3 cp /tmp/{{ item.file }} s3://{{ internal_artifacts_bucket }}/upgrades/{{ item.service }}/{{ item.file }} + with_items: + - service: gotrue + file: auth-v{{ gotrue_release }}-arm64.tar.gz + - service: postgrest + file: postgrest-{{ postgrest_release }}-arm64.tar.xz + - service: tealbase-admin-api + file: tealbase-admin-api-{{ adminapi_release }}-arm64.tar.xz + - service: admin-mgr + file: admin-mgr-{{ adminmgr_release }}-arm64.tar.xz + - service: tealbase-admin-agent + file: tealbase-admin-agent-{{ tealbase_admin_agent_release }}-arm64.tar.xz diff --git a/ansible/playbook.yml b/ansible/playbook.yml index e38930e..97775b9 100644 --- a/ansible/playbook.yml +++ b/ansible/playbook.yml @@ -3,37 +3,229 @@ pre_tasks: - import_tasks: tasks/setup-system.yml - vars_files: - ./vars.yml - - roles: - - role: anxs.postgresql + + vars: + sql_files: + - { + source: "pgbouncer_config/pgbouncer_auth_schema.sql", + dest: "00-schema.sql", + } + - { source: "stat_extension.sql", dest: "01-extension.sql" } + + environment: + PATH: /usr/lib/postgresql/bin:{{ ansible_env.PATH }} tasks: - - name: Install non-Postgres extensions - import_tasks: tasks/setup-misc.yml + - set_fact: + tealbase_internal: true + tags: + - install-tealbase-internal + + - set_fact: + parallel_jobs: 16 + + - name: Install Postgres from source + import_tasks: tasks/setup-postgres.yml + + - name: Install PgBouncer + import_tasks: tasks/setup-pgbouncer.yml + tags: + - install-pgbouncer + - install-tealbase-internal + when: debpkg_mode or nixpkg_mode + + - name: Install WAL-G + import_tasks: tasks/setup-wal-g.yml + when: debpkg_mode or nixpkg_mode or stage2_nix + + - name: Install Gotrue + import_tasks: tasks/setup-gotrue.yml + tags: + - install-gotrue + - install-tealbase-internal + when: debpkg_mode or nixpkg_mode + + - name: Install PostgREST + import_tasks: tasks/setup-postgrest.yml + tags: + - install-postgrest + - install-tealbase-internal + when: debpkg_mode or nixpkg_mode + + - name: Install Envoy + import_tasks: tasks/setup-envoy.yml + tags: + - install-tealbase-internal + when: debpkg_mode or nixpkg_mode + + - name: Install Kong + import_tasks: tasks/setup-kong.yml + tags: + - install-tealbase-internal + when: debpkg_mode or nixpkg_mode + + - name: Install nginx + import_tasks: tasks/setup-nginx.yml + tags: + - install-tealbase-internal + when: debpkg_mode or nixpkg_mode - - name: Install Postgres extensions - import_tasks: tasks/setup-extensions.yml + - name: Install tealbase specific content + import_tasks: tasks/setup-tealbase-internal.yml + tags: + - install-tealbase-internal + when: debpkg_mode or nixpkg_mode + + - name: Fix IPv6 NDisc issues (disabled) + import_tasks: tasks/fix-ipv6-ndisc.yml + tags: + - install-tealbase-internal + when: (debpkg_mode or nixpkg_mode) and (qemu_mode is undefined) + + - name: Start Postgres Database without Systemd + become: yes + become_user: postgres + shell: + cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data start + when: debpkg_mode - name: Adjust APT update intervals - copy: + copy: src: files/apt_periodic dest: /etc/apt/apt.conf.d/10periodic + when: debpkg_mode or nixpkg_mode + + - name: Transfer init SQL files + copy: + src: files/{{ item.source }} + dest: /tmp/{{ 
item.dest }} + loop: "{{ sql_files }}" + when: debpkg_mode or stage2_nix + + - name: Create postgres role + become: yes + become_user: postgres + shell: + cmd: /usr/lib/postgresql/bin/psql --username=tealbase_admin -d postgres -c "create role postgres superuser login; alter database postgres owner to postgres;" + when: debpkg_mode or stage2_nix + + - name: Execute init SQL files + become: yes + become_user: postgres + shell: + cmd: /usr/lib/postgresql/bin/psql -f /tmp/{{ item.dest }} + loop: "{{ sql_files }}" + when: debpkg_mode or stage2_nix + + - name: Delete SQL scripts + file: + path: /tmp/{{ item.dest }} + state: absent + loop: "{{ sql_files }}" + when: debpkg_mode or stage2_nix + + - name: First boot optimizations + import_tasks: tasks/internal/optimizations.yml + tags: + - install-tealbase-internal + when: debpkg_mode or stage2_nix + + - name: Finalize AMI + import_tasks: tasks/finalize-ami.yml + tags: + - install-tealbase-internal + when: debpkg_mode or nixpkg_mode + + - name: Enhance fail2ban + import_tasks: tasks/setup-fail2ban.yml + when: debpkg_mode or nixpkg_mode + + + # Install EC2 instance connect + # Only for AWS images + - name: install EC2 instance connect + become: yes + apt: + pkg: + - ec2-instance-connect + tags: + - aws-only + when: qemu_mode is undefined + + # Install this at the end to prevent it from kicking in during the apt process, causing conflicts + - name: Install security tools + become: yes + apt: + pkg: + - unattended-upgrades + update_cache: yes + cache_valid_time: 3600 + + - name: Clean out build dependencies + import_tasks: tasks/clean-build-dependencies.yml + + - name: Restart Postgres Database without Systemd + become: yes + become_user: postgres + shell: + cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data restart -o "-c shared_preload_libraries='pg_tle'" + when: debpkg_mode + + - name: Run migrations + import_tasks: tasks/setup-migrations.yml + tags: + - migrations + when: debpkg_mode or stage2_nix + + - name: Stop Postgres Database without Systemd + become: yes + become_user: postgres + shell: + cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data stop + when: debpkg_mode + + - name: Run unit tests + import_tasks: tasks/test-image.yml + tags: + - unit-tests + when: debpkg_mode or stage2_nix + + - name: Collect Postgres binaries + import_tasks: tasks/internal/collect-pg-binaries.yml + tags: + - collect-binaries + when: debpkg_mode + + - name: Remove snapd + become: yes + shell: | + apt autoremove -y --purge snapd + when: stage2_nix + + - name: Install osquery from nixpkgs binary cache + become: yes + shell: | + sudo -u ubuntu bash -c ". 
/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile remove osquery" + when: stage2_nix - - name: UFW - Allow SSH connections - ufw: - rule: allow - name: OpenSSH - - - name: UFW - Allow connections to postgreSQL (5432) - ufw: - rule: allow - port: '5432' - - - name: UFW - Deny all other incoming traffix by default - ufw: - state: enabled - policy: deny - direction: incoming \ No newline at end of file + - name: nix collect garbage + become: yes + shell: | + sudo -u ubuntu bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix-collect-garbage -d" + when: stage2_nix diff --git a/ansible/tasks/clean-build-dependencies.yml b/ansible/tasks/clean-build-dependencies.yml new file mode 100644 index 0000000..43ec051 --- /dev/null +++ b/ansible/tasks/clean-build-dependencies.yml @@ -0,0 +1,21 @@ +- name: Remove build dependencies + apt: + pkg: + - bison + - build-essential + - clang-11 + - cmake + - cpp + - flex + - g++ + - g++-10 + - g++-9 + - gcc-10 + - make + - manpages + - manpages-dev + - ninja-build + - patch + - python2 + state: absent + autoremove: yes diff --git a/ansible/tasks/finalize-ami.yml b/ansible/tasks/finalize-ami.yml new file mode 100644 index 0000000..411ca33 --- /dev/null +++ b/ansible/tasks/finalize-ami.yml @@ -0,0 +1,81 @@ +- name: PG logging conf + template: + src: files/postgresql_config/postgresql-csvlog.conf + dest: /etc/postgresql/logging.conf + group: postgres + +- name: UFW - Allow SSH connections + ufw: + rule: allow + name: OpenSSH + +- name: UFW - Allow connections to postgreSQL (5432) + ufw: + rule: allow + port: "5432" + +- name: UFW - Allow connections to postgreSQL (6543) + ufw: + rule: allow + port: "6543" + tags: + - install-pgbouncer + +- name: UFW - Allow connections to http (80) + ufw: + rule: allow + port: http + tags: + - install-tealbase-internal + +- name: UFW - Allow connections to https (443) + ufw: + rule: allow + port: https + tags: + - install-tealbase-internal + +- name: UFW - Deny all other incoming traffic by default + ufw: + state: enabled + policy: deny + direction: incoming + +- name: Move logrotate files to /etc/logrotate.d/ + copy: + src: "files/logrotate_config/{{ item.file }}" + dest: "/etc/logrotate.d/{{ item.file }}" + mode: "0700" + owner: root + loop: + - { file: "logrotate-postgres-csv.conf" } + - { file: "logrotate-postgres.conf" } + - { file: "logrotate-walg.conf" } + - { file: "logrotate-postgres-auth.conf" } + +- name: Ensure default Postgres logrotate config is removed + file: + path: /etc/logrotate.d/postgresql-common + state: absent + +- name: Disable cron access + copy: + src: files/cron.deny + dest: /etc/cron.deny + +- name: Configure logrotation to run every hour + shell: + cmd: | + cp /usr/lib/systemd/system/logrotate.timer /etc/systemd/system/logrotate.timer + sed -i -e 's;daily;*:0/5;' /etc/systemd/system/logrotate.timer + systemctl reenable logrotate.timer + become: yes + +- name: import pgsodium_getkey script + template: + src: files/pgsodium_getkey_readonly.sh.j2 + dest: "{{ pg_bindir }}/pgsodium_getkey.sh" + owner: postgres + group: postgres + mode: 0700 + when: debpkg_mode or stage2_nix diff --git a/ansible/tasks/fix-ipv6-ndisc.yml b/ansible/tasks/fix-ipv6-ndisc.yml new file mode 100644 index 0000000..8953fd8 --- /dev/null +++ b/ansible/tasks/fix-ipv6-ndisc.yml @@ -0,0 +1,33 @@ +--- +- name: fix Network - systemd timer file + copy: + dest: /etc/systemd/system/systemd-networkd-check-and-fix.timer + src: "files/systemd-networkd/systemd-networkd-check-and-fix.timer" + owner: root 
+ group: root + mode: 0644 + +- name: fix Network - systemd service file + copy: + dest: /etc/systemd/system/systemd-networkd-check-and-fix.service + src: "files/systemd-networkd/systemd-networkd-check-and-fix.service" + owner: root + group: root + mode: 0644 + +- name: fix Network - detect script + copy: + dest: /usr/local/bin/systemd-networkd-check-and-fix.sh + src: "files/systemd-networkd/systemd-networkd-check-and-fix.sh" + owner: root + group: root + mode: 0700 + +- name: fix Network - reload systemd + systemd: + daemon_reload: false + +- name: fix Network - ensure systemd timer is installed but disabled + systemd: + name: systemd-networkd-check-and-fix.timer + enabled: false diff --git a/ansible/tasks/internal/admin-api.yml b/ansible/tasks/internal/admin-api.yml new file mode 100644 index 0000000..c3456d0 --- /dev/null +++ b/ansible/tasks/internal/admin-api.yml @@ -0,0 +1,97 @@ +- name: adminapi - system user + user: + name: adminapi + groups: root,admin,envoy,gotrue,kong,pgbouncer,postgres,postgrest,systemd-journal,vector,wal-g + append: yes + +- name: Move shell scripts to /root dir + copy: + src: "files/admin_api_scripts/{{ item.file }}" + dest: "/root/{{ item.file }}" + mode: "0700" + owner: root + loop: + - { file: "grow_fs.sh" } + - { file: "manage_readonly_mode.sh" } + - { file: "pg_egress_collect.pl" } + +- name: give adminapi user permissions + copy: + src: files/adminapi.sudoers.conf + dest: /etc/sudoers.d/adminapi + mode: "0644" + +- name: perms for adminapi + shell: | + chmod g+w /etc + +- name: Setting arch (x86) + set_fact: + arch: "x86" + when: platform == "amd64" + +- name: Setting arch (arm) + set_fact: + arch: "arm64" + when: platform == "arm64" + +- name: Download adminapi archive + get_url: + url: "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/tealbase-admin-api/v{{ adminapi_release }}/tealbase-admin-api_{{ adminapi_release }}_linux_{{ arch }}.tar.gz" + dest: "/tmp/adminapi.tar.gz" + timeout: 90 + +- name: adminapi - unpack archive in /opt + unarchive: + remote_src: yes + src: /tmp/adminapi.tar.gz + dest: /opt + owner: adminapi + +- name: adminapi - config dir + file: + path: /etc/adminapi + owner: adminapi + state: directory + +- name: adminapi - pg_upgrade scripts dir + file: + path: /etc/adminapi/pg_upgrade_scripts + owner: adminapi + state: directory + +- name: Move shell scripts to /etc/adminapi/pg_upgrade_scripts/ + copy: + src: "files/admin_api_scripts/pg_upgrade_scripts/{{ item.file }}" + dest: "/etc/adminapi/pg_upgrade_scripts/{{ item.file }}" + mode: "0755" + owner: adminapi + loop: + - { file: "check.sh" } + - { file: "complete.sh" } + - { file: "initiate.sh" } + - { file: "prepare.sh" } + - { file: "pgsodium_getkey.sh" } + - { file: "common.sh" } + +- name: adminapi - create service file + template: + src: files/adminapi.service.j2 + dest: /etc/systemd/system/adminapi.service + +- name: adminapi - create service file for commence backup process + template: + src: files/commence-backup.service.j2 + dest: /etc/systemd/system/commence-backup.service + +- name: UFW - Allow connections to adminapi ports + ufw: + rule: allow + port: "8085" + +- name: adminapi - reload systemd + systemd: + daemon_reload: yes + +- name: adminapi - grant extra privileges to user + shell: chmod 775 /etc && chmod 775 /etc/kong diff --git a/ansible/tasks/internal/admin-mgr.yml b/ansible/tasks/internal/admin-mgr.yml new file mode 100644 index 0000000..2397616 --- /dev/null +++ b/ansible/tasks/internal/admin-mgr.yml @@ -0,0 +1,22 @@ +- name: Setting arch (x86) + 
set_fact: + arch: "amd64" + when: platform == "amd64" + +- name: Setting arch (arm) + set_fact: + arch: "arm64" + when: platform == "arm64" + +- name: Download admin-mgr archive + get_url: + url: "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/admin-mgr/v{{ adminmgr_release }}/admin-mgr_{{ adminmgr_release }}_linux_{{ arch }}.tar.gz" + dest: "/tmp/admin-mgr.tar.gz" + timeout: 90 + +- name: admin-mgr - unpack archive in /usr/bin/ + unarchive: + remote_src: yes + src: /tmp/admin-mgr.tar.gz + dest: /usr/bin/ + owner: root diff --git a/ansible/tasks/internal/collect-pg-binaries.yml b/ansible/tasks/internal/collect-pg-binaries.yml new file mode 100644 index 0000000..7f652f7 --- /dev/null +++ b/ansible/tasks/internal/collect-pg-binaries.yml @@ -0,0 +1,49 @@ +- name: Collect Postgres binaries - create collection directory + file: + path: /tmp/pg_binaries/{{ postgresql_major }}/ + state: directory + +- name: Collect Postgres binaries - collect binaries and libraries + copy: + remote_src: yes + src: /usr/lib/postgresql/{{ postgresql_major }}/{{ item }}/ + dest: /tmp/pg_binaries/{{ postgresql_major }}/{{ item }}/ + with_items: + - bin + - lib + +- name: Collect Postgres libraries - collect libraries which are in /usr/lib/postgresql/lib/ + copy: + remote_src: yes + src: /usr/lib/postgresql/lib/ + dest: /tmp/pg_binaries/{{ postgresql_major }}/lib/ + +- name: Collect Postgres libraries - collect libraries which are in /var/lib/postgresql/extension/ + copy: + remote_src: yes + src: /var/lib/postgresql/extension/ + dest: /tmp/pg_binaries/{{ postgresql_major }}/lib/ + +- name: Collect Postgres libraries - collect latest libpq + copy: + remote_src: yes + src: /usr/lib/aarch64-linux-gnu/libpq.so.5 + dest: /tmp/pg_binaries/{{ postgresql_major }}/lib/libpq.so.5 + +- name: Collect Postgres binaries - collect shared files + copy: + remote_src: yes + src: /usr/share/postgresql/{{ postgresql_major }}/ + dest: /tmp/pg_binaries/{{ postgresql_major }}/share/ + +- name: Collect Postgres binaries - create tarfile + archive: + path: /tmp/pg_binaries/ + dest: /tmp/pg_binaries.tar.gz + remove: yes + +- name: Fetch tarfile to local + fetch: + src: /tmp/pg_binaries.tar.gz + dest: /tmp/ + flat: true diff --git a/ansible/tasks/internal/install-salt.yml b/ansible/tasks/internal/install-salt.yml new file mode 100644 index 0000000..73cd6ee --- /dev/null +++ b/ansible/tasks/internal/install-salt.yml @@ -0,0 +1,47 @@ +- name: Add apt repository for Saltstack (arm) + block: + - name: Ensure /etc/apt/keyrings directory exists + file: + path: /etc/apt/keyrings + state: directory + mode: '0755' + + - name: salt gpg key + ansible.builtin.get_url: + url: https://packages.broadcom.com/artifactory/api/security/keypair/SaltProjectKey/public + dest: /etc/apt/keyrings/salt-archive-keyring-2023.pgp + mode: '0644' + + - name: salt apt repo + ansible.builtin.apt_repository: + repo: "deb [signed-by=/etc/apt/keyrings/salt-archive-keyring-2023.pgp arch=arm64] https://packages.broadcom.com/artifactory/saltproject-deb/ stable main" + filename: 'salt.list' + state: present + when: platform == "arm64" + +- name: Add apt repository for Saltstack (amd) + block: + - name: Ensure /etc/apt/keyrings directory exists + file: + path: /etc/apt/keyrings + state: directory + mode: '0755' + + - name: salt gpg key + ansible.builtin.get_url: + url: https://packages.broadcom.com/artifactory/api/security/keypair/SaltProjectKey/public + dest: /etc/apt/keyrings/salt-archive-keyring-2023.pgp + mode: '0644' + + - name: salt apt repo + 
ansible.builtin.apt_repository: + repo: "deb [signed-by=/etc/apt/keyrings/salt-archive-keyring-2023.pgp arch=amd64] https://packages.broadcom.com/artifactory/saltproject-deb/ stable main" + filename: 'salt.list' + state: present + when: platform == "amd64" + +- name: Salt minion install + apt: + name: salt-minion + state: present + update_cache: yes diff --git a/ansible/tasks/internal/optimizations.yml b/ansible/tasks/internal/optimizations.yml new file mode 100644 index 0000000..42a0a24 --- /dev/null +++ b/ansible/tasks/internal/optimizations.yml @@ -0,0 +1,39 @@ +- name: ensure services are stopped and disabled for first boot debian build + systemd: + enabled: no + name: '{{ item }}' + state: stopped + with_items: + - postgresql + - pgbouncer + - fail2ban + - motd-news + - vector + - lvm2-monitor + - salt-minion + when: debpkg_mode + +- name: ensure services are stopped and disabled for first boot nix build + systemd: + enabled: no + name: '{{ item }}' + state: stopped + with_items: + - postgresql + - pgbouncer + - fail2ban + - motd-news + - vector + - salt-minion + when: stage2_nix + +- name: disable man-db + become: yes + file: + state: absent + path: "/etc/cron.daily/{{ item }}" + with_items: + - man-db + - popularity-contest + - ubuntu-advantage-tools + when: debpkg_mode or stage2_nix diff --git a/ansible/tasks/internal/pg_egress_collect.yml b/ansible/tasks/internal/pg_egress_collect.yml new file mode 100644 index 0000000..be9fefe --- /dev/null +++ b/ansible/tasks/internal/pg_egress_collect.yml @@ -0,0 +1,15 @@ +- name: pg_egress_collect - install tcpdump and perl async lib + apt: + pkg: + - tcpdump + - libio-async-perl + +- name: pg_egress_collect - create service file + template: + src: files/pg_egress_collect.service.j2 + dest: /etc/systemd/system/pg_egress_collect.service + +- name: pg_egress_collect - reload systemd + systemd: + daemon_reload: yes + diff --git a/ansible/tasks/internal/postgres-exporter.yml b/ansible/tasks/internal/postgres-exporter.yml new file mode 100644 index 0000000..0292157 --- /dev/null +++ b/ansible/tasks/internal/postgres-exporter.yml @@ -0,0 +1,48 @@ +- name: UFW - Allow connections to exporter for prometheus + ufw: + rule: allow + port: "9187" + +- name: create directories - systemd unit + file: + state: directory + path: /etc/systemd/system/postgres_exporter.service.d + owner: root + mode: '0700' + become: yes + +- name: create directories - service files + file: + state: directory + path: /opt/postgres_exporter + owner: postgres + group: postgres + mode: '0775' + become: yes + +- name: download postgres exporter + get_url: + url: "https://github.com/prometheus-community/postgres_exporter/releases/download/v{{ postgres_exporter_release }}/postgres_exporter-{{ postgres_exporter_release }}.linux-{{ platform }}.tar.gz" + dest: /tmp/postgres_exporter.tar.gz + checksum: "{{ postgres_exporter_release_checksum[platform] }}" + timeout: 60 + +- name: expand postgres exporter + unarchive: + remote_src: yes + src: /tmp/postgres_exporter.tar.gz + dest: /opt/postgres_exporter + extra_opts: [--strip-components=1] + become: yes + +- name: exporter create a service + template: + src: files/postgres_exporter.service.j2 + dest: /etc/systemd/system/postgres_exporter.service + +- name: exporter ensure service is present + systemd: + enabled: no + name: postgres_exporter + daemon_reload: yes + state: stopped diff --git a/ansible/tasks/internal/postgresql-prestart.yml b/ansible/tasks/internal/postgresql-prestart.yml new file mode 100644 index 0000000..46671d5 --- /dev/null 
+++ b/ansible/tasks/internal/postgresql-prestart.yml @@ -0,0 +1,7 @@ +- name: postgres_prestart - create service file + template: + src: files/postgres_prestart.sh.j2 + dest: /usr/local/bin/postgres_prestart.sh + mode: a+x + owner: root + group: root diff --git a/ansible/tasks/internal/setup-ansible-pull.yml b/ansible/tasks/internal/setup-ansible-pull.yml new file mode 100644 index 0000000..ad1aa78 --- /dev/null +++ b/ansible/tasks/internal/setup-ansible-pull.yml @@ -0,0 +1,29 @@ +- name: install ansible + shell: + cmd: | + apt install -y software-properties-common + add-apt-repository --yes --update ppa:ansible/ansible + apt install -y ansible + sed -i -e 's/#callback_whitelist.*/callback_whitelist = profile_tasks/' /etc/ansible/ansible.cfg + +- name: ansible pull systemd units + copy: + src: files/{{ item }} + dest: /etc/systemd/system/{{ item }} + with_items: + - ansible-pull.service + - ansible-pull.timer + +- name: create facts dir + file: + path: /etc/ansible/facts.d + state: directory + +- name: ansible facts + copy: + src: files/tealbase_facts.ini + dest: /etc/ansible/facts.d/tealbase.fact + +- name: reload systemd + systemd: + daemon_reload: yes diff --git a/ansible/tasks/internal/setup-nftables.yml b/ansible/tasks/internal/setup-nftables.yml new file mode 100644 index 0000000..e0f0c72 --- /dev/null +++ b/ansible/tasks/internal/setup-nftables.yml @@ -0,0 +1,34 @@ +- name: nftables overrides + file: + state: directory + path: /etc/nftables + owner: adminapi + +- name: nftables empty config + file: + state: touch + path: /etc/nftables/tealbase_managed.conf + owner: adminapi + +- name: include managed config + shell: | + cat >> "/etc/nftables.conf" << EOF + table inet tealbase_managed { } + include "/etc/nftables/tealbase_managed.conf"; + + EOF + +- name: ufw overrides dir + file: + state: directory + path: /etc/systemd/system/ufw.service.d + owner: root + +- name: Custom systemd overrides + copy: + src: files/ufw.service.conf + dest: /etc/systemd/system/ufw.service.d/overrides.conf + +- name: reload systemd + systemd: + daemon_reload: yes diff --git a/ansible/tasks/internal/supautils.yml b/ansible/tasks/internal/supautils.yml new file mode 100644 index 0000000..d4b266b --- /dev/null +++ b/ansible/tasks/internal/supautils.yml @@ -0,0 +1,77 @@ +# supautils +- name: supautils - download & install dependencies + apt: + pkg: + - build-essential + - clang-11 + update_cache: yes + cache_valid_time: 3600 + +- name: supautils - download latest release + get_url: + url: "https://github.com/tealbase/supautils/archive/refs/tags/v{{ supautils_release }}.tar.gz" + dest: /tmp/supautils-{{ supautils_release }}.tar.gz + checksum: "{{ supautils_release_checksum }}" + timeout: 60 + +- name: supautils - unpack archive + unarchive: + remote_src: yes + src: /tmp/supautils-{{ supautils_release }}.tar.gz + dest: /tmp + become: yes + +- name: supautils - build + make: + chdir: /tmp/supautils-{{ supautils_release }} + become: yes + +- name: supautils - install + make: + chdir: /tmp/supautils-{{ supautils_release }} + target: install + become: yes + +- name: supautils - add supautils to session_preload_libraries + become: yes + replace: + path: /etc/postgresql/postgresql.conf + regexp: "#session_preload_libraries = ''" + replace: session_preload_libraries = 'supautils' + +- name: supautils - write custom supautils.conf + template: + src: "files/postgresql_config/supautils.conf.j2" + dest: /etc/postgresql-custom/supautils.conf + mode: 0664 + owner: postgres + group: postgres + +- name: supautils - copy 
extension custom scripts + copy: + src: files/postgresql_extension_custom_scripts/ + dest: /etc/postgresql-custom/extension-custom-scripts + become: yes + +- name: supautils - chown extension custom scripts + file: + mode: 0775 + owner: postgres + group: postgres + path: /etc/postgresql-custom/extension-custom-scripts + recurse: yes + become: yes + +- name: supautils - include /etc/postgresql-custom/supautils.conf in postgresql.conf + become: yes + replace: + path: /etc/postgresql/postgresql.conf + regexp: "#include = '/etc/postgresql-custom/supautils.conf'" + replace: "include = '/etc/postgresql-custom/supautils.conf'" + +- name: supautils - remove build dependencies + apt: + pkg: + - build-essential + - clang-11 + state: absent diff --git a/ansible/tasks/internal/tealbase-admin-agent.yml b/ansible/tasks/internal/tealbase-admin-agent.yml new file mode 100644 index 0000000..e130f49 --- /dev/null +++ b/ansible/tasks/internal/tealbase-admin-agent.yml @@ -0,0 +1,87 @@ +- name: tealbase-admin-agent - system group + group: + name: tealbase-admin-agent + system: yes + +- name: tealbase-admin-agent - system user + user: + name: tealbase-admin-agent + group: tealbase-admin-agent + groups: admin,salt + append: yes + system: yes + shell: /bin/sh + +- name: tealbase-admin-agent - config dir + file: + path: /opt/tealbase-admin-agent + owner: tealbase-admin-agent + state: directory + +- name: tealbase-admin-agent - gpg dir + file: + path: /etc/salt/gpgkeys + owner: root + group: salt + state: directory + +- name: give tealbase-admin-agent user permissions + copy: + src: files/tealbase_admin_agent_config/tealbase-admin-agent.sudoers.conf + dest: /etc/sudoers.d/tealbase-admin-agent + mode: "0644" + +- name: Setting arch (x86) + set_fact: + arch: "x86" + when: platform == "amd64" + +- name: Setting arch (arm) + set_fact: + arch: "arm64" + when: platform == "arm64" + +- name: Download tealbase-admin-agent archive + get_url: + url: "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/tealbase-admin-agent/v{{ tealbase_admin_agent_release }}/tealbase-admin-agent-{{ tealbase_admin_agent_release }}-linux-{{ arch }}.tar.gz" + dest: "/tmp/tealbase-admin-agent.tar.gz" + timeout: 90 + +- name: tealbase-admin-agent - unpack archive in /opt + unarchive: + remote_src: yes + src: /tmp/tealbase-admin-agent.tar.gz + dest: /opt/tealbase-admin-agent/ + owner: tealbase-admin-agent + extra_opts: + - --strip-components=1 + +- name: tealbase-admin-agent - create symlink + ansible.builtin.file: + path: /opt/tealbase-admin-agent/tealbase-admin-agent + src: "/opt/tealbase-admin-agent/tealbase-admin-agent-linux-{{ arch }}" + state: link + owner: tealbase-admin-agent + mode: "0755" + force: yes + +- name: tealbase-admin-agent - create salt systemd timer file + template: + src: files/tealbase_admin_agent_config/tealbase-admin-agent_salt.timer.j2 + dest: /etc/systemd/system/tealbase-admin-agent_salt.timer + +- name: tealbase-admin-agent - create salt service file + copy: + src: files/tealbase_admin_agent_config/tealbase-admin-agent_salt.service + dest: /etc/systemd/system/tealbase-admin-agent_salt.service + +- name: tealbase-admin-agent - reload systemd + systemd: + daemon_reload: yes + +# Initially ensure tealbase-admin-agent is installed but not started +- name: tealbase-admin-agent - DISABLE service + systemd: + name: tealbase-admin-agent_salt + enabled: no + state: stopped diff --git a/ansible/tasks/setup-docker.yml b/ansible/tasks/setup-docker.yml new file mode 100644 index 0000000..7b37f70 --- /dev/null +++ 
b/ansible/tasks/setup-docker.yml @@ -0,0 +1,80 @@ +- name: Copy extension packages + copy: + src: files/extensions/ + dest: /tmp/extensions/ + when: debpkg_mode + +# Builtin apt module does not support wildcard for deb paths +- name: Install extensions + shell: | + set -e + apt-get update + apt-get install -y --no-install-recommends /tmp/extensions/*.deb + when: debpkg_mode + +- name: pgsodium - determine postgres bin directory + shell: pg_config --bindir + register: pg_bindir_output + when: debpkg_mode + +- set_fact: + pg_bindir: "{{ pg_bindir_output.stdout }}" + when: debpkg_mode + +- name: pgsodium - set pgsodium.getkey_script + become: yes + lineinfile: + path: /etc/postgresql/postgresql.conf + state: present + # script is expected to be placed by finalization tasks for different target platforms + line: pgsodium.getkey_script= '{{ pg_bindir }}/pgsodium_getkey.sh' + when: debpkg_mode + +# supautils +- name: supautils - add supautils to session_preload_libraries + become: yes + replace: + path: /etc/postgresql/postgresql.conf + regexp: "#session_preload_libraries = ''" + replace: session_preload_libraries = 'supautils' + when: debpkg_mode or stage2_nix + +- name: supautils - write custom supautils.conf + template: + src: "files/postgresql_config/supautils.conf.j2" + dest: /etc/postgresql-custom/supautils.conf + mode: 0664 + owner: postgres + group: postgres + when: debpkg_mode or stage2_nix + +- name: supautils - copy extension custom scripts + copy: + src: files/postgresql_extension_custom_scripts/ + dest: /etc/postgresql-custom/extension-custom-scripts + become: yes + when: debpkg_mode or stage2_nix + +- name: supautils - chown extension custom scripts + file: + mode: 0775 + owner: postgres + group: postgres + path: /etc/postgresql-custom/extension-custom-scripts + recurse: yes + become: yes + when: debpkg_mode or stage2_nix + +- name: supautils - include /etc/postgresql-custom/supautils.conf in postgresql.conf + become: yes + replace: + path: /etc/postgresql/postgresql.conf + regexp: "#include = '/etc/postgresql-custom/supautils.conf'" + replace: "include = '/etc/postgresql-custom/supautils.conf'" + when: debpkg_mode or stage2_nix + +- name: Cleanup - extension packages + file: + path: /tmp/extensions + state: absent + when: debpkg_mode diff --git a/ansible/tasks/setup-envoy.yml b/ansible/tasks/setup-envoy.yml new file mode 100644 index 0000000..9843b55 --- /dev/null +++ b/ansible/tasks/setup-envoy.yml @@ -0,0 +1,60 @@ +- name: Envoy - system user + ansible.builtin.user: + name: envoy + +- name: Envoy - download binary + ansible.builtin.get_url: + checksum: "{{ envoy_release_checksum }}" + dest: /opt/envoy + group: envoy + mode: u+x + owner: envoy + # yamllint disable-line rule:line-length + url: "https://github.com/envoyproxy/envoy/releases/download/v{{ envoy_release }}/envoy-{{ envoy_release }}-linux-aarch_64" + +- name: Envoy - download hot restarter script + ansible.builtin.get_url: + checksum: "{{ envoy_hot_restarter_release_checksum }}" + dest: /opt/envoy-hot-restarter.py + group: envoy + mode: u+x + owner: envoy + # yamllint disable-line rule:line-length + url: https://raw.githubusercontent.com/envoyproxy/envoy/v{{ envoy_release }}/restarter/hot-restarter.py + +- name: Envoy - bump up ulimit + community.general.pam_limits: + domain: envoy + limit_item: nofile + limit_type: soft + value: 4096 + +- name: Envoy - create script to start envoy + ansible.builtin.copy: + dest: /opt/start-envoy.sh + group: envoy + mode: u+x + owner: envoy + src: files/start-envoy.sh + +- name: Envoy - 
create configuration files + ansible.builtin.copy: + dest: /etc/envoy/ + directory_mode: u=rwx,g=rwx,o=rx + group: envoy + mode: u=rw,g=rw,o=r + owner: envoy + src: files/envoy_config/ + +- name: Envoy - create service file + ansible.builtin.copy: + dest: /etc/systemd/system/envoy.service + mode: u=rw,g=r,o=r + src: files/envoy.service + +- name: Envoy - disable service + ansible.builtin.systemd: + daemon_reload: true + enabled: false + name: envoy + state: stopped diff --git a/ansible/tasks/setup-extensions.yml b/ansible/tasks/setup-extensions.yml deleted file mode 100644 index 85cd462..0000000 --- a/ansible/tasks/setup-extensions.yml +++ /dev/null @@ -1,271 +0,0 @@ -# postgis -- name: Install postgis for postgreSQL versions < 10 - apt: - pkg: - - libgeos-c1v5 - - "postgresql-{{ postgresql_version }}-postgis-{{ postgresql_ext_postgis_version }}" - - "postgresql-{{ postgresql_version }}-postgis-scripts" - update_cache: yes - cache_valid_time: 3600 - when: postgresql_version < 10 - -- name: Install postgis for postgreSQL versions >= 10 - apt: - pkg: - - libgeos-c1v5 - - "postgresql-{{ postgresql_version }}-postgis-{{ postgresql_ext_postgis_version }}" - - "postgresql-{{ postgresql_version }}-postgis-{{ postgresql_ext_postgis_version }}-scripts" - update_cache: yes - cache_valid_time: 3600 - when: postgresql_version >= 10 - -# pgTAP -- name: pgTAP - download latest release - get_url: - url: "https://github.com/theory/pgtap/archive/v{{ pgtap_release }}.tar.gz" - dest: /tmp - checksum: "{{ pgtap_release_checksum }}" - -- name: pgTAP - unpack archive - unarchive: - remote_src: yes - src: /tmp/pgtap-{{ pgtap_release }}.tar.gz - dest: /tmp - become: yes - -- name: pgTAP - install - make: - chdir: /tmp/pgtap-{{ pgtap_release }} - target: install - become: yes - -# plpython -- name: Install plpython - apt: - pkg: postgresql-plpython3-12 - update_cache: yes - cache_valid_time: 3600 - -# pgAudit -- name: pgAudit - download & install dependencies - apt: - pkg: - - postgresql-server-dev-12 - - libssl-dev - - libkrb5-dev - update_cache: yes - install_recommends: no - -- name: pgAudit - download latest release - get_url: - url: "https://github.com/pgaudit/pgaudit/archive/{{ pgaudit_release }}.tar.gz" - dest: /tmp - checksum: "{{ pgaudit_release_checksum }}" - -- name: pgAudit - unpack archive - unarchive: - remote_src: yes - src: /tmp/pgaudit-{{ pgaudit_release }}.tar.gz - dest: /tmp - become: yes - -- name: pgAudit - build - make: - chdir: /tmp/pgaudit-{{ pgaudit_release }} - target: check - params: - USE_PGXS: 1 - become: yes - -- name: pgAudit - install - make: - chdir: /tmp/pgaudit-{{ pgaudit_release }} - target: install - params: - USE_PGXS: 1 - become: yes - -# pgjwt -- name: pgjwt - download from master branch - git: - repo: https://github.com/michelp/pgjwt.git - dest: /tmp/pgjwt - version: master - -- name: pgjwt - install - make: - chdir: /tmp/pgjwt - target: install - become: yes - -- name: Remove libcurl4 package - apt: - pkg: - - libcurl4 - state: absent - -# pgsql-http -- name: pgsql-http - download & install dependencies - apt: - pkg: - - libcurl4-gnutls-dev - update_cache: yes - install_recommends: yes - -- name: pgsql-http - download latest release - get_url: - url: "https://github.com/pramsey/pgsql-http/archive/v{{ pgsql_http_release }}.tar.gz" - dest: /tmp - checksum: "{{ pgsql_http_release_checksum }}" - -- name: pgsql-http - unpack archive - unarchive: - remote_src: yes - src: /tmp/pgsql-http-{{ pgsql_http_release }}.tar.gz - dest: /tmp - become: yes - -- name: pgsql-http - build - 
make: - chdir: /tmp/pgsql-http-{{ pgsql_http_release }} - become: yes - -- name: pgsql-http - install - make: - chdir: /tmp/pgsql-http-{{ pgsql_http_release }} - target: install - become: yes - -# plpgsql_check -- name: plpgsql_check - download & install dependencies - apt: - pkg: - - libicu-dev - update_cache: yes - install_recommends: no - -- name: plpgsql_check - download latest release - get_url: - url: https://github.com/okbob/plpgsql_check/archive/v{{ plpgsql_check_release }}.tar.gz - dest: /tmp - checksum: "{{ plpgsql_check_release_checksum }}" - -- name: plpgsql_check - unpack archive - unarchive: - remote_src: yes - src: /tmp/plpgsql_check-{{ plpgsql_check_release }}.tar.gz - dest: /tmp - become: yes - -- name: plpgsql_check - clean - make: - chdir: /tmp/plpgsql_check-{{ plpgsql_check_release }} - target: clean - become: yes - -- name: plpgsql_check - install - make: - chdir: /tmp/plpgsql_check-{{ plpgsql_check_release }} - target: install - become: yes - -# pljava -- name: pljava - download & install dependencies - apt: - pkg: - - maven - - default-jre - - default-jdk - update_cache: yes - install_recommends: yes - -- name: pljava - download latest release - get_url: - url: https://github.com/tada/pljava/archive/V{{ pljava_release }}.tar.gz - dest: /tmp - checksum: "{{ pljava_release_checksum }}" - -- name: pljava - unpack archive - unarchive: - remote_src: yes - src: /tmp/pljava-{{ pljava_release }}.tar.gz - dest: /tmp - become: yes - -- name: pljava - build - become: yes - shell: - cmd: mvn clean install - chdir: /tmp/pljava-{{ pljava_release }} - -- name: pljava - install - become: yes - shell: - cmd: java -jar pljava-packaging/target/pljava-pg12.3-amd64-Linux-gpp.jar - chdir: /tmp/pljava-{{ pljava_release }} - -- name: pljava - remove build dependencies - apt: - pkg: - - maven - - default-jre - - default-jdk - state: absent - -- name: pljava - install headless jdk - apt: - pkg: - - default-jdk-headless - update_cache: yes - install_recommends: no - -- name: pljava - set pljava.libjvm_location - become: yes - shell: - cmd: echo "pljava.libjvm_location = '/usr/lib/jvm/java-11-openjdk-amd64/lib/server/libjvm.so'" >> /etc/postgresql/12/main/postgresql.conf - -- name: pljava - remove ~/.m2 directory - become: yes - file: - path: ~/.m2 - state: absent - -# plv8 -- name: plv8 - download & install dependencies - apt: - pkg: - - build-essential - - ca-certificates - - curl - - git-core - - gpp - - cpp - - pkg-config - - apt-transport-https - - cmake - - libc++-dev - - libc++abi-dev - - postgresql-server-dev-12 - - libc++1 - - libtinfo5 - - libc++abi1 - update_cache: yes - install_recommends: no - -- name: plv8 - download latest release - git: - repo: https://github.com/plv8/plv8.git - dest: /tmp/plv8 - version: r3.0alpha - become: yes - -- name: plv8 - build - make: - chdir: /tmp/plv8 - become: yes - -- name: plv8 - install - make: - chdir: /tmp/plv8 - target: install - become: yes diff --git a/ansible/tasks/setup-fail2ban.yml b/ansible/tasks/setup-fail2ban.yml new file mode 100644 index 0000000..38245e8 --- /dev/null +++ b/ansible/tasks/setup-fail2ban.yml @@ -0,0 +1,73 @@ +# set default bantime to 1 hour +- name: extend bantime + become: yes + replace: + path: /etc/fail2ban/jail.conf + regexp: bantime = 10m + replace: bantime = 3600 + when: debpkg_mode or nixpkg_mode + +- name: Configure journald + copy: + src: files/fail2ban_config/jail-ssh.conf + dest: /etc/fail2ban/jail.d/sshd.local + when: debpkg_mode or nixpkg_mode + +- name: configure fail2ban to use nftables + copy: + src: 
files/fail2ban_config/jail.local + dest: /etc/fail2ban/jail.local + when: debpkg_mode or nixpkg_mode + +# postgresql +- name: import jail.d/postgresql.conf + template: + src: files/fail2ban_config/jail-postgresql.conf.j2 + dest: /etc/fail2ban/jail.d/postgresql.conf + become: yes + when: debpkg_mode or nixpkg_mode + +- name: import filter.d/postgresql.conf + template: + src: files/fail2ban_config/filter-postgresql.conf.j2 + dest: /etc/fail2ban/filter.d/postgresql.conf + become: yes + when: debpkg_mode or nixpkg_mode + +- name: create overrides dir + file: + state: directory + owner: root + group: root + path: /etc/systemd/system/fail2ban.service.d + mode: '0700' + when: debpkg_mode or nixpkg_mode + +- name: Custom systemd overrides + copy: + src: files/fail2ban_config/fail2ban.service.conf + dest: /etc/systemd/system/fail2ban.service.d/overrides.conf + when: debpkg_mode or nixpkg_mode + +- name: add in tealbase specific ignore filters + lineinfile: + path: /etc/fail2ban/filter.d/postgresql.conf + state: present + line: "{{ item.line }}" + loop: + - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""tealbase_admin".*$' } + - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""tealbase_auth_admin".*$' } + - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""tealbase_storage_admin".*$' } + - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""authenticator".*$' } + - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""pgbouncer".*$' } + become: yes + tags: + - install-tealbase-internal + when: debpkg_mode or nixpkg_mode + +- name: fail2ban - disable service + systemd: + name: fail2ban + enabled: no + daemon_reload: yes + when: debpkg_mode or nixpkg_mode diff --git a/ansible/tasks/setup-gotrue.yml b/ansible/tasks/setup-gotrue.yml new file mode 100644 index 0000000..d102ed4 --- /dev/null +++ b/ansible/tasks/setup-gotrue.yml @@ -0,0 +1,66 @@ +- name: UFW - Allow connections to GoTrue metrics exporter + ufw: + rule: allow + port: "9122" + +# use this user for the Gotrue build and for running the service +- name: Gotrue - system user + user: name=gotrue + +- name: Setting arch (x86) + set_fact: + arch: "x86" + when: platform == "amd64" + +- name: Setting arch (arm) + set_fact: + arch: "arm64" + when: platform == "arm64" + +- name: gotrue - download commit archive + get_url: + url: "https://github.com/tealbase/gotrue/releases/download/v{{ gotrue_release }}/auth-v{{ gotrue_release }}-{{ arch }}.tar.gz" + dest: /tmp/gotrue.tar.gz + checksum: "{{ gotrue_release_checksum }}" + +- name: gotrue - create /opt/gotrue + file: + path: /opt/gotrue + state: directory + owner: gotrue + mode: 0775 + +- name: gotrue - create /etc/auth.d + file: + path: /etc/auth.d + state: directory + owner: gotrue + mode: 0775 + +- name: gotrue - unpack archive in /opt/gotrue + unarchive: + remote_src: yes + src: /tmp/gotrue.tar.gz + dest: /opt/gotrue + owner: gotrue + +# libpq is a C library that enables user programs to communicate with +# the PostgreSQL database server. 
+# - name: gotrue - system dependencies +# apt: +# pkg: +# - libpq-dev + +- name: gotrue - create service file + template: + src: files/gotrue.service.j2 + dest: /etc/systemd/system/gotrue.service + +- name: gotrue - create optimizations file + template: + src: files/gotrue-optimizations.service.j2 + dest: /etc/systemd/system/gotrue-optimizations.service + +- name: gotrue - reload systemd + systemd: + daemon_reload: yes diff --git a/ansible/tasks/setup-kong.yml b/ansible/tasks/setup-kong.yml new file mode 100644 index 0000000..b34f96e --- /dev/null +++ b/ansible/tasks/setup-kong.yml @@ -0,0 +1,62 @@ +- name: Kong - system user + user: name=kong + +# Kong installation steps from http://archive.vn/3HRQx +- name: Kong - system dependencies + apt: + pkg: + - openssl + - libpcre3 + - procps + - perl + +- name: Kong - download deb package + get_url: + url: "https://packages.konghq.com/public/gateway-28/deb/ubuntu/pool/{{ kong_release_target }}/main/k/ko/kong_2.8.1/{{ kong_deb }}" + dest: /tmp/kong.deb + checksum: "{{ kong_deb_checksum }}" + +- name: Kong - deb installation + apt: deb=file:///tmp/kong.deb + +- name: Kong - ensure it is NOT autoremoved + shell: | + set -e + apt-mark manual kong zlib1g* + +- name: Kong - configuration + template: + src: files/kong_config/kong.conf.j2 + dest: /etc/kong/kong.conf + +- name: Kong - hand over ownership of /usr/local/kong to user kong + file: + path: /usr/local/kong + recurse: yes + owner: kong + +# [warn] ulimit is currently set to "1024". For better performance set it to at least +# "4096" using "ulimit -n" +- name: Kong - bump up ulimit + pam_limits: + limit_item: nofile + limit_type: soft + domain: kong + value: "4096" + +- name: Kong - create env file + template: + src: files/kong_config/kong.env.j2 + dest: /etc/kong/kong.env + +- name: Kong - create service file + template: + src: files/kong_config/kong.service.j2 + dest: /etc/systemd/system/kong.service + +- name: Kong - disable service + systemd: + enabled: no + name: kong + state: stopped + daemon_reload: yes diff --git a/ansible/tasks/setup-migrations.yml b/ansible/tasks/setup-migrations.yml new file mode 100644 index 0000000..6eea684 --- /dev/null +++ b/ansible/tasks/setup-migrations.yml @@ -0,0 +1,13 @@ +- name: Run migrate.sh script + shell: ./migrate.sh + register: retval + when: debpkg_mode or stage2_nix + args: + chdir: /tmp/migrations/db + failed_when: retval.rc != 0 + +- name: Create /root/MIGRATION-AMI file + file: + path: "/root/MIGRATION-AMI" + state: touch + when: debpkg_mode or stage2_nix diff --git a/ansible/tasks/setup-misc.yml b/ansible/tasks/setup-misc.yml deleted file mode 100644 index e67d6a3..0000000 --- a/ansible/tasks/setup-misc.yml +++ /dev/null @@ -1,24 +0,0 @@ -# WAL-G -- name: Install daemontools - become: yes - apt: - pkg: - - daemontools - -- name: WAL-G - download latest release - get_url: - url: https://github.com/wal-g/wal-g/releases/download/v{{ wal_g_release }}/wal-g.linux-amd64.tar.gz - dest: /tmp - checksum: "{{ wal_g_release_checksum }}" - -- name: WAL-G - unpack archive - unarchive: - remote_src: yes - src: /tmp/wal-g.linux-amd64.tar.gz - dest: /tmp - become: yes - -- name: WAL-G - install - become: yes - shell: - cmd: mv /tmp/wal-g /usr/local/bin/
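The `apt-mark manual` guard in setup-kong.yml above is easy to spot-check on a built image. A minimal verification sketch (not part of the playbooks):

```bash
# Confirm kong and its pinned zlib packages stay off the autoremove list
# after `apt-mark manual kong zlib1g*` has run.
apt-mark showmanual | grep -E '^(kong|zlib1g)' || echo "kong is not pinned"
```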
diff --git a/ansible/tasks/setup-nginx.yml b/ansible/tasks/setup-nginx.yml new file mode 100644 index 0000000..77fb770 --- /dev/null +++ b/ansible/tasks/setup-nginx.yml @@ -0,0 +1,82 @@ +- name: nginx - system user + user: name=nginx + +# nginx is built from source; see the configure flags below +- name: nginx - system dependencies + apt: + pkg: + - openssl + - libpcre3-dev + - libssl-dev + - zlib1g-dev + +- name: nginx - download source + get_url: + url: "https://nginx.org/download/nginx-{{ nginx_release }}.tar.gz" + dest: /tmp/nginx-{{ nginx_release }}.tar.gz + checksum: "{{ nginx_release_checksum }}" + +- name: nginx - unpack archive + unarchive: + remote_src: yes + src: /tmp/nginx-{{ nginx_release }}.tar.gz + dest: /tmp + +- name: nginx - configure + shell: + chdir: /tmp/nginx-{{ nginx_release }} + cmd: | + set -e + + ./configure \ + --prefix=/usr/local/nginx \ + --conf-path=/etc/nginx/nginx.conf \ + --with-http_ssl_module \ + --with-http_realip_module \ + --with-threads + become: yes + +- name: nginx - build + community.general.make: + target: build + chdir: /tmp/nginx-{{ nginx_release }} + jobs: "{{ parallel_jobs | default(omit) }}" + become: yes + +- name: nginx - install + make: + chdir: /tmp/nginx-{{ nginx_release }} + target: install + become: yes + +- name: nginx - hand over ownership of /usr/local/nginx to user nginx + file: + path: /usr/local/nginx + recurse: yes + owner: nginx + +- name: nginx - hand over ownership of /etc/nginx to user nginx + file: + path: /etc/nginx + recurse: yes + owner: nginx + +# [warn] ulimit is currently set to "1024". For better performance set it to at least +# "4096" using "ulimit -n" +- name: nginx - bump up ulimit + pam_limits: + limit_item: nofile + limit_type: soft + domain: nginx + value: "4096" + +- name: nginx - create service file + template: + src: files/nginx.service.j2 + dest: /etc/systemd/system/nginx.service + +# Keep it dormant for the time being + +# - name: nginx - reload systemd +# systemd: +# daemon_reload: yes diff --git a/ansible/tasks/setup-pgbouncer.yml b/ansible/tasks/setup-pgbouncer.yml new file mode 100644 index 0000000..4381ba2 --- /dev/null +++ b/ansible/tasks/setup-pgbouncer.yml @@ -0,0 +1,135 @@ +# PgBouncer +- name: PgBouncer - download & install dependencies + apt: + pkg: + - build-essential + - libssl-dev + - pkg-config + - libevent-dev + - libsystemd-dev + update_cache: yes + cache_valid_time: 3600 + +- name: PgBouncer - download latest release + get_url: + url: "https://www.pgbouncer.org/downloads/files/{{ pgbouncer_release }}/pgbouncer-{{ pgbouncer_release }}.tar.gz" + dest: /tmp/pgbouncer-{{ pgbouncer_release }}.tar.gz + checksum: "{{ pgbouncer_release_checksum }}" + timeout: 60 + +- name: PgBouncer - unpack archive + unarchive: + remote_src: yes + src: /tmp/pgbouncer-{{ pgbouncer_release }}.tar.gz + dest: /tmp + become: yes + +- name: PgBouncer - configure + shell: + cmd: "./configure --prefix=/usr/local --with-systemd" + chdir: /tmp/pgbouncer-{{ pgbouncer_release }} + become: yes + +- name: PgBouncer - build + make: + chdir: /tmp/pgbouncer-{{ pgbouncer_release }} + become: yes + +- name: PgBouncer - install + make: + chdir: /tmp/pgbouncer-{{ pgbouncer_release }} + target: install + become: yes + +- name: Create pgbouncer user + user: + name: pgbouncer + shell: /bin/false + comment: PgBouncer user + groups: postgres,ssl-cert + +- name: PgBouncer - create /etc/pgbouncer directory + file: + path: /etc/pgbouncer + state: directory + owner: pgbouncer + group: pgbouncer + mode: '0700' + +- name: PgBouncer - create /etc/pgbouncer-custom directory + file: + state: directory + owner: pgbouncer + group: pgbouncer + path: '{{ item }}' + mode: '0775' + with_items: + - '/etc/pgbouncer-custom'
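The pgbouncer.ini template itself is not part of this diff, so how the three INI layers created in the next task get wired in is an assumption; PgBouncer does natively support the `%include` directive, and the expected layering would look roughly like this:

```bash
# Sketch only: pgbouncer.ini.j2 is not shown in this diff, so the exact
# include order below is an assumption, not the template's actual content.
cat <<'EOF'
[pgbouncer]
%include /etc/pgbouncer-custom/generated-optimizations.ini
%include /etc/pgbouncer-custom/custom-overrides.ini
%include /etc/pgbouncer-custom/ssl-config.ini
EOF
```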
- name: create placeholder config files + file: + path: '/etc/pgbouncer-custom/{{ item }}' + state: touch + owner: pgbouncer + group: pgbouncer + mode: 0664 + with_items: + - 'generated-optimizations.ini' + - 'custom-overrides.ini' + - 'ssl-config.ini' + +- name: PgBouncer - adjust pgbouncer.ini + copy: + src: files/pgbouncer_config/pgbouncer.ini.j2 + dest: /etc/pgbouncer/pgbouncer.ini + owner: pgbouncer + mode: '0700' + +- name: PgBouncer - create userlist.txt if it does not exist + file: + path: /etc/pgbouncer/userlist.txt + state: touch + owner: pgbouncer + mode: '0700' + +- name: import /etc/tmpfiles.d/pgbouncer.conf + template: + src: files/pgbouncer_config/tmpfiles.d-pgbouncer.conf.j2 + dest: /etc/tmpfiles.d/pgbouncer.conf + become: yes + +- name: PgBouncer - By default allow ssl connections. + become: yes + copy: + dest: /etc/pgbouncer-custom/ssl-config.ini + content: | + client_tls_sslmode = allow + +- name: Grant pg_hba and pgbouncer grp perm for adminapi updates + shell: | + chmod g+w /etc/postgresql/pg_hba.conf + chmod g+w /etc/pgbouncer-custom/ssl-config.ini + +# Add fail2ban filter +- name: import jail.d/pgbouncer.conf + template: + src: files/fail2ban_config/jail-pgbouncer.conf.j2 + dest: /etc/fail2ban/jail.d/pgbouncer.conf + become: yes + +- name: import filter.d/pgbouncer.conf + template: + src: files/fail2ban_config/filter-pgbouncer.conf.j2 + dest: /etc/fail2ban/filter.d/pgbouncer.conf + become: yes + +# Add systemd file for PgBouncer +- name: PgBouncer - import pgbouncer.service + template: + src: files/pgbouncer_config/pgbouncer.service.j2 + dest: /etc/systemd/system/pgbouncer.service + become: yes + +- name: PgBouncer - reload systemd + systemd: + daemon_reload: yes diff --git a/ansible/tasks/setup-postgres.yml b/ansible/tasks/setup-postgres.yml new file mode 100644 index 0000000..95536f6 --- /dev/null +++ b/ansible/tasks/setup-postgres.yml @@ -0,0 +1,322 @@ +- name: Postgres - copy package + copy: + src: files/postgres/ + dest: /tmp/build/ + when: debpkg_mode + +- name: Postgres - add PPA + apt_repository: + repo: "deb [ trusted=yes ] file:///tmp/build ./" + state: present + when: debpkg_mode + +- name: Postgres - install commons + apt: + name: postgresql-common + install_recommends: no + when: debpkg_mode + +- name: Do not create main cluster + shell: + cmd: sed -ri 's/#(create_main_cluster) .*$/\1 = false/' /etc/postgresql-common/createcluster.conf + when: debpkg_mode + +- name: Postgres - install server + apt: + name: postgresql-{{ postgresql_major }}={{ postgresql_release }}-1.pgdg24.04+1 + install_recommends: no + when: debpkg_mode + +- name: Postgres - remove PPA + apt_repository: + repo: "deb [ trusted=yes ] file:///tmp/build ./" + state: absent + when: debpkg_mode + +- name: Postgres - cleanup package + file: + path: /tmp/build + state: absent + when: debpkg_mode + +- name: install locales + apt: + name: locales + state: present + become: yes + when: stage2_nix + +- name: configure locales + shell: echo "C.UTF-8 UTF-8" > /etc/locale.gen && echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen + become: yes + when: stage2_nix + +- name: locale-gen + command: sudo locale-gen + when: stage2_nix + +- name: update-locale + command: sudo update-locale + when: stage2_nix + +- name: Create symlink to /usr/lib/postgresql/bin + shell: + cmd: ln -s /usr/lib/postgresql/{{ postgresql_major }}/bin /usr/lib/postgresql/bin + when: debpkg_mode + +- name: create ssl-cert group + group: + name: ssl-cert + state: present + when: nixpkg_mode +# the old method of installing from debian creates this group, but we must create it explicitly +# for the nix built version +
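The deb install path at the top of setup-postgres.yml serves the copied packages through a throwaway local APT repo. A rough shell equivalent of that add/install/remove dance (a sketch; it assumes /tmp/build already contains the .debs plus a generated Packages index, which the copied `files/postgres/` tree presumably provides):

```bash
# Rough shell equivalent of the transient "deb [ trusted=yes ] file:///tmp/build ./" repo.
echo 'deb [ trusted=yes ] file:///tmp/build ./' > /etc/apt/sources.list.d/local-build.list
apt-get update
apt-get install -y --no-install-recommends postgresql-common
rm /etc/apt/sources.list.d/local-build.list && apt-get update
```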
- name: create postgres group + group: + name: postgres + state: present + when: nixpkg_mode + +- name: create postgres user + shell: adduser --system --home /var/lib/postgresql --no-create-home --shell /bin/bash --group --gecos "PostgreSQL administrator" postgres + args: + executable: /bin/bash + become: yes + when: nixpkg_mode + +- name: add postgres user to ssl-cert group + shell: usermod -a -G ssl-cert postgres + args: + executable: /bin/bash + become: yes + when: nixpkg_mode + +- name: Create relevant directories + file: + path: '{{ item }}' + recurse: yes + state: directory + owner: postgres + group: postgres + with_items: + - '/home/postgres' + - '/var/log/postgresql' + - '/var/lib/postgresql' + when: debpkg_mode or nixpkg_mode + +- name: Allow adminapi to write custom config + file: + path: '{{ item }}' + recurse: yes + state: directory + owner: postgres + group: postgres + mode: 0775 + with_items: + - '/etc/postgresql' + - '/etc/postgresql-custom' + when: debpkg_mode or nixpkg_mode + +- name: create placeholder config files + file: + path: '/etc/postgresql-custom/{{ item }}' + state: touch + owner: postgres + group: postgres + mode: 0664 + with_items: + - 'generated-optimizations.conf' + - 'custom-overrides.conf' + when: debpkg_mode or nixpkg_mode + +# Move Postgres configuration files into /etc/postgresql +# Add postgresql.conf +- name: import postgresql.conf + template: + src: files/postgresql_config/postgresql.conf.j2 + dest: /etc/postgresql/postgresql.conf + group: postgres + when: debpkg_mode or nixpkg_mode + +# Add pg_hba.conf +- name: import pg_hba.conf + template: + src: files/postgresql_config/pg_hba.conf.j2 + dest: /etc/postgresql/pg_hba.conf + group: postgres + when: debpkg_mode or nixpkg_mode + +# Add pg_ident.conf +- name: import pg_ident.conf + template: + src: files/postgresql_config/pg_ident.conf.j2 + dest: /etc/postgresql/pg_ident.conf + group: postgres + when: debpkg_mode or nixpkg_mode + +# Add custom config for read replica setup +- name: Move custom read-replica.conf file to /etc/postgresql-custom/read-replica.conf + template: + src: "files/postgresql_config/custom_read_replica.conf.j2" + dest: /etc/postgresql-custom/read-replica.conf + mode: 0664 + owner: postgres + group: postgres + when: debpkg_mode or nixpkg_mode + +# Install extensions before init +- name: Install Postgres extensions + import_tasks: tasks/setup-docker.yml + when: debpkg_mode or stage2_nix + +# stage 2 postgres tasks +- name: stage2 postgres tasks + import_tasks: tasks/stage2-setup-postgres.yml + when: stage2_nix + +# init DB +- name: Create directory on data volume + file: + path: '{{ item }}' + recurse: yes + state: directory + owner: postgres + group: postgres + mode: 0750 + with_items: + - "/data/pgdata" + when: debpkg_mode or nixpkg_mode + +- name: Link database data_dir to data volume directory + file: + src: "/data/pgdata" + path: "/var/lib/postgresql/data" + state: link + force: yes + when: debpkg_mode or nixpkg_mode + +- name: Initialize the database + become: yes + become_user: postgres + shell: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data initdb -o "--allow-group-access" -o "--username=tealbase_admin" + vars: + ansible_command_timeout: 60 + when: debpkg_mode + +- name: Make sure .bashrc exists + file: + path: /var/lib/postgresql/.bashrc + state: touch + owner: postgres + group: postgres + when: nixpkg_mode
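All initdb variants (above for the deb build, below for the nix builds) run with `--allow-group-access` and create `tealbase_admin` as the bootstrap superuser. A post-init sanity check could look like this (a sketch, not part of the playbooks; whether the psql call succeeds depends on the pg_hba.conf rules in place):

```bash
# --allow-group-access should leave the cluster directory at 0750,
# owned by postgres.
stat -c '%U:%G %a' /data/pgdata
# The bootstrap superuser (role OID 10) should be tealbase_admin.
/usr/lib/postgresql/bin/psql --no-password -h localhost -U tealbase_admin -d postgres \
  -c "select rolname from pg_roles where oid = 10;"
```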
['psql_orioledb-17'] }}" + is_psql_17: "{{ psql_version in ['psql_17'] }}" + + - name: Initialize the database stage2_nix (non-orioledb) + become: yes + become_user: postgres + shell: source /var/lib/postgresql/.bashrc && /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data initdb -o "--allow-group-access" -o "--username=tealbase_admin" + args: + executable: /bin/bash + environment: + LANG: en_US.UTF-8 + LANGUAGE: en_US.UTF-8 + LC_ALL: en_US.UTF-8 + LC_CTYPE: en_US.UTF-8 + LOCALE_ARCHIVE: /usr/lib/locale/locale-archive + vars: + ansible_command_timeout: 60 + when: stage2_nix and not is_psql_oriole and not is_psql_17 + + - name: Initialize the database stage2_nix (orioledb) + become: yes + become_user: postgres + shell: > + source /var/lib/postgresql/.bashrc && /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data initdb + -o "--allow-group-access" + -o "--username=tealbase_admin" + -o "--locale-provider=icu" + -o "--encoding=UTF-8" + -o "--icu-locale=en_US.UTF-8" + args: + executable: /bin/bash + environment: + LANG: en_US.UTF-8 + LANGUAGE: en_US.UTF-8 + LC_ALL: en_US.UTF-8 + LC_CTYPE: en_US.UTF-8 + LOCALE_ARCHIVE: /usr/lib/locale/locale-archive + vars: + ansible_command_timeout: 60 + when: stage2_nix and (is_psql_oriole or is_psql_17) + +- name: copy PG systemd unit + template: + src: files/postgresql_config/postgresql.service.j2 + dest: /etc/systemd/system/postgresql.service + when: debpkg_mode or stage2_nix + +- name: copy optimizations systemd unit + template: + src: files/database-optimizations.service.j2 + dest: /etc/systemd/system/database-optimizations.service + when: debpkg_mode or stage2_nix + +- name: initialize pg required state + become: yes + shell: | + mkdir -p /run/postgresql + chown -R postgres:postgres /run/postgresql + when: stage2_nix and qemu_mode is defined + +- name: Restart Postgres Database without Systemd + become: yes + become_user: postgres + shell: | + source /var/lib/postgresql/.bashrc + /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data start + environment: + LANG: en_US.UTF-8 + LANGUAGE: en_US.UTF-8 + LC_ALL: en_US.UTF-8 + LC_CTYPE: en_US.UTF-8 + LOCALE_ARCHIVE: /usr/lib/locale/locale-archive + when: stage2_nix + + +# Reload +- name: System - systemd reload + systemd: + enabled: yes + name: postgresql + daemon_reload: yes + when: debpkg_mode or stage2_nix + + +- name: Add LOCALE_ARCHIVE to .bashrc + lineinfile: + dest: "/var/lib/postgresql/.bashrc" + line: 'export LOCALE_ARCHIVE=/usr/lib/locale/locale-archive' + create: yes + become: yes + when: nixpkg_mode + +- name: Add LANG items to .bashrc + lineinfile: + dest: "/var/lib/postgresql/.bashrc" + line: "{{ item }}" + loop: + - 'export LANG="en_US.UTF-8"' + - 'export LANGUAGE="en_US.UTF-8"' + - 'export LC_ALL="en_US.UTF-8"' + - 'export LANG="en_US.UTF-8"' + - 'export LC_CTYPE="en_US.UTF-8"' + become: yes + when: nixpkg_mode diff --git a/ansible/tasks/setup-postgrest.yml b/ansible/tasks/setup-postgrest.yml new file mode 100644 index 0000000..df9fecb --- /dev/null +++ b/ansible/tasks/setup-postgrest.yml @@ -0,0 +1,114 @@ +- name: PostgREST - system user + user: name=postgrest + +- name: PostgREST - add Postgres PPA gpg key + apt_key: + url: https://www.postgresql.org/media/keys/ACCC4CF8.asc + state: present + +- name: PostgREST - add Postgres PPA main + apt_repository: + repo: "deb http://apt.postgresql.org/pub/repos/apt/ noble-pgdg {{ postgresql_major }}" + state: present + filename: postgresql-pgdg + +- name: PostgREST - install system dependencies + apt: + package: + - libpq5 + - 
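The nix-built server relies on the LOCALE_ARCHIVE and LANG/LC_* exports written to ~postgres/.bashrc above; a quick way to confirm the environment resolves on a built image (sketch, not part of the playbooks):

```bash
# Source the postgres user's .bashrc and verify the locale setup it exports.
sudo -u postgres bash -c 'source ~/.bashrc && locale && echo "LOCALE_ARCHIVE=$LOCALE_ARCHIVE"'
```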
+- name: PostgREST - system user + user: name=postgrest + +- name: PostgREST - add Postgres PPA gpg key + apt_key: + url: https://www.postgresql.org/media/keys/ACCC4CF8.asc + state: present + +- name: PostgREST - add Postgres PPA main + apt_repository: + repo: "deb http://apt.postgresql.org/pub/repos/apt/ noble-pgdg {{ postgresql_major }}" + state: present + filename: postgresql-pgdg + +- name: PostgREST - install system dependencies + apt: + package: + - libpq5 + - libnuma-dev + update_cache: yes + state: present + +- name: PostgREST - verify libpq5 version + shell: dpkg -l libpq5 | grep '^ii' | awk '{print $3}' + register: libpq5_version + changed_when: false + +- name: Show installed libpq5 version + debug: + msg: "Installed libpq5 version: {{ libpq5_version.stdout }}" + +- name: PostgREST - remove Postgres PPA gpg key + apt_key: + url: https://www.postgresql.org/media/keys/ACCC4CF8.asc + state: absent + +- name: PostgREST - remove Postgres PPA + apt_repository: + repo: "deb http://apt.postgresql.org/pub/repos/apt/ noble-pgdg {{ postgresql_major }}" + state: absent + +- name: PostgREST - ensure dependencies do not get autoremoved + shell: | + set -e + apt-mark manual libpq5* + apt-mark manual libnuma* + apt-mark auto libnuma*-dev + +- name: PostgREST - download ubuntu binary archive (arm) + get_url: + url: "https://github.com/PostgREST/postgrest/releases/download/v{{ postgrest_release }}/postgrest-v{{ postgrest_release }}-ubuntu-aarch64.tar.xz" + dest: /tmp/postgrest.tar.xz + checksum: "{{ postgrest_arm_release_checksum }}" + timeout: 60 + when: platform == "arm64" + +- name: PostgREST - download ubuntu binary archive (x86) + get_url: + url: "https://github.com/PostgREST/postgrest/releases/download/v{{ postgrest_release }}/postgrest-v{{ postgrest_release }}-linux-static-x86-64.tar.xz" + dest: /tmp/postgrest.tar.xz + checksum: "{{ postgrest_x86_release_checksum }}" + timeout: 60 + when: platform == "amd64" + +- name: PostgREST - unpack archive in /opt + unarchive: + remote_src: yes + src: /tmp/postgrest.tar.xz + dest: /opt + owner: postgrest + mode: '0755' + +- name: create directories + file: + state: directory + owner: postgrest + group: postgrest + mode: '0775' + path: /etc/postgrest + +- name: empty files + file: + state: touch + owner: postgrest + group: postgrest + path: /etc/postgrest/{{ item }} + with_items: + - base.conf + - generated.conf + +- name: create conf merging script + copy: + content: | + #! /usr/bin/env bash + set -euo pipefail + set -x + cd "$(dirname "$0")" + cat $@ > merged.conf + dest: /etc/postgrest/merge.sh + mode: 0750 + owner: postgrest + group: postgrest + +- name: PostgREST - create service files + template: + src: files/{{ item }}.j2 + dest: /etc/systemd/system/{{ item }} + with_items: + - postgrest.service + - postgrest-optimizations.service + +- name: PostgREST - reload systemd + systemd: + daemon_reload: yes
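The merge.sh helper above cd's into its own directory and concatenates whatever fragment names it is given into merged.conf, so fragment paths are relative to /etc/postgrest. Expected usage (a sketch; the service units are presumed to invoke it similarly):

```bash
# Merge the config layers into /etc/postgrest/merged.conf.
sudo -u postgrest /etc/postgrest/merge.sh base.conf generated.conf
cat /etc/postgrest/merged.conf
```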
diff --git a/ansible/tasks/setup-system.yml b/ansible/tasks/setup-system.yml index 05ff498..c1285bf 100644 --- a/ansible/tasks/setup-system.yml +++ b/ansible/tasks/setup-system.yml @@ -1,63 +1,199 @@ -# DigitalOcean's ubuntu droplet isn't up to date with installed packages, and on -# a fresh install I see 71 security upgrades available. - name: System - apt update and apt upgrade apt: update_cache=yes upgrade=yes + when: debpkg_mode or nixpkg_mode # SEE http://archive.vn/DKJjs#parameter-upgrade -- name: Wait for /var/lib/apt/lists/lock - become: yes - shell: while sudo fuser /var/lib/apt/lists/lock; do sleep 10; done; - tags: - - update - - update-only - -- name: Wait for /var/lib/dpkg/lock-frontend - become: yes - shell: while sudo fuser /var/lib/dpkg/lock-frontend; do sleep 10; done; - tags: - - update - - update-only - -- name: add universe repository for bionic - apt_repository: - repo: deb http://archive.ubuntu.com/ubuntu bionic universe - state: present - -- name: Install python - become: yes +- name: Install required security updates apt: pkg: - - python - - python-pip - - python3 - - python3-pip - update_cache: yes - cache_valid_time: 3600 + - tzdata + - linux-libc-dev + when: debpkg_mode or nixpkg_mode +# SEE https://github.com/georchestra/ansible/issues/55#issuecomment-588313638 +# Without this, a similar error is faced +- name: Install Ansible dependencies + apt: + pkg: + - acl + when: debpkg_mode or nixpkg_mode - name: Install security tools - become: yes apt: pkg: - - ufw + - nftables - fail2ban - - unattended-upgrades update_cache: yes cache_valid_time: 3600 - + when: debpkg_mode or nixpkg_mode + +- name: Use nftables backend + shell: | + update-alternatives --set iptables /usr/sbin/iptables-nft + update-alternatives --set ip6tables /usr/sbin/ip6tables-nft + update-alternatives --set arptables /usr/sbin/arptables-nft + update-alternatives --set ebtables /usr/sbin/ebtables-nft + systemctl restart ufw + when: debpkg_mode or nixpkg_mode + +- name: Create Sysstat log directory + file: + path: /var/log/sysstat + state: directory + when: debpkg_mode or nixpkg_mode + +- name: Install other useful tools + apt: + pkg: + - bwm-ng + - htop + - net-tools + - ngrep + - sysstat + - vim-tiny + update_cache: yes + when: debpkg_mode or nixpkg_mode + +- name: Configure sysstat + copy: + src: files/sysstat.sysstat + dest: /etc/sysstat/sysstat + when: debpkg_mode or nixpkg_mode + +- name: Configure default sysstat + copy: + src: files/default.sysstat + dest: /etc/default/sysstat + when: debpkg_mode or nixpkg_mode + + - name: Adjust APT update intervals - copy: + copy: src: files/apt_periodic dest: /etc/apt/apt.conf.d/10periodic + when: debpkg_mode or nixpkg_mode -- name: Install psycopg2 to enable ansible postgreSQL features - pip: - name: psycopg2-binary +# Find platform architecture and set as a variable +- name: finding platform architecture + shell: if [ $(uname -m) = "aarch64" ]; then echo "arm64"; else echo "amd64"; fi + register: platform_output + tags: + - update + - update-only +- set_fact: + platform: "{{ platform_output.stdout }}" + tags: + - update + - update-only + when: debpkg_mode or nixpkg_mode or stage2_nix + +- name: create overrides dir + file: + state: directory + owner: root + group: root + path: /etc/systemd/system/systemd-resolved.service.d + mode: '0700' + when: debpkg_mode or nixpkg_mode + +- name: Custom systemd overrides for resolved + copy: + src: files/systemd-resolved.conf + dest: /etc/systemd/system/systemd-resolved.service.d/override.conf + when: debpkg_mode or nixpkg_mode - name: System - Create services.slice template: src: files/services.slice.j2 dest: /etc/systemd/system/services.slice + when: debpkg_mode or nixpkg_mode + - name: System - systemd reload systemd: daemon_reload=yes + when: debpkg_mode or nixpkg_mode +
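After the update-alternatives switch earlier in this file, the iptables front-end should report the nf_tables backend. A one-line check on a built image (sketch):

```bash
# Expect output like: iptables v1.8.x (nf_tables)
iptables --version
update-alternatives --display iptables | head -n 2
```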
+- name: Configure journald + copy: + src: files/journald.conf + dest: /etc/systemd/journald.conf + when: debpkg_mode or nixpkg_mode + +- name: reload systemd-journald + systemd: + name: systemd-journald + state: restarted + when: debpkg_mode or nixpkg_mode + +- name: Configure logind + copy: + src: files/logind.conf + dest: /etc/systemd/logind.conf + when: debpkg_mode or nixpkg_mode + +- name: reload systemd-logind + systemd: + name: systemd-logind + state: restarted + when: debpkg_mode or nixpkg_mode +- name: enable timestamps for shell history + copy: + content: | + export HISTTIMEFORMAT='%d/%m/%y %T ' + dest: /etc/profile.d/09-history-timestamps.sh + mode: 0644 + owner: root + group: root + when: debpkg_mode or nixpkg_mode + +- name: set hosts file + copy: + content: | + 127.0.0.1 localhost + ::1 localhost + dest: /etc/hosts + mode: 0644 + owner: root + group: root + when: debpkg_mode or stage2_nix + +# Set sysctl params to reboot the OS 10 seconds after an OOM-triggered panic +- name: Set vm.panic_on_oom=1 + ansible.builtin.sysctl: + name: vm.panic_on_oom + value: '1' + state: present + reload: yes + when: debpkg_mode or nixpkg_mode + +- name: Set kernel.panic=10 + ansible.builtin.sysctl: + name: kernel.panic + value: '10' + state: present + reload: yes + when: debpkg_mode or nixpkg_mode + +- name: Set net.core.somaxconn + ansible.posix.sysctl: + name: 'net.core.somaxconn' + value: 16834 + +- name: Set net.ipv4.ip_local_port_range + ansible.posix.sysctl: + name: 'net.ipv4.ip_local_port_range' + value: '1025 65000' + +# Set sysctl params specific to keepalives +- name: Set net.ipv4.tcp_keepalive_time=1800 + ansible.builtin.sysctl: + name: net.ipv4.tcp_keepalive_time + value: 1800 + state: present + when: debpkg_mode or nixpkg_mode +- name: Set net.ipv4.tcp_keepalive_intvl=60 + ansible.builtin.sysctl: + name: net.ipv4.tcp_keepalive_intvl + value: 60 + state: present + when: debpkg_mode or nixpkg_mode diff --git a/ansible/tasks/setup-tealbase-internal.yml b/ansible/tasks/setup-tealbase-internal.yml new file mode 100644 index 0000000..92f54d2 --- /dev/null +++ b/ansible/tasks/setup-tealbase-internal.yml @@ -0,0 +1,124 @@ +- name: AWS CLI dep + apt: + pkg: + - unzip + - jq + install_recommends: no + +- name: AWS CLI (arm) + get_url: + url: "https://awscli.amazonaws.com/awscli-exe-linux-aarch64-{{ aws_cli_release }}.zip" + dest: "/tmp/awscliv2.zip" + timeout: 60 + when: platform == "arm64" + +- name: AWS CLI (x86) + get_url: + url: "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-{{ aws_cli_release }}.zip" + dest: "/tmp/awscliv2.zip" + timeout: 60 + when: platform == "amd64" + +- name: AWS CLI - expand + unarchive: + remote_src: yes + src: "/tmp/awscliv2.zip" + dest: "/tmp" + +- name: AWS CLI - install + shell: "/tmp/aws/install --update" + become: true + +- name: AWS CLI - configure ipv6 support for s3 + shell: | + aws configure set default.s3.use_dualstack_endpoint true + +- name: install Vector for logging + become: true + apt: + deb: "{{ vector_x86_deb }}" + when: platform == "amd64" + +- name: install Vector for logging + become: true + apt: + deb: "{{ vector_arm_deb }}" + when: platform == "arm64" + +- name: add Vector to postgres group + become: true + shell: + cmd: | + usermod -a -G postgres vector + +- name: create service files for Vector + template: + src: files/vector.service.j2 + dest: /etc/systemd/system/vector.service + +- name: configure tmpfiles for postgres - overwrites upstream package + template: + src: files/postgresql_config/tmpfiles.postgresql.conf + dest: /etc/tmpfiles.d/postgresql-common.conf +
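The sysctl knobs set in setup-system.yml above (panic-on-OOM and keepalives) can be confirmed on a built image in one shot (sketch, not part of the playbooks):

```bash
# Values expected from the sysctl tasks in setup-system.yml.
sysctl vm.panic_on_oom kernel.panic \
  net.ipv4.tcp_keepalive_time net.ipv4.tcp_keepalive_intvl
```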
+- name: fix permissions for vector config to be managed + shell: + cmd: | + chown -R vector:vector /etc/vector + chmod 0775 /etc/vector + +- name: vector - reload systemd + systemd: + daemon_reload: yes + +- name: Create checkpoints dir + become: true + file: + path: /var/lib/vector + state: directory + owner: vector + +- name: Include file for generated optimizations in postgresql.conf + become: true + replace: + path: /etc/postgresql/postgresql.conf + regexp: "#include = '/etc/postgresql-custom/generated-optimizations.conf'" + replace: "include = '/etc/postgresql-custom/generated-optimizations.conf'" + +- name: Include file for custom overrides in postgresql.conf + become: true + replace: + path: /etc/postgresql/postgresql.conf + regexp: "#include = '/etc/postgresql-custom/custom-overrides.conf'" + replace: "include = '/etc/postgresql-custom/custom-overrides.conf'" + +- name: Install Postgres exporter + import_tasks: internal/postgres-exporter.yml + +- name: Install admin-mgr + import_tasks: internal/admin-mgr.yml + +- name: Install adminapi + import_tasks: internal/admin-api.yml + +- name: Init nftables + import_tasks: internal/setup-nftables.yml + +- name: Install pg_egress_collect + import_tasks: internal/pg_egress_collect.yml + +- name: Install PostgreSQL prestart script + import_tasks: internal/postgresql-prestart.yml + +- name: Install salt minion + import_tasks: internal/install-salt.yml + tags: + - aws-only + +- name: Install tealbase-admin-agent + import_tasks: internal/tealbase-admin-agent.yml + tags: + - aws-only + +- name: Envoy - use lds.tealbase.yaml for /etc/envoy/lds.yaml + command: mv /etc/envoy/lds.tealbase.yaml /etc/envoy/lds.yaml diff --git a/ansible/tasks/setup-wal-g.yml b/ansible/tasks/setup-wal-g.yml new file mode 100644 index 0000000..aa56fae --- /dev/null +++ b/ansible/tasks/setup-wal-g.yml @@ -0,0 +1,95 @@ +- name: Create wal-g group + group: + name: wal-g + state: present + when: nixpkg_mode + +- name: Create wal-g user + user: + name: wal-g + shell: /bin/false + comment: WAL-G user + group: wal-g + groups: wal-g, postgres + when: nixpkg_mode +- name: Create a config directory owned by wal-g + file: + path: /etc/wal-g + state: directory + owner: wal-g + group: wal-g + mode: '0770' + when: nixpkg_mode + +- name: Install wal-g 2 from nix binary cache + become: yes + shell: | + sudo -u wal-g bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:tealbase/postgres/{{ git_commit_sha }}#wal-g-2" + when: stage2_nix + +- name: Install wal-g 3 from nix binary cache + become: yes + shell: | + sudo -u wal-g bash -c ". 
/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:tealbase/postgres/{{ git_commit_sha }}#wal-g-3" + when: stage2_nix + +- name: Create symlink for wal-g-3 from Nix profile to /usr/local/bin + ansible.builtin.file: + src: /home/wal-g/.nix-profile/bin/wal-g-3 + dest: /usr/local/bin/wal-g-v3 + state: link + force: yes # This will replace existing file/symlink if it exists + become: yes # Need sudo to write to /usr/local/bin + when: stage2_nix + +- name: Create symlink to make wal-g-v2 the default wal-g + ansible.builtin.file: + src: /home/wal-g/.nix-profile/bin/wal-g-2 + dest: /usr/local/bin/wal-g + state: link + force: yes + become: yes + when: stage2_nix + +- name: Create /etc/wal-g/config.json + file: + path: /etc/wal-g/config.json + state: touch + owner: wal-g + group: wal-g + mode: '0664' + when: stage2_nix + +- name: Move custom wal-g.conf file to /etc/postgresql-custom/wal-g.conf + template: + src: "files/postgresql_config/custom_walg.conf.j2" + dest: /etc/postgresql-custom/wal-g.conf + mode: 0664 + owner: postgres + group: postgres + when: stage2_nix + +- name: Add script to be run for restore_command + template: + src: "files/walg_helper_scripts/wal_fetch.sh" + dest: /home/postgres/wal_fetch.sh + mode: 0500 + owner: postgres + group: postgres + when: stage2_nix + +- name: Add helper script for wal_fetch.sh + template: + src: "files/walg_helper_scripts/wal_change_ownership.sh" + dest: /root/wal_change_ownership.sh + mode: 0700 + owner: root + when: stage2_nix + +- name: Include /etc/postgresql-custom/wal-g.conf in postgresql.conf + become: yes + replace: + path: /etc/postgresql/postgresql.conf + regexp: "#include = '/etc/postgresql-custom/wal-g.conf'" + replace: "include = '/etc/postgresql-custom/wal-g.conf'" + when: stage2_nix diff --git a/ansible/tasks/stage2-setup-postgres.yml b/ansible/tasks/stage2-setup-postgres.yml new file mode 100644 index 0000000..0ed2066 --- /dev/null +++ b/ansible/tasks/stage2-setup-postgres.yml @@ -0,0 +1,271 @@
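With the two symlinks above in place, wal-g-v2 is the default `wal-g` on PATH and v3 stays available alongside it. A verification sketch (not part of the playbooks):

```bash
# Both symlinks should resolve into the wal-g user's nix profile.
readlink -f /usr/local/bin/wal-g /usr/local/bin/wal-g-v3
wal-g --version   # assumes the binary supports --version, as recent releases do
```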
+# - name: Install openjdk11 for pljava from nix binary cache +# become: yes +# shell: | +# sudo -u postgres bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install nixpkgs#openjdk11" +# pljava was left disabled in https://github.com/tealbase/postgres/pull/690, so this task was removed + +- name: Check psql_version and modify supautils.conf and postgresql.conf if necessary + block: + - name: Check if psql_version is psql_orioledb-17 + set_fact: + is_psql_oriole: "{{ psql_version in ['psql_orioledb-17'] }}" + + - name: Check if psql_version is psql_17 + set_fact: + is_psql_17: "{{ psql_version in ['psql_17'] }}" + + - name: Check if psql_version is psql_15 + set_fact: + is_psql_15: "{{ psql_version in ['psql_15'] }}" + + - name: Remove specified extensions from postgresql.conf if orioledb-17 or 17 build + ansible.builtin.command: + cmd: > + sed -i 's/ timescaledb,//g' + /etc/postgresql/postgresql.conf + when: (is_psql_oriole or is_psql_17) and stage2_nix + become: yes + + - name: Remove specified extensions from supautils.conf if orioledb-17 or 17 build + ansible.builtin.command: + cmd: > + sed -i 's/ timescaledb,//g; s/ plv8,//g' + /etc/postgresql-custom/supautils.conf + when: (is_psql_oriole or is_psql_17) and stage2_nix + become: yes + + - name: Remove db_user_namespace from postgresql.conf if orioledb-17 or 17 build + ansible.builtin.command: + cmd: > + sed -i 's/db_user_namespace = off/#db_user_namespace = off/g;' + /etc/postgresql/postgresql.conf + when: (is_psql_oriole or is_psql_17) and stage2_nix + become: yes + + - name: Append orioledb to shared_preload_libraries within the closing quote + ansible.builtin.command: + cmd: > + sed -i 's/\(shared_preload_libraries.*\)'\''\(.*\)$/\1, orioledb'\''\2/' + /etc/postgresql/postgresql.conf + when: is_psql_oriole and stage2_nix + become: yes + + - name: Add default_table_access_method setting + ansible.builtin.lineinfile: + path: /etc/postgresql/postgresql.conf + line: "default_table_access_method = 'orioledb'" + state: present + when: is_psql_oriole and stage2_nix + become: yes + + - name: Add ORIOLEDB_ENABLED environment variable + ansible.builtin.lineinfile: + path: /etc/environment + line: 'ORIOLEDB_ENABLED=true' + when: is_psql_oriole and stage2_nix + become: yes + +- name: Install Postgres from nix binary cache + become: yes + shell: | + sudo -u postgres bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:tealbase/postgres/{{ git_commit_sha }}#{{psql_version}}/bin" + when: stage2_nix + +- name: Install pg_prove from nix binary cache + become: yes + shell: | + sudo -u postgres bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:tealbase/postgres/{{ git_commit_sha }}#pg_prove" + when: stage2_nix + +- name: Install tealbase-groonga from nix binary cache + become: yes + shell: | + sudo -u postgres bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:tealbase/postgres/{{ git_commit_sha }}#tealbase-groonga" + when: stage2_nix + +- name: Install debug symbols for postgres version + become: yes + shell: | + sudo -u postgres bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:tealbase/postgres/{{ git_commit_sha }}#{{postgresql_version}}_debug" + when: stage2_nix +
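Every component above (and the source files installed next) lands in the postgres user's Nix profile from the flake pinned to this image's commit. Listing the profile is a convenient audit of what got installed (a sketch; git_commit_sha is supplied by the build):

```bash
# List everything installed into the postgres user's nix profile.
sudo -u postgres bash -c '. /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh \
  && nix profile list'
```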
+- name: Install source files for postgresql version + become: yes + shell: | + sudo -u postgres bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:tealbase/postgres/{{ git_commit_sha }}#{{postgresql_version}}_src" + when: stage2_nix + +- name: Set ownership and permissions for /etc/ssl/private + become: yes + file: + path: /etc/ssl/private + owner: root + group: postgres + mode: '0750' + when: stage2_nix + +- name: Set permissions for postgresql.env + become: yes + file: + path: /etc/environment.d/postgresql.env + owner: postgres + group: postgres + mode: '0644' + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/bin directory exists + file: + path: /usr/lib/postgresql/bin + state: directory + owner: postgres + group: postgres + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/share directory exists + file: + path: /usr/lib/postgresql/share/postgresql + state: directory + owner: postgres + group: postgres + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/share/contrib directory exists + file: + path: /usr/lib/postgresql/share/postgresql/contrib + state: directory + owner: postgres + group: postgres + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/share/timezonesets directory exists + file: + path: /usr/lib/postgresql/share/postgresql/timezonesets + state: directory + owner: postgres + group: postgres + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/share/tsearch_data directory exists + file: + path: /usr/lib/postgresql/share/postgresql/tsearch_data + state: directory + owner: postgres + group: postgres + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/share/extension directory exists + file: + path: /usr/lib/postgresql/share/postgresql/extension + state: directory + owner: postgres + group: postgres + when: stage2_nix + +- name: import pgsodium_getkey script + template: + src: files/pgsodium_getkey_readonly.sh.j2 + dest: "/usr/lib/postgresql/bin/pgsodium_getkey.sh" + owner: postgres + group: postgres + mode: 0700 + when: stage2_nix + +- name: Create symbolic links from /var/lib/postgresql/.nix-profile/bin to /usr/lib/postgresql/bin + shell: >- + find /var/lib/postgresql/.nix-profile/bin/ -maxdepth 1 -type f,l -exec sh -c 'ln -s "$0" "{{ item }}/$(basename $0)"' {} \; + loop: + - /usr/lib/postgresql/bin + - /usr/bin + become: yes + when: stage2_nix + +- name: Check if /usr/bin/pg_config exists + stat: + path: /usr/bin/pg_config + register: pg_config_stat + when: stage2_nix + +- name: Remove existing /usr/bin/pg_config if it is not a symlink + file: + path: /usr/bin/pg_config + state: absent + when: pg_config_stat.stat.exists and not pg_config_stat.stat.islnk and stage2_nix + become: yes + +- name: Ensure postgres user has ownership of symlink + shell: >- + find /var/lib/postgresql/.nix-profile/bin/ -maxdepth 1 -type f,l -exec chown postgres:postgres "/usr/bin/$(basename {})" \; + become: yes + when: stage2_nix + +- name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql to /usr/lib/postgresql/share/postgresql + shell: >- + find /var/lib/postgresql/.nix-profile/share/postgresql/ -maxdepth 1 -type f,l -exec sh -c 'ln -s "$0" "/usr/lib/postgresql/share/postgresql/$(basename $0)"' {} \; + become: yes + when: stage2_nix + +- name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql/extension to /usr/lib/postgresql/share/postgresql/extension + shell: >- + find /var/lib/postgresql/.nix-profile/share/postgresql/extension/ -maxdepth 1 -type f,l -exec sh -c 'ln -s "$0" "/usr/lib/postgresql/share/postgresql/extension/$(basename $0)"' {} \; + become: yes + when: stage2_nix
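After the symlink farm above is laid down, the usual postgres binaries on PATH should resolve back into the nix profile. A spot check (sketch, not part of the playbooks):

```bash
# /usr/bin/psql should point into /var/lib/postgresql/.nix-profile/bin,
# and pg_config should report the shimmed bindir.
readlink -f /usr/bin/psql
/usr/lib/postgresql/bin/pg_config --bindir
```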
stage2_nix
+
+- name: Create destination directory
+  file:
+    path: /usr/lib/postgresql/share/postgresql/contrib/
+    state: directory
+    recurse: yes
+  when: stage2_nix
+
+- name: Check psql_version and run postgis linking if not oriole-xx
+  block:
+    - name: Check if psql_version is psql_orioledb-17
+      set_fact:
+        is_psql_oriole: "{{ psql_version == 'psql_orioledb-17' }}"
+
+    - name: Recursively create symbolic links and set permissions for the contrib/postgis-* dir
+      shell: >
+        sudo mkdir -p /usr/lib/postgresql/share/postgresql/contrib && \
+        sudo find /var/lib/postgresql/.nix-profile/share/postgresql/contrib/ -mindepth 1 -type d -exec sh -c 'for dir do sudo ln -s "$dir" "/usr/lib/postgresql/share/postgresql/contrib/$(basename "$dir")"; done' sh {} + \
+        && chown -R postgres:postgres "/usr/lib/postgresql/share/postgresql/contrib/"
+      become: yes
+      when: stage2_nix and not is_psql_oriole
+
+- name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql/timezonesets to /usr/lib/postgresql/share/postgresql/timezonesets
+  shell: >-
+    find /var/lib/postgresql/.nix-profile/share/postgresql/timezonesets/ -maxdepth 1 -type f,l -exec sh -c 'ln -s "$0" "/usr/lib/postgresql/share/postgresql/timezonesets/$(basename $0)"' {} \;
+  become: yes
+  when: stage2_nix
+
+- name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql/tsearch_data to /usr/lib/postgresql/share/postgresql/tsearch_data
+  shell: >-
+    find /var/lib/postgresql/.nix-profile/share/postgresql/tsearch_data/ -maxdepth 1 -type f,l -exec sh -c 'ln -s "$0" "/usr/lib/postgresql/share/postgresql/tsearch_data/$(basename $0)"' {} \;
+  become: yes
+  when: stage2_nix
+
+- set_fact:
+    pg_bindir: "/usr/lib/postgresql/bin"
+  when: stage2_nix
+
+- name: pgsodium - set pgsodium.getkey_script
+  become: yes
+  lineinfile:
+    path: /etc/postgresql/postgresql.conf
+    state: present
+    # script is expected to be placed by finalization tasks for different target platforms
+    line: pgsodium.getkey_script= '{{ pg_bindir }}/pgsodium_getkey.sh'
+  when: stage2_nix
+
+- name: Create symbolic link for pgsodium_getkey script
+  file:
+    src: "/usr/lib/postgresql/bin/pgsodium_getkey.sh"
+    dest: "/usr/lib/postgresql/share/postgresql/extension/pgsodium_getkey"
+    state: link
+  become: yes
+  when: stage2_nix
+
+- name: Append GRN_PLUGINS_DIR to /etc/environment.d/postgresql.env
+  ansible.builtin.lineinfile:
+    path: /etc/environment.d/postgresql.env
+    line: 'GRN_PLUGINS_DIR=/var/lib/postgresql/.nix-profile/lib/groonga/plugins'
+  become: yes
diff --git a/ansible/tasks/test-image.yml b/ansible/tasks/test-image.yml
new file mode 100644
index 0000000..b152971
--- /dev/null
+++ b/ansible/tasks/test-image.yml
@@ -0,0 +1,87 @@
+# - name: Temporarily disable PG Sodium references in config
+#   become: yes
+#   become_user: postgres
+#   shell:
+#     cmd: sed -i.bak -e "s/pg_net,\ pgsodium,\ timescaledb/pg_net,\ timescaledb/g" -e "s/pgsodium.getkey_script=/#pgsodium.getkey_script=/g" /etc/postgresql/postgresql.conf
+#   when: debpkg_mode or stage2_nix
+
+- name: Temporarily disable PG Sodium and tealbase Vault references in config
+  become: yes
+  become_user: postgres
+  shell:
+    cmd: >
+      sed -i.bak
+      -e 's/\(shared_preload_libraries = '\''.*\)pgsodium,\(.*'\''\)/\1\2/'
+      -e 's/\(shared_preload_libraries = '\''.*\)tealbase_vault,\(.*'\''\)/\1\2/'
+      -e 's/\(shared_preload_libraries = '\''.*\), *tealbase_vault'\''/\1'\''/'
+      -e 's/pgsodium.getkey_script=/#pgsodium.getkey_script=/'
+      /etc/postgresql/postgresql.conf
+  when: debpkg_mode or stage2_nix
+
+- name: 
Verify pgsodium and vault removal from config + become: yes + become_user: postgres + shell: + cmd: | + FOUND=$(grep -E "shared_preload_libraries.*pgsodium|shared_preload_libraries.*tealbase_vault|^pgsodium\.getkey_script" /etc/postgresql/postgresql.conf) + if [ ! -z "$FOUND" ]; then + echo "Found unremoved references:" + echo "$FOUND" + exit 1 + fi + register: verify_result + failed_when: verify_result.rc != 0 + when: debpkg_mode or stage2_nix + +- name: Start Postgres Database to load all extensions. + become: yes + become_user: postgres + shell: + cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data start "-o -c config_file=/etc/postgresql/postgresql.conf" + when: debpkg_mode + +- name: Stop Postgres Database in stage 2 + become: yes + become_user: postgres + shell: source /var/lib/postgresql/.bashrc && /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data stop + args: + executable: /bin/bash + environment: + LANG: en_US.UTF-8 + LANGUAGE: en_US.UTF-8 + LC_ALL: en_US.UTF-8 + LC_CTYPE: en_US.UTF-8 + LOCALE_ARCHIVE: /usr/lib/locale/locale-archive + when: stage2_nix + +- name: Start Postgres Database to load all extensions. + become: yes + become_user: postgres + shell: source /var/lib/postgresql/.bashrc && /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data start "-o -c config_file=/etc/postgresql/postgresql.conf" + args: + executable: /bin/bash + environment: + LANG: en_US.UTF-8 + LANGUAGE: en_US.UTF-8 + LC_ALL: en_US.UTF-8 + LC_CTYPE: en_US.UTF-8 + LOCALE_ARCHIVE: /usr/lib/locale/locale-archive + when: stage2_nix + +- name: Re-enable PG Sodium references in config + become: yes + become_user: postgres + shell: + cmd: mv /etc/postgresql/postgresql.conf.bak /etc/postgresql/postgresql.conf + when: debpkg_mode or stage2_nix + +- name: Reset db stats + shell: /usr/lib/postgresql/bin/psql --no-password --no-psqlrc -d postgres -h localhost -U tealbase_admin -c 'SELECT pg_stat_statements_reset(); SELECT pg_stat_reset();' + when: debpkg_mode or stage2_nix + +- name: Stop Postgres Database + become: yes + become_user: postgres + shell: + cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data stop + when: debpkg_mode or stage2_nix diff --git a/ansible/vars.yml b/ansible/vars.yml index a804557..96c136d 100644 --- a/ansible/vars.yml +++ b/ansible/vars.yml @@ -1,42 +1,61 @@ -postgresql_version: 12 -postgresql_wal_level: "logical" -postgresql_max_wal_senders: 10 -postgresql_max_replication_slots: 5 -postgresql_row_security: on -postgresql_listen_addresses: - - "*" - -postgresql_ext_install_contrib: yes -postgresql_ext_install_dev_headers: yes - -# Warning: Make sure the postgresql & postgis versions are compatible with one another -postgresql_ext_postgis_version: 3 - -postgresql_shared_preload_libraries: [pg_stat_statements, pgaudit, plpgsql, plpgsql_check ] - -postgresql_pg_hba_custom: - - {type: "host", database: "all", user: "all", address: "0.0.0.0/0", method: "md5" } - -pgtap_release: "1.1.0" -pgtap_release_checksum: sha1:cca57708e723de18735a723b774577dc52f6f31e - -pgaudit_release: "1.4.0" -pgaudit_release_checksum: sha1:ea085fbf227b5c461331ab33b99579f37db299a6 - -pgsql_http_release: "1.3.1" -pgsql_http_release_checksum: sha1:816a3fff53e05301b176cf0696799fc5a00f54e8 - -plpgsql_check_release: "1.11.0" -plpgsql_check_release_checksum: sha1:395313b6ef9c10c4fc182817d6f0040b171147b8 - -pljava_release: "1_5_5" -pljava_release_checksum: sha1:5277433030fdeed8528c7c0154163b54aedbd842 - -postgresql_log_destination: "csvlog" -postgresql_logging_collector: on 
-postgresql_log_filename: "postgresql.log" -postgresql_log_rotation_age: 0 -postgresql_log_rotation_size: 0 - -wal_g_release: "0.2.15" -wal_g_release_checksum: sha1:e82d405121e0ccc322a323b9824e60c102b14004 \ No newline at end of file +tealbase_internal: true +ebssurrogate_mode: true +async_mode: true + +postgres_major: + - "15" + - "17" + - "orioledb-17" + +# Full version strings for each major version +postgres_release: + postgresorioledb-17: "17.5.1.009-orioledb" + postgres17: "17.4.1.066" + postgres15: "15.8.1.123" + +# Non Postgres Extensions +pgbouncer_release: "1.19.0" +pgbouncer_release_checksum: sha256:af0b05e97d0e1fd9ad45fe00ea6d2a934c63075f67f7e2ccef2ca59e3d8ce682 + +# The checksum can be found under "Assets", in the GitHub release page for each version. +# The binaries used are: ubuntu-aarch64 and linux-static. +# https://github.com/PostgREST/postgrest/releases +postgrest_release: "13.0.4" +postgrest_arm_release_checksum: sha256:2b400200fb15eb5849267e4375fbbc516dd727afadd8786815b48074ed8c03e1 +postgrest_x86_release_checksum: sha256:a0052c8d4726f52349e0298f98da51140ef4941855548590ee88331afa617811 + +gotrue_release: 2.177.0 +gotrue_release_checksum: sha1:664a26237618c4bfb1e33e4f03a540c3cef3e3c8 + +aws_cli_release: "2.23.11" + +salt_minion_version: 3007 + +golang_version: "1.22.11" +golang_version_checksum: + arm64: sha256:0fc88d966d33896384fbde56e9a8d80a305dc17a9f48f1832e061724b1719991 + amd64: sha256:9ebfcab26801fa4cf0627c6439db7a4da4d3c6766142a3dd83508240e4f21031 + +envoy_release: 1.28.0 +envoy_release_checksum: sha1:b0a06e9cfb170f1993f369beaa5aa9d7ec679ce5 +envoy_hot_restarter_release_checksum: sha1:6d43b89d266fb2427a4b51756b649883b0617eda + +kong_release_target: focal +kong_deb: kong_2.8.1_arm64.deb +kong_deb_checksum: sha1:2086f6ccf8454fe64435252fea4d29d736d7ec61 + +nginx_release: 1.22.0 +nginx_release_checksum: sha1:419efb77b80f165666e2ee406ad8ae9b845aba93 + +postgres_exporter_release: "0.15.0" +postgres_exporter_release_checksum: + arm64: sha256:29ba62d538b92d39952afe12ee2e1f4401250d678ff4b354ff2752f4321c87a0 + amd64: sha256:cb89fc5bf4485fb554e0d640d9684fae143a4b2d5fa443009bd29c59f9129e84 + +adminapi_release: 0.84.1 +adminmgr_release: 0.25.1 +tealbase_admin_agent_release: 1.4.37 +tealbase_admin_agent_splay: 30 + +vector_x86_deb: "https://packages.timber.io/vector/0.48.X/vector_0.48.0-1_amd64.deb" +vector_arm_deb: "https://packages.timber.io/vector/0.48.X/vector_0.48.0-1_arm64.deb" diff --git a/development-arm.vars.pkr.hcl b/development-arm.vars.pkr.hcl new file mode 100644 index 0000000..6772bf6 --- /dev/null +++ b/development-arm.vars.pkr.hcl @@ -0,0 +1,7 @@ +arch = "arm64" +ami_regions = ["us-east-1"] +environment = "dev" +instance-type = "c6g.4xlarge" +region= "us-east-1" +ubuntu-2004 = "ami-0b49a4a6e8e22fa16" + diff --git a/digitalOcean.json b/digitalOcean.json deleted file mode 100644 index e712228..0000000 --- a/digitalOcean.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "variables": { - "do_token": "", - "image_name": "ubuntu-18-04-x64", - "region": "sgp1", - "snapshot_regions": "sgp1", - "ansible_arguments": "--skip-tags,update-only" - }, - "builders": [{ - "type": "digitalocean", - "api_token": "{{user `do_token`}}", - "image": "{{user `image_name`}}", - "region": "{{user `region`}}", - "snapshot_regions": "{{user `snapshot_regions`}}", - "size": "s-1vcpu-1gb", - "ssh_username": "root", - "snapshot_name": "tealbase-postgres-0.13.0" - }], - "provisioners": [ - { - "type": "ansible", - "user": "root", - "playbook_file": "ansible/playbook.yml", - "extra_arguments": "{{user 
`ansible_arguments`}}" - }, - { - "type": "shell", - "scripts": [ - "scripts/01-test", - "scripts/90-cleanup.sh", - "scripts/91-log_cleanup.sh", - "scripts/99-img_check.sh" - ] - } - ] -} \ No newline at end of file diff --git a/docker/Dockerfile b/docker/Dockerfile index b0b2856..53a9602 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,180 +1,78 @@ -FROM postgres:12 - -# install postgis -ENV POSTGIS_MAJOR 3 -ENV POSTGIS_VERSION 3.0.0+dfsg-2~exp1.pgdg100+1 -RUN apt-get update \ - && apt-cache showpkg postgresql-$PG_MAJOR-postgis-$POSTGIS_MAJOR \ - && apt-get install -y --no-install-recommends \ - postgresql-$PG_MAJOR-postgis-$POSTGIS_MAJOR \ - postgresql-$PG_MAJOR-postgis-$POSTGIS_MAJOR-scripts \ - && rm -rf /var/lib/apt/lists/* /var/tmp/* - -# install pgtap -ENV PGTAP_VERSION=v1.1.0 - -RUN pgtapDependencies="git \ - ca-certificates \ - build-essential" \ - && apt-get update \ - && apt-get install -y --no-install-recommends ${pgtapDependencies} \ - && cd /tmp \ - && git clone git://github.com/theory/pgtap.git \ - && cd pgtap \ - && git checkout tags/$PGTAP_VERSION \ - && make install \ - && apt-get clean \ - && apt-get remove -y ${pgtapDependencies} \ - && apt-get autoremove -y \ - && rm -rf /tmp/pgtap /var/lib/apt/lists/* /var/tmp/* - -# install plpython3 -RUN apt-get update \ - && apt-get install postgresql-plpython3-12 -y - -# install pgAudit -ENV PGAUDIT_VERSION=1.4.0 - -RUN pgAuditDependencies="git \ - ca-certificates \ - build-essential \ - postgresql-server-dev-$PG_MAJOR \ - libssl-dev \ - libkrb5-dev" \ - && apt-get update \ - && apt-get install -y --no-install-recommends ${pgAuditDependencies} \ - && cd /tmp \ - && git clone https://github.com/pgaudit/pgaudit.git \ - && cd pgaudit \ - && git checkout ${PGAUDIT_VERSION} \ - && make check USE_PGXS=1 \ - && make install USE_PGXS=1 \ - && apt-get clean \ - && apt-get remove -y ${pgAuditDependencies} \ - && apt-get autoremove -y \ - && rm -rf /tmp/pgaudit /var/lib/apt/lists/* /var/tmp/* - -# install pgjwt -RUN pgjwtDependencies="git \ - ca-certificates \ - build-essential" \ - && apt-get update \ - && apt-get install -y --no-install-recommends ${pgjwtDependencies} \ - && cd /tmp \ - && git clone https://github.com/michelp/pgjwt.git \ - && cd pgjwt \ - && git checkout master \ - && make install \ - && apt-get clean \ - && apt-get remove -y ${pgtapDependencies} \ - && apt-get autoremove -y \ - && rm -rf /tmp/pgjwt /var/lib/apt/lists/* /var/tmp/* - -# install pgsql-http -ENV PGSQL_HTTP_VERSION=v1.3.1 - -RUN pgsqlHttpDependencies="git \ - ca-certificates \ - build-essential \ - postgresql-server-dev-$PG_MAJOR" \ - && pgsqlHttpRuntimeDependencies="libcurl4-gnutls-dev" \ - && apt-get update \ - && apt-get install -y --no-install-recommends ${pgsqlHttpDependencies} ${pgsqlHttpRuntimeDependencies} \ - && cd /tmp \ - && git clone https://github.com/pramsey/pgsql-http.git \ - && cd pgsql-http \ - && git checkout ${PGSQL_HTTP_VERSION} \ - && make \ - && make install \ - && apt-get clean \ - && apt-get remove -y ${pgsqlHttpDependencies} \ - && apt-get autoremove -y \ - && rm -rf /tmp/pgsql-http /var/lib/apt/lists/* /var/tmp/* - -# install plpgsql_check -ENV PLPGSQL_CHECK_VERSION=v1.11.3 - -RUN plpgsqlCheckDependencies="git \ - ca-certificates \ - build-essential \ - postgresql-server-dev-$PG_MAJOR" \ - && plpgsqlCheckRuntimeDependencies="libicu-dev" \ - && apt-get update \ - && apt-get install -y --no-install-recommends ${plpgsqlCheckDependencies} ${plpgsqlCheckRuntimeDependencies} \ - && cd /tmp \ - && git clone 
https://github.com/okbob/plpgsql_check.git \
-    && cd plpgsql_check \
-    && git checkout ${PLPGSQL_CHECK_VERSION} \
-    && make clean \
-    && make install \
-    && apt-get clean \
-    && apt-get remove -y ${pgsqlHttpDependencies} \
-    && apt-get autoremove -y \
-    && rm -rf /tmp/plpgsql_check /var/lib/apt/lists/* /var/tmp/*
-
-# install plv8
-ENV PLV8_VERSION=r3.0alpha
-
-RUN plv8Dependencies="build-essential \
-    ca-certificates \
-    curl \
-    git-core \
-    python \
-    gpp \
-    cpp \
-    pkg-config \
-    apt-transport-https \
-    cmake \
-    libc++-dev \
-    libc++abi-dev \
-    postgresql-server-dev-$PG_MAJOR" \
-    && plv8RuntimeDependencies="libc++1 \
-    libtinfo5 \
-    libc++abi1" \
-    && apt-get update \
-    && apt-get install -y --no-install-recommends ${plv8Dependencies} ${plv8RuntimeDependencies} \
-    && mkdir -p /tmp/build \
-    && cd /tmp/build \
-    && git clone https://github.com/plv8/plv8.git \
-    && cd plv8 \
-    && git checkout ${PLV8_VERSION} \
-    && make static \
-    && make install \
-    && rm -rf /root/.vpython_cipd_cache /root/.vpython-root \
-    && apt-get clean \
-    && apt-get remove -y ${plv8Dependencies} \
-    && apt-get autoremove -y \
-    && rm -rf /tmp/build /var/lib/apt/lists/* /var/tmp/*
-
-# install pljava
-ENV PLJAVA_VERSION=V1_5_5
-
-RUN pljavaDependencies="git \
+ARG ubuntu_release=noble
+FROM ubuntu:${ubuntu_release} as base
+
+ARG ubuntu_release=noble
+ARG ubuntu_release_no=24.04
+ARG postgresql_major=15
+ARG postgresql_release=${postgresql_major}.1
+
+FROM base as pg-source
+
+# Install build dependencies
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    gnupg \
+    dpkg-dev \
     ca-certificates \
-    g++ \
-    maven \
-    postgresql-server-dev-$PG_MAJOR \
-    libpq-dev \
-    libecpg-dev \
-    libkrb5-dev \
-    default-jdk \
-    libssl-dev" \
-    && apt-get update \
-    && apt-get install -y --no-install-recommends ${pljavaDependencies} \
-    && cd /tmp \
-    && git clone https://github.com/tada/pljava.git \
-    && cd pljava \
-    && git checkout ${PLJAVA_VERSION} \
-    && mvn clean install \
-    && java -jar pljava-packaging/target/pljava-pg12.3-amd64-Linux-gpp.jar \
-    && apt-get clean \
-    && apt-get remove -y ${pljavaDependencies} \
-    && apt-get autoremove -y \
-    && rm -rf ~/.m2 /tmp/pljava /var/lib/apt/lists/* /var/tmp/*
-
-RUN apt-get update \
-    && apt-get install -y --no-install-recommends default-jdk-headless \
-    && rm -rf /var/lib/apt/lists/* /var/tmp/*
-
-RUN mkdir -p /docker-entrypoint-initdb.d
-ADD ./mnt /docker-entrypoint-initdb.d/
\ No newline at end of file
+    && rm -rf /var/lib/apt/lists/*
+
+# Add Postgres PPA
+# In the off-chance that the key in the repository expires, it can be replaced by running the following in the repository's root:
+# gpg --keyserver hkps://keyserver.ubuntu.com --recv-keys $NEW_POSTGRESQL_GPG_KEY
+# gpg --export --armor $NEW_POSTGRESQL_GPG_KEY > postgresql.gpg.key
COPY postgresql.gpg.key /tmp/postgresql.gpg.key
+RUN apt-key add /tmp/postgresql.gpg.key && \
+    echo "deb https://apt-archive.postgresql.org/pub/repos/apt ${ubuntu_release}-pgdg-archive main" > /etc/apt/sources.list.d/pgdg.list && \
+    echo "deb-src https://apt-archive.postgresql.org/pub/repos/apt ${ubuntu_release}-pgdg-archive main" >> /etc/apt/sources.list.d/pgdg.list
+
+# Create local PPA
+WORKDIR /tmp/build
+RUN echo "deb [ trusted=yes ] file:///tmp/build ./" > /etc/apt/sources.list.d/temp.list && \
+    dpkg-scanpackages . 
> Packages && \ + apt-get -o Acquire::GzipIndexes=false update + +ENV DEBIAN_FRONTEND=noninteractive +ENV PYTHONDONTWRITEBYTECODE=1 +ENV DEB_BUILD_OPTIONS="nocheck parallel=$(nproc)" + +# Configure processor optimised build +ARG CPPFLAGS="" +ENV DEB_CPPFLAGS_APPEND="${CPPFLAGS} -fsigned-char" +ENV DEB_CFLAGS_APPEND="-g3" +ARG DEB_BUILD_PROFILES="pkg.postgresql.nozstd" +ENV DEB_BUILD_PROFILES="${DEB_BUILD_PROFILES}" + +RUN apt-get -o Acquire::GzipIndexes=false update && apt-get build-dep -y postgresql-common pgdg-keyring && \ + apt-get source --compile postgresql-common pgdg-keyring && \ + dpkg-scanpackages . > Packages && \ + apt-get -o Acquire::GzipIndexes=false update + +RUN apt-get build-dep -y "postgresql-${postgresql_major}=${postgresql_release}-1.pgdg${ubuntu_release_no}+1" && \ + apt-get source --compile "postgresql-${postgresql_major}=${postgresql_release}-1.pgdg${ubuntu_release_no}+1" && \ + dpkg-scanpackages . > Packages && \ + apt-get -o Acquire::GzipIndexes=false update + +# Remove source directories +RUN rm -rf /tmp/build/*/ + +FROM base as pg + +# Inherit args from base stage +ARG postgresql_major +ARG postgresql_release + +COPY --from=pg-source /tmp/build /tmp/build + +ENV DEBIAN_FRONTEND=noninteractive +RUN echo "deb [ trusted=yes ] file:///tmp/build ./" > /etc/apt/sources.list.d/temp.list && \ + apt-get -o Acquire::GzipIndexes=false update && \ + apt-get install -y --no-install-recommends postgresql-common && \ + sed -ri 's/#(create_main_cluster) .*$/\1 = false/' /etc/postgresql-common/createcluster.conf && \ + apt-get install -y --no-install-recommends "postgresql-${postgresql_major}=${postgresql_release}-1.pgdg${ubuntu_release_no}+1" && \ + rm -rf /var/lib/apt/lists/* && \ + rm -rf /tmp/build /etc/apt/sources.list.d/temp.list + +ENV PATH $PATH:/usr/lib/postgresql/${postgresql_major}/bin + +FROM scratch as pg-deb + +COPY --from=pg-source /tmp/build /tmp diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 03e0a1a..4f00aa9 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -1,4 +1,4 @@ -version: '3' +version: "3" services: db: @@ -6,4 +6,4 @@ services: ports: - "5432:5432" environment: - POSTGRES_PASSWORD: postgres \ No newline at end of file + POSTGRES_PASSWORD: postgres diff --git a/docker/mnt/init-permissions.sh b/docker/mnt/init-permissions.sh deleted file mode 100644 index 314d387..0000000 --- a/docker/mnt/init-permissions.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -set -e - -echo "host replication $POSTGRES_USER 0.0.0.0/0 trust" >> $PGDATA/pg_hba.conf -echo "shared_preload_libraries = 'pg_stat_statements, pgaudit'" >> $PGDATA/postgresql.conf -echo "pg_stat_statements.max = 10000" >> $PGDATA/postgresql.conf -echo "pg_stat_statements.track = all" >> $PGDATA/postgresql.conf -echo "wal_level=logical" >> $PGDATA/postgresql.conf -echo "max_replication_slots=5" >> $PGDATA/postgresql.conf -echo "max_wal_senders=10" >> $PGDATA/postgresql.conf -echo "log_destination='csvlog'" >> $PGDATA/postgresql.conf -echo "logging_collector=on" >> $PGDATA/postgresql.conf -echo "log_filename='postgresql.log'" >> $PGDATA/postgresql.conf -echo "log_rotation_age=0" >> $PGDATA/postgresql.conf -echo "log_rotation_size=0" >> $PGDATA/postgresql.conf -echo "pljava.libjvm_location = '/usr/lib/jvm/java-11-openjdk-amd64/lib/server/libjvm.so'" >> $PGDATA/postgresql.conf diff --git a/docker/nix/Dockerfile b/docker/nix/Dockerfile new file mode 100644 index 0000000..2269079 --- /dev/null +++ b/docker/nix/Dockerfile @@ -0,0 +1,16 @@ +FROM nixpkgs/nix-flakes + 
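Stepping back from the diff for a moment: the "local PPA" cycle used by the `pg-source` stage above is worth spelling out. Every `.deb` that `apt-get source --compile` drops into `/tmp/build` is indexed into a `Packages` file, and the directory itself is registered as a trusted `file://` APT repository, so later `build-dep`/`install` steps can resolve the freshly built packages. A minimal sketch of that cycle, using the same paths as the Dockerfile:

```shell
#!/usr/bin/env bash
# Sketch of the local-repository cycle from the pg-source stage above.
set -euo pipefail

cd /tmp/build

# Register this directory as a plain, trusted APT repository.
echo "deb [ trusted=yes ] file:///tmp/build ./" > /etc/apt/sources.list.d/temp.list

# Index every .deb currently in the directory into a Packages file;
# this is re-run after each apt-get source --compile round.
dpkg-scanpackages . > Packages

# Refresh APT; GzipIndexes is disabled because Packages is uncompressed.
apt-get -o Acquire::GzipIndexes=false update
```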
+RUN echo "substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com" >> /etc/nix/nix.conf
+
+RUN echo "trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=" >> /etc/nix/nix.conf
+
+
+USER $USER
+
+RUN mkdir -p /workspace
+
+COPY ./ /workspace
+
+RUN chmod +x /workspace/docker/nix/build_nix.sh
+
+RUN chown -R $USER:$USER /workspace
\ No newline at end of file
diff --git a/docker/nix/build_nix.sh b/docker/nix/build_nix.sh
new file mode 100755
index 0000000..118fdf6
--- /dev/null
+++ b/docker/nix/build_nix.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+set -eou pipefail
+
+nix --version
+if [ -d "/workspace" ]; then
+  cd /workspace
+fi
+
+nix run "github:Mic92/nix-fast-build?rev=b1dae483ab7d4139a6297e02b6de9e5d30e43d48" -- --skip-cached --no-nom --flake ".#checks"
diff --git a/docs/.DS_Store b/docs/.DS_Store
deleted file mode 100644
index 645d589..0000000
Binary files a/docs/.DS_Store and /dev/null differ
diff --git a/ebssurrogate/USAGE.md b/ebssurrogate/USAGE.md
new file mode 100644
index 0000000..9a63a08
--- /dev/null
+++ b/ebssurrogate/USAGE.md
@@ -0,0 +1,50 @@
+## Ext4 amd64 AMI creation
+
+`packer build -var "aws_access_key=$AWS_ACCESS_KEY_ID" -var "aws_secret_key=$AWS_SECRET_ACCESS_KEY" -var "region=$AWS_REGION" \
+-var "docker_passwd=$DOCKER_PASSWD" -var "docker_user=$DOCKER_USER" -var "docker_image=$DOCKER_IMAGE" -var "docker_image_tag=$DOCKER_IMAGE_TAG" \
+amazon-amd64.pkr.hcl`
+
+## Ext4 arm64 AMI creation
+
+`packer build -var "aws_access_key=$AWS_ACCESS_KEY_ID" -var "aws_secret_key=$AWS_SECRET_ACCESS_KEY" -var "region=$AWS_REGION" \
+-var "docker_passwd=$DOCKER_PASSWD" -var "docker_user=$DOCKER_USER" -var "docker_image=$DOCKER_IMAGE" -var "docker_image_tag=$DOCKER_IMAGE_TAG" \
+amazon-arm64.pkr.hcl`
+
+## Docker Image
+
+ DOCKER_IMAGE is used to store ccache data during the build process. This can be any image; you can create your own using:
+
+ ```
+ docker pull ubuntu
+ docker tag ubuntu /ccache
+ docker push /ccache
+ ```
+
+ For ARM64 builds:
+
+ ```
+ docker pull arm64v8/ubuntu
+ docker tag arm64v8/ubuntu:latest /ccache-arm64v8
+ docker push /ccache-arm64v8
+ ```
+
+ Now set DOCKER_IMAGE="/ccache" or DOCKER_IMAGE="/ccache-arm64v8" based on your AMI architecture.
+
+
+## EBS-Surrogate File layout
+
+```
+$ tree ebssurrogate/
+ebssurrogate/
+├── files
+│   ├── 70-ec2-nvme-devices.rules
+│   ├── cloud.cfg              # cloud.cfg for cloud-init
+│   ├── ebsnvme-id
+│   ├── sources-arm64.cfg      # apt/sources.list for arm64
+│   ├── sources.cfg            # apt/sources.list for amd64
+│   ├── vector.timer           # systemd-timer to delay vector execution
+│   └── zfs-growpart-root.cfg
+└── scripts
+    ├── chroot-bootstrap.sh    # Installs grub and other required packages for build. Configures target AMI settings
+    └── surrogate-bootstrap.sh # Formats the disk and sets up the chroot environment. Runs Ansible tasks within the chrooted environment.
+```
diff --git a/ebssurrogate/files/70-ec2-nvme-devices.rules b/ebssurrogate/files/70-ec2-nvme-devices.rules
new file mode 100644
index 0000000..62a5deb
--- /dev/null
+++ b/ebssurrogate/files/70-ec2-nvme-devices.rules
@@ -0,0 +1,25 @@
+# Copyright (C) 2006-2016 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License").
+# You may not use this file except in compliance with the License. 
+# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS +# OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the +# License. + +#nvme-ns-* devices +KERNEL=="nvme[0-9]*n[0-9]*", ENV{DEVTYPE}=="disk", ATTRS{serial}=="?*", ATTRS{model}=="?*", SYMLINK+="disk/by-id/nvme-$attr{model}_$attr{serial}-ns-%n", OPTIONS+="string_escape=replace" + +#nvme partitions +KERNEL=="nvme[0-9]*n[0-9]*p[0-9]*", ENV{DEVTYPE}=="partition", ATTRS{serial}=="?*", ATTRS{model}=="?*", IMPORT{program}="ec2nvme-nsid %k" +KERNEL=="nvme[0-9]*n[0-9]*p[0-9]*", ENV{DEVTYPE}=="partition", ATTRS{serial}=="?*", ATTRS{model}=="?*", ENV{_NS_ID}=="?*", SYMLINK+="disk/by-id/nvme-$attr{model}_$attr{serial}-ns-$env{_NS_ID}-part%n", OPTIONS+="string_escape=replace" + +# ebs nvme devices +KERNEL=="nvme[0-9]*n[0-9]*", ENV{DEVTYPE}=="disk", ATTRS{model}=="Amazon Elastic Block Store", PROGRAM="/sbin/ebsnvme-id -u /dev/%k", SYMLINK+="%c" +KERNEL=="nvme[0-9]*n[0-9]*p[0-9]*", ENV{DEVTYPE}=="partition", ATTRS{model}=="Amazon Elastic Block Store", PROGRAM="/sbin/ebsnvme-id -u /dev/%k", SYMLINK+="%c%n" diff --git a/ebssurrogate/files/apparmor_profiles/opt.gotrue.gotrue b/ebssurrogate/files/apparmor_profiles/opt.gotrue.gotrue new file mode 100644 index 0000000..7b9594a --- /dev/null +++ b/ebssurrogate/files/apparmor_profiles/opt.gotrue.gotrue @@ -0,0 +1,15 @@ +#include + +/opt/gotrue/gotrue { + #include + #include + #include + + /opt/gotrue/gotrue r, + /opt/gotrue/migrations/ r, + /etc/ssl/certs/java/* r, + /opt/gotrue/migrations/** rw, + /proc/sys/net/core/somaxconn r, + /sys/kernel/mm/transparent_hugepage/hpage_pmd_size r, + owner /etc/gotrue.env r, +} diff --git a/ebssurrogate/files/apparmor_profiles/opt.postgrest b/ebssurrogate/files/apparmor_profiles/opt.postgrest new file mode 100644 index 0000000..c738a65 --- /dev/null +++ b/ebssurrogate/files/apparmor_profiles/opt.postgrest @@ -0,0 +1,12 @@ +#include + +/opt/postgrest { + #include + #include + #include + + /etc/gss/mech.d/ r, + /sys/devices/system/node/ r, + /sys/devices/system/node/node0/meminfo r, + owner /etc/postgrest/merged.conf r, +} diff --git a/ebssurrogate/files/apparmor_profiles/usr.bin.vector b/ebssurrogate/files/apparmor_profiles/usr.bin.vector new file mode 100644 index 0000000..b8a7eb2 --- /dev/null +++ b/ebssurrogate/files/apparmor_profiles/usr.bin.vector @@ -0,0 +1,35 @@ +#include + +/usr/bin/vector flags=(attach_disconnected) { + #include + #include + #include + #include + #include + #include + #include + #include + + deny @{HOME}/** rwx, + /etc/machine-id r, + /etc/vector/** r, + /proc/*/sched r, + /proc/cmdline r, + /proc/sys/kernel/osrelease r, + /run/log/journal/ r, + /var/log/journal/** r, + /run/systemd/notify rw, + /sys/firmware/efi/efivars/SecureBoot-8be4df61-93ca-11d2-aa0d-00e098032b8c r, + /sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us r, + /sys/kernel/mm/transparent_hugepage/enabled r, + /usr/bin/journalctl mrix, + /usr/bin/vector mrix, + /var/lib/vector/** rw, + /var/log/journal/ r, + /var/log/postgresql/ r, + /var/log/postgresql/** rw, + /var/run/systemd/notify rw, + owner /proc/*/cgroup r, + owner /proc/*/mountinfo r, + owner /proc/*/stat r, +} diff --git a/ebssurrogate/files/apparmor_profiles/usr.lib.postgresql.bin.postgres b/ebssurrogate/files/apparmor_profiles/usr.lib.postgresql.bin.postgres new file mode 100644 index 
0000000..8e2efc3 --- /dev/null +++ b/ebssurrogate/files/apparmor_profiles/usr.lib.postgresql.bin.postgres @@ -0,0 +1,55 @@ +#include + +profile /usr/lib/postgresql/bin/postgres flags=(attach_disconnected) { +#include +#include +#include +#include +#include +#include +#include + +capability dac_override, +capability dac_read_search, + +deny @{HOME}/** rwx, + +/data/pgdata/** r, +/dev/shm rw, +/etc/java-11-openjdk/logging.properties r, +/etc/java-11-openjdk/security/default.policy r, +/etc/java-11-openjdk/security/java.policy r, +/etc/java-11-openjdk/security/java.security r, +/etc/mecabrc r, +/etc/postgresql-custom/** r, +/etc/postgresql/** r, +/etc/timezone r, +/etc/wal-g/config.json r, +/run/systemd/notify rw, +/usr/bin/cat rix, +/usr/bin/dash rix, +/usr/bin/mknod rix, +/usr/bin/admin-mgr Ux, +/usr/lib/postgresql/bin/* mrix, +/usr/local/bin/wal-g rix, +/usr/local/lib/groonga/plugins/tokenizers/mecab.so mr, +/usr/local/lib/libSFCGAL.so.* mr, +/usr/local/lib/libgroonga.so.* mr, +/usr/local/pgsql/etc/pljava.policy r, +/usr/share/postgresql/** r, +/var/lib/mecab/** r, +/var/lib/postgresql/** rwl, +/var/log/postgresql/** rw, +/var/log/wal-g/** w, +/var/run/systemd/notify rw, +/{,var/}run/postgresql/** rw, +owner /data/pgdata/ r, +owner /data/pgdata/** rwl, +owner /data/pgdata/pgroonga.log k, +owner /dev/shm/ rw, +owner /dev/shm/PostgreSQL.* rw, +owner /sys/kernel/mm/transparent_hugepage/hpage_pmd_size r, +owner /var/log/wal-g/** rw, +owner @{PROC}/[0-9]*/oom_adj rw, + +} diff --git a/ebssurrogate/files/apparmor_profiles/usr.local.bin.pgbouncer b/ebssurrogate/files/apparmor_profiles/usr.local.bin.pgbouncer new file mode 100644 index 0000000..7bf6d09 --- /dev/null +++ b/ebssurrogate/files/apparmor_profiles/usr.local.bin.pgbouncer @@ -0,0 +1,20 @@ +#include +profile /usr/local/bin/pgbouncer flags=(attach_disconnected) { + #include + #include + #include + #include + #include + #include + #include + + deny @{HOME}/** rwx, + /etc/pgbouncer-custom/** r, + /etc/pgbouncer/** r, + /proc/sys/kernel/random/uuid r, + /run/systemd/notify rw, + /usr/local/bin/pgbouncer mrix, + /var/log/pgbouncer.log rw, + /var/run/systemd/notify rw, + /{,var/}run/pgbouncer/** rw, +} diff --git a/ebssurrogate/files/cloud.cfg b/ebssurrogate/files/cloud.cfg new file mode 100644 index 0000000..678b5b6 --- /dev/null +++ b/ebssurrogate/files/cloud.cfg @@ -0,0 +1,137 @@ +# The top level settings are used as module +# and system configuration. +# A set of users which may be applied and/or used by various modules +# when a 'default' entry is found it will reference the 'default_user' +# from the distro configuration specified below +users: + - default + + +# If this is set, 'root' will not be able to ssh in and they +# will get a message to login instead as the default $user +disable_root: true + +# This will cause the set+update hostname module to not operate (if true) +preserve_hostname: false + +# If you use datasource_list array, keep array items in a single line. +# If you use multi line array, ds-identify script won't read array items. 
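As an aside on the AppArmor profiles added above (they are copied into `/etc/apparmor.d/` by `setup_apparmor` in `chroot-bootstrap-nix.sh`, later in this diff), a quick way to load and verify one by hand could look like the following; a minimal sketch, assuming `apparmor` and `apparmor-utils` are installed as in that function:

```shell
#!/usr/bin/env bash
# Load the postgres profile in enforce mode and confirm it is active.
set -euo pipefail

# Parse and (re)load the profile into the kernel.
sudo apparmor_parser --replace /etc/apparmor.d/usr.lib.postgresql.bin.postgres

# aa-status lists loaded profiles; grep for the one just loaded.
sudo aa-status | grep postgresql
```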
+# Example datasource config +# datasource: +# Ec2: +# metadata_urls: [ 'blah.com' ] +# timeout: 5 # (defaults to 50 seconds) +# max_wait: 10 # (defaults to 120 seconds) + + + +# The modules that run in the 'init' stage +cloud_init_modules: +# - migrator +# - seed_random +# - bootcmd + - write-files +# - growpart +# - resizefs +# - disk_setup +# - mounts + - set_hostname + - update_hostname + - update_etc_hosts +# - ca-certs +# - rsyslog + - users-groups + - ssh + +# The modules that run in the 'config' stage +cloud_config_modules: +# Emit the cloud config ready event +# this can be used by upstart jobs for 'start on cloud-config'. +# - emit_upstart +# - snap +# - ssh-import-id +# - locale +# - set-passwords +# - grub-dpkg +# - apt-pipelining +# - apt-configure +# - ubuntu-advantage + - ntp + - timezone + - disable-ec2-metadata + - runcmd +# - byobu + +# The modules that run in the 'final' stage +cloud_final_modules: +# - package-update-upgrade-install +# - fan +# - landscape +# - lxd +# - ubuntu-drivers +# - puppet +# - chef +# - mcollective +# - salt-minion + - reset_rmc + - refresh_rmc_and_interface +# - rightscale_userdata + - scripts-vendor + - scripts-per-once + - scripts-per-boot + - scripts-per-instance + - scripts-user +# - ssh-authkey-fingerprints +# - keys-to-console +# - phone-home + - final-message + - power-state-change + +# System and/or distro specific settings +# (not accessible to handlers/transforms) +system_info: + # This will affect which distro class gets used + distro: ubuntu + # Default user name + that default users groups (if added/used) + default_user: + name: ubuntu + lock_passwd: True + gecos: Ubuntu + groups: [adm, audio, cdrom, dialout, dip, floppy, lxd, netdev, plugdev, sudo, video] + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + shell: /bin/bash + network: + renderers: ['netplan', 'eni', 'sysconfig'] + # Automatically discover the best ntp_client + ntp_client: auto + # Other config here will be given to the distro class and/or path classes + paths: + cloud_dir: /var/lib/cloud/ + templates_dir: /etc/cloud/templates/ + upstart_dir: /etc/init/ + package_mirrors: + - arches: [i386, amd64] + failsafe: + primary: http://archive.ubuntu.com/ubuntu + security: http://security.ubuntu.com/ubuntu + search: + primary: + - http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/ + - http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/ + - http://%(region)s.clouds.archive.ubuntu.com/ubuntu/ + security: [] + - arches: [arm64, armel, armhf] + failsafe: + primary: http://ports.ubuntu.com/ubuntu-ports + security: http://ports.ubuntu.com/ubuntu-ports + search: + primary: + - http://%(ec2_region)s.ec2.ports.ubuntu.com/ubuntu-ports/ + - http://%(availability_zone)s.clouds.ports.ubuntu.com/ubuntu-ports/ + - http://%(region)s.clouds.ports.ubuntu.com/ubuntu-ports/ + security: [] + - arches: [default] + failsafe: + primary: http://ports.ubuntu.com/ubuntu-ports + security: http://ports.ubuntu.com/ubuntu-ports + ssh_svcname: ssh diff --git a/ebssurrogate/files/ebsnvme-id b/ebssurrogate/files/ebsnvme-id new file mode 100644 index 0000000..b543c0d --- /dev/null +++ b/ebssurrogate/files/ebsnvme-id @@ -0,0 +1,173 @@ +#!/usr/bin/env python2.7 + +# Copyright (C) 2017 Amazon.com, Inc. or its affiliates. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. 
This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS +# OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the +# License. + +""" +Usage: +Read EBS device information and provide information about +the volume. +""" + +import argparse +from ctypes import * +from fcntl import ioctl +import sys + +NVME_ADMIN_IDENTIFY = 0x06 +NVME_IOCTL_ADMIN_CMD = 0xC0484E41 +AMZN_NVME_VID = 0x1D0F +AMZN_NVME_EBS_MN = "Amazon Elastic Block Store" + +class nvme_admin_command(Structure): + _pack_ = 1 + _fields_ = [("opcode", c_uint8), # op code + ("flags", c_uint8), # fused operation + ("cid", c_uint16), # command id + ("nsid", c_uint32), # namespace id + ("reserved0", c_uint64), + ("mptr", c_uint64), # metadata pointer + ("addr", c_uint64), # data pointer + ("mlen", c_uint32), # metadata length + ("alen", c_uint32), # data length + ("cdw10", c_uint32), + ("cdw11", c_uint32), + ("cdw12", c_uint32), + ("cdw13", c_uint32), + ("cdw14", c_uint32), + ("cdw15", c_uint32), + ("reserved1", c_uint64)] + +class nvme_identify_controller_amzn_vs(Structure): + _pack_ = 1 + _fields_ = [("bdev", c_char * 32), # block device name + ("reserved0", c_char * (1024 - 32))] + +class nvme_identify_controller_psd(Structure): + _pack_ = 1 + _fields_ = [("mp", c_uint16), # maximum power + ("reserved0", c_uint16), + ("enlat", c_uint32), # entry latency + ("exlat", c_uint32), # exit latency + ("rrt", c_uint8), # relative read throughput + ("rrl", c_uint8), # relative read latency + ("rwt", c_uint8), # relative write throughput + ("rwl", c_uint8), # relative write latency + ("reserved1", c_char * 16)] + +class nvme_identify_controller(Structure): + _pack_ = 1 + _fields_ = [("vid", c_uint16), # PCI Vendor ID + ("ssvid", c_uint16), # PCI Subsystem Vendor ID + ("sn", c_char * 20), # Serial Number + ("mn", c_char * 40), # Module Number + ("fr", c_char * 8), # Firmware Revision + ("rab", c_uint8), # Recommend Arbitration Burst + ("ieee", c_uint8 * 3), # IEEE OUI Identifier + ("mic", c_uint8), # Multi-Interface Capabilities + ("mdts", c_uint8), # Maximum Data Transfer Size + ("reserved0", c_uint8 * (256 - 78)), + ("oacs", c_uint16), # Optional Admin Command Support + ("acl", c_uint8), # Abort Command Limit + ("aerl", c_uint8), # Asynchronous Event Request Limit + ("frmw", c_uint8), # Firmware Updates + ("lpa", c_uint8), # Log Page Attributes + ("elpe", c_uint8), # Error Log Page Entries + ("npss", c_uint8), # Number of Power States Support + ("avscc", c_uint8), # Admin Vendor Specific Command Configuration + ("reserved1", c_uint8 * (512 - 265)), + ("sqes", c_uint8), # Submission Queue Entry Size + ("cqes", c_uint8), # Completion Queue Entry Size + ("reserved2", c_uint16), + ("nn", c_uint32), # Number of Namespaces + ("oncs", c_uint16), # Optional NVM Command Support + ("fuses", c_uint16), # Fused Operation Support + ("fna", c_uint8), # Format NVM Attributes + ("vwc", c_uint8), # Volatile Write Cache + ("awun", c_uint16), # Atomic Write Unit Normal + ("awupf", c_uint16), # Atomic Write Unit Power Fail + ("nvscc", c_uint8), # NVM Vendor Specific Command Configuration + ("reserved3", c_uint8 * (704 - 531)), + ("reserved4", c_uint8 * (2048 - 704)), + ("psd", nvme_identify_controller_psd * 32), # Power State Descriptor + ("vs", nvme_identify_controller_amzn_vs)] # Vendor Specific + +class ebs_nvme_device: + def __init__(self, device): + self.device = device + self.ctrl_identify() + + def _nvme_ioctl(self, id_response, id_len): + admin_cmd = 
nvme_admin_command(opcode = NVME_ADMIN_IDENTIFY, + addr = id_response, + alen = id_len, + cdw10 = 1) + + with open(self.device, "rw") as nvme: + ioctl(nvme, NVME_IOCTL_ADMIN_CMD, admin_cmd) + + def ctrl_identify(self): + self.id_ctrl = nvme_identify_controller() + self._nvme_ioctl(addressof(self.id_ctrl), sizeof(self.id_ctrl)) + + if self.id_ctrl.vid != AMZN_NVME_VID or self.id_ctrl.mn.strip() != AMZN_NVME_EBS_MN: + raise TypeError("[ERROR] Not an EBS device: '{0}'".format(self.device)) + + def get_volume_id(self): + vol = self.id_ctrl.sn + + if vol.startswith("vol") and vol[3] != "-": + vol = "vol-" + vol[3:] + + return vol + + def get_block_device(self, stripped=False): + dev = self.id_ctrl.vs.bdev.strip() + + if stripped and dev.startswith("/dev/"): + dev = dev[5:] + + return dev + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Reads EBS information from NVMe devices.") + parser.add_argument("device", nargs=1, help="Device to query") + + display = parser.add_argument_group("Display Options") + display.add_argument("-v", "--volume", action="store_true", + help="Return volume-id") + display.add_argument("-b", "--block-dev", action="store_true", + help="Return block device mapping") + display.add_argument("-u", "--udev", action="store_true", + help="Output data in format suitable for udev rules") + + if len(sys.argv) < 2: + parser.print_help() + sys.exit(1) + + args = parser.parse_args() + + get_all = not (args.udev or args.volume or args.block_dev) + + try: + dev = ebs_nvme_device(args.device[0]) + except (IOError, TypeError) as err: + print >> sys.stderr, err + sys.exit(1) + + if get_all or args.volume: + print "Volume ID: {0}".format(dev.get_volume_id()) + if get_all or args.block_dev or args.udev: + print dev.get_block_device(args.udev) diff --git a/ebssurrogate/files/sources-arm64.cfg b/ebssurrogate/files/sources-arm64.cfg new file mode 100644 index 0000000..eed6c0f --- /dev/null +++ b/ebssurrogate/files/sources-arm64.cfg @@ -0,0 +1,10 @@ +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ noble main restricted +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ noble-updates main restricted +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ noble universe +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ noble-updates universe +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ noble multiverse +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ noble-updates multiverse +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ noble-backports main restricted universe multiverse +deb http://ports.ubuntu.com/ubuntu-ports noble-security main restricted +deb http://ports.ubuntu.com/ubuntu-ports noble-security universe +deb http://ports.ubuntu.com/ubuntu-ports noble-security multiverse diff --git a/ebssurrogate/files/sources.cfg b/ebssurrogate/files/sources.cfg new file mode 100644 index 0000000..a27be05 --- /dev/null +++ b/ebssurrogate/files/sources.cfg @@ -0,0 +1,10 @@ +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ noble main restricted +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ noble-updates main restricted +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ noble universe +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ noble-updates universe +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ noble multiverse +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ noble-updates multiverse +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ noble-backports main restricted universe multiverse +deb 
http://security.ubuntu.com/ubuntu noble-security main restricted
+deb http://security.ubuntu.com/ubuntu noble-security universe
+deb http://security.ubuntu.com/ubuntu noble-security multiverse
diff --git a/ebssurrogate/files/vector.timer b/ebssurrogate/files/vector.timer
new file mode 100644
index 0000000..68bb4d6
--- /dev/null
+++ b/ebssurrogate/files/vector.timer
@@ -0,0 +1,9 @@
+[Unit]
+Description=Defer running the vector.service 60s after boot up
+
+[Timer]
+OnBootSec=60s
+Unit=vector.service
+
+[Install]
+WantedBy=multi-user.target
diff --git a/ebssurrogate/scripts/chroot-bootstrap-nix.sh b/ebssurrogate/scripts/chroot-bootstrap-nix.sh
new file mode 100755
index 0000000..90fd917
--- /dev/null
+++ b/ebssurrogate/scripts/chroot-bootstrap-nix.sh
@@ -0,0 +1,232 @@
+#!/usr/bin/env bash
+#
+# This script runs inside the chrooted environment. It installs grub and its
+# configuration file.
+#
+
+set -o errexit
+set -o pipefail
+set -o xtrace
+
+export DEBIAN_FRONTEND=noninteractive
+
+export APT_OPTIONS="-oAPT::Install-Recommends=false \
+ -oAPT::Install-Suggests=false \
+ -oAcquire::Languages=none"
+
+if [ $(dpkg --print-architecture) = "amd64" ];
+then
+  ARCH="amd64";
+else
+  ARCH="arm64";
+fi
+
+
+
+function update_install_packages {
+  source /etc/os-release
+
+  # Update APT with new sources
+  cat /etc/apt/sources.list
+  apt-get $APT_OPTIONS update && apt-get $APT_OPTIONS --yes dist-upgrade
+
+  # Do not configure grub during package install
+  if [ "${ARCH}" = "amd64" ]; then
+    echo 'grub-pc grub-pc/install_devices_empty select true' | debconf-set-selections
+    echo 'grub-pc grub-pc/install_devices select' | debconf-set-selections
+    # Install various packages needed for a booting system
+    apt-get install -y \
+      linux-aws \
+      grub-pc \
+      e2fsprogs
+  else
+    apt-get install -y e2fsprogs
+  fi
+  # Install standard packages
+  apt-get install -y \
+    sudo \
+    wget \
+    cloud-init \
+    acpid \
+    ec2-hibinit-agent \
+    ec2-instance-connect \
+    hibagent \
+    ncurses-term \
+    ssh-import-id
+
+  # apt upgrade
+  apt-get upgrade -y
+
+  # Install OpenSSH and other packages
+  sudo add-apt-repository --yes universe
+  apt-get update
+  apt-get install -y --no-install-recommends \
+    openssh-server \
+    git \
+    ufw \
+    cron \
+    logrotate \
+    fail2ban \
+    locales \
+    at \
+    less \
+    python3-systemd
+
+  if [ "${ARCH}" = "arm64" ]; then
+    apt-get $APT_OPTIONS --yes install linux-aws initramfs-tools dosfstools
+  fi
+}
+
+function setup_locale {
+cat << EOF >> /etc/locale.gen
+en_US.UTF-8 UTF-8
+EOF
+
+cat << EOF > /etc/default/locale
+LANG="C.UTF-8"
+LC_CTYPE="C.UTF-8"
+EOF
+  locale-gen en_US.UTF-8
+}
+
+function setup_postgesql_env {
+  # Create the directory if it doesn't exist
+  sudo mkdir -p /etc/environment.d
+
+  # Define the contents of the PostgreSQL environment file
+  cat << EOF | sudo tee /etc/environment.d/postgresql.env >/dev/null
+LOCALE_ARCHIVE=/usr/lib/locale/locale-archive
+LANG="en_US.UTF-8"
+LANGUAGE="en_US.UTF-8"
+LC_ALL="en_US.UTF-8"
+LC_CTYPE="en_US.UTF-8"
+EOF
+}
+
+function install_packages_for_build {
+  apt-get install -y --no-install-recommends linux-libc-dev \
+    acl \
+    magic-wormhole sysstat \
+    build-essential libreadline-dev zlib1g-dev flex bison libxml2-dev libxslt-dev libssl-dev libsystemd-dev libpq-dev libxml2-utils uuid-dev xsltproc ssl-cert \
+    gcc-10 g++-10 \
+    libgeos-dev libproj-dev libgdal-dev libjson-c-dev libboost-all-dev libcgal-dev libmpfr-dev libgmp-dev cmake \
+    libkrb5-dev \
+    maven default-jre default-jdk \
+    curl gpp apt-transport-https cmake libc++-dev libc++abi-dev libc++1 libglib2.0-dev libtinfo5 libc++abi1 ninja-build 
python \ + liblzo2-dev + + source /etc/os-release + + apt-get install -y --no-install-recommends llvm-11-dev clang-11 + # Mark llvm as manual to prevent auto removal + apt-mark manual libllvm11:arm64 +} + +function setup_apparmor { + apt-get install -y apparmor apparmor-utils auditd + + # Copy apparmor profiles + cp -rv /tmp/apparmor_profiles/* /etc/apparmor.d/ +} + +function setup_grub_conf_arm64 { +cat << EOF > /etc/default/grub +GRUB_DEFAULT=0 +GRUB_TIMEOUT=0 +GRUB_TIMEOUT_STYLE="hidden" +GRUB_DISTRIBUTOR="tealbase postgresql" +GRUB_CMDLINE_LINUX_DEFAULT="nomodeset console=tty1 console=ttyS0 ipv6.disable=0" +EOF +} + +# Install GRUB +function install_configure_grub { + if [ "${ARCH}" = "arm64" ]; then + apt-get $APT_OPTIONS --yes install cloud-guest-utils fdisk grub-efi-arm64 efibootmgr + setup_grub_conf_arm64 + rm -rf /etc/grub.d/30_os-prober + sleep 1 + fi + grub-install /dev/xvdf && update-grub +} + +# skip fsck for first boot +function disable_fsck { + touch /fastboot +} + +# Don't request hostname during boot but set hostname +function setup_hostname { + # Set the static hostname + echo "ubuntu" > /etc/hostname + chmod 644 /etc/hostname + # Update netplan configuration to not send hostname + cat << EOF > /etc/netplan/01-hostname.yaml +network: + version: 2 + ethernets: + eth0: + dhcp4: true + dhcp4-overrides: + send-hostname: false +EOF + # Set proper permissions for netplan security + chmod 600 /etc/netplan/01-hostname.yaml +} + +# Set options for the default interface +function setup_eth0_interface { +cat << EOF > /etc/netplan/eth0.yaml +network: + version: 2 + ethernets: + eth0: + dhcp4: true +EOF + # Set proper permissions for netplan security + chmod 600 /etc/netplan/eth0.yaml +} + +function disable_sshd_passwd_auth { + sed -i -E -e 's/^#?\s*PasswordAuthentication\s+(yes|no)\s*$/PasswordAuthentication no/g' \ + -e 's/^#?\s*ChallengeResponseAuthentication\s+(yes|no)\s*$/ChallengeResponseAuthentication no/g' \ + /etc/ssh/sshd_config +} + +function create_admin_account { + groupadd admin +} + +#Set default target as multi-user +function set_default_target { + rm -f /etc/systemd/system/default.target + ln -s /lib/systemd/system/multi-user.target /etc/systemd/system/default.target +} + +# Setup ccache +function setup_ccache { + apt-get install ccache -y + mkdir -p /tmp/ccache + export PATH=/usr/lib/ccache:$PATH + echo "PATH=$PATH" >> /etc/environment +} + +# Clear apt caches +function cleanup_cache { + apt-get clean +} + +update_install_packages +setup_locale +setup_postgesql_env +#install_packages_for_build +install_configure_grub +setup_apparmor +setup_hostname +create_admin_account +set_default_target +setup_eth0_interface +disable_sshd_passwd_auth +disable_fsck +#setup_ccache +cleanup_cache diff --git a/ebssurrogate/scripts/qemu-bootstrap-nix.sh b/ebssurrogate/scripts/qemu-bootstrap-nix.sh new file mode 100755 index 0000000..b52b857 --- /dev/null +++ b/ebssurrogate/scripts/qemu-bootstrap-nix.sh @@ -0,0 +1,160 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail +set -o xtrace + +if [ $(dpkg --print-architecture) = "amd64" ]; then + ARCH="amd64" +else + ARCH="arm64" +fi + +function waitfor_boot_finished { + export DEBIAN_FRONTEND=noninteractive + + echo "args: ${ARGS}" + # Wait for cloudinit on the surrogate to complete before making progress + while [[ ! -f /var/lib/cloud/instance/boot-finished ]]; do + echo 'Waiting for cloud-init...' 
+    sleep 1
+  done
+}
+
+function install_packages {
+  apt-get update && sudo apt-get install software-properties-common e2fsprogs nfs-common -y
+  add-apt-repository --yes --update ppa:ansible/ansible && sudo apt-get install ansible -y
+  ansible-galaxy collection install community.general
+}
+
+function execute_playbook {
+
+tee /etc/ansible/ansible.cfg </dev/null
+LOCALE_ARCHIVE=/usr/lib/locale/locale-archive
+LANG="en_US.UTF-8"
+LANGUAGE="en_US.UTF-8"
+LC_ALL="en_US.UTF-8"
+LC_CTYPE="en_US.UTF-8"
+EOF
+}
+
+function setup_locale {
+  cat << EOF >> /etc/locale.gen
+en_US.UTF-8 UTF-8
+EOF
+
+  cat << EOF > /etc/default/locale
+LANG="C.UTF-8"
+LC_CTYPE="C.UTF-8"
+EOF
+  locale-gen en_US.UTF-8
+}
+
+sed -i 's/- hosts: all/- hosts: localhost/' ansible/playbook.yml
+
+waitfor_boot_finished
+install_packages
+setup_postgesql_env
+setup_locale
+execute_playbook
+
+####################
+# stage 2 things
+####################
+
+function install_nix() {
+  sudo su -c "curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install --no-confirm \
+  --extra-conf \"substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com\" \
+  --extra-conf \"trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI= cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=\" " -s /bin/bash root
+  . /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh
+
+}
+
+function execute_stage2_playbook {
+  sudo tee /etc/ansible/ansible.cfg < sda
+
+    # Create /dev/xvd* device symlink
+    if [[ ! -z "$mapping" ]] && [[ -b "${blkdev}" ]] && [[ ! -L "${mapping}" ]]; then
+      ln -s "$blkdev" "$mapping"
+
+      blkdev_mappings["$blkdev"]="$mapping"
+    fi
+  done
+
+  create_partition_table
+
+  # NVMe EBS launch device partition mappings (symlinks): /dev/nvme*n*p* to /dev/xvd*[0-9]+
+  declare -A partdev_mappings
+  for blkdev in "${!blkdev_mappings[@]}"; do # /dev/nvme*n*
+    mapping="${blkdev_mappings[$blkdev]}"
+
+    # Create /dev/xvd*[0-9]+ partition device symlink
+    for partdev in "${blkdev}"p*; do
+      partnum=${partdev##*p}
+      if [[ ! -L "${mapping}${partnum}" ]]; then
+        ln -s "${blkdev}p${partnum}" "${mapping}${partnum}"
+
+        partdev_mappings["${blkdev}p${partnum}"]="${mapping}${partnum}"
+      fi
+    done
+  done
+}
+
+
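Since the symlink loop above reimplements at build time what the shipped udev rules (`70-ec2-nvme-devices.rules`) and `/sbin/ebsnvme-id` do at runtime, a quick manual spot-check on a live instance can confirm the two agree. A rough sketch, with device names purely illustrative:

```shell
#!/usr/bin/env bash
# Ask ebsnvme-id which /dev/xvd* name each NVMe EBS device advertises,
# then inspect the corresponding symlink created by the mapping loop.
set -euo pipefail

for blkdev in /dev/nvme*n1; do
  # -u/--udev prints the stripped block-device mapping, e.g. "xvdf";
  # skip devices that are not EBS volumes.
  mapping="$(sudo /sbin/ebsnvme-id -u "${blkdev}")" || continue
  echo "${blkdev} -> /dev/${mapping}"
  ls -l "/dev/${mapping}"
done
```

+#Download and install latest e2fsprogs for fast_commit feature, if required.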
+function format_and_mount_rootfs { + mkfs.ext4 -m0.1 /dev/xvdf2 + + mount -o noatime,nodiratime /dev/xvdf2 /mnt + if [ "${ARCH}" = "arm64" ]; then + mkfs.fat -F32 /dev/xvdf1 + mkdir -p /mnt/boot/efi + sleep 2 + mount /dev/xvdf1 /mnt/boot/efi + fi + + mkfs.ext4 /dev/xvdh + + # Explicitly reserving 100MiB worth of blocks for the data volume + RESERVED_DATA_VOLUME_BLOCK_COUNT=$((100 * 1024 * 1024 / 4096)) + tune2fs -r $RESERVED_DATA_VOLUME_BLOCK_COUNT /dev/xvdh + + mkdir -p /mnt/data + mount -o defaults,discard /dev/xvdh /mnt/data +} + +function create_swapfile { + fallocate -l 1G /mnt/swapfile + chmod 600 /mnt/swapfile + mkswap /mnt/swapfile +} + +function format_build_partition { + mkfs.ext4 -O ^has_journal /dev/xvdc +} +function pull_docker { + apt-get install -y docker.io + docker run -itd --name ccachedata "${DOCKER_IMAGE}:${DOCKER_IMAGE_TAG}" sh + docker exec -itd ccachedata mkdir -p /build/ccache +} + +# Create fstab +function create_fstab { + FMT="%-42s %-11s %-5s %-17s %-5s %s" +cat > "/mnt/etc/fstab" << EOF +$(printf "${FMT}" "# DEVICE UUID" "MOUNTPOINT" "TYPE" "OPTIONS" "DUMP" "FSCK") +$(findmnt -no SOURCE /mnt | xargs blkid -o export | awk -v FMT="${FMT}" '/^UUID=/ { printf(FMT, $0, "/", "ext4", "defaults,discard", "0", "1" ) }') +$(findmnt -no SOURCE /mnt/boot/efi | xargs blkid -o export | awk -v FMT="${FMT}" '/^UUID=/ { printf(FMT, $0, "/boot/efi", "vfat", "umask=0077", "0", "1" ) }') +$(findmnt -no SOURCE /mnt/data | xargs blkid -o export | awk -v FMT="${FMT}" '/^UUID=/ { printf(FMT, $0, "/data", "ext4", "defaults,discard", "0", "2" ) }') +$(printf "$FMT" "/swapfile" "none" "swap" "sw" "0" "0") +EOF + unset FMT +} + +function setup_chroot_environment { + UBUNTU_VERSION=$(lsb_release -cs) # 'noble' for Ubuntu 24.04 + + # Bootstrap Ubuntu into /mnt + debootstrap --arch ${ARCH} --variant=minbase "$UBUNTU_VERSION" /mnt + + # Update ec2-region + REGION=$(curl --silent --fail http://169.254.169.254/latest/meta-data/placement/availability-zone | sed -E 's|[a-z]+$||g') + sed -i "s/REGION/${REGION}/g" /tmp/sources.list + cp /tmp/sources.list /mnt/etc/apt/sources.list + + if [ "${ARCH}" = "arm64" ]; then + create_fstab + fi + + # Create mount points and mount the filesystem + mkdir -p /mnt/{dev,proc,sys} + mount --rbind /dev /mnt/dev + mount --rbind /proc /mnt/proc + mount --rbind /sys /mnt/sys + + # Create build mount point and mount + mkdir -p /mnt/tmp + mount /dev/xvdc /mnt/tmp + chmod 777 /mnt/tmp + + # Copy apparmor profiles + chmod 644 /tmp/apparmor_profiles/* + cp -r /tmp/apparmor_profiles /mnt/tmp/ + + # Copy migrations + cp -r /tmp/migrations /mnt/tmp/ + + # Copy the bootstrap script into place and execute inside chroot + cp /tmp/chroot-bootstrap-nix.sh /mnt/tmp/chroot-bootstrap-nix.sh + chroot /mnt /tmp/chroot-bootstrap-nix.sh + rm -f /mnt/tmp/chroot-bootstrap-nix.sh + echo "${POSTGRES_tealbase_VERSION}" > /mnt/root/tealbase-release + + # Copy the nvme identification script into /sbin inside the chroot + mkdir -p /mnt/sbin + cp /tmp/ebsnvme-id /mnt/sbin/ebsnvme-id + chmod +x /mnt/sbin/ebsnvme-id + + # Copy the udev rules for identifying nvme devices into the chroot + mkdir -p /mnt/etc/udev/rules.d + cp /tmp/70-ec2-nvme-devices.rules \ + /mnt/etc/udev/rules.d/70-ec2-nvme-devices.rules + + #Copy custom cloud-init + rm -f /mnt/etc/cloud/cloud.cfg + cp /tmp/cloud.cfg /mnt/etc/cloud/cloud.cfg + + sleep 2 +} + +function download_ccache { + docker cp ccachedata:/build/ccache/. 
/mnt/tmp/ccache
+}
+
+function execute_playbook {
+
+tee /etc/ansible/ansible.cfg <' to create a new migration file.
+
+Examples:
+  nix run .#dbmate-tool
+  nix run .#dbmate-tool -- --version 15
+  nix run .#dbmate-tool -- --version 16 --port 5433
+
+```
+
+This can also be run from a GitHub "flake url", for example:
+
+```shell
+nix run github:tealbase/postgres#dbmate-tool -- --version 15
+
+or
+
+nix run github:tealbase/postgres/mybranch#dbmate-tool -- --version 15
+```
+# tealbase/migrations
+
+`tealbase/migrations` is a consolidation of SQL migrations from:
+
+- tealbase/postgres
+- tealbase/tealbase
+- tealbase/cli
+- tealbase/infrastructure (internal)
+
+aiming to provide a single source of truth for migrations on the platform that can be depended upon by those components. For more information on goals, see [the RFC](https://www.notion.so/tealbase/Centralize-SQL-Migrations-cd3847ae027d4f2bba9defb2cc82f69a).
+
+
+
+## How it was Created
+
+Migrations were pulled (in order) from:
+
+1. [init-scripts/postgres](https://github.com/tealbase/infrastructure/tree/develop/init-scripts/postgres) => [db/init-scripts](db/init-scripts)
+2. [init-scripts/migrations](https://github.com/tealbase/infrastructure/tree/develop/init-scripts/migrations) => [db/migrations](db/migrations)
+
+For compatibility with hosted projects, we include [migrate.sh](migrate.sh), which executes migrations in the same order as the AMI build:
+
+1. Run all `db/init-scripts` with the `postgres` superuser role.
+2. Run all `db/migrations` with the `tealbase_admin` superuser role.
+3. Finalize role passwords with `/etc/postgresql.schema.sql` if present.
+
+Additionally, the [tealbase/postgres](https://github.com/tealbase/postgres/blob/develop/ansible/playbook-docker.yml#L9) image contains several migration scripts to configure default extensions. These are run first by the docker entrypoint and are included in the AMI by ansible.
+
+
+
+## Guidelines
+
+- Migrations are append-only. Never edit existing migrations once they are on master.
+- Migrations in `migrations/db/migrations` have to be idempotent.
+- Self-contained components (gotrue, storage, realtime) may contain their own migrations.
+- Self-hosted tealbase users should update role passwords separately after running all migrations.
+- Prod release is done by publishing a new GitHub release on the master branch.
+
+## Requirements
+
+- [dbmate](https://github.com/amacneil/dbmate)
+- [docker-compose](https://docs.docker.com/compose/)
+
+## Usage
+
+### Add a Migration
+
+First, start a local postgres server in another terminal window:
+
+```shell
+# Start the database server in another window
+nix run .#start-server 15
+```
+
+Then, in your main terminal window, run:
+
+```shell
+nix develop
+```
+
+in the root of `tealbase/postgres`.
+
+Next run:
+
+```shell
+# Create a new migration (make sure to specify the migrations directory)
+dbmate --migrations-dir="migrations/db/migrations" new ''
+```
+
+Then, populate the migration at `./migrations/db/xxxxxxxxx_` and make sure it runs successfully with:
+
+```shell
+dbmate --no-dump-schema --migrations-dir="migrations/db/migrations" up
+```
+
+Note: Migrations are applied using the `tealbase_admin` superuser role, as specified in the "How it was Created" section above.
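For illustration, a filled-in migration created by the command above might look like the following. The timestamp, table name, and SQL body are hypothetical, but the `-- migrate:up` / `-- migrate:down` markers are the ones dbmate expects (the same markers appear in the shipped migrations below):

```shell
# Hypothetical example only: populate the generated migration file.
cat > migrations/db/migrations/20240101000000_add_profiles.sql <<'EOF'
-- migrate:up
create table if not exists public.profiles (
    id uuid primary key,
    username text unique
);

-- migrate:down
drop table if exists public.profiles;
EOF

# Apply it against the locally running server.
dbmate --no-dump-schema --migrations-dir="migrations/db/migrations" up
```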
### Adding a migration with docker-compose

dbmate can optionally be run locally using Docker:

```shell
# Start the database server
docker-compose up

# Create a new migration
docker-compose run --rm dbmate new '<migration-name>'
```

Then, populate the migration at `./db/migrations/xxxxxxxxx_<migration-name>.sql` and make sure it executes successfully with:

```shell
docker-compose run --rm dbmate up
```

### Updating schema.sql for each major version

After making changes to migrations, you should update the schema.sql files for each major version of PostgreSQL:

```shell
# First, stop any running PostgreSQL servers.
# Then, from the root of tealbase/postgres, run:
nix run .#dbmate-tool -- --version all
```

This will automatically create a schema.sql file for each major version of PostgreSQL and OrioleDB (the files are named like `schema-<version>.sql` and `schema-oriole-<version>.sql`). Commit these changes to your repository and push to your branch. The workflow in `.github/workflows/test.yml` will re-run this command in CI and perform a `git diff` to verify that the migrations are idempotent and that the latest changes have been committed.

## Testing

In addition to the CI tests mentioned above, you can test migrations locally by running the following check for each major version of Postgres, one at a time.

Examples:

```
nix build .#checks.aarch64-darwin.psql_15 -L
nix build .#checks.aarch64-darwin.psql_17 -L
nix build .#checks.aarch64-darwin.psql_orioledb-17 -L
```

(Note that the evaluation and Nix build of the Postgres package "bundle" for each major version must succeed here, even though we run one version at a time. If you made changes to Postgres, extensions, or wrappers, those may rebuild when you run this; otherwise, the prebuilt versions will usually be downloaded from the tealbase Nix binary cache.)

At the end of these commands, you will see the output of both the `pg_regress` tests and the migration tests.

See [Adding Tests](https://github.com/tealbase/postgres/blob/develop/nix/docs/adding-tests.md) for more information.
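As a recap of the steps above, a full local iteration might look like the following sketch. It only reuses commands already documented in this README; the migration name is a placeholder, and the `aarch64-darwin` check mirrors the examples above (substitute your own system):

```shell
# Terminal 1: start a local Postgres 15 server
nix run .#start-server 15

# Terminal 2: from the root of tealbase/postgres
nix develop
dbmate --migrations-dir="migrations/db/migrations" new 'my-change'
# ...edit the generated file under migrations/db/migrations/...
dbmate --no-dump-schema --migrations-dir="migrations/db/migrations" up

# Stop the Postgres server, then regenerate schema files for every major version
nix run .#dbmate-tool -- --version all

# Run the per-version checks one at a time
nix build .#checks.aarch64-darwin.psql_15 -L
```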
diff --git a/migrations/db/init-scripts/00000000000000-initial-schema.sql b/migrations/db/init-scripts/00000000000000-initial-schema.sql new file mode 100644 index 0000000..becb941 --- /dev/null +++ b/migrations/db/init-scripts/00000000000000-initial-schema.sql @@ -0,0 +1,57 @@ +-- migrate:up + +-- Set up realtime +-- defaults to empty publication +create publication tealbase_realtime; + +-- tealbase super admin +alter user tealbase_admin with superuser createdb createrole replication bypassrls; + +-- tealbase replication user +create user tealbase_replication_admin with login replication; + +-- tealbase read-only user +create role tealbase_read_only_user with login bypassrls; +grant pg_read_all_data to tealbase_read_only_user; + +-- Extension namespacing +create schema if not exists extensions; +create extension if not exists "uuid-ossp" with schema extensions; +create extension if not exists pgcrypto with schema extensions; + + +-- Set up auth roles for the developer +create role anon nologin noinherit; +create role authenticated nologin noinherit; -- "logged in" user: web_user, app_user, etc +create role service_role nologin noinherit bypassrls; -- allow developers to create JWT's that bypass their policies + +create user authenticator noinherit; +grant anon to authenticator; +grant authenticated to authenticator; +grant service_role to authenticator; +grant tealbase_admin to authenticator; + +grant usage on schema public to postgres, anon, authenticated, service_role; +alter default privileges in schema public grant all on tables to postgres, anon, authenticated, service_role; +alter default privileges in schema public grant all on functions to postgres, anon, authenticated, service_role; +alter default privileges in schema public grant all on sequences to postgres, anon, authenticated, service_role; + +-- Allow Extensions to be used in the API +grant usage on schema extensions to postgres, anon, authenticated, service_role; + +-- Set up namespacing +alter user tealbase_admin SET search_path TO public, extensions; -- don't include the "auth" schema + +-- These are required so that the users receive grants whenever "tealbase_admin" creates tables/function +alter default privileges for user tealbase_admin in schema public grant all + on sequences to postgres, anon, authenticated, service_role; +alter default privileges for user tealbase_admin in schema public grant all + on tables to postgres, anon, authenticated, service_role; +alter default privileges for user tealbase_admin in schema public grant all + on functions to postgres, anon, authenticated, service_role; + +-- Set short statement/query timeouts for API roles +alter role anon set statement_timeout = '3s'; +alter role authenticated set statement_timeout = '8s'; + +-- migrate:down diff --git a/migrations/db/init-scripts/00000000000001-auth-schema.sql b/migrations/db/init-scripts/00000000000001-auth-schema.sql new file mode 100644 index 0000000..10f5d0b --- /dev/null +++ b/migrations/db/init-scripts/00000000000001-auth-schema.sql @@ -0,0 +1,123 @@ +-- migrate:up + +CREATE SCHEMA IF NOT EXISTS auth AUTHORIZATION tealbase_admin; + +-- auth.users definition + +CREATE TABLE auth.users ( + instance_id uuid NULL, + id uuid NOT NULL UNIQUE, + aud varchar(255) NULL, + "role" varchar(255) NULL, + email varchar(255) NULL UNIQUE, + encrypted_password varchar(255) NULL, + confirmed_at timestamptz NULL, + invited_at timestamptz NULL, + confirmation_token varchar(255) NULL, + confirmation_sent_at timestamptz NULL, + recovery_token varchar(255) 
NULL, + recovery_sent_at timestamptz NULL, + email_change_token varchar(255) NULL, + email_change varchar(255) NULL, + email_change_sent_at timestamptz NULL, + last_sign_in_at timestamptz NULL, + raw_app_meta_data jsonb NULL, + raw_user_meta_data jsonb NULL, + is_super_admin bool NULL, + created_at timestamptz NULL, + updated_at timestamptz NULL, + CONSTRAINT users_pkey PRIMARY KEY (id) +); +CREATE INDEX users_instance_id_email_idx ON auth.users USING btree (instance_id, email); +CREATE INDEX users_instance_id_idx ON auth.users USING btree (instance_id); +comment on table auth.users is 'Auth: Stores user login data within a secure schema.'; + +-- auth.refresh_tokens definition + +CREATE TABLE auth.refresh_tokens ( + instance_id uuid NULL, + id bigserial NOT NULL, + "token" varchar(255) NULL, + user_id varchar(255) NULL, + revoked bool NULL, + created_at timestamptz NULL, + updated_at timestamptz NULL, + CONSTRAINT refresh_tokens_pkey PRIMARY KEY (id) +); +CREATE INDEX refresh_tokens_instance_id_idx ON auth.refresh_tokens USING btree (instance_id); +CREATE INDEX refresh_tokens_instance_id_user_id_idx ON auth.refresh_tokens USING btree (instance_id, user_id); +CREATE INDEX refresh_tokens_token_idx ON auth.refresh_tokens USING btree (token); +comment on table auth.refresh_tokens is 'Auth: Store of tokens used to refresh JWT tokens once they expire.'; + +-- auth.instances definition + +CREATE TABLE auth.instances ( + id uuid NOT NULL, + uuid uuid NULL, + raw_base_config text NULL, + created_at timestamptz NULL, + updated_at timestamptz NULL, + CONSTRAINT instances_pkey PRIMARY KEY (id) +); +comment on table auth.instances is 'Auth: Manages users across multiple sites.'; + +-- auth.audit_log_entries definition + +CREATE TABLE auth.audit_log_entries ( + instance_id uuid NULL, + id uuid NOT NULL, + payload json NULL, + created_at timestamptz NULL, + CONSTRAINT audit_log_entries_pkey PRIMARY KEY (id) +); +CREATE INDEX audit_logs_instance_id_idx ON auth.audit_log_entries USING btree (instance_id); +comment on table auth.audit_log_entries is 'Auth: Audit trail for user actions.'; + +-- auth.schema_migrations definition + +CREATE TABLE auth.schema_migrations ( + "version" varchar(255) NOT NULL, + CONSTRAINT schema_migrations_pkey PRIMARY KEY ("version") +); +comment on table auth.schema_migrations is 'Auth: Manages updates to the auth system.'; + +INSERT INTO auth.schema_migrations (version) +VALUES ('20171026211738'), + ('20171026211808'), + ('20171026211834'), + ('20180103212743'), + ('20180108183307'), + ('20180119214651'), + ('20180125194653'); + +-- Gets the User ID from the request cookie +create or replace function auth.uid() returns uuid as $$ + select nullif(current_setting('request.jwt.claim.sub', true), '')::uuid; +$$ language sql stable; + +-- Gets the User ID from the request cookie +create or replace function auth.role() returns text as $$ + select nullif(current_setting('request.jwt.claim.role', true), '')::text; +$$ language sql stable; + +-- Gets the User email +create or replace function auth.email() returns text as $$ + select nullif(current_setting('request.jwt.claim.email', true), '')::text; +$$ language sql stable; + +-- usage on auth functions to API roles +GRANT USAGE ON SCHEMA auth TO anon, authenticated, service_role; + +-- tealbase super admin +CREATE USER tealbase_auth_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; +GRANT ALL PRIVILEGES ON SCHEMA auth TO tealbase_auth_admin; +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA auth TO tealbase_auth_admin; +GRANT ALL PRIVILEGES 
ON ALL SEQUENCES IN SCHEMA auth TO tealbase_auth_admin; +ALTER USER tealbase_auth_admin SET search_path = "auth"; +ALTER table "auth".users OWNER TO tealbase_auth_admin; +ALTER table "auth".refresh_tokens OWNER TO tealbase_auth_admin; +ALTER table "auth".audit_log_entries OWNER TO tealbase_auth_admin; +ALTER table "auth".instances OWNER TO tealbase_auth_admin; +ALTER table "auth".schema_migrations OWNER TO tealbase_auth_admin; + +-- migrate:down diff --git a/migrations/db/init-scripts/00000000000002-storage-schema.sql b/migrations/db/init-scripts/00000000000002-storage-schema.sql new file mode 100644 index 0000000..99d2eb7 --- /dev/null +++ b/migrations/db/init-scripts/00000000000002-storage-schema.sql @@ -0,0 +1,120 @@ +-- migrate:up + +CREATE SCHEMA IF NOT EXISTS storage AUTHORIZATION tealbase_admin; + +grant usage on schema storage to postgres, anon, authenticated, service_role; +alter default privileges in schema storage grant all on tables to postgres, anon, authenticated, service_role; +alter default privileges in schema storage grant all on functions to postgres, anon, authenticated, service_role; +alter default privileges in schema storage grant all on sequences to postgres, anon, authenticated, service_role; + +CREATE TABLE "storage"."buckets" ( + "id" text not NULL, + "name" text NOT NULL, + "owner" uuid, + "created_at" timestamptz DEFAULT now(), + "updated_at" timestamptz DEFAULT now(), + CONSTRAINT "buckets_owner_fkey" FOREIGN KEY ("owner") REFERENCES "auth"."users"("id"), + PRIMARY KEY ("id") +); +CREATE UNIQUE INDEX "bname" ON "storage"."buckets" USING BTREE ("name"); + +CREATE TABLE "storage"."objects" ( + "id" uuid NOT NULL DEFAULT extensions.uuid_generate_v4(), + "bucket_id" text, + "name" text, + "owner" uuid, + "created_at" timestamptz DEFAULT now(), + "updated_at" timestamptz DEFAULT now(), + "last_accessed_at" timestamptz DEFAULT now(), + "metadata" jsonb, + CONSTRAINT "objects_bucketId_fkey" FOREIGN KEY ("bucket_id") REFERENCES "storage"."buckets"("id"), + CONSTRAINT "objects_owner_fkey" FOREIGN KEY ("owner") REFERENCES "auth"."users"("id"), + PRIMARY KEY ("id") +); +CREATE UNIQUE INDEX "bucketid_objname" ON "storage"."objects" USING BTREE ("bucket_id","name"); +CREATE INDEX name_prefix_search ON storage.objects(name text_pattern_ops); + +ALTER TABLE storage.objects ENABLE ROW LEVEL SECURITY; + +CREATE FUNCTION storage.foldername(name text) + RETURNS text[] + LANGUAGE plpgsql +AS $function$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[1:array_length(_parts,1)-1]; +END +$function$; + +CREATE FUNCTION storage.filename(name text) + RETURNS text + LANGUAGE plpgsql +AS $function$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[array_length(_parts,1)]; +END +$function$; + +CREATE FUNCTION storage.extension(name text) + RETURNS text + LANGUAGE plpgsql +AS $function$ +DECLARE +_parts text[]; +_filename text; +BEGIN + select string_to_array(name, '/') into _parts; + select _parts[array_length(_parts,1)] into _filename; + -- @todo return the last part instead of 2 + return split_part(_filename, '.', 2); +END +$function$; + +CREATE FUNCTION storage.search(prefix text, bucketname text, limits int DEFAULT 100, levels int DEFAULT 1, offsets int DEFAULT 0) + RETURNS TABLE ( + name text, + id uuid, + updated_at TIMESTAMPTZ, + created_at TIMESTAMPTZ, + last_accessed_at TIMESTAMPTZ, + metadata jsonb + ) + LANGUAGE plpgsql +AS $function$ +DECLARE +_bucketId text; +BEGIN + -- will be 
replaced by migrations when server starts + -- saving space for cloud-init +END +$function$; + +-- create migrations table +-- https://github.com/ThomWright/postgres-migrations/blob/master/src/migrations/0_create-migrations-table.sql +-- we add this table here and not let it be auto-created so that the permissions are properly applied to it +CREATE TABLE IF NOT EXISTS storage.migrations ( + id integer PRIMARY KEY, + name varchar(100) UNIQUE NOT NULL, + hash varchar(40) NOT NULL, -- sha1 hex encoded hash of the file name and contents, to ensure it hasn't been altered since applying the migration + executed_at timestamp DEFAULT current_timestamp +); + +CREATE USER tealbase_storage_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; +GRANT ALL PRIVILEGES ON SCHEMA storage TO tealbase_storage_admin; +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA storage TO tealbase_storage_admin; +GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA storage TO tealbase_storage_admin; +ALTER USER tealbase_storage_admin SET search_path = "storage"; +ALTER table "storage".objects owner to tealbase_storage_admin; +ALTER table "storage".buckets owner to tealbase_storage_admin; +ALTER table "storage".migrations OWNER TO tealbase_storage_admin; +ALTER function "storage".foldername(text) owner to tealbase_storage_admin; +ALTER function "storage".filename(text) owner to tealbase_storage_admin; +ALTER function "storage".extension(text) owner to tealbase_storage_admin; +ALTER function "storage".search(text,text,int,int,int) owner to tealbase_storage_admin; + +-- migrate:down diff --git a/migrations/db/init-scripts/00000000000003-post-setup.sql b/migrations/db/init-scripts/00000000000003-post-setup.sql new file mode 100644 index 0000000..f7c9051 --- /dev/null +++ b/migrations/db/init-scripts/00000000000003-post-setup.sql @@ -0,0 +1,119 @@ +-- migrate:up + +ALTER ROLE tealbase_admin SET search_path TO "\$user",public,auth,extensions; +ALTER ROLE postgres SET search_path TO "\$user",public,extensions; + +-- Trigger for pg_cron +CREATE OR REPLACE FUNCTION extensions.grant_pg_cron_access() +RETURNS event_trigger +LANGUAGE plpgsql +AS $$ +DECLARE + schema_is_cron bool; +BEGIN + schema_is_cron = ( + SELECT n.nspname = 'cron' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_namespace AS n + ON ev.objid = n.oid + ); + + IF schema_is_cron + THEN + grant usage on schema cron to postgres with grant option; + + alter default privileges in schema cron grant all on tables to postgres with grant option; + alter default privileges in schema cron grant all on functions to postgres with grant option; + alter default privileges in schema cron grant all on sequences to postgres with grant option; + + alter default privileges for user tealbase_admin in schema cron grant all + on sequences to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on tables to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on functions to postgres with grant option; + + grant all privileges on all tables in schema cron to postgres with grant option; + + END IF; + +END; +$$; +CREATE EVENT TRIGGER issue_pg_cron_access ON ddl_command_end WHEN TAG in ('CREATE SCHEMA') +EXECUTE PROCEDURE extensions.grant_pg_cron_access(); +COMMENT ON FUNCTION extensions.grant_pg_cron_access IS 'Grants access to pg_cron'; + +-- Event trigger for pg_net +CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access() +RETURNS event_trigger +LANGUAGE plpgsql +AS $$ 
+BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_net' + ) + THEN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'tealbase_functions_admin' + ) + THEN + CREATE USER tealbase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + + GRANT USAGE ON SCHEMA net TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + END IF; +END; +$$; +COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net'; + +DO +$$ +BEGIN + IF NOT EXISTS ( + SELECT 1 + FROM pg_event_trigger + WHERE evtname = 'issue_pg_net_access' + ) THEN + CREATE EVENT TRIGGER issue_pg_net_access + ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE PROCEDURE extensions.grant_pg_net_access(); + END IF; +END +$$; + +-- tealbase dashboard user +CREATE ROLE dashboard_user NOSUPERUSER CREATEDB CREATEROLE REPLICATION; +GRANT ALL ON DATABASE postgres TO dashboard_user; +GRANT ALL ON SCHEMA auth TO dashboard_user; +GRANT ALL ON SCHEMA extensions TO dashboard_user; +GRANT ALL ON SCHEMA storage TO dashboard_user; +GRANT ALL ON ALL TABLES IN SCHEMA auth TO dashboard_user; +GRANT ALL ON ALL TABLES IN SCHEMA extensions TO dashboard_user; +-- GRANT ALL ON ALL TABLES IN SCHEMA storage TO dashboard_user; +GRANT ALL ON ALL SEQUENCES IN SCHEMA auth TO dashboard_user; +GRANT ALL ON ALL SEQUENCES IN SCHEMA storage TO dashboard_user; +GRANT ALL ON ALL SEQUENCES IN SCHEMA extensions TO dashboard_user; +GRANT ALL ON ALL ROUTINES IN SCHEMA auth TO dashboard_user; +GRANT ALL ON ALL ROUTINES IN SCHEMA storage TO dashboard_user; +GRANT ALL ON ALL ROUTINES IN SCHEMA extensions TO dashboard_user; + +-- migrate:down diff --git a/migrations/db/init-scripts/README.md b/migrations/db/init-scripts/README.md new file mode 100644 index 0000000..c12fe3b --- /dev/null +++ b/migrations/db/init-scripts/README.md @@ -0,0 +1,7 @@ + +The effects of these migrations are tested on: + +- [nix/tests/sql/auth.out](../../../nix/tests/expected/auth.out) +- [nix/tests/sql/storage.out](../../../nix/tests/expected/storage.out) +- [nix/tests/sql/roles.out](../../../nix/tests/expected/roles.out) +- [nix/tests/sql/evtrigs.out](../../../nix/tests/expected/evtrigs.out) diff --git a/migrations/db/migrate.sh b/migrations/db/migrate.sh new file mode 100755 index 0000000..2ed9fc0 --- 
/dev/null +++ b/migrations/db/migrate.sh @@ -0,0 +1,72 @@ +#!/bin/sh +set -eu + +####################################### +# Used by both ami and docker builds to initialise database schema. +# Env vars: +# POSTGRES_DB defaults to postgres +# POSTGRES_HOST defaults to localhost +# POSTGRES_PORT defaults to 5432 +# POSTGRES_PASSWORD defaults to "" +# USE_DBMATE defaults to "" +# Exit code: +# 0 if migration succeeds, non-zero on error. +####################################### + +export PGDATABASE="${POSTGRES_DB:-postgres}" +export PGHOST="${POSTGRES_HOST:-localhost}" +export PGPORT="${POSTGRES_PORT:-5432}" +export PGPASSWORD="${POSTGRES_PASSWORD:-}" + +# if args are supplied, simply forward to dbmate +connect="$PGPASSWORD@$PGHOST:$PGPORT/$PGDATABASE?sslmode=disable" +if [ "$#" -ne 0 ]; then + export DATABASE_URL="${DATABASE_URL:-postgres://tealbase_admin:$connect}" + exec dbmate "$@" + exit 0 +fi + +db=$( cd -- "$( dirname -- "$0" )" > /dev/null 2>&1 && pwd ) +if [ -z "${USE_DBMATE:-}" ]; then + psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U tealbase_admin <= 14 THEN + RETURN jsonb_build_object( + 'data', null::jsonb, + 'errors', array['pg_graphql extension is not enabled.'] + ); + ELSE + RETURN jsonb_build_object( + 'data', null::jsonb, + 'errors', array['pg_graphql is only available on projects running Postgres 14 onwards.'] + ); + END IF; + END; +$$; + +grant usage on schema graphql_public to postgres, anon, authenticated, service_role; +alter default privileges in schema graphql_public grant all on tables to postgres, anon, authenticated, service_role; +alter default privileges in schema graphql_public grant all on functions to postgres, anon, authenticated, service_role; +alter default privileges in schema graphql_public grant all on sequences to postgres, anon, authenticated, service_role; + +alter default privileges for user tealbase_admin in schema graphql_public grant all + on sequences to postgres, anon, authenticated, service_role; +alter default privileges for user tealbase_admin in schema graphql_public grant all + on tables to postgres, anon, authenticated, service_role; +alter default privileges for user tealbase_admin in schema graphql_public grant all + on functions to postgres, anon, authenticated, service_role; + +-- Trigger upon enabling pg_graphql +CREATE OR REPLACE FUNCTION extensions.grant_pg_graphql_access() +RETURNS event_trigger +LANGUAGE plpgsql +AS $func$ + DECLARE + func_is_graphql_resolve bool; + BEGIN + func_is_graphql_resolve = ( + SELECT n.proname = 'resolve' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_proc AS n + ON ev.objid = n.oid + ); + + IF func_is_graphql_resolve + THEN + grant usage on schema graphql to postgres, anon, authenticated, service_role; + grant all on function graphql.resolve to postgres, anon, authenticated, service_role; + + alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role; + + DROP FUNCTION IF EXISTS graphql_public.graphql; + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language sql + as $$ + SELECT graphql.resolve(query, coalesce(variables, '{}')); + $$; + + grant 
execute on function graphql.resolve to postgres, anon, authenticated, service_role; + END IF; + + END; +$func$; + +DROP EVENT TRIGGER IF EXISTS issue_pg_graphql_access; +CREATE EVENT TRIGGER issue_pg_graphql_access ON ddl_command_end WHEN TAG in ('CREATE FUNCTION') +EXECUTE PROCEDURE extensions.grant_pg_graphql_access(); +COMMENT ON FUNCTION extensions.grant_pg_graphql_access IS 'Grants access to pg_graphql'; + +-- Trigger upon dropping the pg_graphql extension +CREATE OR REPLACE FUNCTION extensions.set_graphql_placeholder() +RETURNS event_trigger +LANGUAGE plpgsql +AS $func$ + DECLARE + graphql_is_dropped bool; + BEGIN + graphql_is_dropped = ( + SELECT ev.schema_name = 'graphql_public' + FROM pg_event_trigger_dropped_objects() AS ev + WHERE ev.schema_name = 'graphql_public' + ); + + IF graphql_is_dropped + THEN + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language plpgsql + as $$ + DECLARE + server_version float; + BEGIN + server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float); + + IF server_version >= 14 THEN + RETURN jsonb_build_object( + 'data', null::jsonb, + 'errors', array['pg_graphql extension is not enabled.'] + ); + ELSE + RETURN jsonb_build_object( + 'data', null::jsonb, + 'errors', array['pg_graphql is only available on projects running Postgres 14 onwards.'] + ); + END IF; + END; + $$; + END IF; + + END; +$func$; + +DROP EVENT TRIGGER IF EXISTS issue_graphql_placeholder; +CREATE EVENT TRIGGER issue_graphql_placeholder ON sql_drop WHEN TAG in ('DROP EXTENSION') +EXECUTE PROCEDURE extensions.set_graphql_placeholder(); +COMMENT ON FUNCTION extensions.set_graphql_placeholder IS 'Reintroduces placeholder function for graphql_public.graphql'; + +-- migrate:down diff --git a/migrations/db/migrations/20220321174452_fix-postgrest-alter-type-event-trigger.sql b/migrations/db/migrations/20220321174452_fix-postgrest-alter-type-event-trigger.sql new file mode 100644 index 0000000..339def9 --- /dev/null +++ b/migrations/db/migrations/20220321174452_fix-postgrest-alter-type-event-trigger.sql @@ -0,0 +1,70 @@ +-- migrate:up + +drop event trigger if exists api_restart; +drop function if exists extensions.notify_api_restart(); + +-- https://postgrest.org/en/latest/schema_cache.html#finer-grained-event-trigger +-- watch create and alter +CREATE OR REPLACE FUNCTION extensions.pgrst_ddl_watch() RETURNS event_trigger AS $$ +DECLARE + cmd record; +BEGIN + FOR cmd IN SELECT * FROM pg_event_trigger_ddl_commands() + LOOP + IF cmd.command_tag IN ( + 'CREATE SCHEMA', 'ALTER SCHEMA' + , 'CREATE TABLE', 'CREATE TABLE AS', 'SELECT INTO', 'ALTER TABLE' + , 'CREATE FOREIGN TABLE', 'ALTER FOREIGN TABLE' + , 'CREATE VIEW', 'ALTER VIEW' + , 'CREATE MATERIALIZED VIEW', 'ALTER MATERIALIZED VIEW' + , 'CREATE FUNCTION', 'ALTER FUNCTION' + , 'CREATE TRIGGER' + , 'CREATE TYPE', 'ALTER TYPE' + , 'CREATE RULE' + , 'COMMENT' + ) + -- don't notify in case of CREATE TEMP table or other objects created on pg_temp + AND cmd.schema_name is distinct from 'pg_temp' + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$ LANGUAGE plpgsql; + +-- watch drop +CREATE OR REPLACE FUNCTION extensions.pgrst_drop_watch() RETURNS event_trigger AS $$ +DECLARE + obj record; +BEGIN + FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() + LOOP + IF obj.object_type IN ( + 'schema' + , 'table' + , 'foreign table' + , 'view' + , 'materialized view' + 
, 'function' + , 'trigger' + , 'type' + , 'rule' + ) + AND obj.is_temporary IS false -- no pg_temp objects + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$ LANGUAGE plpgsql; + +DROP EVENT TRIGGER IF EXISTS pgrst_ddl_watch; +CREATE EVENT TRIGGER pgrst_ddl_watch + ON ddl_command_end + EXECUTE PROCEDURE extensions.pgrst_ddl_watch(); + +DROP EVENT TRIGGER IF EXISTS pgrst_drop_watch; +CREATE EVENT TRIGGER pgrst_drop_watch + ON sql_drop + EXECUTE PROCEDURE extensions.pgrst_drop_watch(); + + +-- migrate:down diff --git a/migrations/db/migrations/20220322085208_gotrue-session-limit.sql b/migrations/db/migrations/20220322085208_gotrue-session-limit.sql new file mode 100644 index 0000000..2b36901 --- /dev/null +++ b/migrations/db/migrations/20220322085208_gotrue-session-limit.sql @@ -0,0 +1,4 @@ +-- migrate:up +ALTER ROLE tealbase_auth_admin SET idle_in_transaction_session_timeout TO 60000; + +-- migrate:down diff --git a/migrations/db/migrations/20220404205710_pg_graphql-on-by-default.sql b/migrations/db/migrations/20220404205710_pg_graphql-on-by-default.sql new file mode 100644 index 0000000..452901e --- /dev/null +++ b/migrations/db/migrations/20220404205710_pg_graphql-on-by-default.sql @@ -0,0 +1,161 @@ +-- migrate:up + +-- Update Trigger upon enabling pg_graphql +create or replace function extensions.grant_pg_graphql_access() + returns event_trigger + language plpgsql +AS $func$ +DECLARE + func_is_graphql_resolve bool; +BEGIN + func_is_graphql_resolve = ( + SELECT n.proname = 'resolve' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_proc AS n + ON ev.objid = n.oid + ); + + IF func_is_graphql_resolve + THEN + grant usage on schema graphql to postgres, anon, authenticated, service_role; + grant all on function graphql.resolve to postgres, anon, authenticated, service_role; + + alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role; + + -- Update public wrapper to pass all arguments through to the pg_graphql resolve func + DROP FUNCTION IF EXISTS graphql_public.graphql; + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language sql + as $$ + -- This changed + select graphql.resolve( + query := query, + variables := coalesce(variables, '{}'), + "operationName" := "operationName", + extensions := extensions + ); + $$; + + grant execute on function graphql.resolve to postgres, anon, authenticated, service_role; + END IF; + +END; +$func$; + +CREATE OR REPLACE FUNCTION extensions.set_graphql_placeholder() +RETURNS event_trigger +LANGUAGE plpgsql +AS $func$ + DECLARE + graphql_is_dropped bool; + BEGIN + graphql_is_dropped = ( + SELECT ev.schema_name = 'graphql_public' + FROM pg_event_trigger_dropped_objects() AS ev + WHERE ev.schema_name = 'graphql_public' + ); + + IF graphql_is_dropped + THEN + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language plpgsql + as $$ + DECLARE + server_version float; + BEGIN + server_version = (SELECT (SPLIT_PART((select version()), ' ', 
2))::float); + + IF server_version >= 14 THEN + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql extension is not enabled.' + ) + ) + ); + ELSE + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql is only available on projects running Postgres 14 onwards.' + ) + ) + ); + END IF; + END; + $$; + END IF; + + END; +$func$; + +-- GraphQL Placeholder Entrypoint +create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null +) + returns jsonb + language plpgsql +as $$ + DECLARE + server_version float; + BEGIN + server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float); + + IF server_version >= 14 THEN + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql extension is not enabled.' + ) + ) + ); + ELSE + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql is only available on projects running Postgres 14 onwards.' + ) + ) + ); + END IF; + END; +$$; + + +drop extension if exists pg_graphql; +-- Avoids limitation of only being able to load the extension via dashboard +-- Only install as well if the extension is actually installed +DO $$ +DECLARE + graphql_exists boolean; +BEGIN + graphql_exists = ( + select count(*) = 1 + from pg_available_extensions + where name = 'pg_graphql' + ); + + IF graphql_exists + THEN + create extension if not exists pg_graphql; + END IF; +END $$; + +-- migrate:down diff --git a/migrations/db/migrations/20220609081115_grant-tealbase-auth-admin-and-tealbase-storage-admin-to-postgres.sql b/migrations/db/migrations/20220609081115_grant-tealbase-auth-admin-and-tealbase-storage-admin-to-postgres.sql new file mode 100644 index 0000000..31e908a --- /dev/null +++ b/migrations/db/migrations/20220609081115_grant-tealbase-auth-admin-and-tealbase-storage-admin-to-postgres.sql @@ -0,0 +1,10 @@ +-- migrate:up + +-- This is done so that the `postgres` role can manage auth tables triggers, +-- storage tables policies, etc. which unblocks the revocation of superuser +-- access. 
+-- +-- More context: https://www.notion.so/tealbase/RFC-Postgres-Permissions-I-40cb4f61bd4145fd9e75ce657c0e31dd#bf5d853436384e6e8e339d0a2e684cbb +grant tealbase_auth_admin, tealbase_storage_admin to postgres; + +-- migrate:down diff --git a/migrations/db/migrations/20220613123923_pg_graphql-pg-dump-perms.sql b/migrations/db/migrations/20220613123923_pg_graphql-pg-dump-perms.sql new file mode 100644 index 0000000..915b1c0 --- /dev/null +++ b/migrations/db/migrations/20220613123923_pg_graphql-pg-dump-perms.sql @@ -0,0 +1,74 @@ +-- migrate:up + +create or replace function extensions.grant_pg_graphql_access() + returns event_trigger + language plpgsql +AS $func$ +DECLARE + func_is_graphql_resolve bool; +BEGIN + func_is_graphql_resolve = ( + SELECT n.proname = 'resolve' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_proc AS n + ON ev.objid = n.oid + ); + + IF func_is_graphql_resolve + THEN + -- Update public wrapper to pass all arguments through to the pg_graphql resolve func + DROP FUNCTION IF EXISTS graphql_public.graphql; + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language sql + as $$ + select graphql.resolve( + query := query, + variables := coalesce(variables, '{}'), + "operationName" := "operationName", + extensions := extensions + ); + $$; + + -- This hook executes when `graphql.resolve` is created. That is not necessarily the last + -- function in the extension so we need to grant permissions on existing entities AND + -- update default permissions to any others that are created after `graphql.resolve` + grant usage on schema graphql to postgres, anon, authenticated, service_role; + grant select on all tables in schema graphql to postgres, anon, authenticated, service_role; + grant execute on all functions in schema graphql to postgres, anon, authenticated, service_role; + grant all on all sequences in schema graphql to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role; + END IF; + +END; +$func$; + +-- Cycle the extension off and back on to apply the permissions update. 
+ +drop extension if exists pg_graphql; +-- Avoids limitation of only being able to load the extension via dashboard +-- Only install as well if the extension is actually installed +DO $$ +DECLARE + graphql_exists boolean; +BEGIN + graphql_exists = ( + select count(*) = 1 + from pg_available_extensions + where name = 'pg_graphql' + ); + + IF graphql_exists + THEN + create extension if not exists pg_graphql; + END IF; +END $$; + +-- migrate:down diff --git a/migrations/db/migrations/20220713082019_pg_cron-pg_net-temp-perms-fix.sql b/migrations/db/migrations/20220713082019_pg_cron-pg_net-temp-perms-fix.sql new file mode 100644 index 0000000..be81fbe --- /dev/null +++ b/migrations/db/migrations/20220713082019_pg_cron-pg_net-temp-perms-fix.sql @@ -0,0 +1,74 @@ +-- migrate:up +DO $$ +DECLARE + pg_cron_installed boolean; +BEGIN + -- checks if pg_cron is enabled + pg_cron_installed = ( + select count(*) = 1 + from pg_available_extensions + where name = 'pg_cron' + and installed_version is not null + ); + + IF pg_cron_installed + THEN + grant usage on schema cron to postgres with grant option; + grant all on all functions in schema cron to postgres with grant option; + + alter default privileges in schema cron grant all on tables to postgres with grant option; + alter default privileges in schema cron grant all on functions to postgres with grant option; + alter default privileges in schema cron grant all on sequences to postgres with grant option; + + alter default privileges for user tealbase_admin in schema cron grant all + on sequences to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on tables to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on functions to postgres with grant option; + + grant all privileges on all tables in schema cron to postgres with grant option; + END IF; +END $$; + +DO $$ +DECLARE + pg_net_installed boolean; +BEGIN + -- checks if pg_net is enabled + pg_net_installed = ( + select count(*) = 1 + from pg_available_extensions + where name = 'pg_net' + and installed_version is not null + + ); + + IF pg_net_installed + THEN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'tealbase_functions_admin' + ) + THEN + CREATE USER tealbase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + + GRANT USAGE ON SCHEMA net TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds 
integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + END IF; +END $$; + +-- migrate:down diff --git a/migrations/db/migrations/20221028101028_set_authenticator_timeout.sql b/migrations/db/migrations/20221028101028_set_authenticator_timeout.sql new file mode 100644 index 0000000..d38c2bf --- /dev/null +++ b/migrations/db/migrations/20221028101028_set_authenticator_timeout.sql @@ -0,0 +1,5 @@ +-- migrate:up +alter role authenticator set statement_timeout = '8s'; + +-- migrate:down + diff --git a/migrations/db/migrations/20221103090837_revoke_admin.sql b/migrations/db/migrations/20221103090837_revoke_admin.sql new file mode 100644 index 0000000..5565694 --- /dev/null +++ b/migrations/db/migrations/20221103090837_revoke_admin.sql @@ -0,0 +1,5 @@ +-- migrate:up +revoke tealbase_admin from authenticator; + +-- migrate:down + diff --git a/migrations/db/migrations/20221207154255_create_pgsodium_and_vault.sql b/migrations/db/migrations/20221207154255_create_pgsodium_and_vault.sql new file mode 100644 index 0000000..31ada68 --- /dev/null +++ b/migrations/db/migrations/20221207154255_create_pgsodium_and_vault.sql @@ -0,0 +1,53 @@ +-- migrate:up + +DO $$ +DECLARE + pgsodium_exists boolean; + vault_exists boolean; +BEGIN + IF EXISTS (SELECT FROM pg_available_extensions WHERE name = 'tealbase_vault' AND default_version != '0.2.8') THEN + CREATE EXTENSION IF NOT EXISTS tealbase_vault; + + -- for some reason extension custom scripts aren't run during AMI build, so + -- we manually run it here + grant usage on schema vault to postgres with grant option; + grant select, delete, truncate, references on vault.secrets, vault.decrypted_secrets to postgres with grant option; + grant execute on function vault.create_secret, vault.update_secret, vault._crypto_aead_det_decrypt to postgres with grant option; + grant usage on schema vault to service_role; + grant select, delete on vault.secrets, vault.decrypted_secrets to service_role; + grant execute on function vault.create_secret, vault.update_secret, vault._crypto_aead_det_decrypt to service_role; + ELSE + pgsodium_exists = ( + select count(*) = 1 + from pg_available_extensions + where name = 'pgsodium' + and default_version in ('3.1.6', '3.1.7', '3.1.8', '3.1.9') + ); + + vault_exists = ( + select count(*) = 1 + from pg_available_extensions + where name = 'tealbase_vault' + ); + + IF pgsodium_exists + THEN + create extension if not exists pgsodium; + + grant pgsodium_keyiduser to postgres with admin option; + grant pgsodium_keyholder to postgres with admin option; + grant pgsodium_keymaker to postgres with admin option; + + grant execute on function pgsodium.crypto_aead_det_decrypt(bytea, bytea, uuid, bytea) to service_role; + grant execute on function pgsodium.crypto_aead_det_encrypt(bytea, bytea, uuid, bytea) to service_role; + grant execute on function pgsodium.crypto_aead_det_keygen to service_role; + + IF vault_exists + THEN + create extension if not exists tealbase_vault; + END IF; + END IF; + END IF; +END $$; + +-- migrate:down diff --git a/migrations/db/migrations/20230201083204_grant_auth_roles_to_postgres.sql b/migrations/db/migrations/20230201083204_grant_auth_roles_to_postgres.sql new file mode 100644 index 0000000..f975813 --- /dev/null +++ b/migrations/db/migrations/20230201083204_grant_auth_roles_to_postgres.sql @@ -0,0 +1,5 @@ +-- migrate:up +grant anon, authenticated, service_role to postgres; + +-- migrate:down + diff --git a/migrations/db/migrations/20230224042246_grant_extensions_perms_for_postgres.sql 
b/migrations/db/migrations/20230224042246_grant_extensions_perms_for_postgres.sql new file mode 100644 index 0000000..f00fffe --- /dev/null +++ b/migrations/db/migrations/20230224042246_grant_extensions_perms_for_postgres.sql @@ -0,0 +1,10 @@ +-- migrate:up +grant all privileges on all tables in schema extensions to postgres with grant option; +grant all privileges on all routines in schema extensions to postgres with grant option; +grant all privileges on all sequences in schema extensions to postgres with grant option; +alter default privileges in schema extensions grant all on tables to postgres with grant option; +alter default privileges in schema extensions grant all on routines to postgres with grant option; +alter default privileges in schema extensions grant all on sequences to postgres with grant option; + +-- migrate:down + diff --git a/migrations/db/migrations/20230306081037_grant_pg_monitor_to_postgres.sql b/migrations/db/migrations/20230306081037_grant_pg_monitor_to_postgres.sql new file mode 100644 index 0000000..76c350e --- /dev/null +++ b/migrations/db/migrations/20230306081037_grant_pg_monitor_to_postgres.sql @@ -0,0 +1,5 @@ +-- migrate:up +grant pg_monitor to postgres; + +-- migrate:down + diff --git a/migrations/db/migrations/20230327032006_grant_auth_roles_to_tealbase_storage_admin.sql b/migrations/db/migrations/20230327032006_grant_auth_roles_to_tealbase_storage_admin.sql new file mode 100644 index 0000000..2a51a76 --- /dev/null +++ b/migrations/db/migrations/20230327032006_grant_auth_roles_to_tealbase_storage_admin.sql @@ -0,0 +1,4 @@ +-- migrate:up +grant anon, authenticated, service_role to tealbase_storage_admin; + +-- migrate:down diff --git a/migrations/db/migrations/20230529180330_alter_api_roles_for_inherit.sql b/migrations/db/migrations/20230529180330_alter_api_roles_for_inherit.sql new file mode 100644 index 0000000..d8291bb --- /dev/null +++ b/migrations/db/migrations/20230529180330_alter_api_roles_for_inherit.sql @@ -0,0 +1,15 @@ +-- migrate:up + +ALTER ROLE authenticated inherit; +ALTER ROLE anon inherit; +ALTER ROLE service_role inherit; + +DO $$ +BEGIN + IF EXISTS (SELECT FROM pg_roles WHERE rolname = 'pgsodium_keyholder') THEN + GRANT pgsodium_keyholder to service_role; + END IF; +END $$; + +-- migrate:down + diff --git a/migrations/db/migrations/20231013070755_grant_authenticator_to_tealbase_storage_admin.sql b/migrations/db/migrations/20231013070755_grant_authenticator_to_tealbase_storage_admin.sql new file mode 100644 index 0000000..dce6963 --- /dev/null +++ b/migrations/db/migrations/20231013070755_grant_authenticator_to_tealbase_storage_admin.sql @@ -0,0 +1,5 @@ +-- migrate:up +grant authenticator to tealbase_storage_admin; +revoke anon, authenticated, service_role from tealbase_storage_admin; + +-- migrate:down diff --git a/migrations/db/migrations/20231017062225_grant_pg_graphql_permissions_for_custom_roles.sql b/migrations/db/migrations/20231017062225_grant_pg_graphql_permissions_for_custom_roles.sql new file mode 100644 index 0000000..ca204bb --- /dev/null +++ b/migrations/db/migrations/20231017062225_grant_pg_graphql_permissions_for_custom_roles.sql @@ -0,0 +1,78 @@ +-- migrate:up + +create or replace function extensions.grant_pg_graphql_access() + returns event_trigger + language plpgsql +AS $func$ +DECLARE + func_is_graphql_resolve bool; +BEGIN + func_is_graphql_resolve = ( + SELECT n.proname = 'resolve' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_proc AS n + ON ev.objid = n.oid + ); + + IF func_is_graphql_resolve 
+ THEN + -- Update public wrapper to pass all arguments through to the pg_graphql resolve func + DROP FUNCTION IF EXISTS graphql_public.graphql; + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language sql + as $$ + select graphql.resolve( + query := query, + variables := coalesce(variables, '{}'), + "operationName" := "operationName", + extensions := extensions + ); + $$; + + -- This hook executes when `graphql.resolve` is created. That is not necessarily the last + -- function in the extension so we need to grant permissions on existing entities AND + -- update default permissions to any others that are created after `graphql.resolve` + grant usage on schema graphql to postgres, anon, authenticated, service_role; + grant select on all tables in schema graphql to postgres, anon, authenticated, service_role; + grant execute on all functions in schema graphql to postgres, anon, authenticated, service_role; + grant all on all sequences in schema graphql to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role; + + -- Allow postgres role to allow granting usage on graphql and graphql_public schemas to custom roles + grant usage on schema graphql_public to postgres with grant option; + grant usage on schema graphql to postgres with grant option; + END IF; + +END; +$func$; + +-- Cycle the extension off and back on to apply the permissions update. 
+ +drop extension if exists pg_graphql; +-- Avoids limitation of only being able to load the extension via dashboard +-- Only install as well if the extension is actually installed +DO $$ +DECLARE + graphql_exists boolean; +BEGIN + graphql_exists = ( + select count(*) = 1 + from pg_available_extensions + where name = 'pg_graphql' + ); + + IF graphql_exists + THEN + create extension if not exists pg_graphql; + END IF; +END $$; + +-- migrate:down diff --git a/migrations/db/migrations/20231020085357_revoke_writes_on_cron_job_from_postgres.sql b/migrations/db/migrations/20231020085357_revoke_writes_on_cron_job_from_postgres.sql new file mode 100644 index 0000000..482ea29 --- /dev/null +++ b/migrations/db/migrations/20231020085357_revoke_writes_on_cron_job_from_postgres.sql @@ -0,0 +1,48 @@ +-- migrate:up +do $$ +begin + if exists (select from pg_extension where extname = 'pg_cron') then + revoke all on table cron.job from postgres; + grant select on table cron.job to postgres with grant option; + end if; +end $$; + +CREATE OR REPLACE FUNCTION extensions.grant_pg_cron_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + IF EXISTS ( + SELECT + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_cron' + ) + THEN + grant usage on schema cron to postgres with grant option; + + alter default privileges in schema cron grant all on tables to postgres with grant option; + alter default privileges in schema cron grant all on functions to postgres with grant option; + alter default privileges in schema cron grant all on sequences to postgres with grant option; + + alter default privileges for user tealbase_admin in schema cron grant all + on sequences to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on tables to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on functions to postgres with grant option; + + grant all privileges on all tables in schema cron to postgres with grant option; + revoke all on table cron.job from postgres; + grant select on table cron.job to postgres with grant option; + END IF; +END; +$$; + +drop event trigger if exists issue_pg_cron_access; +alter function extensions.grant_pg_cron_access owner to tealbase_admin; +CREATE EVENT TRIGGER issue_pg_cron_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_cron_access(); + +-- migrate:down diff --git a/migrations/db/migrations/20231130133139_set_lock_timeout_to_authenticator_role.sql b/migrations/db/migrations/20231130133139_set_lock_timeout_to_authenticator_role.sql new file mode 100644 index 0000000..a0cee20 --- /dev/null +++ b/migrations/db/migrations/20231130133139_set_lock_timeout_to_authenticator_role.sql @@ -0,0 +1,4 @@ +-- migrate:up +ALTER ROLE authenticator set lock_timeout to '8s'; + +-- migrate:down diff --git a/migrations/db/migrations/20240124080435_alter_lo_export_lo_import_owner.sql b/migrations/db/migrations/20240124080435_alter_lo_export_lo_import_owner.sql new file mode 100644 index 0000000..5f79fd0 --- /dev/null +++ b/migrations/db/migrations/20240124080435_alter_lo_export_lo_import_owner.sql @@ -0,0 +1,6 @@ +-- migrate:up +alter function pg_catalog.lo_export owner to tealbase_admin; +alter function pg_catalog.lo_import(text) owner to tealbase_admin; +alter function pg_catalog.lo_import(text, oid) owner to tealbase_admin; + +-- migrate:down diff --git 
a/migrations/db/migrations/20240606060239_grant_predefined_roles_to_postgres.sql b/migrations/db/migrations/20240606060239_grant_predefined_roles_to_postgres.sql new file mode 100644 index 0000000..324e124 --- /dev/null +++ b/migrations/db/migrations/20240606060239_grant_predefined_roles_to_postgres.sql @@ -0,0 +1,4 @@ +-- migrate:up +grant pg_read_all_data, pg_signal_backend to postgres; + +-- migrate:down diff --git a/migrations/db/migrations/20241031003909_create_orioledb.sql b/migrations/db/migrations/20241031003909_create_orioledb.sql new file mode 100644 index 0000000..694fbb9 --- /dev/null +++ b/migrations/db/migrations/20241031003909_create_orioledb.sql @@ -0,0 +1,11 @@ +-- migrate:up +do $$ +begin + if exists (select 1 from pg_available_extensions where name = 'orioledb') then + if not exists (select 1 from pg_extension where extname = 'orioledb') then + create extension if not exists orioledb; + end if; + end if; +end $$; + +-- migrate:down diff --git a/migrations/db/migrations/20241215003910_backfill_pgmq_metadata.sql b/migrations/db/migrations/20241215003910_backfill_pgmq_metadata.sql new file mode 100644 index 0000000..5785272 --- /dev/null +++ b/migrations/db/migrations/20241215003910_backfill_pgmq_metadata.sql @@ -0,0 +1,79 @@ +-- migrate:up +do $$ +begin + -- Check if the pgmq.meta table exists + if exists ( + select + 1 + from + pg_catalog.pg_class c + join pg_catalog.pg_namespace n + on c.relnamespace = n.oid + where + n.nspname = 'pgmq' + and c.relname = 'meta' + and c.relkind = 'r' -- regular table + -- Make sure only expected columns exist and are correctly named + and ( + select array_agg(attname::text order by attname) + from pg_catalog.pg_attribute a + where + a.attnum > 0 + and a.attrelid = c.oid + ) = array['created_at', 'is_partitioned', 'is_unlogged', 'queue_name']::text[] + ) then + -- Insert data into pgmq.meta for all tables matching the naming pattern 'pgmq.q_' + insert into pgmq.meta (queue_name, is_partitioned, is_unlogged, created_at) + select + substring(c.relname from 3) as queue_name, + false as is_partitioned, + case when c.relpersistence = 'u' then true else false end as is_unlogged, + now() as created_at + from + pg_catalog.pg_class c + join pg_catalog.pg_namespace n + on c.relnamespace = n.oid + where + n.nspname = 'pgmq' + and c.relname like 'q_%' + and c.relkind in ('r', 'p', 'u') + on conflict (queue_name) do nothing; + end if; +end $$; + +-- For logical backups we detach the queue and archive tables from the pgmq extension +-- prior to pausing. Once detached, pgmq.drop_queue breaks. This re-attaches them +-- when a project is unpaused and allows pgmq.drop_queue to work normally. 
+do $$ +declare + ext_exists boolean; + tbl record; +begin + -- check if pgmq extension is installed + select exists(select 1 from pg_extension where extname = 'pgmq') into ext_exists; + + if ext_exists then + for tbl in + select c.relname as table_name + from pg_class c + join pg_namespace n on c.relnamespace = n.oid + where n.nspname = 'pgmq' + and c.relkind in ('r', 'u') -- include ordinary and unlogged tables + and (c.relname like 'q\_%' or c.relname like 'a\_%') + and c.oid not in ( + select d.objid + from pg_depend d + join pg_extension e on d.refobjid = e.oid + where e.extname = 'pgmq' + and d.classid = 'pg_class'::regclass + and d.deptype = 'e' + ) + loop + execute format('alter extension pgmq add table pgmq.%I', tbl.table_name); + end loop; + end if; +end; +$$; + + +-- migrate:down diff --git a/migrations/db/migrations/20250205060043_disable_log_statement_on_internal_roles.sql b/migrations/db/migrations/20250205060043_disable_log_statement_on_internal_roles.sql new file mode 100644 index 0000000..2d7fdf6 --- /dev/null +++ b/migrations/db/migrations/20250205060043_disable_log_statement_on_internal_roles.sql @@ -0,0 +1,6 @@ +-- migrate:up +alter role tealbase_admin set log_statement = none; +alter role tealbase_auth_admin set log_statement = none; +alter role tealbase_storage_admin set log_statement = none; + +-- migrate:down diff --git a/migrations/db/migrations/20250205144616_move_orioledb_to_extensions_schema.sql b/migrations/db/migrations/20250205144616_move_orioledb_to_extensions_schema.sql new file mode 100644 index 0000000..259a6b0 --- /dev/null +++ b/migrations/db/migrations/20250205144616_move_orioledb_to_extensions_schema.sql @@ -0,0 +1,26 @@ +-- migrate:up +do $$ +declare + ext_schema text; + extensions_schema_exists boolean; +begin + -- check if the "extensions" schema exists + select exists ( + select 1 from pg_namespace where nspname = 'extensions' + ) into extensions_schema_exists; + + if extensions_schema_exists then + -- check if the "orioledb" extension is in the "public" schema + select nspname into ext_schema + from pg_extension e + join pg_namespace n on e.extnamespace = n.oid + where extname = 'orioledb'; + + if ext_schema = 'public' then + execute 'alter extension orioledb set schema extensions'; + end if; + end if; +end $$; + +-- migrate:down + diff --git a/migrations/db/migrations/20250218031949_pgsodium_mask_role.sql b/migrations/db/migrations/20250218031949_pgsodium_mask_role.sql new file mode 100644 index 0000000..f44fa98 --- /dev/null +++ b/migrations/db/migrations/20250218031949_pgsodium_mask_role.sql @@ -0,0 +1,31 @@ +-- migrate:up + +DO $$ +BEGIN + IF EXISTS (SELECT FROM pg_extension WHERE extname = 'pgsodium') THEN + CREATE OR REPLACE FUNCTION pgsodium.mask_role(masked_role regrole, source_name text, view_name text) + RETURNS void + LANGUAGE plpgsql + SECURITY DEFINER + SET search_path TO '' + AS $function$ + BEGIN + EXECUTE format( + 'GRANT SELECT ON pgsodium.key TO %s', + masked_role); + + EXECUTE format( + 'GRANT pgsodium_keyiduser, pgsodium_keyholder TO %s', + masked_role); + + EXECUTE format( + 'GRANT ALL ON %I TO %s', + view_name, + masked_role); + RETURN; + END + $function$; + END IF; +END $$; + +-- migrate:down diff --git a/migrations/db/migrations/20250220051611_pg_net_perms_fix.sql b/migrations/db/migrations/20250220051611_pg_net_perms_fix.sql new file mode 100644 index 0000000..f914765 --- /dev/null +++ b/migrations/db/migrations/20250220051611_pg_net_perms_fix.sql @@ -0,0 +1,64 @@ +-- migrate:up +CREATE OR REPLACE FUNCTION 
extensions.grant_pg_net_access() +RETURNS event_trigger +LANGUAGE plpgsql +AS $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_net' + ) + THEN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'tealbase_functions_admin' + ) + THEN + CREATE USER tealbase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + + GRANT USAGE ON SCHEMA net TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + + IF EXISTS ( + SELECT FROM pg_extension + WHERE extname = 'pg_net' + -- all versions in use on existing projects as of 2025-02-20 + -- version 0.12.0 onwards don't need these applied + AND extversion IN ('0.2', '0.6', '0.7', '0.7.1', '0.8', '0.10.0', '0.11.0') + ) THEN + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + END IF; + END IF; +END; +$$; + +DO $$ +BEGIN + IF EXISTS (SELECT FROM pg_extension WHERE extname = 'pg_net') + THEN + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY INVOKER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY INVOKER; + + REVOKE EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM tealbase_functions_admin, postgres, anon, authenticated, service_role; + REVOKE EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM tealbase_functions_admin, postgres, anon, authenticated, service_role; + + GRANT ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO PUBLIC; + GRANT ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO PUBLIC; + END IF; +END $$; + +-- migrate:down diff --git a/migrations/db/migrations/20250312095419_pgbouncer_ownership.sql b/migrations/db/migrations/20250312095419_pgbouncer_ownership.sql new file mode 100644 index 0000000..b4c5b8a --- /dev/null +++ b/migrations/db/migrations/20250312095419_pgbouncer_ownership.sql @@ -0,0 +1,5 @@ +-- migrate:up +alter function pgbouncer.get_auth owner to tealbase_admin; +grant execute on function pgbouncer.get_auth(p_usename text) to postgres; + +-- migrate:down diff --git 
a/migrations/db/migrations/20250402065937_alter_internal_event_triggers_owner_to_tealbase_admin.sql b/migrations/db/migrations/20250402065937_alter_internal_event_triggers_owner_to_tealbase_admin.sql new file mode 100644 index 0000000..3ca18e3 --- /dev/null +++ b/migrations/db/migrations/20250402065937_alter_internal_event_triggers_owner_to_tealbase_admin.sql @@ -0,0 +1,10 @@ +-- migrate:up +drop event trigger if exists issue_pg_net_access; + +alter function extensions.grant_pg_net_access owner to tealbase_admin; + +CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_net_access(); + +-- migrate:down diff --git a/migrations/db/migrations/20250402093753_grant_subscription_to_postgres_16_and_above.sql b/migrations/db/migrations/20250402093753_grant_subscription_to_postgres_16_and_above.sql new file mode 100644 index 0000000..9e9d881 --- /dev/null +++ b/migrations/db/migrations/20250402093753_grant_subscription_to_postgres_16_and_above.sql @@ -0,0 +1,13 @@ +-- migrate:up +DO $$ +DECLARE + major_version INT; +BEGIN + SELECT current_setting('server_version_num')::INT / 10000 INTO major_version; + + IF major_version >= 16 THEN + GRANT pg_create_subscription TO postgres; + END IF; +END $$; + +-- migrate:down diff --git a/migrations/db/migrations/20250417190610_update_pgbouncer_get_auth.sql b/migrations/db/migrations/20250417190610_update_pgbouncer_get_auth.sql new file mode 100644 index 0000000..5e6e6a5 --- /dev/null +++ b/migrations/db/migrations/20250417190610_update_pgbouncer_get_auth.sql @@ -0,0 +1,24 @@ +-- migrate:up + +create or replace function pgbouncer.get_auth(p_usename text) returns table (username text, password text) + language plpgsql security definer + as $$ +begin + raise debug 'PgBouncer auth request: %', p_usename; + + return query + select + rolname::text, + case when rolvaliduntil < now() + then null + else rolpassword::text + end + from pg_authid + where rolname=$1 and rolcanlogin; +end; +$$; + +-- from migrations/db/migrations/20250312095419_pgbouncer_ownership.sql +grant execute on function pgbouncer.get_auth(p_usename text) to postgres; + +-- migrate:down diff --git a/migrations/db/migrations/20250421084701_revoke_admin_roles_from_postgres.sql b/migrations/db/migrations/20250421084701_revoke_admin_roles_from_postgres.sql new file mode 100644 index 0000000..dbd2216 --- /dev/null +++ b/migrations/db/migrations/20250421084701_revoke_admin_roles_from_postgres.sql @@ -0,0 +1,10 @@ +-- migrate:up +revoke tealbase_storage_admin from postgres; +revoke create on schema storage from postgres; +revoke all on storage.migrations from anon, authenticated, service_role, postgres; + +revoke tealbase_auth_admin from postgres; +revoke create on schema auth from postgres; +revoke all on auth.schema_migrations from dashboard_user, postgres; + +-- migrate:down diff --git a/migrations/db/migrations/20250605172253_grant_with_admin_to_postgres_16_and_above.sql b/migrations/db/migrations/20250605172253_grant_with_admin_to_postgres_16_and_above.sql new file mode 100644 index 0000000..5f2cd57 --- /dev/null +++ b/migrations/db/migrations/20250605172253_grant_with_admin_to_postgres_16_and_above.sql @@ -0,0 +1,13 @@ +-- migrate:up +DO $$ +DECLARE + major_version INT; +BEGIN + SELECT current_setting('server_version_num')::INT / 10000 INTO major_version; + + IF major_version >= 16 THEN + GRANT anon, authenticated, service_role, authenticator, pg_monitor, pg_read_all_data, pg_signal_backend TO postgres WITH ADMIN OPTION; + 
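+    -- For reference, a sketch of the version arithmetic used above:
+    -- server_version_num is e.g. 160004 on Postgres 16.4, so integer
+    -- division by 10000 yields the major version:
+    --   select current_setting('server_version_num')::int / 10000;  -- 16
+    -- WITH ADMIN OPTION additionally lets the postgres role re-grant these
+    -- roles to other roles, which a plain GRANT would not permit.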
END IF; +END $$; + +-- migrate:down diff --git a/migrations/db/migrations/20250623125453_tmp_grant_storage_tables_to_postgres_with_grant_option.sql b/migrations/db/migrations/20250623125453_tmp_grant_storage_tables_to_postgres_with_grant_option.sql new file mode 100644 index 0000000..465aee2 --- /dev/null +++ b/migrations/db/migrations/20250623125453_tmp_grant_storage_tables_to_postgres_with_grant_option.sql @@ -0,0 +1,6 @@ +-- migrate:up +-- TODO: remove this migration once STORAGE-211 is completed +-- DRI: bobbie +grant all on storage.buckets, storage.objects to postgres with grant option; + +-- migrate:down diff --git a/migrations/db/migrations/20250709135250_grant_storage_schema_to_postgres_with_grant_option.sql b/migrations/db/migrations/20250709135250_grant_storage_schema_to_postgres_with_grant_option.sql new file mode 100644 index 0000000..604ff99 --- /dev/null +++ b/migrations/db/migrations/20250709135250_grant_storage_schema_to_postgres_with_grant_option.sql @@ -0,0 +1,4 @@ +-- migrate:up +grant usage on schema storage to postgres with grant option; + +-- migrate:down diff --git a/migrations/db/migrations/20250710151649_tealbase_read_only_user_default_transaction_read_only.sql b/migrations/db/migrations/20250710151649_tealbase_read_only_user_default_transaction_read_only.sql new file mode 100644 index 0000000..18a387e --- /dev/null +++ b/migrations/db/migrations/20250710151649_tealbase_read_only_user_default_transaction_read_only.sql @@ -0,0 +1,4 @@ +-- migrate:up +alter role tealbase_read_only_user set default_transaction_read_only = on; + +-- migrate:down diff --git a/migrations/docker-compose.yaml b/migrations/docker-compose.yaml new file mode 100644 index 0000000..fc80b8c --- /dev/null +++ b/migrations/docker-compose.yaml @@ -0,0 +1,46 @@ +# Usage +# Start: docker-compose up +# Stop: docker-compose down -v + +version: "3.8" + +services: + db: + image: tealbase_postgres + restart: "no" + healthcheck: + test: pg_isready -U postgres -h localhost + interval: 2s + timeout: 2s + retries: 10 + environment: + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + + pg_prove: + image: horrendo/pg_prove + depends_on: + db: + condition: service_healthy + dbmate: + condition: service_completed_successfully + environment: + PGHOST: db + PGUSER: tealbase_admin + PGDATABASE: postgres + PGPASSWORD: ${POSTGRES_PASSWORD} + volumes: + - ./tests:/tests + command: pg_prove /tests/test.sql + + dbmate: + build: + context: . 
+ dockerfile: Dockerfile.dbmate + depends_on: + db: + condition: service_healthy + volumes: + - ./schema.sql:/db/schema.sql + environment: + DATABASE_URL: postgres://postgres:${POSTGRES_PASSWORD}@db/postgres?sslmode=disable + command: dump diff --git a/migrations/schema-15.sql b/migrations/schema-15.sql new file mode 100644 index 0000000..1aeadc2 --- /dev/null +++ b/migrations/schema-15.sql @@ -0,0 +1,999 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 15.8 +-- Dumped by pg_dump version 15.8 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET xmloption = content; +SET client_min_messages = warning; +SET row_security = off; + +-- +-- Name: auth; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA auth; + + +-- +-- Name: extensions; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA extensions; + + +-- +-- Name: graphql; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA graphql; + + +-- +-- Name: graphql_public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA graphql_public; + + +-- +-- Name: pgbouncer; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA pgbouncer; + + +-- +-- Name: realtime; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA realtime; + + +-- +-- Name: storage; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA storage; + + +-- +-- Name: vault; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA vault; + + +-- +-- Name: pg_graphql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pg_graphql WITH SCHEMA graphql; + + +-- +-- Name: EXTENSION pg_graphql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pg_graphql IS 'pg_graphql: GraphQL support'; + + +-- +-- Name: pg_stat_statements; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pg_stat_statements WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pg_stat_statements; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pg_stat_statements IS 'track planning and execution statistics of all SQL statements executed'; + + +-- +-- Name: pgcrypto; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pgcrypto WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pgcrypto; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pgcrypto IS 'cryptographic functions'; + + +-- +-- Name: tealbase_vault; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS tealbase_vault WITH SCHEMA vault; + + +-- +-- Name: EXTENSION tealbase_vault; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION tealbase_vault IS 'tealbase Vault Extension'; + + +-- +-- Name: uuid-ossp; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION "uuid-ossp"; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION "uuid-ossp" IS 'generate universally unique identifiers (UUIDs)'; + + +-- +-- Name: email(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.email() RETURNS text + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.email', true), '')::text; +$$; + + +-- +-- Name: role(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.role() RETURNS text + LANGUAGE sql STABLE + AS $$ + select 
nullif(current_setting('request.jwt.claim.role', true), '')::text; +$$; + + +-- +-- Name: uid(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.uid() RETURNS uuid + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.sub', true), '')::uuid; +$$; + + +-- +-- Name: grant_pg_cron_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_cron_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + IF EXISTS ( + SELECT + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_cron' + ) + THEN + grant usage on schema cron to postgres with grant option; + + alter default privileges in schema cron grant all on tables to postgres with grant option; + alter default privileges in schema cron grant all on functions to postgres with grant option; + alter default privileges in schema cron grant all on sequences to postgres with grant option; + + alter default privileges for user tealbase_admin in schema cron grant all + on sequences to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on tables to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on functions to postgres with grant option; + + grant all privileges on all tables in schema cron to postgres with grant option; + revoke all on table cron.job from postgres; + grant select on table cron.job to postgres with grant option; + END IF; +END; +$$; + + +-- +-- Name: FUNCTION grant_pg_cron_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_cron_access() IS 'Grants access to pg_cron'; + + +-- +-- Name: grant_pg_graphql_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_graphql_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $_$ +DECLARE + func_is_graphql_resolve bool; +BEGIN + func_is_graphql_resolve = ( + SELECT n.proname = 'resolve' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_proc AS n + ON ev.objid = n.oid + ); + + IF func_is_graphql_resolve + THEN + -- Update public wrapper to pass all arguments through to the pg_graphql resolve func + DROP FUNCTION IF EXISTS graphql_public.graphql; + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language sql + as $$ + select graphql.resolve( + query := query, + variables := coalesce(variables, '{}'), + "operationName" := "operationName", + extensions := extensions + ); + $$; + + -- This hook executes when `graphql.resolve` is created. 
That is not necessarily the last + -- function in the extension so we need to grant permissions on existing entities AND + -- update default permissions to any others that are created after `graphql.resolve` + grant usage on schema graphql to postgres, anon, authenticated, service_role; + grant select on all tables in schema graphql to postgres, anon, authenticated, service_role; + grant execute on all functions in schema graphql to postgres, anon, authenticated, service_role; + grant all on all sequences in schema graphql to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role; + + -- Allow postgres role to allow granting usage on graphql and graphql_public schemas to custom roles + grant usage on schema graphql_public to postgres with grant option; + grant usage on schema graphql to postgres with grant option; + END IF; + +END; +$_$; + + +-- +-- Name: FUNCTION grant_pg_graphql_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_graphql_access() IS 'Grants access to pg_graphql'; + + +-- +-- Name: grant_pg_net_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_net_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_net' + ) + THEN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'tealbase_functions_admin' + ) + THEN + CREATE USER tealbase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + + GRANT USAGE ON SCHEMA net TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + + IF EXISTS ( + SELECT FROM pg_extension + WHERE extname = 'pg_net' + -- all versions in use on existing projects as of 2025-02-20 + -- version 0.12.0 onwards don't need these applied + AND extversion IN ('0.2', '0.6', '0.7', '0.7.1', '0.8', '0.10.0', '0.11.0') + ) THEN + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + END IF; + END IF; +END; +$$; + + +-- +-- Name: FUNCTION grant_pg_net_access(); Type: COMMENT; 
Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_net_access() IS 'Grants access to pg_net'; + + +-- +-- Name: pgrst_ddl_watch(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.pgrst_ddl_watch() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +DECLARE + cmd record; +BEGIN + FOR cmd IN SELECT * FROM pg_event_trigger_ddl_commands() + LOOP + IF cmd.command_tag IN ( + 'CREATE SCHEMA', 'ALTER SCHEMA' + , 'CREATE TABLE', 'CREATE TABLE AS', 'SELECT INTO', 'ALTER TABLE' + , 'CREATE FOREIGN TABLE', 'ALTER FOREIGN TABLE' + , 'CREATE VIEW', 'ALTER VIEW' + , 'CREATE MATERIALIZED VIEW', 'ALTER MATERIALIZED VIEW' + , 'CREATE FUNCTION', 'ALTER FUNCTION' + , 'CREATE TRIGGER' + , 'CREATE TYPE', 'ALTER TYPE' + , 'CREATE RULE' + , 'COMMENT' + ) + -- don't notify in case of CREATE TEMP table or other objects created on pg_temp + AND cmd.schema_name is distinct from 'pg_temp' + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$; + + +-- +-- Name: pgrst_drop_watch(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.pgrst_drop_watch() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +DECLARE + obj record; +BEGIN + FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() + LOOP + IF obj.object_type IN ( + 'schema' + , 'table' + , 'foreign table' + , 'view' + , 'materialized view' + , 'function' + , 'trigger' + , 'type' + , 'rule' + ) + AND obj.is_temporary IS false -- no pg_temp objects + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$; + + +-- +-- Name: set_graphql_placeholder(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.set_graphql_placeholder() RETURNS event_trigger + LANGUAGE plpgsql + AS $_$ + DECLARE + graphql_is_dropped bool; + BEGIN + graphql_is_dropped = ( + SELECT ev.schema_name = 'graphql_public' + FROM pg_event_trigger_dropped_objects() AS ev + WHERE ev.schema_name = 'graphql_public' + ); + + IF graphql_is_dropped + THEN + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language plpgsql + as $$ + DECLARE + server_version float; + BEGIN + server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float); + + IF server_version >= 14 THEN + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql extension is not enabled.' + ) + ) + ); + ELSE + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql is only available on projects running Postgres 14 onwards.' 
+ ) + ) + ); + END IF; + END; + $$; + END IF; + + END; +$_$; + + +-- +-- Name: FUNCTION set_graphql_placeholder(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.set_graphql_placeholder() IS 'Reintroduces placeholder function for graphql_public.graphql'; + + +-- +-- Name: get_auth(text); Type: FUNCTION; Schema: pgbouncer; Owner: - +-- + +CREATE FUNCTION pgbouncer.get_auth(p_usename text) RETURNS TABLE(username text, password text) + LANGUAGE plpgsql SECURITY DEFINER + AS $_$ +begin + raise debug 'PgBouncer auth request: %', p_usename; + + return query + select + rolname::text, + case when rolvaliduntil < now() + then null + else rolpassword::text + end + from pg_authid + where rolname=$1 and rolcanlogin; +end; +$_$; + + +-- +-- Name: extension(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.extension(name text) RETURNS text + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +_filename text; +BEGIN + select string_to_array(name, '/') into _parts; + select _parts[array_length(_parts,1)] into _filename; + -- @todo return the last part instead of 2 + return split_part(_filename, '.', 2); +END +$$; + + +-- +-- Name: filename(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.filename(name text) RETURNS text + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[array_length(_parts,1)]; +END +$$; + + +-- +-- Name: foldername(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.foldername(name text) RETURNS text[] + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[1:array_length(_parts,1)-1]; +END +$$; + + +-- +-- Name: search(text, text, integer, integer, integer); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.search(prefix text, bucketname text, limits integer DEFAULT 100, levels integer DEFAULT 1, offsets integer DEFAULT 0) RETURNS TABLE(name text, id uuid, updated_at timestamp with time zone, created_at timestamp with time zone, last_accessed_at timestamp with time zone, metadata jsonb) + LANGUAGE plpgsql + AS $$ +DECLARE +_bucketId text; +BEGIN + -- will be replaced by migrations when server starts + -- saving space for cloud-init +END +$$; + + +SET default_tablespace = ''; + +SET default_table_access_method = heap; + +-- +-- Name: audit_log_entries; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.audit_log_entries ( + instance_id uuid, + id uuid NOT NULL, + payload json, + created_at timestamp with time zone +); + + +-- +-- Name: TABLE audit_log_entries; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.audit_log_entries IS 'Auth: Audit trail for user actions.'; + + +-- +-- Name: instances; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.instances ( + id uuid NOT NULL, + uuid uuid, + raw_base_config text, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE instances; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.instances IS 'Auth: Manages users across multiple sites.'; + + +-- +-- Name: refresh_tokens; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.refresh_tokens ( + instance_id uuid, + id bigint NOT NULL, + token character varying(255), + user_id character varying(255), + revoked boolean, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + 
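+-- The auth.uid(), auth.role() and auth.email() helpers defined earlier in this
+-- dump are typically consumed from row level security policies rather than
+-- called directly. A minimal sketch (public.profiles is a hypothetical table,
+-- not part of this schema):
+--   create table public.profiles (id uuid primary key, bio text);
+--   alter table public.profiles enable row level security;
+--   create policy "own profile" on public.profiles
+--     for select using (auth.uid() = id);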
+-- +-- Name: TABLE refresh_tokens; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.refresh_tokens IS 'Auth: Store of tokens used to refresh JWT tokens once they expire.'; + + +-- +-- Name: refresh_tokens_id_seq; Type: SEQUENCE; Schema: auth; Owner: - +-- + +CREATE SEQUENCE auth.refresh_tokens_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: refresh_tokens_id_seq; Type: SEQUENCE OWNED BY; Schema: auth; Owner: - +-- + +ALTER SEQUENCE auth.refresh_tokens_id_seq OWNED BY auth.refresh_tokens.id; + + +-- +-- Name: schema_migrations; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.schema_migrations ( + version character varying(255) NOT NULL +); + + +-- +-- Name: TABLE schema_migrations; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.schema_migrations IS 'Auth: Manages updates to the auth system.'; + + +-- +-- Name: users; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.users ( + instance_id uuid, + id uuid NOT NULL, + aud character varying(255), + role character varying(255), + email character varying(255), + encrypted_password character varying(255), + confirmed_at timestamp with time zone, + invited_at timestamp with time zone, + confirmation_token character varying(255), + confirmation_sent_at timestamp with time zone, + recovery_token character varying(255), + recovery_sent_at timestamp with time zone, + email_change_token character varying(255), + email_change character varying(255), + email_change_sent_at timestamp with time zone, + last_sign_in_at timestamp with time zone, + raw_app_meta_data jsonb, + raw_user_meta_data jsonb, + is_super_admin boolean, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE users; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.users IS 'Auth: Stores user login data within a secure schema.'; + + +-- +-- Name: schema_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.schema_migrations ( + version character varying(128) NOT NULL +); + + +-- +-- Name: buckets; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.buckets ( + id text NOT NULL, + name text NOT NULL, + owner uuid, + created_at timestamp with time zone DEFAULT now(), + updated_at timestamp with time zone DEFAULT now() +); + + +-- +-- Name: migrations; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.migrations ( + id integer NOT NULL, + name character varying(100) NOT NULL, + hash character varying(40) NOT NULL, + executed_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP +); + + +-- +-- Name: objects; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.objects ( + id uuid DEFAULT extensions.uuid_generate_v4() NOT NULL, + bucket_id text, + name text, + owner uuid, + created_at timestamp with time zone DEFAULT now(), + updated_at timestamp with time zone DEFAULT now(), + last_accessed_at timestamp with time zone DEFAULT now(), + metadata jsonb +); + + +-- +-- Name: refresh_tokens id; Type: DEFAULT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.refresh_tokens ALTER COLUMN id SET DEFAULT nextval('auth.refresh_tokens_id_seq'::regclass); + + +-- +-- Name: audit_log_entries audit_log_entries_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.audit_log_entries + ADD CONSTRAINT audit_log_entries_pkey PRIMARY KEY (id); + + +-- +-- Name: instances instances_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER 
TABLE ONLY auth.instances + ADD CONSTRAINT instances_pkey PRIMARY KEY (id); + + +-- +-- Name: refresh_tokens refresh_tokens_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.refresh_tokens + ADD CONSTRAINT refresh_tokens_pkey PRIMARY KEY (id); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: users users_email_key; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.users + ADD CONSTRAINT users_email_key UNIQUE (email); + + +-- +-- Name: users users_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.users + ADD CONSTRAINT users_pkey PRIMARY KEY (id); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: buckets buckets_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.buckets + ADD CONSTRAINT buckets_pkey PRIMARY KEY (id); + + +-- +-- Name: migrations migrations_name_key; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.migrations + ADD CONSTRAINT migrations_name_key UNIQUE (name); + + +-- +-- Name: migrations migrations_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.migrations + ADD CONSTRAINT migrations_pkey PRIMARY KEY (id); + + +-- +-- Name: objects objects_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT objects_pkey PRIMARY KEY (id); + + +-- +-- Name: audit_logs_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX audit_logs_instance_id_idx ON auth.audit_log_entries USING btree (instance_id); + + +-- +-- Name: refresh_tokens_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_instance_id_idx ON auth.refresh_tokens USING btree (instance_id); + + +-- +-- Name: refresh_tokens_instance_id_user_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_instance_id_user_id_idx ON auth.refresh_tokens USING btree (instance_id, user_id); + + +-- +-- Name: refresh_tokens_token_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_token_idx ON auth.refresh_tokens USING btree (token); + + +-- +-- Name: users_instance_id_email_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX users_instance_id_email_idx ON auth.users USING btree (instance_id, email); + + +-- +-- Name: users_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX users_instance_id_idx ON auth.users USING btree (instance_id); + + +-- +-- Name: bname; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE UNIQUE INDEX bname ON storage.buckets USING btree (name); + + +-- +-- Name: bucketid_objname; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE UNIQUE INDEX bucketid_objname ON storage.objects USING btree (bucket_id, name); + + +-- +-- Name: name_prefix_search; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE INDEX name_prefix_search ON storage.objects USING btree (name text_pattern_ops); + + +-- +-- Name: buckets buckets_owner_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.buckets + ADD CONSTRAINT buckets_owner_fkey FOREIGN KEY (owner) REFERENCES auth.users(id); + + +-- +-- Name: objects 
objects_bucketId_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT "objects_bucketId_fkey" FOREIGN KEY (bucket_id) REFERENCES storage.buckets(id); + + +-- +-- Name: objects objects_owner_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT objects_owner_fkey FOREIGN KEY (owner) REFERENCES auth.users(id); + + +-- +-- Name: objects; Type: ROW SECURITY; Schema: storage; Owner: - +-- + +ALTER TABLE storage.objects ENABLE ROW LEVEL SECURITY; + +-- +-- Name: tealbase_realtime; Type: PUBLICATION; Schema: -; Owner: - +-- + +CREATE PUBLICATION tealbase_realtime WITH (publish = 'insert, update, delete, truncate'); + + +-- +-- Name: issue_graphql_placeholder; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_graphql_placeholder ON sql_drop + WHEN TAG IN ('DROP EXTENSION') + EXECUTE FUNCTION extensions.set_graphql_placeholder(); + + +-- +-- Name: issue_pg_cron_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_cron_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_cron_access(); + + +-- +-- Name: issue_pg_graphql_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_graphql_access ON ddl_command_end + WHEN TAG IN ('CREATE FUNCTION') + EXECUTE FUNCTION extensions.grant_pg_graphql_access(); + + +-- +-- Name: issue_pg_net_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_net_access(); + + +-- +-- Name: pgrst_ddl_watch; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER pgrst_ddl_watch ON ddl_command_end + EXECUTE FUNCTION extensions.pgrst_ddl_watch(); + + +-- +-- Name: pgrst_drop_watch; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER pgrst_drop_watch ON sql_drop + EXECUTE FUNCTION extensions.pgrst_drop_watch(); + + +-- +-- PostgreSQL database dump complete +-- + diff --git a/migrations/schema-17.sql b/migrations/schema-17.sql new file mode 100644 index 0000000..7fca1d7 --- /dev/null +++ b/migrations/schema-17.sql @@ -0,0 +1,1000 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 17.4 +-- Dumped by pg_dump version 17.4 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET transaction_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET xmloption = content; +SET client_min_messages = warning; +SET row_security = off; + +-- +-- Name: auth; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA auth; + + +-- +-- Name: extensions; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA extensions; + + +-- +-- Name: graphql; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA graphql; + + +-- +-- Name: graphql_public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA graphql_public; + + +-- +-- Name: pgbouncer; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA pgbouncer; + + +-- +-- Name: realtime; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA realtime; + + +-- +-- Name: storage; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA storage; + + +-- +-- Name: vault; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA vault; + + +-- +-- Name: pg_graphql; 
Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pg_graphql WITH SCHEMA graphql; + + +-- +-- Name: EXTENSION pg_graphql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pg_graphql IS 'pg_graphql: GraphQL support'; + + +-- +-- Name: pg_stat_statements; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pg_stat_statements WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pg_stat_statements; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pg_stat_statements IS 'track planning and execution statistics of all SQL statements executed'; + + +-- +-- Name: pgcrypto; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pgcrypto WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pgcrypto; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pgcrypto IS 'cryptographic functions'; + + +-- +-- Name: tealbase_vault; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS tealbase_vault WITH SCHEMA vault; + + +-- +-- Name: EXTENSION tealbase_vault; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION tealbase_vault IS 'tealbase Vault Extension'; + + +-- +-- Name: uuid-ossp; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION "uuid-ossp"; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION "uuid-ossp" IS 'generate universally unique identifiers (UUIDs)'; + + +-- +-- Name: email(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.email() RETURNS text + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.email', true), '')::text; +$$; + + +-- +-- Name: role(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.role() RETURNS text + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.role', true), '')::text; +$$; + + +-- +-- Name: uid(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.uid() RETURNS uuid + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.sub', true), '')::uuid; +$$; + + +-- +-- Name: grant_pg_cron_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_cron_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + IF EXISTS ( + SELECT + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_cron' + ) + THEN + grant usage on schema cron to postgres with grant option; + + alter default privileges in schema cron grant all on tables to postgres with grant option; + alter default privileges in schema cron grant all on functions to postgres with grant option; + alter default privileges in schema cron grant all on sequences to postgres with grant option; + + alter default privileges for user tealbase_admin in schema cron grant all + on sequences to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on tables to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on functions to postgres with grant option; + + grant all privileges on all tables in schema cron to postgres with grant option; + revoke all on table cron.job from postgres; + grant select on table cron.job to postgres with grant option; + END IF; +END; +$$; + + +-- +-- Name: FUNCTION grant_pg_cron_access(); Type: COMMENT; Schema: extensions; Owner: 
- +-- + +COMMENT ON FUNCTION extensions.grant_pg_cron_access() IS 'Grants access to pg_cron'; + + +-- +-- Name: grant_pg_graphql_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_graphql_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $_$ +DECLARE + func_is_graphql_resolve bool; +BEGIN + func_is_graphql_resolve = ( + SELECT n.proname = 'resolve' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_proc AS n + ON ev.objid = n.oid + ); + + IF func_is_graphql_resolve + THEN + -- Update public wrapper to pass all arguments through to the pg_graphql resolve func + DROP FUNCTION IF EXISTS graphql_public.graphql; + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language sql + as $$ + select graphql.resolve( + query := query, + variables := coalesce(variables, '{}'), + "operationName" := "operationName", + extensions := extensions + ); + $$; + + -- This hook executes when `graphql.resolve` is created. That is not necessarily the last + -- function in the extension so we need to grant permissions on existing entities AND + -- update default permissions to any others that are created after `graphql.resolve` + grant usage on schema graphql to postgres, anon, authenticated, service_role; + grant select on all tables in schema graphql to postgres, anon, authenticated, service_role; + grant execute on all functions in schema graphql to postgres, anon, authenticated, service_role; + grant all on all sequences in schema graphql to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role; + + -- Allow postgres role to allow granting usage on graphql and graphql_public schemas to custom roles + grant usage on schema graphql_public to postgres with grant option; + grant usage on schema graphql to postgres with grant option; + END IF; + +END; +$_$; + + +-- +-- Name: FUNCTION grant_pg_graphql_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_graphql_access() IS 'Grants access to pg_graphql'; + + +-- +-- Name: grant_pg_net_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_net_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_net' + ) + THEN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'tealbase_functions_admin' + ) + THEN + CREATE USER tealbase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + + GRANT USAGE ON SCHEMA net TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + + IF EXISTS ( + SELECT FROM pg_extension + WHERE extname = 'pg_net' + -- all versions in use on existing projects as of 2025-02-20 + -- version 0.12.0 onwards don't need these applied + AND extversion IN ('0.2', '0.6', '0.7', '0.7.1', '0.8', '0.10.0', '0.11.0') + ) THEN + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY 
DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + END IF; + END IF; +END; +$$; + + +-- +-- Name: FUNCTION grant_pg_net_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_net_access() IS 'Grants access to pg_net'; + + +-- +-- Name: pgrst_ddl_watch(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.pgrst_ddl_watch() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +DECLARE + cmd record; +BEGIN + FOR cmd IN SELECT * FROM pg_event_trigger_ddl_commands() + LOOP + IF cmd.command_tag IN ( + 'CREATE SCHEMA', 'ALTER SCHEMA' + , 'CREATE TABLE', 'CREATE TABLE AS', 'SELECT INTO', 'ALTER TABLE' + , 'CREATE FOREIGN TABLE', 'ALTER FOREIGN TABLE' + , 'CREATE VIEW', 'ALTER VIEW' + , 'CREATE MATERIALIZED VIEW', 'ALTER MATERIALIZED VIEW' + , 'CREATE FUNCTION', 'ALTER FUNCTION' + , 'CREATE TRIGGER' + , 'CREATE TYPE', 'ALTER TYPE' + , 'CREATE RULE' + , 'COMMENT' + ) + -- don't notify in case of CREATE TEMP table or other objects created on pg_temp + AND cmd.schema_name is distinct from 'pg_temp' + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$; + + +-- +-- Name: pgrst_drop_watch(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.pgrst_drop_watch() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +DECLARE + obj record; +BEGIN + FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() + LOOP + IF obj.object_type IN ( + 'schema' + , 'table' + , 'foreign table' + , 'view' + , 'materialized view' + , 'function' + , 'trigger' + , 'type' + , 'rule' + ) + AND obj.is_temporary IS false -- no pg_temp objects + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$; + + +-- +-- Name: set_graphql_placeholder(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.set_graphql_placeholder() RETURNS event_trigger + LANGUAGE plpgsql + AS $_$ + DECLARE + graphql_is_dropped bool; + BEGIN + graphql_is_dropped = ( + SELECT ev.schema_name = 'graphql_public' + FROM pg_event_trigger_dropped_objects() AS ev + WHERE ev.schema_name = 'graphql_public' + ); + + IF graphql_is_dropped + THEN + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language plpgsql + as $$ + DECLARE + server_version float; + BEGIN + server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float); + + IF server_version >= 14 THEN + RETURN jsonb_build_object( + 
'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql extension is not enabled.' + ) + ) + ); + ELSE + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql is only available on projects running Postgres 14 onwards.' + ) + ) + ); + END IF; + END; + $$; + END IF; + + END; +$_$; + + +-- +-- Name: FUNCTION set_graphql_placeholder(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.set_graphql_placeholder() IS 'Reintroduces placeholder function for graphql_public.graphql'; + + +-- +-- Name: get_auth(text); Type: FUNCTION; Schema: pgbouncer; Owner: - +-- + +CREATE FUNCTION pgbouncer.get_auth(p_usename text) RETURNS TABLE(username text, password text) + LANGUAGE plpgsql SECURITY DEFINER + AS $_$ +begin + raise debug 'PgBouncer auth request: %', p_usename; + + return query + select + rolname::text, + case when rolvaliduntil < now() + then null + else rolpassword::text + end + from pg_authid + where rolname=$1 and rolcanlogin; +end; +$_$; + + +-- +-- Name: extension(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.extension(name text) RETURNS text + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +_filename text; +BEGIN + select string_to_array(name, '/') into _parts; + select _parts[array_length(_parts,1)] into _filename; + -- @todo return the last part instead of 2 + return split_part(_filename, '.', 2); +END +$$; + + +-- +-- Name: filename(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.filename(name text) RETURNS text + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[array_length(_parts,1)]; +END +$$; + + +-- +-- Name: foldername(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.foldername(name text) RETURNS text[] + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[1:array_length(_parts,1)-1]; +END +$$; + + +-- +-- Name: search(text, text, integer, integer, integer); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.search(prefix text, bucketname text, limits integer DEFAULT 100, levels integer DEFAULT 1, offsets integer DEFAULT 0) RETURNS TABLE(name text, id uuid, updated_at timestamp with time zone, created_at timestamp with time zone, last_accessed_at timestamp with time zone, metadata jsonb) + LANGUAGE plpgsql + AS $$ +DECLARE +_bucketId text; +BEGIN + -- will be replaced by migrations when server starts + -- saving space for cloud-init +END +$$; + + +SET default_tablespace = ''; + +SET default_table_access_method = heap; + +-- +-- Name: audit_log_entries; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.audit_log_entries ( + instance_id uuid, + id uuid NOT NULL, + payload json, + created_at timestamp with time zone +); + + +-- +-- Name: TABLE audit_log_entries; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.audit_log_entries IS 'Auth: Audit trail for user actions.'; + + +-- +-- Name: instances; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.instances ( + id uuid NOT NULL, + uuid uuid, + raw_base_config text, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE instances; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.instances IS 'Auth: Manages users across multiple sites.'; + + +-- +-- Name: refresh_tokens; 
Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.refresh_tokens ( + instance_id uuid, + id bigint NOT NULL, + token character varying(255), + user_id character varying(255), + revoked boolean, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE refresh_tokens; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.refresh_tokens IS 'Auth: Store of tokens used to refresh JWT tokens once they expire.'; + + +-- +-- Name: refresh_tokens_id_seq; Type: SEQUENCE; Schema: auth; Owner: - +-- + +CREATE SEQUENCE auth.refresh_tokens_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: refresh_tokens_id_seq; Type: SEQUENCE OWNED BY; Schema: auth; Owner: - +-- + +ALTER SEQUENCE auth.refresh_tokens_id_seq OWNED BY auth.refresh_tokens.id; + + +-- +-- Name: schema_migrations; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.schema_migrations ( + version character varying(255) NOT NULL +); + + +-- +-- Name: TABLE schema_migrations; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.schema_migrations IS 'Auth: Manages updates to the auth system.'; + + +-- +-- Name: users; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.users ( + instance_id uuid, + id uuid NOT NULL, + aud character varying(255), + role character varying(255), + email character varying(255), + encrypted_password character varying(255), + confirmed_at timestamp with time zone, + invited_at timestamp with time zone, + confirmation_token character varying(255), + confirmation_sent_at timestamp with time zone, + recovery_token character varying(255), + recovery_sent_at timestamp with time zone, + email_change_token character varying(255), + email_change character varying(255), + email_change_sent_at timestamp with time zone, + last_sign_in_at timestamp with time zone, + raw_app_meta_data jsonb, + raw_user_meta_data jsonb, + is_super_admin boolean, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE users; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.users IS 'Auth: Stores user login data within a secure schema.'; + + +-- +-- Name: schema_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.schema_migrations ( + version character varying(128) NOT NULL +); + + +-- +-- Name: buckets; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.buckets ( + id text NOT NULL, + name text NOT NULL, + owner uuid, + created_at timestamp with time zone DEFAULT now(), + updated_at timestamp with time zone DEFAULT now() +); + + +-- +-- Name: migrations; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.migrations ( + id integer NOT NULL, + name character varying(100) NOT NULL, + hash character varying(40) NOT NULL, + executed_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP +); + + +-- +-- Name: objects; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.objects ( + id uuid DEFAULT extensions.uuid_generate_v4() NOT NULL, + bucket_id text, + name text, + owner uuid, + created_at timestamp with time zone DEFAULT now(), + updated_at timestamp with time zone DEFAULT now(), + last_accessed_at timestamp with time zone DEFAULT now(), + metadata jsonb +); + + +-- +-- Name: refresh_tokens id; Type: DEFAULT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.refresh_tokens ALTER COLUMN id SET DEFAULT nextval('auth.refresh_tokens_id_seq'::regclass); + + +-- +-- Name: 
audit_log_entries audit_log_entries_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.audit_log_entries + ADD CONSTRAINT audit_log_entries_pkey PRIMARY KEY (id); + + +-- +-- Name: instances instances_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.instances + ADD CONSTRAINT instances_pkey PRIMARY KEY (id); + + +-- +-- Name: refresh_tokens refresh_tokens_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.refresh_tokens + ADD CONSTRAINT refresh_tokens_pkey PRIMARY KEY (id); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: users users_email_key; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.users + ADD CONSTRAINT users_email_key UNIQUE (email); + + +-- +-- Name: users users_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.users + ADD CONSTRAINT users_pkey PRIMARY KEY (id); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: buckets buckets_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.buckets + ADD CONSTRAINT buckets_pkey PRIMARY KEY (id); + + +-- +-- Name: migrations migrations_name_key; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.migrations + ADD CONSTRAINT migrations_name_key UNIQUE (name); + + +-- +-- Name: migrations migrations_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.migrations + ADD CONSTRAINT migrations_pkey PRIMARY KEY (id); + + +-- +-- Name: objects objects_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT objects_pkey PRIMARY KEY (id); + + +-- +-- Name: audit_logs_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX audit_logs_instance_id_idx ON auth.audit_log_entries USING btree (instance_id); + + +-- +-- Name: refresh_tokens_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_instance_id_idx ON auth.refresh_tokens USING btree (instance_id); + + +-- +-- Name: refresh_tokens_instance_id_user_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_instance_id_user_id_idx ON auth.refresh_tokens USING btree (instance_id, user_id); + + +-- +-- Name: refresh_tokens_token_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_token_idx ON auth.refresh_tokens USING btree (token); + + +-- +-- Name: users_instance_id_email_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX users_instance_id_email_idx ON auth.users USING btree (instance_id, email); + + +-- +-- Name: users_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX users_instance_id_idx ON auth.users USING btree (instance_id); + + +-- +-- Name: bname; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE UNIQUE INDEX bname ON storage.buckets USING btree (name); + + +-- +-- Name: bucketid_objname; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE UNIQUE INDEX bucketid_objname ON storage.objects USING btree (bucket_id, name); + + +-- +-- Name: name_prefix_search; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE INDEX name_prefix_search ON storage.objects 
USING btree (name text_pattern_ops); + + +-- +-- Name: buckets buckets_owner_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.buckets + ADD CONSTRAINT buckets_owner_fkey FOREIGN KEY (owner) REFERENCES auth.users(id); + + +-- +-- Name: objects objects_bucketId_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT "objects_bucketId_fkey" FOREIGN KEY (bucket_id) REFERENCES storage.buckets(id); + + +-- +-- Name: objects objects_owner_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT objects_owner_fkey FOREIGN KEY (owner) REFERENCES auth.users(id); + + +-- +-- Name: objects; Type: ROW SECURITY; Schema: storage; Owner: - +-- + +ALTER TABLE storage.objects ENABLE ROW LEVEL SECURITY; + +-- +-- Name: tealbase_realtime; Type: PUBLICATION; Schema: -; Owner: - +-- + +CREATE PUBLICATION tealbase_realtime WITH (publish = 'insert, update, delete, truncate'); + + +-- +-- Name: issue_graphql_placeholder; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_graphql_placeholder ON sql_drop + WHEN TAG IN ('DROP EXTENSION') + EXECUTE FUNCTION extensions.set_graphql_placeholder(); + + +-- +-- Name: issue_pg_cron_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_cron_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_cron_access(); + + +-- +-- Name: issue_pg_graphql_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_graphql_access ON ddl_command_end + WHEN TAG IN ('CREATE FUNCTION') + EXECUTE FUNCTION extensions.grant_pg_graphql_access(); + + +-- +-- Name: issue_pg_net_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_net_access(); + + +-- +-- Name: pgrst_ddl_watch; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER pgrst_ddl_watch ON ddl_command_end + EXECUTE FUNCTION extensions.pgrst_ddl_watch(); + + +-- +-- Name: pgrst_drop_watch; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER pgrst_drop_watch ON sql_drop + EXECUTE FUNCTION extensions.pgrst_drop_watch(); + + +-- +-- PostgreSQL database dump complete +-- + diff --git a/migrations/schema-orioledb-17.sql b/migrations/schema-orioledb-17.sql new file mode 100644 index 0000000..66e74ca --- /dev/null +++ b/migrations/schema-orioledb-17.sql @@ -0,0 +1,1014 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 17.5 +-- Dumped by pg_dump version 17.5 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET transaction_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET xmloption = content; +SET client_min_messages = warning; +SET row_security = off; + +-- +-- Name: auth; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA auth; + + +-- +-- Name: extensions; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA extensions; + + +-- +-- Name: graphql; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA graphql; + + +-- +-- Name: graphql_public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA graphql_public; + + +-- +-- Name: pgbouncer; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA pgbouncer; 
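+
+-- Sanity-check sketch (illustrative only; not part of the pg_dump output):
+-- the reserved schemas created above should all be visible in the system
+-- catalogs after restore. Assumes nothing beyond pg_catalog access.
+
+SELECT nspname
+  FROM pg_catalog.pg_namespace
+ WHERE nspname IN ('auth', 'extensions', 'graphql', 'graphql_public', 'pgbouncer');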
+ + +-- +-- Name: realtime; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA realtime; + + +-- +-- Name: storage; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA storage; + + +-- +-- Name: vault; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA vault; + + +-- +-- Name: orioledb; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS orioledb WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION orioledb; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION orioledb IS 'OrioleDB -- the next generation transactional engine'; + + +-- +-- Name: pg_graphql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pg_graphql WITH SCHEMA graphql; + + +-- +-- Name: EXTENSION pg_graphql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pg_graphql IS 'pg_graphql: GraphQL support'; + + +-- +-- Name: pg_stat_statements; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pg_stat_statements WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pg_stat_statements; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pg_stat_statements IS 'track planning and execution statistics of all SQL statements executed'; + + +-- +-- Name: pgcrypto; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pgcrypto WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pgcrypto; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pgcrypto IS 'cryptographic functions'; + + +-- +-- Name: tealbase_vault; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS tealbase_vault WITH SCHEMA vault; + + +-- +-- Name: EXTENSION tealbase_vault; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION tealbase_vault IS 'tealbase Vault Extension'; + + +-- +-- Name: uuid-ossp; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION "uuid-ossp"; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION "uuid-ossp" IS 'generate universally unique identifiers (UUIDs)'; + + +-- +-- Name: email(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.email() RETURNS text + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.email', true), '')::text; +$$; + + +-- +-- Name: role(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.role() RETURNS text + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.role', true), '')::text; +$$; + + +-- +-- Name: uid(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.uid() RETURNS uuid + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.sub', true), '')::uuid; +$$; + + +-- +-- Name: grant_pg_cron_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_cron_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + IF EXISTS ( + SELECT + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_cron' + ) + THEN + grant usage on schema cron to postgres with grant option; + + alter default privileges in schema cron grant all on tables to postgres with grant option; + alter default privileges in schema cron grant all on functions to postgres with grant option; + alter default privileges in schema cron grant all on sequences to postgres with grant option; + + alter default privileges for user tealbase_admin in schema cron 
grant all + on sequences to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on tables to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on functions to postgres with grant option; + + grant all privileges on all tables in schema cron to postgres with grant option; + revoke all on table cron.job from postgres; + grant select on table cron.job to postgres with grant option; + END IF; +END; +$$; + + +-- +-- Name: FUNCTION grant_pg_cron_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_cron_access() IS 'Grants access to pg_cron'; + + +-- +-- Name: grant_pg_graphql_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_graphql_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $_$ +DECLARE + func_is_graphql_resolve bool; +BEGIN + func_is_graphql_resolve = ( + SELECT n.proname = 'resolve' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_proc AS n + ON ev.objid = n.oid + ); + + IF func_is_graphql_resolve + THEN + -- Update public wrapper to pass all arguments through to the pg_graphql resolve func + DROP FUNCTION IF EXISTS graphql_public.graphql; + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language sql + as $$ + select graphql.resolve( + query := query, + variables := coalesce(variables, '{}'), + "operationName" := "operationName", + extensions := extensions + ); + $$; + + -- This hook executes when `graphql.resolve` is created. That is not necessarily the last + -- function in the extension so we need to grant permissions on existing entities AND + -- update default permissions to any others that are created after `graphql.resolve` + grant usage on schema graphql to postgres, anon, authenticated, service_role; + grant select on all tables in schema graphql to postgres, anon, authenticated, service_role; + grant execute on all functions in schema graphql to postgres, anon, authenticated, service_role; + grant all on all sequences in schema graphql to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role; + + -- Allow postgres role to allow granting usage on graphql and graphql_public schemas to custom roles + grant usage on schema graphql_public to postgres with grant option; + grant usage on schema graphql to postgres with grant option; + END IF; + +END; +$_$; + + +-- +-- Name: FUNCTION grant_pg_graphql_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_graphql_access() IS 'Grants access to pg_graphql'; + + +-- +-- Name: grant_pg_net_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_net_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_net' + ) + THEN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 
'tealbase_functions_admin' + ) + THEN + CREATE USER tealbase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + + GRANT USAGE ON SCHEMA net TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + + IF EXISTS ( + SELECT FROM pg_extension + WHERE extname = 'pg_net' + -- all versions in use on existing projects as of 2025-02-20 + -- version 0.12.0 onwards don't need these applied + AND extversion IN ('0.2', '0.6', '0.7', '0.7.1', '0.8', '0.10.0', '0.11.0') + ) THEN + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + END IF; + END IF; +END; +$$; + + +-- +-- Name: FUNCTION grant_pg_net_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_net_access() IS 'Grants access to pg_net'; + + +-- +-- Name: pgrst_ddl_watch(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.pgrst_ddl_watch() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +DECLARE + cmd record; +BEGIN + FOR cmd IN SELECT * FROM pg_event_trigger_ddl_commands() + LOOP + IF cmd.command_tag IN ( + 'CREATE SCHEMA', 'ALTER SCHEMA' + , 'CREATE TABLE', 'CREATE TABLE AS', 'SELECT INTO', 'ALTER TABLE' + , 'CREATE FOREIGN TABLE', 'ALTER FOREIGN TABLE' + , 'CREATE VIEW', 'ALTER VIEW' + , 'CREATE MATERIALIZED VIEW', 'ALTER MATERIALIZED VIEW' + , 'CREATE FUNCTION', 'ALTER FUNCTION' + , 'CREATE TRIGGER' + , 'CREATE TYPE', 'ALTER TYPE' + , 'CREATE RULE' + , 'COMMENT' + ) + -- don't notify in case of CREATE TEMP table or other objects created on pg_temp + AND cmd.schema_name is distinct from 'pg_temp' + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$; + + +-- +-- Name: pgrst_drop_watch(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.pgrst_drop_watch() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +DECLARE + obj record; +BEGIN + FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() + LOOP + IF obj.object_type IN ( + 'schema' + , 'table' + , 'foreign table' + , 'view' + , 'materialized view' + , 'function' + , 'trigger' + , 'type' + , 'rule' + ) + AND obj.is_temporary IS false -- no pg_temp objects + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$; + + +-- +-- Name: set_graphql_placeholder(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.set_graphql_placeholder() RETURNS event_trigger + LANGUAGE plpgsql + AS $_$ + DECLARE + graphql_is_dropped bool; + 
BEGIN + graphql_is_dropped = ( + SELECT ev.schema_name = 'graphql_public' + FROM pg_event_trigger_dropped_objects() AS ev + WHERE ev.schema_name = 'graphql_public' + ); + + IF graphql_is_dropped + THEN + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language plpgsql + as $$ + DECLARE + server_version float; + BEGIN + server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float); + + IF server_version >= 14 THEN + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql extension is not enabled.' + ) + ) + ); + ELSE + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql is only available on projects running Postgres 14 onwards.' + ) + ) + ); + END IF; + END; + $$; + END IF; + + END; +$_$; + + +-- +-- Name: FUNCTION set_graphql_placeholder(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.set_graphql_placeholder() IS 'Reintroduces placeholder function for graphql_public.graphql'; + + +-- +-- Name: get_auth(text); Type: FUNCTION; Schema: pgbouncer; Owner: - +-- + +CREATE FUNCTION pgbouncer.get_auth(p_usename text) RETURNS TABLE(username text, password text) + LANGUAGE plpgsql SECURITY DEFINER + AS $_$ +begin + raise debug 'PgBouncer auth request: %', p_usename; + + return query + select + rolname::text, + case when rolvaliduntil < now() + then null + else rolpassword::text + end + from pg_authid + where rolname=$1 and rolcanlogin; +end; +$_$; + + +-- +-- Name: extension(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.extension(name text) RETURNS text + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +_filename text; +BEGIN + select string_to_array(name, '/') into _parts; + select _parts[array_length(_parts,1)] into _filename; + -- @todo return the last part instead of 2 + return split_part(_filename, '.', 2); +END +$$; + + +-- +-- Name: filename(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.filename(name text) RETURNS text + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[array_length(_parts,1)]; +END +$$; + + +-- +-- Name: foldername(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.foldername(name text) RETURNS text[] + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[1:array_length(_parts,1)-1]; +END +$$; + + +-- +-- Name: search(text, text, integer, integer, integer); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.search(prefix text, bucketname text, limits integer DEFAULT 100, levels integer DEFAULT 1, offsets integer DEFAULT 0) RETURNS TABLE(name text, id uuid, updated_at timestamp with time zone, created_at timestamp with time zone, last_accessed_at timestamp with time zone, metadata jsonb) + LANGUAGE plpgsql + AS $$ +DECLARE +_bucketId text; +BEGIN + -- will be replaced by migrations when server starts + -- saving space for cloud-init +END +$$; + + +SET default_tablespace = ''; + +SET default_table_access_method = orioledb; + +-- +-- Name: audit_log_entries; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.audit_log_entries ( + instance_id uuid, + id uuid NOT NULL, + payload json, + created_at timestamp with 
time zone +); + + +-- +-- Name: TABLE audit_log_entries; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.audit_log_entries IS 'Auth: Audit trail for user actions.'; + + +-- +-- Name: instances; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.instances ( + id uuid NOT NULL, + uuid uuid, + raw_base_config text, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE instances; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.instances IS 'Auth: Manages users across multiple sites.'; + + +-- +-- Name: refresh_tokens; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.refresh_tokens ( + instance_id uuid, + id bigint NOT NULL, + token character varying(255), + user_id character varying(255), + revoked boolean, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE refresh_tokens; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.refresh_tokens IS 'Auth: Store of tokens used to refresh JWT tokens once they expire.'; + + +-- +-- Name: refresh_tokens_id_seq; Type: SEQUENCE; Schema: auth; Owner: - +-- + +CREATE SEQUENCE auth.refresh_tokens_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: refresh_tokens_id_seq; Type: SEQUENCE OWNED BY; Schema: auth; Owner: - +-- + +ALTER SEQUENCE auth.refresh_tokens_id_seq OWNED BY auth.refresh_tokens.id; + + +-- +-- Name: schema_migrations; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.schema_migrations ( + version character varying(255) NOT NULL +); + + +-- +-- Name: TABLE schema_migrations; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.schema_migrations IS 'Auth: Manages updates to the auth system.'; + + +-- +-- Name: users; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.users ( + instance_id uuid, + id uuid NOT NULL, + aud character varying(255), + role character varying(255), + email character varying(255), + encrypted_password character varying(255), + confirmed_at timestamp with time zone, + invited_at timestamp with time zone, + confirmation_token character varying(255), + confirmation_sent_at timestamp with time zone, + recovery_token character varying(255), + recovery_sent_at timestamp with time zone, + email_change_token character varying(255), + email_change character varying(255), + email_change_sent_at timestamp with time zone, + last_sign_in_at timestamp with time zone, + raw_app_meta_data jsonb, + raw_user_meta_data jsonb, + is_super_admin boolean, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE users; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.users IS 'Auth: Stores user login data within a secure schema.'; + + +-- +-- Name: schema_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.schema_migrations ( + version character varying(128) NOT NULL +); + + +-- +-- Name: buckets; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.buckets ( + id text NOT NULL, + name text NOT NULL, + owner uuid, + created_at timestamp with time zone DEFAULT now(), + updated_at timestamp with time zone DEFAULT now() +); + + +-- +-- Name: migrations; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.migrations ( + id integer NOT NULL, + name character varying(100) NOT NULL, + hash character varying(40) NOT NULL, + executed_at timestamp without time zone DEFAULT 
CURRENT_TIMESTAMP +); + + +-- +-- Name: objects; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.objects ( + id uuid DEFAULT extensions.uuid_generate_v4() NOT NULL, + bucket_id text, + name text, + owner uuid, + created_at timestamp with time zone DEFAULT now(), + updated_at timestamp with time zone DEFAULT now(), + last_accessed_at timestamp with time zone DEFAULT now(), + metadata jsonb +); + + +-- +-- Name: refresh_tokens id; Type: DEFAULT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.refresh_tokens ALTER COLUMN id SET DEFAULT nextval('auth.refresh_tokens_id_seq'::regclass); + + +-- +-- Name: audit_log_entries audit_log_entries_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.audit_log_entries + ADD CONSTRAINT audit_log_entries_pkey PRIMARY KEY (id); + + +-- +-- Name: instances instances_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.instances + ADD CONSTRAINT instances_pkey PRIMARY KEY (id); + + +-- +-- Name: refresh_tokens refresh_tokens_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.refresh_tokens + ADD CONSTRAINT refresh_tokens_pkey PRIMARY KEY (id); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: users users_email_key; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.users + ADD CONSTRAINT users_email_key UNIQUE (email); + + +-- +-- Name: users users_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.users + ADD CONSTRAINT users_pkey PRIMARY KEY (id); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: buckets buckets_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.buckets + ADD CONSTRAINT buckets_pkey PRIMARY KEY (id); + + +-- +-- Name: migrations migrations_name_key; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.migrations + ADD CONSTRAINT migrations_name_key UNIQUE (name); + + +-- +-- Name: migrations migrations_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.migrations + ADD CONSTRAINT migrations_pkey PRIMARY KEY (id); + + +-- +-- Name: objects objects_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT objects_pkey PRIMARY KEY (id); + + +-- +-- Name: audit_logs_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX audit_logs_instance_id_idx ON auth.audit_log_entries USING btree (instance_id); + + +-- +-- Name: refresh_tokens_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_instance_id_idx ON auth.refresh_tokens USING btree (instance_id); + + +-- +-- Name: refresh_tokens_instance_id_user_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_instance_id_user_id_idx ON auth.refresh_tokens USING btree (instance_id, user_id); + + +-- +-- Name: refresh_tokens_token_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_token_idx ON auth.refresh_tokens USING btree (token); + + +-- +-- Name: users_instance_id_email_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX users_instance_id_email_idx ON auth.users USING 
btree (instance_id, email); + + +-- +-- Name: users_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX users_instance_id_idx ON auth.users USING btree (instance_id); + + +-- +-- Name: bname; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE UNIQUE INDEX bname ON storage.buckets USING btree (name); + + +-- +-- Name: bucketid_objname; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE UNIQUE INDEX bucketid_objname ON storage.objects USING btree (bucket_id, name); + + +-- +-- Name: name_prefix_search; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE INDEX name_prefix_search ON storage.objects USING btree (name text_pattern_ops); + + +-- +-- Name: buckets buckets_owner_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.buckets + ADD CONSTRAINT buckets_owner_fkey FOREIGN KEY (owner) REFERENCES auth.users(id); + + +-- +-- Name: objects objects_bucketId_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT "objects_bucketId_fkey" FOREIGN KEY (bucket_id) REFERENCES storage.buckets(id); + + +-- +-- Name: objects objects_owner_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT objects_owner_fkey FOREIGN KEY (owner) REFERENCES auth.users(id); + + +-- +-- Name: objects; Type: ROW SECURITY; Schema: storage; Owner: - +-- + +ALTER TABLE storage.objects ENABLE ROW LEVEL SECURITY; + +-- +-- Name: tealbase_realtime; Type: PUBLICATION; Schema: -; Owner: - +-- + +CREATE PUBLICATION tealbase_realtime WITH (publish = 'insert, update, delete, truncate'); + + +-- +-- Name: issue_graphql_placeholder; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_graphql_placeholder ON sql_drop + WHEN TAG IN ('DROP EXTENSION') + EXECUTE FUNCTION extensions.set_graphql_placeholder(); + + +-- +-- Name: issue_pg_cron_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_cron_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_cron_access(); + + +-- +-- Name: issue_pg_graphql_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_graphql_access ON ddl_command_end + WHEN TAG IN ('CREATE FUNCTION') + EXECUTE FUNCTION extensions.grant_pg_graphql_access(); + + +-- +-- Name: issue_pg_net_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_net_access(); + + +-- +-- Name: pgrst_ddl_watch; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER pgrst_ddl_watch ON ddl_command_end + EXECUTE FUNCTION extensions.pgrst_ddl_watch(); + + +-- +-- Name: pgrst_drop_watch; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER pgrst_drop_watch ON sql_drop + EXECUTE FUNCTION extensions.pgrst_drop_watch(); + + +-- +-- PostgreSQL database dump complete +-- + diff --git a/migrations/schema.sql b/migrations/schema.sql new file mode 100644 index 0000000..53682b9 --- /dev/null +++ b/migrations/schema.sql @@ -0,0 +1,997 @@ +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET xmloption = content; +SET client_min_messages = warning; +SET row_security = off; + +-- +-- Name: auth; 
Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA auth; + + +-- +-- Name: extensions; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA extensions; + + +-- +-- Name: graphql; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA graphql; + + +-- +-- Name: graphql_public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA graphql_public; + + +-- +-- Name: pgbouncer; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA pgbouncer; + + +-- +-- Name: realtime; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA realtime; + + +-- +-- Name: storage; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA storage; + + +-- +-- Name: vault; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA vault; + + +-- +-- Name: pg_graphql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pg_graphql WITH SCHEMA graphql; + + +-- +-- Name: EXTENSION pg_graphql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pg_graphql IS 'pg_graphql: GraphQL support'; + + +-- +-- Name: pg_stat_statements; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pg_stat_statements WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pg_stat_statements; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pg_stat_statements IS 'track planning and execution statistics of all SQL statements executed'; + + +-- +-- Name: pgcrypto; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pgcrypto WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pgcrypto; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pgcrypto IS 'cryptographic functions'; + + +-- +-- Name: pgjwt; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pgjwt WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pgjwt; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pgjwt IS 'JSON Web Token API for Postgresql'; + + +-- +-- Name: tealbase_vault; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS tealbase_vault WITH SCHEMA vault; + + +-- +-- Name: EXTENSION tealbase_vault; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION tealbase_vault IS 'tealbase Vault Extension'; + + +-- +-- Name: uuid-ossp; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION "uuid-ossp"; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION "uuid-ossp" IS 'generate universally unique identifiers (UUIDs)'; + + +-- +-- Name: email(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.email() RETURNS text + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.email', true), '')::text; +$$; + + +-- +-- Name: role(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.role() RETURNS text + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.role', true), '')::text; +$$; + + +-- +-- Name: uid(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.uid() RETURNS uuid + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.sub', true), '')::uuid; +$$; + + +-- +-- Name: grant_pg_cron_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_cron_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + IF EXISTS ( + SELECT + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_cron' + ) + 
THEN + grant usage on schema cron to postgres with grant option; + + alter default privileges in schema cron grant all on tables to postgres with grant option; + alter default privileges in schema cron grant all on functions to postgres with grant option; + alter default privileges in schema cron grant all on sequences to postgres with grant option; + + alter default privileges for user tealbase_admin in schema cron grant all + on sequences to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on tables to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on functions to postgres with grant option; + + grant all privileges on all tables in schema cron to postgres with grant option; + revoke all on table cron.job from postgres; + grant select on table cron.job to postgres with grant option; + END IF; +END; +$$; + + +-- +-- Name: FUNCTION grant_pg_cron_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_cron_access() IS 'Grants access to pg_cron'; + + +-- +-- Name: grant_pg_graphql_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_graphql_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $_$ +DECLARE + func_is_graphql_resolve bool; +BEGIN + func_is_graphql_resolve = ( + SELECT n.proname = 'resolve' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_proc AS n + ON ev.objid = n.oid + ); + + IF func_is_graphql_resolve + THEN + -- Update public wrapper to pass all arguments through to the pg_graphql resolve func + DROP FUNCTION IF EXISTS graphql_public.graphql; + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language sql + as $$ + select graphql.resolve( + query := query, + variables := coalesce(variables, '{}'), + "operationName" := "operationName", + extensions := extensions + ); + $$; + + -- This hook executes when `graphql.resolve` is created. 
That is not necessarily the last + -- function in the extension so we need to grant permissions on existing entities AND + -- update default permissions to any others that are created after `graphql.resolve` + grant usage on schema graphql to postgres, anon, authenticated, service_role; + grant select on all tables in schema graphql to postgres, anon, authenticated, service_role; + grant execute on all functions in schema graphql to postgres, anon, authenticated, service_role; + grant all on all sequences in schema graphql to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role; + + -- Allow postgres role to allow granting usage on graphql and graphql_public schemas to custom roles + grant usage on schema graphql_public to postgres with grant option; + grant usage on schema graphql to postgres with grant option; + END IF; + +END; +$_$; + + +-- +-- Name: FUNCTION grant_pg_graphql_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_graphql_access() IS 'Grants access to pg_graphql'; + + +-- +-- Name: grant_pg_net_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_net_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_net' + ) + THEN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'tealbase_functions_admin' + ) + THEN + CREATE USER tealbase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + + GRANT USAGE ON SCHEMA net TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + END IF; +END; +$$; + + +-- +-- Name: FUNCTION grant_pg_net_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_net_access() IS 'Grants access to pg_net'; + + +-- +-- Name: pgrst_ddl_watch(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.pgrst_ddl_watch() RETURNS event_trigger + LANGUAGE 
plpgsql + AS $$ +DECLARE + cmd record; +BEGIN + FOR cmd IN SELECT * FROM pg_event_trigger_ddl_commands() + LOOP + IF cmd.command_tag IN ( + 'CREATE SCHEMA', 'ALTER SCHEMA' + , 'CREATE TABLE', 'CREATE TABLE AS', 'SELECT INTO', 'ALTER TABLE' + , 'CREATE FOREIGN TABLE', 'ALTER FOREIGN TABLE' + , 'CREATE VIEW', 'ALTER VIEW' + , 'CREATE MATERIALIZED VIEW', 'ALTER MATERIALIZED VIEW' + , 'CREATE FUNCTION', 'ALTER FUNCTION' + , 'CREATE TRIGGER' + , 'CREATE TYPE', 'ALTER TYPE' + , 'CREATE RULE' + , 'COMMENT' + ) + -- don't notify in case of CREATE TEMP table or other objects created on pg_temp + AND cmd.schema_name is distinct from 'pg_temp' + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$; + + +-- +-- Name: pgrst_drop_watch(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.pgrst_drop_watch() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +DECLARE + obj record; +BEGIN + FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() + LOOP + IF obj.object_type IN ( + 'schema' + , 'table' + , 'foreign table' + , 'view' + , 'materialized view' + , 'function' + , 'trigger' + , 'type' + , 'rule' + ) + AND obj.is_temporary IS false -- no pg_temp objects + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$; + + +-- +-- Name: set_graphql_placeholder(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.set_graphql_placeholder() RETURNS event_trigger + LANGUAGE plpgsql + AS $_$ + DECLARE + graphql_is_dropped bool; + BEGIN + graphql_is_dropped = ( + SELECT ev.schema_name = 'graphql_public' + FROM pg_event_trigger_dropped_objects() AS ev + WHERE ev.schema_name = 'graphql_public' + ); + + IF graphql_is_dropped + THEN + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language plpgsql + as $$ + DECLARE + server_version float; + BEGIN + server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float); + + IF server_version >= 14 THEN + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql extension is not enabled.' + ) + ) + ); + ELSE + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql is only available on projects running Postgres 14 onwards.' 
+ ) + ) + ); + END IF; + END; + $$; + END IF; + + END; +$_$; + + +-- +-- Name: FUNCTION set_graphql_placeholder(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.set_graphql_placeholder() IS 'Reintroduces placeholder function for graphql_public.graphql'; + + +-- +-- Name: get_auth(text); Type: FUNCTION; Schema: pgbouncer; Owner: - +-- + +CREATE FUNCTION pgbouncer.get_auth(p_usename text) RETURNS TABLE(username text, password text) + LANGUAGE plpgsql SECURITY DEFINER + AS $$ +BEGIN + RAISE WARNING 'PgBouncer auth request: %', p_usename; + + RETURN QUERY + SELECT usename::TEXT, passwd::TEXT FROM pg_catalog.pg_shadow + WHERE usename = p_usename; +END; +$$; + + +-- +-- Name: extension(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.extension(name text) RETURNS text + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +_filename text; +BEGIN + select string_to_array(name, '/') into _parts; + select _parts[array_length(_parts,1)] into _filename; + -- @todo return the last part instead of 2 + return split_part(_filename, '.', 2); +END +$$; + + +-- +-- Name: filename(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.filename(name text) RETURNS text + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[array_length(_parts,1)]; +END +$$; + + +-- +-- Name: foldername(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.foldername(name text) RETURNS text[] + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[1:array_length(_parts,1)-1]; +END +$$; + + +-- +-- Name: search(text, text, integer, integer, integer); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.search(prefix text, bucketname text, limits integer DEFAULT 100, levels integer DEFAULT 1, offsets integer DEFAULT 0) RETURNS TABLE(name text, id uuid, updated_at timestamp with time zone, created_at timestamp with time zone, last_accessed_at timestamp with time zone, metadata jsonb) + LANGUAGE plpgsql + AS $$ +DECLARE +_bucketId text; +BEGIN + -- will be replaced by migrations when server starts + -- saving space for cloud-init +END +$$; + + +SET default_tablespace = ''; + +SET default_table_access_method = heap; + +-- +-- Name: audit_log_entries; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.audit_log_entries ( + instance_id uuid, + id uuid NOT NULL, + payload json, + created_at timestamp with time zone +); + + +-- +-- Name: TABLE audit_log_entries; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.audit_log_entries IS 'Auth: Audit trail for user actions.'; + + +-- +-- Name: instances; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.instances ( + id uuid NOT NULL, + uuid uuid, + raw_base_config text, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE instances; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.instances IS 'Auth: Manages users across multiple sites.'; + + +-- +-- Name: refresh_tokens; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.refresh_tokens ( + instance_id uuid, + id bigint NOT NULL, + token character varying(255), + user_id character varying(255), + revoked boolean, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE refresh_tokens; Type: COMMENT; Schema: auth; 
Owner: - +-- + +COMMENT ON TABLE auth.refresh_tokens IS 'Auth: Store of tokens used to refresh JWT tokens once they expire.'; + + +-- +-- Name: refresh_tokens_id_seq; Type: SEQUENCE; Schema: auth; Owner: - +-- + +CREATE SEQUENCE auth.refresh_tokens_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: refresh_tokens_id_seq; Type: SEQUENCE OWNED BY; Schema: auth; Owner: - +-- + +ALTER SEQUENCE auth.refresh_tokens_id_seq OWNED BY auth.refresh_tokens.id; + + +-- +-- Name: schema_migrations; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.schema_migrations ( + version character varying(255) NOT NULL +); + + +-- +-- Name: TABLE schema_migrations; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.schema_migrations IS 'Auth: Manages updates to the auth system.'; + + +-- +-- Name: users; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.users ( + instance_id uuid, + id uuid NOT NULL, + aud character varying(255), + role character varying(255), + email character varying(255), + encrypted_password character varying(255), + confirmed_at timestamp with time zone, + invited_at timestamp with time zone, + confirmation_token character varying(255), + confirmation_sent_at timestamp with time zone, + recovery_token character varying(255), + recovery_sent_at timestamp with time zone, + email_change_token character varying(255), + email_change character varying(255), + email_change_sent_at timestamp with time zone, + last_sign_in_at timestamp with time zone, + raw_app_meta_data jsonb, + raw_user_meta_data jsonb, + is_super_admin boolean, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE users; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.users IS 'Auth: Stores user login data within a secure schema.'; + + +-- +-- Name: schema_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.schema_migrations ( + version character varying(128) NOT NULL +); + + +-- +-- Name: buckets; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.buckets ( + id text NOT NULL, + name text NOT NULL, + owner uuid, + created_at timestamp with time zone DEFAULT now(), + updated_at timestamp with time zone DEFAULT now() +); + + +-- +-- Name: migrations; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.migrations ( + id integer NOT NULL, + name character varying(100) NOT NULL, + hash character varying(40) NOT NULL, + executed_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP +); + + +-- +-- Name: objects; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.objects ( + id uuid DEFAULT extensions.uuid_generate_v4() NOT NULL, + bucket_id text, + name text, + owner uuid, + created_at timestamp with time zone DEFAULT now(), + updated_at timestamp with time zone DEFAULT now(), + last_accessed_at timestamp with time zone DEFAULT now(), + metadata jsonb +); + + +-- +-- Name: refresh_tokens id; Type: DEFAULT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.refresh_tokens ALTER COLUMN id SET DEFAULT nextval('auth.refresh_tokens_id_seq'::regclass); + + +-- +-- Name: audit_log_entries audit_log_entries_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.audit_log_entries + ADD CONSTRAINT audit_log_entries_pkey PRIMARY KEY (id); + + +-- +-- Name: instances instances_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.instances + ADD CONSTRAINT instances_pkey PRIMARY 
KEY (id); + + +-- +-- Name: refresh_tokens refresh_tokens_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.refresh_tokens + ADD CONSTRAINT refresh_tokens_pkey PRIMARY KEY (id); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: users users_email_key; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.users + ADD CONSTRAINT users_email_key UNIQUE (email); + + +-- +-- Name: users users_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.users + ADD CONSTRAINT users_pkey PRIMARY KEY (id); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: buckets buckets_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.buckets + ADD CONSTRAINT buckets_pkey PRIMARY KEY (id); + + +-- +-- Name: migrations migrations_name_key; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.migrations + ADD CONSTRAINT migrations_name_key UNIQUE (name); + + +-- +-- Name: migrations migrations_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.migrations + ADD CONSTRAINT migrations_pkey PRIMARY KEY (id); + + +-- +-- Name: objects objects_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT objects_pkey PRIMARY KEY (id); + + +-- +-- Name: audit_logs_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX audit_logs_instance_id_idx ON auth.audit_log_entries USING btree (instance_id); + + +-- +-- Name: refresh_tokens_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_instance_id_idx ON auth.refresh_tokens USING btree (instance_id); + + +-- +-- Name: refresh_tokens_instance_id_user_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_instance_id_user_id_idx ON auth.refresh_tokens USING btree (instance_id, user_id); + + +-- +-- Name: refresh_tokens_token_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_token_idx ON auth.refresh_tokens USING btree (token); + + +-- +-- Name: users_instance_id_email_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX users_instance_id_email_idx ON auth.users USING btree (instance_id, email); + + +-- +-- Name: users_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX users_instance_id_idx ON auth.users USING btree (instance_id); + + +-- +-- Name: bname; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE UNIQUE INDEX bname ON storage.buckets USING btree (name); + + +-- +-- Name: bucketid_objname; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE UNIQUE INDEX bucketid_objname ON storage.objects USING btree (bucket_id, name); + + +-- +-- Name: name_prefix_search; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE INDEX name_prefix_search ON storage.objects USING btree (name text_pattern_ops); + + +-- +-- Name: buckets buckets_owner_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.buckets + ADD CONSTRAINT buckets_owner_fkey FOREIGN KEY (owner) REFERENCES auth.users(id); + + +-- +-- Name: objects objects_bucketId_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + 
+ALTER TABLE ONLY storage.objects + ADD CONSTRAINT "objects_bucketId_fkey" FOREIGN KEY (bucket_id) REFERENCES storage.buckets(id); + + +-- +-- Name: objects objects_owner_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT objects_owner_fkey FOREIGN KEY (owner) REFERENCES auth.users(id); + + +-- +-- Name: objects; Type: ROW SECURITY; Schema: storage; Owner: - +-- + +ALTER TABLE storage.objects ENABLE ROW LEVEL SECURITY; + +-- +-- Name: tealbase_realtime; Type: PUBLICATION; Schema: -; Owner: - +-- + +CREATE PUBLICATION tealbase_realtime WITH (publish = 'insert, update, delete, truncate'); + + +-- +-- Name: issue_graphql_placeholder; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_graphql_placeholder ON sql_drop + WHEN TAG IN ('DROP EXTENSION') + EXECUTE FUNCTION extensions.set_graphql_placeholder(); + + +-- +-- Name: issue_pg_cron_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_cron_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_cron_access(); + + +-- +-- Name: issue_pg_graphql_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_graphql_access ON ddl_command_end + WHEN TAG IN ('CREATE FUNCTION') + EXECUTE FUNCTION extensions.grant_pg_graphql_access(); + + +-- +-- Name: issue_pg_net_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_net_access(); + + +-- +-- Name: pgrst_ddl_watch; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER pgrst_ddl_watch ON ddl_command_end + EXECUTE FUNCTION extensions.pgrst_ddl_watch(); + + +-- +-- Name: pgrst_drop_watch; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER pgrst_drop_watch ON sql_drop + EXECUTE FUNCTION extensions.pgrst_drop_watch(); + + +-- +-- PostgreSQL database dump complete +-- + + +-- +-- Dbmate schema migrations +-- + diff --git a/migrations/tests/database/exists.sql b/migrations/tests/database/exists.sql new file mode 100644 index 0000000..bc19cd3 --- /dev/null +++ b/migrations/tests/database/exists.sql @@ -0,0 +1,9 @@ + +SELECT has_schema('public'); +SELECT has_schema('auth'); +SELECT has_schema('pgbouncer'); +SELECT has_schema('extensions'); +SELECT has_schema('graphql'); +SELECT has_schema('graphql_public'); +SELECT has_schema('realtime'); +SELECT has_schema('storage'); diff --git a/migrations/tests/database/privs.sql b/migrations/tests/database/privs.sql new file mode 100644 index 0000000..bed44b2 --- /dev/null +++ b/migrations/tests/database/privs.sql @@ -0,0 +1,29 @@ +SELECT database_privs_are( + 'postgres', 'postgres', ARRAY['CONNECT', 'TEMPORARY', 'CREATE'] +); + +-- Verify public schema privileges +SELECT schema_privs_are('public', 'postgres', array['CREATE', 'USAGE']); +SELECT schema_privs_are('public', 'anon', array['USAGE']); +SELECT schema_privs_are('public', 'authenticated', array['USAGE']); +SELECT schema_privs_are('public', 'service_role', array['USAGE']); + +set role postgres; +create table test_priv(); +SELECT table_owner_is('test_priv', 'postgres'); +SELECT table_privs_are('test_priv', 'tealbase_admin', array['DELETE', 'INSERT', 'REFERENCES', 'SELECT', 'TRIGGER', 'TRUNCATE', 'UPDATE']); +SELECT table_privs_are('test_priv', 'postgres', array['DELETE', 'INSERT', 'REFERENCES', 'SELECT', 'TRIGGER', 'TRUNCATE', 'UPDATE']); +SELECT 
table_privs_are('test_priv', 'anon', array['DELETE', 'INSERT', 'REFERENCES', 'SELECT', 'TRIGGER', 'TRUNCATE', 'UPDATE']); +SELECT table_privs_are('test_priv', 'authenticated', array['DELETE', 'INSERT', 'REFERENCES', 'SELECT', 'TRIGGER', 'TRUNCATE', 'UPDATE']); +SELECT table_privs_are('test_priv', 'service_role', array['DELETE', 'INSERT', 'REFERENCES', 'SELECT', 'TRIGGER', 'TRUNCATE', 'UPDATE']); +reset role; + +-- Verify extensions schema privileges +SELECT schema_privs_are('extensions', 'postgres', array['CREATE', 'USAGE']); +SELECT schema_privs_are('extensions', 'anon', array['USAGE']); +SELECT schema_privs_are('extensions', 'authenticated', array['USAGE']); +SELECT schema_privs_are('extensions', 'service_role', array['USAGE']); + +-- Role memberships +SELECT is_member_of('pg_read_all_data', 'postgres'); +SELECT is_member_of('pg_signal_backend', 'postgres'); diff --git a/migrations/tests/database/test.sql b/migrations/tests/database/test.sql new file mode 100644 index 0000000..465c38a --- /dev/null +++ b/migrations/tests/database/test.sql @@ -0,0 +1,3 @@ + +\ir exists.sql +\ir privs.sql diff --git a/migrations/tests/extensions/01-postgis.sql b/migrations/tests/extensions/01-postgis.sql new file mode 100644 index 0000000..b2f7ba8 --- /dev/null +++ b/migrations/tests/extensions/01-postgis.sql @@ -0,0 +1,38 @@ +begin; +do $_$ +begin + if not exists (select 1 from pg_extension where extname = 'orioledb') then + -- create postgis tiger as tealbase_admin + create extension if not exists postgis_tiger_geocoder cascade; + + -- \ir ansible/files/postgresql_extension_custom_scripts/postgis_tiger_geocoder/after-create.sql + grant usage on schema tiger, tiger_data to postgres with grant option; + grant all privileges on all tables in schema tiger, tiger_data to postgres with grant option; + grant all privileges on all routines in schema tiger, tiger_data to postgres with grant option; + grant all privileges on all sequences in schema tiger, tiger_data to postgres with grant option; + alter default privileges in schema tiger, tiger_data grant all on tables to postgres with grant option; + alter default privileges in schema tiger, tiger_data grant all on routines to postgres with grant option; + alter default privileges in schema tiger, tiger_data grant all on sequences to postgres with grant option; + SET search_path TO extensions, public, tiger, tiger_data; + -- postgres role should have access + set local role postgres; + perform tiger.pprint_addy(tiger.pagc_normalize_address('710 E Ben White Blvd, Austin, TX 78704')); + + -- other roles can be granted access + grant usage on schema tiger, tiger_data to authenticated; + grant select on all tables in schema tiger, tiger_data to authenticated; + grant execute on all routines in schema tiger, tiger_data to authenticated; + + -- authenticated role should have access now + set local role authenticated; + perform tiger.pprint_addy(tiger.pagc_normalize_address('710 E Ben White Blvd, Austin, TX 78704')); + reset role; + + -- postgres role should have access to address_standardizer_data_us + set local role postgres; + perform 1 from us_lex; + reset role; + end if; +end +$_$; +rollback; diff --git a/migrations/tests/extensions/02-pgrouting.sql b/migrations/tests/extensions/02-pgrouting.sql new file mode 100644 index 0000000..f8d4786 --- /dev/null +++ b/migrations/tests/extensions/02-pgrouting.sql @@ -0,0 +1,9 @@ +begin; +do $_$ +begin + if not exists (select 1 from pg_extension where extname = 'orioledb') then + create extension if not exists pgrouting with 
schema "extensions" cascade; + end if; +end +$_$; +rollback; diff --git a/migrations/tests/extensions/03-pgtap.sql b/migrations/tests/extensions/03-pgtap.sql new file mode 100644 index 0000000..ddce974 --- /dev/null +++ b/migrations/tests/extensions/03-pgtap.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists pgtap with schema "extensions"; +ROLLBACK; diff --git a/migrations/tests/extensions/04-pg_cron.sql b/migrations/tests/extensions/04-pg_cron.sql new file mode 100644 index 0000000..6a8a067 --- /dev/null +++ b/migrations/tests/extensions/04-pg_cron.sql @@ -0,0 +1,25 @@ +BEGIN; +-- create cron extension as tealbase_admin +create extension if not exists pg_cron; + +-- \ir migrations/db/init-scripts/00000000000003-post-setup.sql +grant usage on schema cron to postgres with grant option; +alter default privileges in schema cron grant all on tables to postgres with grant option; +alter default privileges in schema cron grant all on routines to postgres with grant option; +alter default privileges in schema cron grant all on sequences to postgres with grant option; +grant all privileges on all tables in schema cron to postgres with grant option; +grant all privileges on all routines in schema cron to postgres with grant option; +grant all privileges on all sequences in schema cron to postgres with grant option; + +-- postgres role should have access +set local role postgres; +select * from cron.job; + +-- other roles can be granted access +grant usage on schema cron to authenticated; +grant select on all tables in schema cron to authenticated; + +-- authenticated role should have access now +set local role authenticated; +select * from cron.job; +ROLLBACK; diff --git a/migrations/tests/extensions/05-pgaudit.sql b/migrations/tests/extensions/05-pgaudit.sql new file mode 100644 index 0000000..70ee578 --- /dev/null +++ b/migrations/tests/extensions/05-pgaudit.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists pgaudit with schema "extensions"; +ROLLBACK; diff --git a/migrations/tests/extensions/06-pgjwt.sql b/migrations/tests/extensions/06-pgjwt.sql new file mode 100644 index 0000000..31716d3 --- /dev/null +++ b/migrations/tests/extensions/06-pgjwt.sql @@ -0,0 +1,10 @@ +BEGIN; +do $$ +begin + if exists (select 1 from pg_available_extensions where name = 'pgjwt') then + if not exists (select 1 from pg_extension where extname = 'pgjwt') then + create extension if not exists pgjwt with schema "extensions" cascade; + end if; + end if; +end $$; +ROLLBACK; diff --git a/migrations/tests/extensions/07-pgsql-http.sql b/migrations/tests/extensions/07-pgsql-http.sql new file mode 100644 index 0000000..8c37feb --- /dev/null +++ b/migrations/tests/extensions/07-pgsql-http.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists http with schema "extensions"; +ROLLBACK; diff --git a/migrations/tests/extensions/08-plpgsql_check.sql b/migrations/tests/extensions/08-plpgsql_check.sql new file mode 100644 index 0000000..0bc8e8b --- /dev/null +++ b/migrations/tests/extensions/08-plpgsql_check.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists plpgsql_check with schema "extensions"; +ROLLBACK; diff --git a/migrations/tests/extensions/09-pg-safeupdate.sql b/migrations/tests/extensions/09-pg-safeupdate.sql new file mode 100644 index 0000000..c08ec2e --- /dev/null +++ b/migrations/tests/extensions/09-pg-safeupdate.sql @@ -0,0 +1,3 @@ +BEGIN; +alter role postgres set session_preload_libraries = 'safeupdate'; +ROLLBACK; diff --git a/migrations/tests/extensions/10-timescaledb.sql 
b/migrations/tests/extensions/10-timescaledb.sql new file mode 100644 index 0000000..71fc3b1 --- /dev/null +++ b/migrations/tests/extensions/10-timescaledb.sql @@ -0,0 +1,9 @@ +begin; +do $_$ +begin + if current_setting('server_version_num')::integer >= 150000 and current_setting('server_version_num')::integer < 160000 then + create extension if not exists timescaledb with schema "extensions"; + end if; +end +$_$; +rollback; diff --git a/migrations/tests/extensions/11-wal2json.sql b/migrations/tests/extensions/11-wal2json.sql new file mode 100644 index 0000000..ab25131 --- /dev/null +++ b/migrations/tests/extensions/11-wal2json.sql @@ -0,0 +1,6 @@ +BEGIN; +select pg_drop_replication_slot(slot_name) from pg_replication_slots where slot_name = 'test_slot'; +select * from pg_create_logical_replication_slot('test_slot', 'wal2json'); +-- a rollback of the txn does not remove the logical replication slot that gets created, so we need to manually drop it +select pg_drop_replication_slot(slot_name) from pg_replication_slots where slot_name = 'test_slot'; +ROLLBACK; diff --git a/migrations/tests/extensions/12-pljava.sql b/migrations/tests/extensions/12-pljava.sql new file mode 100644 index 0000000..b51c824 --- /dev/null +++ b/migrations/tests/extensions/12-pljava.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists pljava with schema "sqlj"; +ROLLBACK; diff --git a/migrations/tests/extensions/13-plv8.sql b/migrations/tests/extensions/13-plv8.sql new file mode 100644 index 0000000..8ce134a --- /dev/null +++ b/migrations/tests/extensions/13-plv8.sql @@ -0,0 +1,9 @@ +begin; +do $_$ +begin + if current_setting('server_version_num')::integer >= 150000 and current_setting('server_version_num')::integer < 160000 then + create extension if not exists plv8 with schema "pg_catalog"; + end if; +end +$_$; +rollback; diff --git a/migrations/tests/extensions/14-pg_plan_filter.sql b/migrations/tests/extensions/14-pg_plan_filter.sql new file mode 100644 index 0000000..941e1d6 --- /dev/null +++ b/migrations/tests/extensions/14-pg_plan_filter.sql @@ -0,0 +1,3 @@ +BEGIN; +alter role postgres set session_preload_libraries = 'plan_filter'; +ROLLBACK; diff --git a/migrations/tests/extensions/15-pg_net.sql b/migrations/tests/extensions/15-pg_net.sql new file mode 100644 index 0000000..310a9b9 --- /dev/null +++ b/migrations/tests/extensions/15-pg_net.sql @@ -0,0 +1,23 @@ +BEGIN; +-- create net extension as tealbase_admin +create extension if not exists pg_net with schema "extensions"; + +-- \ir migrations/db/init-scripts/00000000000003-post-setup.sql +grant usage on schema net TO postgres, anon, authenticated, service_role; +alter function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) security definer; +alter function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) security definer; +alter function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; +alter function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; +revoke all on function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) from public; +revoke all on function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) from public; +grant execute on function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO postgres, anon, authenticated, service_role; 
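+-- note: http_post is hardened the same way as http_get above (security definer,
+-- search_path pinned to net, execute revoked from public and granted only to the
+-- tealbase roles)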
+grant execute on function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO postgres, anon, authenticated, service_role; + +-- postgres role should have access +set local role postgres; +select net.http_get('http://localhost', null::jsonb, null::jsonb, 100); + +-- authenticated role should have access +set local role authenticated; +select net.http_get('http://localhost', null::jsonb, null::jsonb, 100); +ROLLBACK; diff --git a/migrations/tests/extensions/16-rum.sql b/migrations/tests/extensions/16-rum.sql new file mode 100644 index 0000000..95b0845 --- /dev/null +++ b/migrations/tests/extensions/16-rum.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists rum with schema "extensions"; +ROLLBACK; diff --git a/migrations/tests/extensions/17-pg_hashids.sql b/migrations/tests/extensions/17-pg_hashids.sql new file mode 100644 index 0000000..594c7e5 --- /dev/null +++ b/migrations/tests/extensions/17-pg_hashids.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists pg_hashids with schema "extensions"; +ROLLBACK; diff --git a/migrations/tests/extensions/18-pgsodium.sql b/migrations/tests/extensions/18-pgsodium.sql new file mode 100644 index 0000000..1c2ff98 --- /dev/null +++ b/migrations/tests/extensions/18-pgsodium.sql @@ -0,0 +1,4 @@ +BEGIN; +create schema if not exists "pgsodium"; +create extension if not exists pgsodium with schema "pgsodium"; +ROLLBACK; diff --git a/migrations/tests/extensions/19-pg_graphql.sql b/migrations/tests/extensions/19-pg_graphql.sql new file mode 100644 index 0000000..f55e940 --- /dev/null +++ b/migrations/tests/extensions/19-pg_graphql.sql @@ -0,0 +1,4 @@ +BEGIN; +create schema if not exists "graphql"; +create extension if not exists pg_graphql with schema "graphql"; +ROLLBACK; diff --git a/migrations/tests/extensions/20-pg_stat_monitor.sql b/migrations/tests/extensions/20-pg_stat_monitor.sql new file mode 100644 index 0000000..f4075a2 --- /dev/null +++ b/migrations/tests/extensions/20-pg_stat_monitor.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists pg_stat_monitor with schema "extensions"; +ROLLBACK; diff --git a/migrations/tests/extensions/21-auto_explain.sql b/migrations/tests/extensions/21-auto_explain.sql new file mode 100644 index 0000000..e69de29 diff --git a/migrations/tests/extensions/22-pg_jsonschema.sql b/migrations/tests/extensions/22-pg_jsonschema.sql new file mode 100644 index 0000000..d357b61 --- /dev/null +++ b/migrations/tests/extensions/22-pg_jsonschema.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists pg_jsonschema with schema "extensions"; +ROLLBACK; diff --git a/migrations/tests/extensions/23-vault.sql b/migrations/tests/extensions/23-vault.sql new file mode 100644 index 0000000..e9d2dfe --- /dev/null +++ b/migrations/tests/extensions/23-vault.sql @@ -0,0 +1,4 @@ +BEGIN; +create schema if not exists "vault"; +create extension if not exists tealbase_vault with schema "vault" cascade; +ROLLBACK; diff --git a/migrations/tests/extensions/24-pgroonga.sql b/migrations/tests/extensions/24-pgroonga.sql new file mode 100644 index 0000000..bf3fda7 --- /dev/null +++ b/migrations/tests/extensions/24-pgroonga.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists pgroonga with schema "extensions"; +ROLLBACK; diff --git a/migrations/tests/extensions/25-wrappers.sql b/migrations/tests/extensions/25-wrappers.sql new file mode 100644 index 0000000..4f7f7ac --- /dev/null +++ b/migrations/tests/extensions/25-wrappers.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists 
wrappers with schema "extensions"; +ROLLBACK; diff --git a/migrations/tests/extensions/26-hypopg.sql b/migrations/tests/extensions/26-hypopg.sql new file mode 100644 index 0000000..e6e0706 --- /dev/null +++ b/migrations/tests/extensions/26-hypopg.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists hypopg with schema "extensions"; +ROLLBACK; diff --git a/migrations/tests/extensions/27-pg_repack.sql b/migrations/tests/extensions/27-pg_repack.sql new file mode 100644 index 0000000..200cf78 --- /dev/null +++ b/migrations/tests/extensions/27-pg_repack.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists pg_repack with schema "extensions"; +ROLLBACK; diff --git a/migrations/tests/extensions/28-pgvector.sql b/migrations/tests/extensions/28-pgvector.sql new file mode 100644 index 0000000..437bdae --- /dev/null +++ b/migrations/tests/extensions/28-pgvector.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists vector with schema "extensions"; +ROLLBACK; diff --git a/migrations/tests/extensions/29-pg_tle.sql b/migrations/tests/extensions/29-pg_tle.sql new file mode 100644 index 0000000..29a8fdc --- /dev/null +++ b/migrations/tests/extensions/29-pg_tle.sql @@ -0,0 +1,4 @@ +BEGIN; +create schema if not exists "pgtle"; +create extension if not exists pg_tle with schema "pgtle"; +ROLLBACK; diff --git a/migrations/tests/extensions/test.sql b/migrations/tests/extensions/test.sql new file mode 100644 index 0000000..7e0d1f3 --- /dev/null +++ b/migrations/tests/extensions/test.sql @@ -0,0 +1,30 @@ + +\ir 01-postgis.sql +\ir 02-pgrouting.sql +\ir 03-pgtap.sql +\ir 04-pg_cron.sql +\ir 05-pgaudit.sql +\ir 06-pgjwt.sql +\ir 07-pgsql-http.sql +\ir 08-plpgsql_check.sql +\ir 09-pg-safeupdate.sql +\ir 10-timescaledb.sql +\ir 11-wal2json.sql +-- \ir 12-pljava.sql +\ir 13-plv8.sql +\ir 14-pg_plan_filter.sql +\ir 15-pg_net.sql +\ir 16-rum.sql +\ir 17-pg_hashids.sql +\ir 18-pgsodium.sql +\ir 19-pg_graphql.sql +\ir 20-pg_stat_monitor.sql +\ir 21-auto_explain.sql +\ir 22-pg_jsonschema.sql +\ir 23-vault.sql +\ir 24-pgroonga.sql +\ir 25-wrappers.sql +\ir 26-hypopg.sql +\ir 27-pg_repack.sql +\ir 28-pgvector.sql +\ir 29-pg_tle.sql diff --git a/migrations/tests/fixtures.sql b/migrations/tests/fixtures.sql new file mode 100644 index 0000000..3699e1a --- /dev/null +++ b/migrations/tests/fixtures.sql @@ -0,0 +1,67 @@ +CREATE ROLE test_user_role; + +CREATE ROLE test_admin_role; + +GRANT authenticated TO test_user_role; + +GRANT postgres TO test_admin_role; + +INSERT INTO auth.users (id, "role", email) + VALUES (gen_random_uuid (), 'test_user_role', 'bob@tealbase.com') +RETURNING + * \gset bob_ + +INSERT INTO auth.users (id, "role", email) + VALUES (gen_random_uuid (), 'test_user_role', 'alice@tealbase.com') +RETURNING + * \gset alice_ + +INSERT INTO auth.users (id, "role", email) + VALUES (gen_random_uuid (), 'test_admin_role', 'admin@tealbase.com') +RETURNING + * \gset admin_ + +CREATE OR REPLACE FUNCTION test_logout () + RETURNS void + LANGUAGE plpgsql + AS $$ +BEGIN + PERFORM + set_config('request.jwt.claim.sub', NULL, TRUE); + PERFORM + set_config('request.jwt.claim.role', NULL, TRUE); + PERFORM + set_config('request.jwt.claim.email', NULL, TRUE); + RESET ROLE; +END; +$$; + +CREATE OR REPLACE FUNCTION test_login (user_email text, logout_first boolean = TRUE) + RETURNS auth.users + LANGUAGE plpgsql + AS $$ +DECLARE + auth_user auth.users; +BEGIN + IF logout_first THEN + PERFORM + test_logout (); + END IF; + SELECT + * INTO auth_user + FROM + auth.users + WHERE + email = user_email; + PERFORM + 
set_config('request.jwt.claim.sub', (auth_user).id::text, TRUE); + PERFORM + set_config('request.jwt.claim.role', (auth_user).ROLE, TRUE); + PERFORM + set_config('request.jwt.claim.email', (auth_user).email, TRUE); + RAISE NOTICE '%', format( 'SET ROLE %I; -- Logging in as %L (%L)', (auth_user).ROLE, (auth_user).id, (auth_user).email); + EXECUTE format('SET ROLE %I', (auth_user).ROLE); + RETURN auth_user; +END; +$$; + diff --git a/migrations/tests/storage/exists.sql b/migrations/tests/storage/exists.sql new file mode 100644 index 0000000..fae3d10 --- /dev/null +++ b/migrations/tests/storage/exists.sql @@ -0,0 +1,13 @@ + +-- Sanity test object existence in storage schema + +select has_table('storage'::name, 'buckets'::name); +select has_table('storage'::name, 'objects'::name); +select has_table('storage'::name, 'migrations'::name); +select has_function('storage'::name, 'foldername'::name); +select has_function('storage'::name, 'filename'::name); +select has_function('storage'::name, 'extension'::name); +select has_function('storage'::name, 'search'::name); + +select todo('This test should probably fail.'); select schema_privs_are('storage', 'anon', ARRAY['USAGE']); + diff --git a/migrations/tests/storage/privs.sql b/migrations/tests/storage/privs.sql new file mode 100644 index 0000000..f8d9f23 --- /dev/null +++ b/migrations/tests/storage/privs.sql @@ -0,0 +1 @@ +select is_member_of('authenticator', 'tealbase_storage_admin'); diff --git a/migrations/tests/storage/test.sql b/migrations/tests/storage/test.sql new file mode 100644 index 0000000..465c38a --- /dev/null +++ b/migrations/tests/storage/test.sql @@ -0,0 +1,3 @@ + +\ir exists.sql +\ir privs.sql diff --git a/migrations/tests/test.sql b/migrations/tests/test.sql new file mode 100644 index 0000000..9682b4a --- /dev/null +++ b/migrations/tests/test.sql @@ -0,0 +1,26 @@ +-- Check and create OrioleDB if available +DO $$ +BEGIN + IF EXISTS (SELECT 1 FROM pg_available_extensions WHERE name = 'orioledb') THEN + IF NOT EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'orioledb') THEN + CREATE EXTENSION orioledb; + END IF; + END IF; +END $$; + +-- Create all extensions +\ir extensions/test.sql + +BEGIN; + +CREATE EXTENSION IF NOT EXISTS pgtap; + +SELECT no_plan(); + +\ir fixtures.sql +\ir database/test.sql +\ir storage/test.sql + +SELECT * FROM finish(); + +ROLLBACK; diff --git a/postgresql.gpg.key b/postgresql.gpg.key new file mode 100644 index 0000000..443bf58 --- /dev/null +++ b/postgresql.gpg.key @@ -0,0 +1,64 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBE6XR8IBEACVdDKT2HEH1IyHzXkb4nIWAY7echjRxo7MTcj4vbXAyBKOfjja +UrBEJWHN6fjKJXOYWXHLIYg0hOGeW9qcSiaa1/rYIbOzjfGfhE4x0Y+NJHS1db0V +G6GUj3qXaeyqIJGS2z7m0Thy4Lgr/LpZlZ78Nf1fliSzBlMo1sV7PpP/7zUO+aA4 +bKa8Rio3weMXQOZgclzgeSdqtwKnyKTQdXY5MkH1QXyFIk1nTfWwyqpJjHlgtwMi +c2cxjqG5nnV9rIYlTTjYG6RBglq0SmzF/raBnF4Lwjxq4qRqvRllBXdFu5+2pMfC +IZ10HPRdqDCTN60DUix+BTzBUT30NzaLhZbOMT5RvQtvTVgWpeIn20i2NrPWNCUh +hj490dKDLpK/v+A5/i8zPvN4c6MkDHi1FZfaoz3863dylUBR3Ip26oM0hHXf4/2U +A/oA4pCl2W0hc4aNtozjKHkVjRx5Q8/hVYu+39csFWxo6YSB/KgIEw+0W8DiTII3 +RQj/OlD68ZDmGLyQPiJvaEtY9fDrcSpI0Esm0i4sjkNbuuh0Cvwwwqo5EF1zfkVj +Tqz2REYQGMJGc5LUbIpk5sMHo1HWV038TWxlDRwtOdzw08zQA6BeWe9FOokRPeR2 +AqhyaJJwOZJodKZ76S+LDwFkTLzEKnYPCzkoRwLrEdNt1M7wQBThnC5z6wARAQAB +tBxQb3N0Z3JlU1FMIERlYmlhbiBSZXBvc2l0b3J5iQJOBBMBCAA4AhsDBQsJCAcD +BRUKCQgLBRYCAwEAAh4BAheAFiEEuXsK/KoaR/BE8kSgf8x9RqzMTPgFAlhtCD8A +CgkQf8x9RqzMTPgECxAAk8uL+dwveTv6eH21tIHcltt8U3Ofajdo+D/ayO53LiYO +xi27kdHD0zvFMUWXLGxQtWyeqqDRvDagfWglHucIcaLxoxNwL8+e+9hVFIEskQAY 
+kVToBCKMXTQDLarz8/J030Pmcv3ihbwB+jhnykMuyyNmht4kq0CNgnlcMCdVz0d3 +z/09puryIHJrD+A8y3TD4RM74snQuwc9u5bsckvRtRJKbP3GX5JaFZAqUyZNRJRJ +Tn2OQRBhCpxhlZ2afkAPFIq2aVnEt/Ie6tmeRCzsW3lOxEH2K7MQSfSu/kRz7ELf +Cz3NJHj7rMzC+76Rhsas60t9CjmvMuGONEpctijDWONLCuch3Pdj6XpC+MVxpgBy +2VUdkunb48YhXNW0jgFGM/BFRj+dMQOUbY8PjJjsmVV0joDruWATQG/M4C7O8iU0 +B7o6yVv4m8LDEN9CiR6r7H17m4xZseT3f+0QpMe7iQjz6XxTUFRQxXqzmNnloA1T +7VjwPqIIzkj/u0V8nICG/ktLzp1OsCFatWXh7LbU+hwYl6gsFH/mFDqVxJ3+DKQi +vyf1NatzEwl62foVjGUSpvh3ymtmtUQ4JUkNDsXiRBWczaiGSuzD9Qi0ONdkAX3b +ewqmN4TfE+XIpCPxxHXwGq9Rv1IFjOdCX0iG436GHyTLC1tTUIKF5xV4Y0+cXIOJ +Aj0EEwEIACcCGwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlLpFRkFCQ6EJy0A +CgkQf8x9RqzMTPjOZA//Zp0e25pcvle7cLc0YuFr9pBv2JIkLzPm83nkcwKmxaWa +yUIG4Sv6pH6hm8+S/CHQij/yFCX+o3ngMw2J9HBUvafZ4bnbI0RGJ70GsAwraQ0V +lkIfg7GUw3TzvoGYO42rZTru9S0K/6nFP6D1HUu+U+AsJONLeb6oypQgInfXQExP +ZyliUnHdipei4WR1YFW6sjSkZT/5C3J1wkAvPl5lvOVthI9Zs6bZlJLZwusKxU0U +M4Btgu1Sf3nnJcHmzisixwS9PMHE+AgPWIGSec/N27a0KmTTvImV6K6nEjXJey0K +2+EYJuIBsYUNorOGBwDFIhfRk9qGlpgt0KRyguV+AP5qvgry95IrYtrOuE7307Si +dEbSnvO5ezNemE7gT9Z1tM7IMPfmoKph4BfpNoH7aXiQh1Wo+ChdP92hZUtQrY2N +m13cmkxYjQ4ZgMWfYMC+DA/GooSgZM5i6hYqyyfAuUD9kwRN6BqTbuAUAp+hCWYe +N4D88sLYpFh3paDYNKJ+Gf7Yyi6gThcV956RUFDH3ys5Dk0vDL9NiWwdebWfRFbz +oRM3dyGP889aOyLzS3mh6nHzZrNGhW73kslSQek8tjKrB+56hXOnb4HaElTZGDvD +5wmrrhN94kbyGtz3cydIohvNO9d90+29h0eGEDYti7j7maHkBKUAwlcPvMg5m3aJ +Aj0EEwEIACcCGwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlEqbZUFCQg2wEEA +CgkQf8x9RqzMTPhFMQ//WxAfKMdpSIA9oIC/yPD/dJpY/+DyouOljpE6MucMy/Ar +BECjFTBwi/j9NYM4ynAk34IkhuNexc1i9/05f5RM6+riLCLgAOsADDbHD4miZzoS +xiVr6GQ3YXMbOGld9kV9Sy6mGNjcUov7iFcf5Hy5w3AjPfKuR9zXswyfzIU1YXOb +iiZT38l55pp/BSgvGVQsvbNjsff5CbEKXS7q3xW+WzN0QWF6YsfNVhFjRGj8hKtH +vwKcA02wwjLeLXVTm6915ZUKhZXUFc0vM4Pj4EgNswH8Ojw9AJaKWJIZmLyW+aP+ +wpu6YwVCicxBY59CzBO2pPJDfKFQzUtrErk9irXeuCCLesDyirxJhv8o0JAvmnMA +KOLhNFUrSQ2m+3EnF7zhfz70gHW+EG8X8mL/EN3/dUM09j6TVrjtw43RLxBzwMDe +ariFF9yC+5bLtnGgxjsB9Ik6GV5v34/NEEGf1qBiAzFmDVFRZlrNDkq6gmpvGnA5 +hUWNr+y0i01LjGyaLSWHYjgw2UEQOqcUtTFK9MNzbZze4mVaHMEz9/aMfX25R6qb +iNqCChveIm8mYr5Ds2zdZx+G5bAKdzX7nx2IUAxFQJEE94VLSp3npAaTWv3sHr7d +R8tSyUJ9poDwgw4W9BIcnAM7zvFYbLF5FNggg/26njHCCN70sHt8zGxKQINMc6SJ +Aj0EEwEIACcCGwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlB5KywFCQPDFt8A +CgkQf8x9RqzMTPhuCQ//QAjRSAOCQ02qmUAikT+mTB6baOAakkYq6uHbEO7qPZkv +4E/M+HPIJ4wdnBNeSQjfvdNcZBA/x0hr5EMcBneKKPDj4hJ0panOIRQmNSTThQw9 +OU351gm3YQctAMPRUu1fTJAL/AuZUQf9ESmhyVtWNlH/56HBfYjE4iVeaRkkNLJy +X3vkWdJSMwC/LO3Lw/0M3R8itDsm74F8w4xOdSQ52nSRFRh7PunFtREl+QzQ3EA/ +WB4AIj3VohIGkWDfPFCzV3cyZQiEnjAe9gG5pHsXHUWQsDFZ12t784JgkGyO5wT2 +6pzTiuApWM3k/9V+o3HJSgH5hn7wuTi3TelEFwP1fNzI5iUUtZdtxbFOfWMnZAyp +EhaLmXNkg4zDkH44r0ss9fR0DAgUav1a25UnbOn4PgIEQy2fgHKHwRpCy20d6oCS +lmgyWsR40EPPYvtGq49A2aK6ibXmdvvFT+Ts8Z+q2SkFpoYFX20mR2nsF0fbt1lf +H65P64dukxeRGteWIeNakDD40bAAOH8+OaoTGVBJ2ACJfLVNM53PEoftavAwUYMr +R910qvwYfd/46rh46g1Frr9SFMKYE9uvIJIgDsQB3QBp71houU4H55M5GD8XURYs ++bfiQpJG1p7eB8e5jZx1SagNWc4XwL2FzQ9svrkbg1Y+359buUiP7T6QXX2zY+8= +=XSRU +-----END PGP PUBLIC KEY BLOCK----- diff --git a/qemu-arm64-nix.pkr.hcl b/qemu-arm64-nix.pkr.hcl new file mode 100644 index 0000000..d36eeaa --- /dev/null +++ b/qemu-arm64-nix.pkr.hcl @@ -0,0 +1,137 @@ +variable "ansible_arguments" { + type = string + default = "--skip-tags install-postgrest,install-pgbouncer,install-tealbase-internal" +} + +variable "environment" { + type = string + default = "prod" +} + +variable "git_sha" { + type = string +} + +locals { + creator = "packer" +} + +variable "postgres-version" { + type = string + default = "" +} + +variable 
"postgres-major-version" { + type = string + default = "" +} + +variable "git-head-version" { + type = string + default = "unknown" +} + +variable "packer-execution-id" { + type = string + default = "unknown" +} + +packer { + required_plugins { + amazon = { + source = "github.com/hashicorp/amazon" + version = "~> 1" + } + qemu = { + version = "~> 1.0" + source = "github.com/hashicorp/qemu" + } + } +} + +source "null" "dependencies" { + communicator = "none" +} + +build { + name = "cloudimg.deps" + sources = ["source.null.dependencies"] + + provisioner "shell-local" { + inline = [ + "cp /usr/share/AAVMF/AAVMF_VARS.fd AAVMF_VARS.fd", + "cloud-localds seeds-cloudimg.iso user-data-cloudimg meta-data" + ] + inline_shebang = "/bin/bash -e" + } +} + +source "qemu" "cloudimg" { + boot_wait = "2s" + cpus = 8 + disk_image = true + disk_size = "15G" + format = "qcow2" + headless = true + http_directory = "http" + iso_checksum = "file:https://cloud-images.ubuntu.com/noble/current/SHA256SUMS" + iso_url = "https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-arm64.img" + memory = 40000 + qemu_binary = "qemu-system-aarch64" + qemuargs = [ + ["-machine", "virt,gic-version=3"], + ["-cpu", "host"], + ["-device", "virtio-gpu-pci"], + ["-drive", "if=pflash,format=raw,id=ovmf_code,readonly=on,file=/usr/share/AAVMF/AAVMF_CODE.fd"], + ["-drive", "if=pflash,format=raw,id=ovmf_vars,file=AAVMF_VARS.fd"], + ["-drive", "file=output-cloudimg/packer-cloudimg,format=qcow2"], + ["-drive", "file=seeds-cloudimg.iso,format=raw"], + ["--enable-kvm"] + ] + shutdown_command = "sudo -S shutdown -P now" + ssh_handshake_attempts = 500 + ssh_password = "ubuntu" + ssh_timeout = "1h" + ssh_username = "ubuntu" + ssh_wait_timeout = "1h" + use_backing_file = false + accelerator = "kvm" +} + +build { + name = "cloudimg.image" + sources = ["source.qemu.cloudimg"] + + # Copy ansible playbook + provisioner "shell" { + inline = ["mkdir /tmp/ansible-playbook"] + } + + provisioner "file" { + source = "ansible" + destination = "/tmp/ansible-playbook" + } + + provisioner "file" { + source = "scripts" + destination = "/tmp/ansible-playbook" + } + + provisioner "file" { + source = "migrations" + destination = "/tmp" + } + + provisioner "shell" { + environment_vars = [ + "POSTGRES_MAJOR_VERSION=${var.postgres-major-version}", + "POSTGRES_tealbase_VERSION=${var.postgres-version}", + "GIT_SHA=${var.git_sha}" + ] + use_env_var_file = true + script = "ebssurrogate/scripts/qemu-bootstrap-nix.sh" + execute_command = "sudo -S sh -c '. {{.EnvVarFile}} && cd /tmp/ansible-playbook && {{.Path}}'" + start_retry_timeout = "5m" + skip_clean = true + } +} diff --git a/qemu_artifact.md b/qemu_artifact.md new file mode 100644 index 0000000..a93dae5 --- /dev/null +++ b/qemu_artifact.md @@ -0,0 +1,52 @@ +# QEMU artifact + +We build a container image that contains a QEMU qcow2 disk image. Container images are a convenient mechanism to ship the disk image to the nodes where they're needed. + +Given the size of the image, the first VM using it on a node might take a while to come up, while the image is being pulled down. The image can be pre-fetched to avoid this; we might also switch to other deployment mechanisms in the future. + +### Build process + +The current AMI process involves a few steps: + +1. nix package is build and published using GHA (`.github/workflows/nix-build.yml`) + - this builds Postgres along with the PG extensions we use. +2. 
"stage1" build (`amazon-arm64-nix.pkr.hcl`, invoked via `.github/workflows/ami-release-nix.yml`) + - uses an upstream Ubuntu image to initialize the AMI + - installs and configures the majority of the software that gets shipped as part of the AMI (e.g. gotrue, postgrest, ...) +3. "stage2" build (`stage2-nix-psql.pkr.hcl`, invoked via `.github/workflows/ami-release-nix.yml`) + - uses the image published from (2) + - installs and configures the software that is build and published using nix in (1) + - cleans up build dependencies etc + +The QEMU artifact process collapses (2) and (3): + +a. nix package is build and published using GHA (`.github/workflows/nix-build.yml`) +b. packer build (`qemu-arm64-nix.pkr.hcl`) + - uses an upstream Ubuntu live image as the base + - performs the work that was performed as part of the "stage1" and "stage2" builds + - this work is executed using `ebssurrogate/scripts/qemu-bootstrap-nix.sh` + +While the AMI build uses the EBS Surrogate Packer builder to create a minimal boot environment that it then adds things to, the QEMU build merely adds things to the Ubuntu Cloud Image. As such, it's likely possible to make something more minimal with a bit more work, but this was deemed unnecessary for now. Collapsing Stage1 and Stage2 was done in the interest of iteration speed, as executing them together is much faster than saving an artifact off stage1, booting another VM off it, and then executing stage2. + +## Publish image for later use + +Following `make init alpine-image`, the generated VM image should be bundled as a container image with the name: `tealbase-postgres-test` . Publish the built docker image to a registry of your choosing, and use the published image with e.g. KubeVirt. + +## Iterating on image + +For faster iteration, it's more convenient to build the image on an ubuntu bare-metal node that's part of the EKS cluster you're using. Build the image in the `k8s.io` namespace in order for it to be available for immediate use on that node. + +### Dependencies note + +Installing `docker.io` on an EKS node might interfere with the k8s setup of the node. You can instead install `nerdctl` and `buildkit`: + +```bash +curl -L -O https://github.com/containerd/nerdctl/releases/download/v2.0.0/nerdctl-2.0.0-linux-arm64.tar.gz +tar -xzf nerdctl-2.0.0-linux-arm64.tar.gz +mv ./nerdctl /usr/local/bin/ +curl -O -L https://github.com/moby/buildkit/releases/download/v0.17.1/buildkit-v0.17.1.linux-arm64.tar.gz +tar -xzf buildkit-v0.17.1.linux-arm64.tar.gz +mv bin/* /usr/local/bin/ +``` + +You'll need to run buildkit: `buildkitd` diff --git a/rfcs/0001-connection-pooling.md b/rfcs/0001-connection-pooling.md new file mode 100644 index 0000000..f787711 --- /dev/null +++ b/rfcs/0001-connection-pooling.md @@ -0,0 +1,71 @@ +--- +feature: Connection Pooling +start-date: 2021-02-04 +author: kiwicopple +co-authors: steve-chavez, dragarcia +related-issues: (will contain links to implementation PRs) +--- + +# Summary +[summary]: #summary + +We would like to explore connection pooling on tealbase. This RFC is intended to decide: + +- Whether we should provide a pooler +- Which connection pooler we should use +- Where in the stack it would be installed - i.e. if should bundle it with the Postgres build + + +# Motivation +[motivation]: #motivation + +In Postgres, every connection is a process. Because of this, a lot of connections to the database can be very expensive on memory. 
+### 3. Decide if the user should be able to re-configure the pooler + +Should a user be able to change the configuration? If so, how would they do it? + + +# Drawbacks +[drawbacks]: #drawbacks + +- Security: the pooler is one more network-facing component that must be secured +- Not directly relevant to the "tealbase" stack, so it's additional non-core support + +# Alternatives +[alternatives]: #alternatives + +1. Since we already offer [PostgREST](https://github.com/postgrest/postgrest) and [postgres-meta](https://github.com/tealbase/pg-api), this isn't entirely necessary for the tealbase stack. Bundling this is only beneficial for connecting external tools. +2. We could hold back on this implementation until we move to a full Postgres Operator, which would include a pooler. It would be nice to have something for local development though. + + +# Unresolved questions +[unresolved]: #unresolved-questions + +- Add any unresolved questions here + + +# Future work +[future]: #future-work + +- Add any future work here \ No newline at end of file diff --git a/scripts/01-postgres_check.sh b/scripts/01-postgres_check.sh new file mode 100644 index 0000000..d131528 --- /dev/null +++ b/scripts/01-postgres_check.sh @@ -0,0 +1,72 @@ +#!/bin/bash +# +# Scripts in this directory are run during the build process. +# Each script will be uploaded to /tmp on your build droplet, +# given execute permissions and run. The cleanup process will +# remove the scripts from your build system after they have run +# if you use the build_image task. +# +echo "Commencing Checks" + +function check_database_is_ready { + echo -e "\nChecking if database is ready and accepting connections:" + if [ "$(pg_isready)" = "/tmp:5432 - accepting connections" ]; then + echo "Database is ready" + else + echo "Error: Database is not ready. Exiting" + exit 1 + fi +} + +function check_postgres_owned_dir_exists { + DIR=$1 + USER="postgres" + + echo -e "\nChecking if $DIR exists and is owned by the postgres user:" + + if [ -d "$DIR" ]; then + echo "$DIR exists" + if [ $(stat -c '%U' $DIR) = "$USER" ]; then + echo "$DIR is owned by $USER" + else + echo "Error: $DIR is not owned by $USER" + exit 1 + fi + else + echo "Error: ${DIR} not found. Exiting." + exit 1 + fi +} + +function check_lse_enabled { + ARCH=$(uname -m) + if [ $ARCH = "aarch64" ]; then + echo -e "\nArchitecture is $ARCH. Checking for LSE:" + + LSE_COUNT=$(objdump -d /usr/lib/postgresql/bin/postgres | grep -i 'ldxr\|ldaxr\|stxr\|stlxr' | wc -l) + MOUTLINE_ATOMICS_COUNT=$(nm /usr/lib/postgresql/bin/postgres | grep __aarch64_have_lse_atomics | wc -l) + + # Checking for load and store exclusives + if [ $LSE_COUNT -gt 0 ]; then + echo "Postgres has LSE enabled" + else + echo "Error: Postgres failed to be compiled with LSE.
Exiting" + exit 1 + fi + + # Checking if successfully compiled with -moutline-atomics + if [ $MOUTLINE_ATOMICS_COUNT -gt 0 ]; then + echo "Postgres has been compiled with -moutline-atomics" + else + echo "Error: Postgres failed to be compiled with -moutline-atomics. Exiting" + exit 1 + fi + else + echo "Architecture is $ARCH. Not checking for LSE." + fi +} + +check_database_is_ready +check_postgres_owned_dir_exists "/var/lib/postgresql" +check_postgres_owned_dir_exists "/etc/postgresql" +check_lse_enabled \ No newline at end of file diff --git a/scripts/01-test b/scripts/01-test deleted file mode 100644 index e5b3e05..0000000 --- a/scripts/01-test +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -# -# Scripts in this directory are run during the build process. -# each script will be uploaded to /tmp on your build droplet, -# given execute permissions and run. The cleanup process will -# remove the scripts from your build system after they have run -# if you use the build_image task. -# -echo "Commencing Digital Ocean Checks" diff --git a/scripts/02-credentials_cleanup.sh b/scripts/02-credentials_cleanup.sh index d1b359a..a7b966f 100644 --- a/scripts/02-credentials_cleanup.sh +++ b/scripts/02-credentials_cleanup.sh @@ -1 +1 @@ -sudo rm /home/ubuntu/.ssh/authorized_keys \ No newline at end of file +sudo rm /home/ubuntu/.ssh/authorized_keys diff --git a/scripts/11-lemp.sh b/scripts/11-lemp.sh new file mode 100644 index 0000000..c340f5e --- /dev/null +++ b/scripts/11-lemp.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# DigitalOcean Marketplace Image Validation Tool +# © 2021 DigitalOcean LLC. +# This code is licensed under Apache 2.0 license (see LICENSE.md for details) + +rm -rvf /etc/nginx/sites-enabled/default + +ln -s /etc/nginx/sites-available/digitalocean \ + /etc/nginx/sites-enabled/digitalocean + +rm -rf /var/www/html/index*debian.html + +chown -R www-data: /var/www \ No newline at end of file diff --git a/scripts/12-ufw-nginx.sh b/scripts/12-ufw-nginx.sh new file mode 100644 index 0000000..7c47366 --- /dev/null +++ b/scripts/12-ufw-nginx.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +# DigitalOcean Marketplace Image Validation Tool +# © 2021 DigitalOcean LLC. +# This code is licensed under Apache 2.0 license (see LICENSE.md for details) + +ufw limit ssh +ufw allow 'Nginx Full' + +ufw --force enable \ No newline at end of file diff --git a/scripts/13-force-ssh-logout.sh b/scripts/13-force-ssh-logout.sh new file mode 100644 index 0000000..99e28c1 --- /dev/null +++ b/scripts/13-force-ssh-logout.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +# DigitalOcean Marketplace Image Validation Tool +# © 2021 DigitalOcean LLC. +# This code is licensed under Apache 2.0 license (see LICENSE.md for details) + +cat >> /etc/ssh/sshd_config < /root/.bash_history +unset HISTFILE +find /var/log -mtime -1 -type f -exec truncate -s 0 {} \; +rm -rf /var/log/*.gz /var/log/*.[0-9] /var/log/*-???????? +rm -rf /var/lib/cloud/instances/* +rm -f /root/.ssh/authorized_keys /etc/ssh/*key* +touch /etc/ssh/revoked_keys +chmod 600 /etc/ssh/revoked_keys + +cat /dev/null > /var/log/lastlog +cat /dev/null > /var/log/wtmp diff --git a/scripts/90-cleanup.sh b/scripts/90-cleanup.sh index 98ab510..644e5f7 100644 --- a/scripts/90-cleanup.sh +++ b/scripts/90-cleanup.sh @@ -1,13 +1,54 @@ #!/bin/bash -apt-get -y update -apt-get -y upgrade +# DigitalOcean Marketplace Image Validation Tool +# © 2021 DigitalOcean LLC. 
+# This code is licensed under Apache 2.0 license (see LICENSE.md for details) + +set -o errexit + +# Ensure /tmp exists and has the proper permissions before +# checking for security updates +# https://github.com/digitalocean/marketplace-partners/issues/94 +if [[ ! -d /tmp ]]; then + mkdir /tmp +fi +chmod 1777 /tmp + +if [ -n "$(command -v yum)" ]; then + yum update -y + yum clean all +elif [ -n "$(command -v apt-get)" ]; then + # Cleanup more packages + apt-get -y remove --purge \ + automake \ + autoconf \ + autotools-dev \ + cmake-data \ + cpp-9 \ + cpp-10 \ + gcc-9 \ + gcc-10 \ + git \ + git-man \ + ansible \ + libicu-dev \ + libcgal-dev \ + libgcc-9-dev \ + ansible + + add-apt-repository --yes --remove ppa:ansible/ansible + + source /etc/os-release + + apt-get -y update + apt-get -y upgrade + apt-get -y autoremove + apt-get -y autoclean +fi rm -rf /tmp/* /var/tmp/* history -c cat /dev/null > /root/.bash_history unset HISTFILE -apt-get -y autoremove -apt-get -y autoclean find /var/log -mtime -1 -type f -exec truncate -s 0 {} \; rm -rf /var/log/*.gz /var/log/*.[0-9] /var/log/*-???????? rm -rf /var/lib/cloud/instances/* diff --git a/scripts/91-log_cleanup.sh b/scripts/91-log_cleanup.sh index 26f5fbc..24073af 100644 --- a/scripts/91-log_cleanup.sh +++ b/scripts/91-log_cleanup.sh @@ -3,3 +3,22 @@ # echo "Clearing all log files" rm -rf /var/log/* + +# creating system stats directory +mkdir /var/log/sysstat + +# https://github.com/fail2ban/fail2ban/issues/1593 +touch /var/log/auth.log + +touch /var/log/pgbouncer.log +chown pgbouncer:postgres /var/log/pgbouncer.log + +mkdir /var/log/postgresql +chown postgres:postgres /var/log/postgresql + +mkdir /var/log/wal-g +cd /var/log/wal-g +touch backup-push.log backup-fetch.log wal-push.log wal-fetch.log pitr.log +chown -R postgres:postgres /var/log/wal-g +chmod -R 0300 /var/log/wal-g + diff --git a/scripts/99-img_check.sh b/scripts/99-img_check.sh index 6daee68..ac958a5 100755 --- a/scripts/99-img_check.sh +++ b/scripts/99-img_check.sh @@ -1,10 +1,10 @@ #!/bin/bash -# + # DigitalOcean Marketplace Image Validation Tool -# © 2018 DigitalOcean LLC. -# This code is licensed under MIT license (see LICENSE.txt for details) -# -VERSION="v. 1.2" +# © 2021 DigitalOcean LLC. +# This code is licensed under Apache 2.0 license (see LICENSE.md for details) + +VERSION="v. 
1.6" RUNDATE=$( date ) # Script should be run with SUDO @@ -110,18 +110,12 @@ function checkLogs { [[ -e $f ]] || break if [[ "${f}" = '/var/log/lfd.log' && "$( cat "${f}" | egrep -v '/var/log/messages has been reset| Watching /var/log/messages' | wc -c)" -gt 50 ]]; then if [ $f != $cp_ignore ]; then - echo -en "\e[93m[WARN]\e[0m un-cleared log file, ${f} found\n" - ((WARN++)) - if [[ $STATUS != 2 ]]; then - STATUS=1 - fi - fi - elif [[ "${f}" == '/var/log/cloud-init-output.log' ]]; then - if cat '/var/log/cloud-init-output.log' | grep -q SHA256; then - echo -en "\e[41m[FAIL]\e[0m log containing SHA256 value found in log file ${f}\n" - ((FAIL++)) - STATUS=1 + echo -en "\e[93m[WARN]\e[0m un-cleared log file, ${f} found\n" + ((WARN++)) + if [[ $STATUS != 2 ]]; then + STATUS=1 fi + fi elif [[ "${f}" != '/var/log/lfd.log' && "$( cat "${f}" | wc -c)" -gt 50 ]]; then if [ $f != $cp_ignore ]; then echo -en "\e[93m[WARN]\e[0m un-cleared log file, ${f} found\n" @@ -252,7 +246,7 @@ function checkUsers { echo -en "\e[32m[PASS]\e[0m User ${user} has no password set.\n" ((PASS++)) else - echo -en "\e[41m[FAIL]\e[0m User ${user} has a password set on their account.\n" + echo -en "\e[41m[FAIL]\e[0m User ${user} has a password set on their account. Only system users are allowed on the image.\n" ((FAIL++)) STATUS=2 fi @@ -385,7 +379,7 @@ function checkFirewall { # we will check some of the most common if cmdExists 'ufw'; then fw="ufw" - ufwa=$(ufw status | sed -e "s/^Status:\ //") + ufwa=$(ufw status |head -1| sed -e "s/^Status:\ //") if [[ $ufwa == "active" ]]; then FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n" ((PASS++)) @@ -418,6 +412,14 @@ function checkFirewall { } function checkUpdates { if [[ $OS == "Ubuntu" ]] || [[ "$OS" =~ Debian.* ]]; then + # Ensure /tmp exists and has the proper permissions before + # checking for security updates + # https://github.com/digitalocean/marketplace-partners/issues/94 + if [[ ! 
-d /tmp ]]; then + mkdir /tmp + fi + chmod 1777 /tmp + echo -en "\nUpdating apt package database to check for security updates, this may take a minute...\n\n" apt-get -y update > /dev/null @@ -441,11 +443,11 @@ function checkUpdates { echo -en "\e[32m[PASS]\e[0m There are no pending security updates for this image.\n\n" fi elif [[ $OS == "CentOS Linux" ]]; then - echo -en "\nChecking for available updates with yum, this may take a minute...\n\n" + echo -en "\nChecking for available security updates, this may take a minute...\n\n" - update_count=$(yum list updates -q | grep -vc "Updated Packages") + update_count=$(yum check-update --security --quiet | wc -l) if [[ $update_count -gt 0 ]]; then - echo -en "\e[41m[FAIL]\e[0m There are ${update_count} updates available for this image that have not been installed.\n" + echo -en "\e[41m[FAIL]\e[0m There are ${update_count} security updates available for this image that have not been installed.\n" ((FAIL++)) STATUS=2 else @@ -567,7 +569,9 @@ osv=0 if [[ $OS == "Ubuntu" ]]; then ost=1 - if [[ $VER == "18.04" ]]; then + if [[ $VER == "24.04" ]]; then + osv=1 + elif [[ $VER == "18.04" ]]; then osv=1 elif [[ $VER == "16.04" ]]; then osv=1 @@ -591,7 +595,9 @@ elif [[ "$OS" =~ Debian.* ]]; then elif [[ $OS == "CentOS Linux" ]]; then ost=1 - if [[ $VER == "7" ]]; then + if [[ $VER == "8" ]]; then + osv=1 + elif [[ $VER == "7" ]]; then osv=1 elif [[ $VER == "6" ]]; then osv=1 @@ -669,8 +675,8 @@ if [[ $STATUS == 0 ]]; then exit 0 elif [[ $STATUS == 1 ]]; then echo -en "Please review all [WARN] items above and ensure they are intended or resolved. If you do not have a specific requirement, we recommend resolving these items before image submission\n\n" - exit 1 + exit 0 else - echo -en "Some critical tests failed. These items must be resolved and this scan re-run before you submit your image to the marketplace.\n\n" + echo -en "Some critical tests failed. These items must be resolved and this scan re-run before you submit your image to the DigitalOcean Marketplace.\n\n" exit 1 -fi +fi \ No newline at end of file diff --git a/scripts/nix-provision.sh b/scripts/nix-provision.sh new file mode 100644 index 0000000..6515eca --- /dev/null +++ b/scripts/nix-provision.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash +# shellcheck shell=bash + +set -o errexit +set -o pipefail +set -o xtrace + +function install_packages { + # Setup Ansible on host VM + sudo apt-get update && sudo apt-get install -y software-properties-common + + # Manually add GPG key with explicit keyserver + sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 93C4A3FD7BB9C367 + + # Add repository and install + sudo add-apt-repository --yes ppa:ansible/ansible + sudo apt-get update + sudo apt-get install -y ansible + + ansible-galaxy collection install community.general +} + + + +function install_nix() { + sudo su -c "curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install --no-confirm \ + --extra-conf \"substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com\" \ + --extra-conf \"trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=\" " -s /bin/bash root + . 
/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh + +} + + +function execute_stage2_playbook { + echo "POSTGRES_MAJOR_VERSION: ${POSTGRES_MAJOR_VERSION}" + echo "GIT_SHA: ${GIT_SHA}" + sudo tee /etc/ansible/ansible.cfg < str: + return base64.b64encode(gzip.compress(s.encode())).decode() + + instance = list( + ec2.create_instances( + BlockDeviceMappings=[ + { + "DeviceName": "/dev/sda1", + "Ebs": { + "VolumeSize": 8, # gb + "Encrypted": True, + "DeleteOnTermination": True, + "VolumeType": "gp3", + }, + }, + ], + MetadataOptions={ + "HttpTokens": "required", + "HttpEndpoint": "enabled", + }, + IamInstanceProfile={"Name": "pg-ap-southeast-1"}, + InstanceType="t4g.micro", + MinCount=1, + MaxCount=1, + ImageId=image.id, + NetworkInterfaces=[ + { + "DeviceIndex": 0, + "AssociatePublicIpAddress": True, + "Groups": ["sg-0a883ca614ebfbae0", "sg-014d326be5a1627dc"], + } + ], + UserData=f"""#cloud-config +hostname: db-aaaaaaaaaaaaaaaaaaaa +write_files: + - {{path: /etc/postgresql.schema.sql, content: {gzip_then_base64_encode(postgresql_schema_sql_content)}, permissions: '0600', encoding: gz+b64}} + - {{path: /etc/realtime.env, content: {gzip_then_base64_encode(realtime_env_content)}, permissions: '0664', encoding: gz+b64}} + - {{path: /etc/adminapi/adminapi.yaml, content: {gzip_then_base64_encode(adminapi_yaml_content)}, permissions: '0600', owner: 'adminapi:root', encoding: gz+b64}} + - {{path: /etc/postgresql-custom/pgsodium_root.key, content: {gzip_then_base64_encode(pgsodium_root_key_content)}, permissions: '0600', owner: 'postgres:postgres', encoding: gz+b64}} + - {{path: /etc/postgrest/base.conf, content: {gzip_then_base64_encode(postgrest_base_conf_content)}, permissions: '0664', encoding: gz+b64}} + - {{path: /etc/gotrue.env, content: {gzip_then_base64_encode(gotrue_env_content)}, permissions: '0664', encoding: gz+b64}} + - {{path: /etc/wal-g/config.json, content: {gzip_then_base64_encode(walg_config_json_content)}, permissions: '0664', owner: 'wal-g:wal-g', encoding: gz+b64}} + - {{path: /tmp/init.json, content: {gzip_then_base64_encode(init_json_content)}, permissions: '0600', encoding: gz+b64}} +runcmd: + - 'sudo echo \"pgbouncer\" \"postgres\" >> /etc/pgbouncer/userlist.txt' + - 'cd /tmp && aws s3 cp --region ap-southeast-1 s3://init-scripts-staging/project/init.sh .' 
+ - 'bash init.sh "staging"' + - 'touch /var/lib/init-complete' + - 'rm -rf /tmp/*' +""", + TagSpecifications=[ + { + "ResourceType": "instance", + "Tags": [ + {"Key": "Name", "Value": "ci-ami-test-nix"}, + {"Key": "creator", "Value": "testinfra-ci"}, + {"Key": "testinfra-run-id", "Value": RUN_ID}, + ], + } + ], + ) + )[0] + instance.wait_until_running() + + # Increase wait time before starting health checks + sleep(30) # Wait for 30 seconds to allow services to start + + ec2logger = EC2InstanceConnectLogger(debug=False) + temp_key = EC2InstanceConnectKey(ec2logger.get_logger()) + ec2ic = boto3.client("ec2-instance-connect", region_name="ap-southeast-1") + response = ec2ic.send_ssh_public_key( + InstanceId=instance.id, + InstanceOSUser="ubuntu", + SSHPublicKey=temp_key.get_pub_key(), + ) + assert response["Success"] + + # Wait for instance to have public IP + while not instance.public_ip_address: + logger.warning("waiting for ip to be available") + sleep(5) + instance.reload() + + # Create single SSH connection + ssh = get_ssh_connection( + instance.public_ip_address, + temp_key.get_priv_key_file(), + ) + + # Check PostgreSQL data directory + logger.info("Checking PostgreSQL data directory...") + result = run_ssh_command(ssh, "ls -la /var/lib/postgresql") + if result['succeeded']: + logger.info("PostgreSQL data directory contents:\n" + result['stdout']) + else: + logger.warning("Failed to list PostgreSQL data directory: " + result['stderr']) + + # Wait for init.sh to complete + logger.info("Waiting for init.sh to complete...") + max_attempts = 60 # 5 minutes + attempt = 0 + while attempt < max_attempts: + try: + result = run_ssh_command(ssh, "test -f /var/lib/init-complete") + if result['succeeded']: + logger.info("init.sh has completed") + break + except Exception as e: + logger.warning(f"Error checking init.sh status: {str(e)}") + + attempt += 1 + logger.warning(f"Waiting for init.sh to complete (attempt {attempt}/{max_attempts})") + sleep(5) + + if attempt >= max_attempts: + logger.error("init.sh failed to complete within the timeout period") + instance.terminate() + raise TimeoutError("init.sh failed to complete within the timeout period") + + def is_healthy(ssh) -> bool: + health_checks = [ + ("postgres", "sudo -u postgres /usr/bin/pg_isready -U postgres"), + ("adminapi", f"curl -sf -k --connect-timeout 30 --max-time 60 https://localhost:8085/health -H 'apikey: {tealbase_admin_key}'"), + ("postgrest", "curl -sf --connect-timeout 30 --max-time 60 http://localhost:3001/ready"), + ("gotrue", "curl -sf --connect-timeout 30 --max-time 60 http://localhost:8081/health"), + ("kong", "sudo kong health"), + ("fail2ban", "sudo fail2ban-client status"), + ] + + for service, command in health_checks: + try: + result = run_ssh_command(ssh, command) + if not result['succeeded']: + logger.warning(f"{service} not ready") + return False + except Exception: + logger.warning(f"Connection failed during {service} check") + return False + + return True + + while True: + if is_healthy(ssh): + break + sleep(1) + + # Return both the SSH connection and instance IP for use in tests + yield { + 'ssh': ssh, + 'ip': instance.public_ip_address + } + + # at the end of the test suite, destroy the instance + instance.terminate() + + +def test_postgrest_is_running(host): + """Check if postgrest service is running using our SSH connection.""" + result = run_ssh_command(host['ssh'], "systemctl is-active postgrest") + assert result['succeeded'] and result['stdout'].strip() == 'active', "PostgREST service is not running" + 
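+
+# The tests below receive the fixture dict as `host`: HTTP checks target the
+# instance's public IP via host['ip'], while shell-level checks reuse the
+# single shared SSH connection via host['ssh'].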
+ +def test_postgrest_responds_to_requests(host): + """Test if PostgREST responds to requests.""" + res = requests.get( + f"http://{host['ip']}/rest/v1/", + headers={ + "apikey": anon_key, + "authorization": f"Bearer {anon_key}", + }, + ) + assert res.ok + + +def test_postgrest_can_connect_to_db(host): + """Test if PostgREST can connect to the database.""" + res = requests.get( + f"http://{host['ip']}/rest/v1/buckets", + headers={ + "apikey": service_role_key, + "authorization": f"Bearer {service_role_key}", + "accept-profile": "storage", + }, + ) + assert res.ok + + +def test_postgrest_starting_apikey_query_parameter_is_removed(host): + """Test if PostgREST removes apikey query parameter at start.""" + res = requests.get( + f"http://{host['ip']}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "apikey": service_role_key, + "id": "eq.absent", + "name": "eq.absent", + }, + ) + assert res.ok + + +def test_postgrest_middle_apikey_query_parameter_is_removed(host): + """Test if PostgREST removes apikey query parameter in middle.""" + res = requests.get( + f"http://{host['ip']}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "id": "eq.absent", + "apikey": service_role_key, + "name": "eq.absent", + }, + ) + assert res.ok + + +def test_postgrest_ending_apikey_query_parameter_is_removed(host): + """Test if PostgREST removes apikey query parameter at end.""" + res = requests.get( + f"http://{host['ip']}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "id": "eq.absent", + "name": "eq.absent", + "apikey": service_role_key, + }, + ) + assert res.ok + + +def test_postgrest_starting_empty_key_query_parameter_is_removed(host): + """Test if PostgREST removes empty key query parameter at start.""" + res = requests.get( + f"http://{host['ip']}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "": "empty_key", + "id": "eq.absent", + "apikey": service_role_key, + }, + ) + assert res.ok + + +def test_postgrest_middle_empty_key_query_parameter_is_removed(host): + """Test if PostgREST removes empty key query parameter in middle.""" + res = requests.get( + f"http://{host['ip']}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "apikey": service_role_key, + "": "empty_key", + "id": "eq.absent", + }, + ) + assert res.ok + + +def test_postgrest_ending_empty_key_query_parameter_is_removed(host): + """Test if PostgREST removes empty key query parameter at end.""" + res = requests.get( + f"http://{host['ip']}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "id": "eq.absent", + "apikey": service_role_key, + "": "empty_key", + }, + ) + assert res.ok + + +def test_postgresql_version(host): + """Print the PostgreSQL version being tested and ensure it's >= 14.""" + result = run_ssh_command(host['ssh'], "sudo -u postgres psql -c 'SELECT version();'") + if result['succeeded']: + print(f"\nPostgreSQL Version:\n{result['stdout']}") + # Extract version number from the output + version_line = result['stdout'].strip().split('\n')[2] # Skip header and get the actual version + # Extract major version number (e.g., "15.8" -> 15) + import re + version_match = re.search(r'PostgreSQL (\d+)\.', version_line) + if version_match: + major_version = int(version_match.group(1)) + print(f"PostgreSQL major version: {major_version}") + assert major_version >= 14, f"PostgreSQL version {major_version} is less than 14" + else: + assert False, "Could not parse PostgreSQL 
version number" + else: + print(f"\nFailed to get PostgreSQL version: {result['stderr']}") + assert False, "Failed to get PostgreSQL version" + + # Also get the version from the command line + result = run_ssh_command(host['ssh'], "sudo -u postgres psql --version") + if result['succeeded']: + print(f"PostgreSQL Client Version: {result['stdout'].strip()}") + else: + print(f"Failed to get PostgreSQL client version: {result['stderr']}") + + print("✓ PostgreSQL version is >= 14") + + +def test_libpq5_version(host): + """Print the libpq5 version installed and ensure it's >= 14.""" + # Try different package managers to find libpq5 + result = run_ssh_command(host['ssh'], "dpkg -l | grep libpq5 || true") + if result['succeeded'] and result['stdout'].strip(): + print(f"\nlibpq5 package info:\n{result['stdout']}") + # Extract version from dpkg output (format: ii libpq5:arm64 17.5-1.pgdg20.04+1) + import re + version_match = re.search(r'libpq5[^ ]* +(\d+)\.', result['stdout']) + if version_match: + major_version = int(version_match.group(1)) + print(f"libpq5 major version: {major_version}") + assert major_version >= 14, f"libpq5 version {major_version} is less than 14" + else: + print("Could not parse libpq5 version from dpkg output") + else: + print("\nlibpq5 not found via dpkg") + + # Also try to find libpq.so files + result = run_ssh_command(host['ssh'], "find /usr -name '*libpq*' -type f 2>/dev/null | head -10") + if result['succeeded'] and result['stdout'].strip(): + print(f"\nlibpq files found:\n{result['stdout']}") + else: + print("\nNo libpq files found") + + # Check if we can get version from a libpq file + result = run_ssh_command(host['ssh'], "ldd /usr/bin/psql | grep libpq || true") + if result['succeeded'] and result['stdout'].strip(): + print(f"\npsql libpq dependency:\n{result['stdout']}") + else: + print("\nCould not find libpq dependency for psql") + + # Try to get version from libpq directly + result = run_ssh_command(host['ssh'], "psql --version 2>&1 | head -1") + if result['succeeded'] and result['stdout'].strip(): + print(f"\npsql version output: {result['stdout'].strip()}") + # The psql version should match the libpq version + import re + version_match = re.search(r'psql \(PostgreSQL\) (\d+)\.', result['stdout']) + if version_match: + major_version = int(version_match.group(1)) + print(f"psql/libpq major version: {major_version}") + assert major_version >= 14, f"psql/libpq version {major_version} is less than 14" + else: + print("Could not parse psql version") + + print("✓ libpq5 version is >= 14") + + +def test_postgrest_read_only_session_attrs(host): + """Test PostgREST with target_session_attrs=read-only and check for session errors.""" + # First, check if PostgreSQL is configured for read-only mode + result = run_ssh_command(host['ssh'], "sudo -u postgres psql -c \"SHOW default_transaction_read_only;\"") + if result['succeeded']: + default_read_only = result['stdout'].strip() + print(f"PostgreSQL default_transaction_read_only: {default_read_only}") + else: + print("Could not check PostgreSQL read-only setting") + default_read_only = "unknown" + + # Check if PostgreSQL is in recovery mode (standby) + result = run_ssh_command(host['ssh'], "sudo -u postgres psql -c \"SELECT pg_is_in_recovery();\"") + if result['succeeded']: + in_recovery = result['stdout'].strip() + print(f"PostgreSQL pg_is_in_recovery: {in_recovery}") + else: + print("Could not check PostgreSQL recovery status") + in_recovery = "unknown" + + # Find PostgreSQL configuration file + result = 
run_ssh_command(host['ssh'], "sudo -u postgres psql -c \"SHOW config_file;\"") + if result['succeeded']: + config_file = result['stdout'].strip().split('\n')[2].strip() # Skip header and get the actual path + print(f"PostgreSQL config file: {config_file}") + else: + print("Could not find PostgreSQL config file") + config_file = "/etc/postgresql/15/main/postgresql.conf" # Default fallback + + # Backup PostgreSQL config + result = run_ssh_command(host['ssh'], f"sudo cp {config_file} {config_file}.backup") + assert result['succeeded'], "Failed to backup PostgreSQL config" + + # Add read-only setting to PostgreSQL config + result = run_ssh_command(host['ssh'], f"echo 'default_transaction_read_only = on' | sudo tee -a {config_file}") + assert result['succeeded'], "Failed to add read-only setting to PostgreSQL config" + + # Restart PostgreSQL to apply the new configuration + result = run_ssh_command(host['ssh'], "sudo systemctl restart postgresql") + assert result['succeeded'], "Failed to restart PostgreSQL" + + # Wait for PostgreSQL to start up + sleep(5) + + # Verify the change took effect + result = run_ssh_command(host['ssh'], "sudo -u postgres psql -c \"SHOW default_transaction_read_only;\"") + if result['succeeded']: + new_default_read_only = result['stdout'].strip() + print(f"PostgreSQL default_transaction_read_only after change: {new_default_read_only}") + else: + print("Could not verify PostgreSQL read-only setting change") + + # First, backup the current PostgREST config + result = run_ssh_command(host['ssh'], "sudo cp /etc/postgrest/base.conf /etc/postgrest/base.conf.backup") + assert result['succeeded'], "Failed to backup PostgREST config" + + try: + # Read the current config to get the db-uri + result = run_ssh_command(host['ssh'], "sudo cat /etc/postgrest/base.conf | grep '^db-uri'") + assert result['succeeded'], "Failed to read current db-uri" + + current_db_uri = result['stdout'].strip() + print(f"Current db-uri: {current_db_uri}") + + # Extract just the URI part (remove the db-uri = " prefix and trailing quote) + uri_start = current_db_uri.find('"') + 1 + uri_end = current_db_uri.rfind('"') + base_uri = current_db_uri[uri_start:uri_end] + + # Modify the URI to add target_session_attrs=read-only + if '?' 
            # URI already has parameters, add target_session_attrs
            modified_uri = base_uri + "&target_session_attrs=read-only"
        else:
            # URI has no parameters, add target_session_attrs
            modified_uri = base_uri + "?target_session_attrs=read-only"

        print(f"Modified URI: {modified_uri}")

        # Use awk to replace the db-uri line more reliably
        result = run_ssh_command(host['ssh'], f"sudo awk '{{if ($1 == \"db-uri\") print \"db-uri = \\\"{modified_uri}\\\"\"; else print $0}}' /etc/postgrest/base.conf > /tmp/new_base.conf && sudo mv /tmp/new_base.conf /etc/postgrest/base.conf")
        assert result['succeeded'], "Failed to update db-uri in config"

        # Verify the change was made correctly
        result = run_ssh_command(host['ssh'], "sudo cat /etc/postgrest/base.conf | grep '^db-uri'")
        print(f"Updated db-uri line: {result['stdout'].strip()}")

        # Also show the full config to debug
        result = run_ssh_command(host['ssh'], "sudo cat /etc/postgrest/base.conf")
        print(f"Full config after change:\n{result['stdout']}")

        # Restart PostgREST to apply the new configuration
        result = run_ssh_command(host['ssh'], "sudo systemctl restart postgrest")
        assert result['succeeded'], "Failed to restart PostgREST"

        # Wait a moment for PostgREST to start up
        sleep(5)

        # Check if PostgREST is running
        result = run_ssh_command(host['ssh'], "sudo systemctl is-active postgrest")
        if not (result['succeeded'] and result['stdout'].strip() == 'active'):
            # If PostgREST failed to start, check the logs to see why
            log_result = run_ssh_command(host['ssh'], "sudo journalctl -u postgrest --since '5 seconds ago' --no-pager")
            print(f"PostgREST failed to start. Recent logs:\n{log_result['stdout']}")
            assert False, "PostgREST failed to start after config change"

        # Make a test request to trigger any potential session errors
        try:
            response = requests.get(
                f"http://{host['ip']}/rest/v1/",
                headers={"apikey": anon_key, "authorization": f"Bearer {anon_key}"},
                timeout=10
            )
            print(f"Test request status: {response.status_code}")
        except Exception as e:
            print(f"Test request failed: {str(e)}")

        # Check PostgREST logs for "session is not read-only" errors
        result = run_ssh_command(host['ssh'], "sudo journalctl -u postgrest --since '5 seconds ago' | grep -i 'session is not read-only' || true")

        if result['stdout'].strip():
            print(f"\nFound 'session is not read-only' errors in PostgREST logs:\n{result['stdout']}")
            assert False, "PostgREST logs contain 'session is not read-only' errors even though PostgreSQL is configured for read-only mode"
        else:
            print("\nNo 'session is not read-only' errors found in PostgREST logs")

    finally:
        # Restore the original configuration
        result = run_ssh_command(host['ssh'], "sudo cp /etc/postgrest/base.conf.backup /etc/postgrest/base.conf")
        if result['succeeded']:
            result = run_ssh_command(host['ssh'], "sudo systemctl restart postgrest")
            if result['succeeded']:
                print("Restored original PostgREST configuration")
            else:
                print("Warning: Failed to restart PostgREST after restoring config")
        else:
            print("Warning: Failed to restore original PostgREST configuration")

        # Restore PostgreSQL to original configuration
        result = run_ssh_command(host['ssh'], f"sudo cp {config_file}.backup {config_file}")
        if result['succeeded']:
            result = run_ssh_command(host['ssh'], "sudo systemctl restart postgresql")
            if result['succeeded']:
                print("Restored PostgreSQL to original configuration")
            else:
                print("Warning: Failed to restart PostgreSQL after restoring config")
restoring config") + else: + print("Warning: Failed to restore PostgreSQL configuration") + diff --git a/tests/pg_upgrade/.env b/tests/pg_upgrade/.env new file mode 100644 index 0000000..505503f --- /dev/null +++ b/tests/pg_upgrade/.env @@ -0,0 +1,6 @@ +POSTGRES_PASSWORD=postgres +POSTGRES_HOST=/var/run/postgresql +POSTGRES_INITDB_ARGS=--lc-ctype=C.UTF-8 +PG_MAJOR_VERSION=15 +IS_CI=true +SCRIPT_DIR=/tmp/upgrade diff --git a/tests/pg_upgrade/.gitignore b/tests/pg_upgrade/.gitignore new file mode 100644 index 0000000..c8ff8c3 --- /dev/null +++ b/tests/pg_upgrade/.gitignore @@ -0,0 +1,4 @@ +# excluding these since running debug.sh will download the files locally +pg_upgrade_bin*.tar.gz +pg_upgrade_scripts*.tar.gz +pg_upgrade_scripts/ diff --git a/tests/pg_upgrade/debug.sh b/tests/pg_upgrade/debug.sh new file mode 100755 index 0000000..d8a47e7 --- /dev/null +++ b/tests/pg_upgrade/debug.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +set -eEuo pipefail + +export PGPASSWORD=postgres +export PGUSER=tealbase_admin +export PGHOST=localhost +export PGDATABASE=postgres + +ARTIFACTS_BUCKET_NAME=${1:-} +if [ -z "$ARTIFACTS_BUCKET_NAME" ]; then + echo "Usage: $0 [INITIAL_PG_VERSION]" + exit 1 +fi + +INITIAL_PG_VERSION=${2:-15.1.1.60} +LATEST_PG_VERSION=$(sed -e 's/postgres-version = "\(.*\)"/\1/g' ../../common.vars.pkr.hcl) + +LATEST_VERSION_SCRIPTS="scripts/pg_upgrade_scripts-${LATEST_PG_VERSION}.tar.gz" +LATEST_VERSION_BIN="scripts/pg_upgrade_bin-${LATEST_PG_VERSION}.tar.gz" + +if [ ! -f "$LATEST_VERSION_SCRIPTS" ]; then + aws s3 cp "s3://${ARTIFACTS_BUCKET_NAME}/upgrades/postgres/tealbase-postgres-${LATEST_PG_VERSION}/pg_upgrade_scripts.tar.gz" "$LATEST_VERSION_SCRIPTS" +fi + +if [ ! -f "$LATEST_VERSION_BIN" ]; then + aws s3 cp "s3://${ARTIFACTS_BUCKET_NAME}/upgrades/postgres/tealbase-postgres-${LATEST_PG_VERSION}/24.04.tar.gz" "$LATEST_VERSION_BIN" +fi + +rm -rf scripts/pg_upgrade_scripts +cp "$LATEST_VERSION_SCRIPTS" scripts/pg_upgrade_scripts.tar.gz +cp "$LATEST_VERSION_BIN" scripts/pg_upgrade_bin.tar.gz + +docker rm -f pg_upgrade_test || true + +docker run -t --name pg_upgrade_test --env-file .env \ + -v "$(pwd)/scripts:/tmp/upgrade" \ + --entrypoint /tmp/upgrade/entrypoint.sh -d \ + -p 5432:5432 \ + "tealbase/postgres:${INITIAL_PG_VERSION}" + +sleep 3 +while ! docker exec -it pg_upgrade_test bash -c "pg_isready"; do + echo "Waiting for postgres to start..." + sleep 1 +done + +echo "Running migrations" +docker cp ../../migrations/db/migrations "pg_upgrade_test:/docker-entrypoint-initdb.d/" +docker exec -it pg_upgrade_test bash -c '/docker-entrypoint-initdb.d/migrate.sh > /tmp/migrate.log 2>&1; exit $?' +if [ $? -ne 0 ]; then + echo "Running migrations failed. Exiting." + exit 1 +fi + +echo "Running tests" +pg_prove "../../migrations/tests/test.sql" +psql -f "./tests/97-enable-extensions.sql" +psql -f "./tests/98-data-fixtures.sql" +psql -f "./tests/99-fixtures.sql" + +echo "Initiating pg_upgrade" +docker exec -it pg_upgrade_test bash -c '/tmp/upgrade/pg_upgrade_scripts/initiate.sh "$PG_MAJOR_VERSION"; exit $?' +if [ $? -ne 0 ]; then + echo "Initiating pg_upgrade failed. Exiting." + exit 1 +fi + +sleep 3 +echo "Completing pg_upgrade" +docker exec -it pg_upgrade_test bash -c 'rm -f /tmp/pg-upgrade-status; /tmp/upgrade/pg_upgrade_scripts/complete.sh; exit $?' +if [ $? -ne 0 ]; then + echo "Completing pg_upgrade failed. Exiting." 
+echo "Completing pg_upgrade"
+if ! docker exec -it pg_upgrade_test bash -c 'rm -f /tmp/pg-upgrade-status; /tmp/upgrade/pg_upgrade_scripts/complete.sh'; then
+    echo "Completing pg_upgrade failed. Exiting."
+    exit 1
+fi
+
+pg_prove tests/01-schema.sql
+pg_prove tests/02-data.sql
+pg_prove tests/03-settings.sql
+
diff --git a/tests/pg_upgrade/scripts/entrypoint.sh b/tests/pg_upgrade/scripts/entrypoint.sh
new file mode 100755
index 0000000..d9d80ac
--- /dev/null
+++ b/tests/pg_upgrade/scripts/entrypoint.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+set -e
+
+SCRIPT_DIR=$(dirname -- "$0";)
+
+ls -la "$SCRIPT_DIR"
+
+tar -xzf "${SCRIPT_DIR}/pg_upgrade_scripts.tar.gz" -C "${SCRIPT_DIR}"
+
+mkdir -p /tmp/persistent
+cp "$SCRIPT_DIR/pg_upgrade_bin.tar.gz" /tmp/persistent
+
+export PATH="$(pg_config --bindir):$PATH"
+
+# Strip "--version" from the upstream entrypoint's help-flag check so that
+# invoking it as "postgres --version" runs the full initialization (initdb and
+# init scripts) and then exits after printing the version, instead of being
+# treated as a help request that skips setup.
+sed -i "s/|--version//g" /usr/local/bin/docker-entrypoint.sh
+/usr/local/bin/docker-entrypoint.sh postgres --version || true
+
+su postgres -c "$(pg_config --bindir)/pg_ctl start -o '-c config_file=/etc/postgresql/postgresql.conf' -l /tmp/postgres.log"
+
+# Use 0/1 for the flag: in bash arithmetic the strings "false" and "true" are
+# just (unset) variable names that both evaluate to 0, so the loop would never end.
+RECEIVED_EXIT_SIGNAL=0
+trap 'RECEIVED_EXIT_SIGNAL=1' SIGINT SIGTERM SIGUSR1
+while ! ((RECEIVED_EXIT_SIGNAL)); do
+    sleep 5
+done
diff --git a/tests/pg_upgrade/tests/01-schema.sql b/tests/pg_upgrade/tests/01-schema.sql
new file mode 100644
index 0000000..3cf3a83
--- /dev/null
+++ b/tests/pg_upgrade/tests/01-schema.sql
@@ -0,0 +1,26 @@
+CREATE EXTENSION IF NOT EXISTS pgtap;
+
+BEGIN;
+SELECT plan(15);
+
+select has_schema('public');
+select has_schema('auth');
+select has_schema('storage');
+select has_schema('realtime');
+select has_schema('pgsodium');
+select has_schema('vault');
+select has_schema('extensions');
+
+SELECT has_enum('public', 'continents', 'Enum continents should exist');
+
+SELECT has_table('public', 'countries', 'Table countries should exist');
+SELECT has_column('public', 'countries', 'id', 'Column id should exist');
+SELECT has_column('public', 'countries', 'name', 'Column name should exist');
+SELECT has_column('public', 'countries', 'iso2', 'Column iso2 should exist');
+SELECT has_column('public', 'countries', 'iso3', 'Column iso3 should exist');
+SELECT has_column('public', 'countries', 'continent', 'Column continent should exist');
+
+SELECT has_materialized_view('public', 'european_countries', 'Materialized view european_countries should exist');
+
+SELECT * FROM finish();
+ROLLBACK;
diff --git a/tests/pg_upgrade/tests/02-data.sql b/tests/pg_upgrade/tests/02-data.sql
new file mode 100644
index 0000000..d83e346
--- /dev/null
+++ b/tests/pg_upgrade/tests/02-data.sql
@@ -0,0 +1,27 @@
+CREATE EXTENSION IF NOT EXISTS pgtap;
+
+BEGIN;
+SELECT plan(4);
+
+SELECT results_eq(
+    'SELECT count(*)::int FROM public.countries',
+    ARRAY[ 249 ]
+);
+
+SELECT results_eq(
+    'SELECT count(*)::int FROM public.countries where continent = ''Europe''',
+    ARRAY[ 45 ]
+);
+
+SELECT results_eq(
+    'SELECT count(*)::int FROM public.european_countries',
+    ARRAY[ 45 ]
+);
+
+SELECT results_eq(
+    'SELECT count(*) FROM public.countries where continent = ''Europe''',
+    'SELECT count(*) FROM public.european_countries'
+);
+
+SELECT * FROM finish();
+ROLLBACK;
diff --git a/tests/pg_upgrade/tests/03-settings.sql b/tests/pg_upgrade/tests/03-settings.sql
new file mode 100644
index 0000000..32fc71a
--- /dev/null
+++ b/tests/pg_upgrade/tests/03-settings.sql
@@ -0,0 +1,17 @@
+CREATE EXTENSION IF NOT EXISTS pgtap;
+
+BEGIN;
+SELECT plan(2);
+
+SELECT results_eq(
+    'SELECT setting FROM pg_settings where name = ''jit''',
+    ARRAY[ 'off' ]
+);
+
+select results_eq(
+    'SELECT setting FROM pg_settings WHERE name = ''password_encryption''',
+    ARRAY[ 'scram-sha-256' ]
+);
+
+SELECT * FROM finish();
+ROLLBACK;
diff --git 
a/tests/pg_upgrade/tests/97-enable-extensions.sql b/tests/pg_upgrade/tests/97-enable-extensions.sql new file mode 100644 index 0000000..34c730b --- /dev/null +++ b/tests/pg_upgrade/tests/97-enable-extensions.sql @@ -0,0 +1,10 @@ +do $$ +declare + ext record; +begin + for ext in (select * from pg_available_extensions where name not in (select extname from pg_extension) order by name) + loop + execute 'create extension if not exists ' || ext.name || ' cascade'; + end loop; +end; +$$; diff --git a/tests/pg_upgrade/tests/98-data-fixtures.sql b/tests/pg_upgrade/tests/98-data-fixtures.sql new file mode 100644 index 0000000..1a675e2 --- /dev/null +++ b/tests/pg_upgrade/tests/98-data-fixtures.sql @@ -0,0 +1,273 @@ +create type public.continents as enum ( + 'Africa', + 'Antarctica', + 'Asia', + 'Europe', + 'Oceania', + 'North America', + 'South America' +); +create table public.countries ( + id bigint generated by default as identity primary key, + name text, + iso2 text not null, + iso3 text, + local_name text, + continent continents +); +comment on table countries is 'Full list of countries.'; +comment on column countries.name is 'Full country name.'; +comment on column countries.iso2 is 'ISO 3166-1 alpha-2 code.'; +comment on column countries.iso3 is 'ISO 3166-1 alpha-3 code.'; +comment on column countries.local_name is 'Local variation of the name.'; +insert into public.countries (name,iso2,iso3,local_name,continent) values + ('Bonaire, Sint Eustatius and Saba','BQ','BES',null,null), + ('Curaçao','CW','CUW',null,null), + ('Guernsey','GG','GGY',null,null), + ('Isle of Man','IM','IMN',null,null), + ('Jersey','JE','JEY',null,null), + ('Åland Islands','AX','ALA',null,null), + ('Montenegro','ME','MNE',null,null), + ('Saint Barthélemy','BL','BLM',null,null), + ('Saint Martin (French part)','MF','MAF',null,null), + ('Serbia','RS','SRB',null,null), + ('Sint Maarten (Dutch part)','SX','SXM',null,null), + ('South Sudan','SS','SSD',null,null), + ('Timor-Leste','TL','TLS',null,null), + ('American Samoa','as','ASM','Amerika Samoa','Oceania'), + ('Andorra','AD','AND','Andorra','Europe'), + ('Angola','AO','AGO','Angola','Africa'), + ('Anguilla','AI','AIA','Anguilla','North America'), + ('Antarctica','AQ','ATA','','Antarctica'), + ('Antigua and Barbuda','AG','ATG','Antigua and Barbuda','North America'), + ('Argentina','AR','ARG','Argentina','South America'), + ('Armenia','AM','ARM','Hajastan','Asia'), + ('Aruba','AW','ABW','Aruba','North America'), + ('Australia','AU','AUS','Australia','Oceania'), + ('Austria','AT','AUT','Österreich','Europe'), + ('Azerbaijan','AZ','AZE','Azerbaijan','Asia'), + ('Bahamas','BS','BHS','The Bahamas','North America'), + ('Bahrain','BH','BHR','Al-Bahrayn','Asia'), + ('Bangladesh','BD','BGD','Bangladesh','Asia'), + ('Barbados','BB','BRB','Barbados','North America'), + ('Belarus','BY','BLR','Belarus','Europe'), + ('Belgium','BE','BEL','Belgium/Belgique','Europe'), + ('Belize','BZ','BLZ','Belize','North America'), + ('Benin','BJ','BEN','Benin','Africa'), + ('Bermuda','BM','BMU','Bermuda','North America'), + ('Bhutan','BT','BTN','Druk-Yul','Asia'), + ('Bolivia','BO','BOL','Bolivia','South America'), + ('Bosnia and Herzegovina','BA','BIH','Bosna i Hercegovina','Europe'), + ('Botswana','BW','BWA','Botswana','Africa'), + ('Bouvet Island','BV','BVT','Bouvet Island','Antarctica'), + ('Brazil','BR','BRA','Brasil','South America'), + ('British Indian Ocean Territory','IO','IOT','British Indian Ocean Territory','Africa'), + ('Brunei Darussalam','BN','BRN','Brunei Darussalam','Asia'), + 
('Bulgaria','BG','BGR','Balgarija','Europe'), + ('Burkina Faso','BF','BFA','Burkina Faso','Africa'), + ('Burundi','BI','BDI','Burundi/Uburundi','Africa'), + ('Cambodia','KH','KHM','Cambodia','Asia'), + ('Cameroon','CM','CMR','Cameroun/Cameroon','Africa'), + ('Canada','CA','CAN','Canada','North America'), + ('Cape Verde','CV','CPV','Cabo Verde','Africa'), + ('Cayman Islands','KY','CYM','Cayman Islands','North America'), + ('Central African Republic','CF','CAF','Centrafrique','Africa'), + ('Chad','TD','TCD','Tchad/Tshad','Africa'), + ('Chile','CL','CHL','Chile','South America'), + ('China','CN','CHN','Zhongquo','Asia'), + ('Christmas Island','CX','CXR','Christmas Island','Oceania'), + ('Cocos (Keeling) Islands','CC','CCK','Cocos (Keeling) Islands','Oceania'), + ('Colombia','CO','COL','Colombia','South America'), + ('Comoros','KM','COM','Komori/Comores','Africa'), + ('Congo','CG','COG','Congo','Africa'), + ('Congo, the Democratic Republic of the','CD','COD','Republique Democratique du Congo','Africa'), + ('Cook Islands','CK','COK','The Cook Islands','Oceania'), + ('Costa Rica','CR','CRI','Costa Rica','North America'), + ('Cote DIvoire','CI','CIV','Côte dIvoire','Africa'), + ('Croatia','HR','HRV','Hrvatska','Europe'), + ('Cuba','CU','CUB','Cuba','North America'), + ('Cyprus','CY','CYP','Cyprus','Asia'), + ('Czech Republic','CZ','CZE','Czech','Europe'), + ('Denmark','DK','DNK','Danmark','Europe'), + ('Djibouti','DJ','DJI','Djibouti/Jibuti','Africa'), + ('Dominica','DM','DMA','Dominica','North America'), + ('Dominican Republic','DO','DOM','Republica Dominicana','North America'), + ('Ecuador','EC','ECU','Ecuador','South America'), + ('Egypt','EG','EGY','Misr','Africa'), + ('El Salvador','SV','SLV','El Salvador','North America'), + ('Equatorial Guinea','GQ','GNQ','Guinea Ecuatorial','Africa'), + ('Eritrea','ER','ERI','Ertra','Africa'), + ('Estonia','EE','EST','Eesti','Europe'), + ('Ethiopia','ET','ETH','Yeityopiya','Africa'), + ('Falkland Islands (Malvinas)','FK','FLK','Falkland Islands','South America'), + ('Faroe Islands','FO','FRO','Faroe Islands','Europe'), + ('Fiji','FJ','FJI','Fiji Islands','Oceania'), + ('Finland','FI','FIN','Suomi','Europe'), + ('France','FR','FRA','France','Europe'), + ('French Guiana','GF','GUF','Guyane francaise','South America'), + ('French Polynesia','PF','PYF','Polynésie française','Oceania'), + ('French Southern Territories','TF','ATF','Terres australes françaises','Antarctica'), + ('Gabon','GA','GAB','Le Gabon','Africa'), + ('Gambia','GM','GMB','The Gambia','Africa'), + ('Georgia','GE','GEO','Sakartvelo','Asia'), + ('Germany','DE','DEU','Deutschland','Europe'), + ('Ghana','GH','GHA','Ghana','Africa'), + ('Gibraltar','GI','GIB','Gibraltar','Europe'), + ('Greece','GR','GRC','Greece','Europe'), + ('Greenland','GL','GRL','Kalaallit Nunaat','North America'), + ('Grenada','GD','GRD','Grenada','North America'), + ('Guadeloupe','GP','GLP','Guadeloupe','North America'), + ('Guam','GU','GUM','Guam','Oceania'), + ('Guatemala','GT','GTM','Guatemala','North America'), + ('Guinea','GN','GIN','Guinea','Africa'), + ('Guinea-Bissau','GW','GNB','Guinea-Bissau','Africa'), + ('Guyana','GY','GUY','Guyana','South America'), + ('Haiti','HT','HTI','Haiti/Dayti','North America'), + ('Heard Island and Mcdonald Islands','HM','HMD','Heard and McDonald Islands','Antarctica'), + ('Holy See (Vatican City State)','VA','VAT','Santa Sede/Città del Vaticano','Europe'), + ('Honduras','HN','HND','Honduras','North America'), + ('Hong Kong','HK','HKG','Xianggang/Hong Kong','Asia'), + 
('Hungary','HU','HUN','Hungary','Europe'), + ('Iceland','IS','ISL','Iceland','Europe'), + ('India','IN','IND','Bharat/India','Asia'), + ('Indonesia','ID','IDN','Indonesia','Asia'), + ('Iran, Islamic Republic of','IR','IRN','Iran','Asia'), + ('Iraq','IQ','IRQ','Al-Irāq','Asia'), + ('Ireland','IE','IRL','Ireland','Europe'), + ('Israel','IL','ISR','Yisrael','Asia'), + ('Italy','IT','ITA','Italia','Europe'), + ('Jamaica','JM','JAM','Jamaica','North America'), + ('Japan','JP','JPN','Nihon/Nippon','Asia'), + ('Jordan','JO','JOR','Al-Urdunn','Asia'), + ('Kazakhstan','KZ','KAZ','Qazaqstan','Asia'), + ('Kenya','KE','KEN','Kenya','Africa'), + ('Kiribati','KI','KIR','Kiribati','Oceania'), + ('Korea, Democratic People''s Republic of','KP','PRK','Choson Minjujuui Inmin Konghwaguk (Bukhan)','Asia'), + ('Korea, Republic of','KR','KOR','Taehan-minguk (Namhan)','Asia'), + ('Kuwait','KW','KWT','Al-Kuwayt','Asia'), + ('Kyrgyzstan','KG','KGZ','Kyrgyzstan','Asia'), + ('Lao People''s Democratic Republic','LA','LAO','Lao','Asia'), + ('Latvia','LV','LVA','Latvija','Europe'), + ('Lebanon','LB','LBN','Lubnan','Asia'), + ('Lesotho','LS','LSO','Lesotho','Africa'), + ('Liberia','LR','LBR','Liberia','Africa'), + ('Libya','LY','LBY','Libiya','Africa'), + ('Liechtenstein','LI','LIE','Liechtenstein','Europe'), + ('Lithuania','LT','LTU','Lietuva','Europe'), + ('Luxembourg','LU','LUX','Luxembourg','Europe'), + ('Macao','MO','MAC','Macau/Aomen','Asia'), + ('Macedonia, the Former Yugoslav Republic of','MK','MKD','Makedonija','Europe'), + ('Madagascar','MG','MDG','Madagasikara/Madagascar','Africa'), + ('Malawi','MW','MWI','Malawi','Africa'), + ('Malaysia','MY','MYS','Malaysia','Asia'), + ('Maldives','MV','MDV','Dhivehi Raajje/Maldives','Asia'), + ('Mali','ML','MLI','Mali','Africa'), + ('Malta','MT','MLT','Malta','Europe'), + ('Marshall Islands','MH','MHL','Marshall Islands/Majol','Oceania'), + ('Martinique','MQ','MTQ','Martinique','North America'), + ('Mauritania','MR','MRT','Muritaniya/Mauritanie','Africa'), + ('Mauritius','MU','MUS','Mauritius','Africa'), + ('Mayotte','YT','MYT','Mayotte','Africa'), + ('Mexico','MX','MEX','Mexico','North America'), + ('Micronesia, Federated States of','FM','FSM','Micronesia','Oceania'), + ('Moldova, Republic of','MD','MDA','Moldova','Europe'), + ('Monaco','MC','MCO','Monaco','Europe'), + ('Mongolia','MN','MNG','Mongol Uls','Asia'), + ('Albania','AL','ALB','Republika e Shqipërisë','Europe'), + ('Montserrat','MS','MSR','Montserrat','North America'), + ('Morocco','MA','MAR','Al-Maghrib','Africa'), + ('Mozambique','MZ','MOZ','Mozambique','Africa'), + ('Myanmar','MM','MMR','Myanma Pye','Asia'), + ('Namibia','NA','NAM','Namibia','Africa'), + ('Nauru','NR','NRU','Naoero/Nauru','Oceania'), + ('Nepal','NP','NPL','Nepal','Asia'), + ('Netherlands','NL','NLD','Nederland','Europe'), + ('New Caledonia','NC','NCL','Nouvelle-Calédonie','Oceania'), + ('New Zealand','NZ','NZL','New Zealand/Aotearoa','Oceania'), + ('Nicaragua','NI','NIC','Nicaragua','North America'), + ('Niger','NE','NER','Niger','Africa'), + ('Nigeria','NG','NGA','Nigeria','Africa'), + ('Niue','NU','NIU','Niue','Oceania'), + ('Norfolk Island','NF','NFK','Norfolk Island','Oceania'), + ('Northern Mariana Islands','MP','MNP','Northern Mariana Islands','Oceania'), + ('Norway','NO','NOR','Norge','Europe'), + ('Oman','OM','OMN','Oman','Asia'), + ('Pakistan','PK','PAK','Pakistan','Asia'), + ('Palau','PW','PLW','Belau/Palau','Oceania'), + ('Palestine, State of','PS','PSE','Filastin','Asia'), + ('Panama','PA','PAN','República de Panamá','North 
America'), + ('Papua New Guinea','PG','PNG','Papua New Guinea/Papua Niugini','Oceania'), + ('Paraguay','PY','PRY','Paraguay','South America'), + ('Peru','PE','PER','Perú/Piruw','South America'), + ('Philippines','PH','PHL','Pilipinas','Asia'), + ('Pitcairn','PN','PCN','Pitcairn','Oceania'), + ('Poland','PL','POL','Polska','Europe'), + ('Portugal','PT','PRT','Portugal','Europe'), + ('Puerto Rico','PR','PRI','Puerto Rico','North America'), + ('Qatar','QA','QAT','Qatar','Asia'), + ('Reunion','RE','REU','Reunion','Africa'), + ('Romania','RO','ROM','Romania','Europe'), + ('Russian Federation','RU','RUS','Rossija','Europe'), + ('Rwanda','RW','RWA','Rwanda/Urwanda','Africa'), + ('Saint Helena, Ascension and Tristan da Cunha','SH','SHN','Saint Helena','Africa'), + ('Saint Kitts and Nevis','KN','KNA','Saint Kitts and Nevis','North America'), + ('Saint Lucia','LC','LCA','Saint Lucia','North America'), + ('Saint Pierre and Miquelon','PM','SPM','Saint-Pierre-et-Miquelon','North America'), + ('Saint Vincent and the Grenadines','VC','VCT','Saint Vincent and the Grenadines','North America'), + ('Samoa','WS','WSM','Samoa','Oceania'), + ('San Marino','SM','SMR','San Marino','Europe'), + ('Sao Tome and Principe','ST','STP','São Tomé e Príncipe','Africa'), + ('Saudi Arabia','SA','SAU','Al-Mamlaka al-Arabiya as-Saudiya','Asia'), + ('Senegal','SN','SEN','Sénégal/Sounougal','Africa'), + ('Seychelles','SC','SYC','Sesel/Seychelles','Africa'), + ('Sierra Leone','SL','SLE','Sierra Leone','Africa'), + ('Singapore','SG','SGP','Singapore/Singapura/Xinjiapo/Singapur','Asia'), + ('Slovakia','SK','SVK','Slovensko','Europe'), + ('Slovenia','SI','SVN','Slovenija','Europe'), + ('Solomon Islands','SB','SLB','Solomon Islands','Oceania'), + ('Somalia','SO','SOM','Soomaaliya','Africa'), + ('South Africa','ZA','ZAF','South Africa','Africa'), + ('South Georgia and the South Sandwich Islands','GS','SGS','South Georgia and the South Sandwich Islands','Antarctica'), + ('Spain','ES','ESP','España','Europe'), + ('Sri Lanka','LK','LKA','Sri Lanka/Ilankai','Asia'), + ('Sudan','SD','SDN','As-Sudan','Africa'), + ('Suriname','SR','SUR','Suriname','South America'), + ('Svalbard and Jan Mayen','SJ','SJM','Svalbard og Jan Mayen','Europe'), + ('Swaziland','SZ','SWZ','kaNgwane','Africa'), + ('Sweden','SE','SWE','Sverige','Europe'), + ('Switzerland','CH','CHE','Schweiz/Suisse/Svizzera/Svizra','Europe'), + ('Syrian Arab Republic','SY','SYR','Suriya','Asia'), + ('Taiwan (Province of China)','TW','TWN','Tai-wan','Asia'), + ('Tajikistan','TJ','TJK','Tajikistan','Asia'), + ('Tanzania, United Republic of','TZ','TZA','Tanzania','Africa'), + ('Thailand','TH','THA','Prathet Thai','Asia'), + ('Togo','TG','TGO','Togo','Africa'), + ('Tokelau','TK','TKL','Tokelau','Oceania'), + ('Tonga','TO','TON','Tonga','Oceania'), + ('Trinidad and Tobago','TT','TTO','Trinidad and Tobago','North America'), + ('Tunisia','TN','TUN','Tunis/Tunisie','Africa'), + ('Turkey','TR','TUR','Türkiye','Asia'), + ('Turkmenistan','TM','TKM','Türkmenistan','Asia'), + ('Turks and Caicos Islands','TC','TCA','The Turks and Caicos Islands','North America'), + ('Tuvalu','TV','TUV','Tuvalu','Oceania'), + ('Uganda','UG','UGA','Uganda','Africa'), + ('Ukraine','UA','UKR','Ukrajina','Europe'), + ('United Arab Emirates','AE','ARE','Al-Amirat al-Arabiya al-Muttahida','Asia'), + ('United Kingdom','GB','GBR','United Kingdom','Europe'), + ('United States','US','USA','United States','North America'), + ('United States Minor Outlying Islands','UM','UMI','United States Minor Outlying Islands','Oceania'), + 
('Uruguay','UY','URY','Uruguay','South America'),
+ ('Uzbekistan','UZ','UZB','Uzbekiston','Asia'),
+ ('Vanuatu','VU','VUT','Vanuatu','Oceania'),
+ ('Venezuela','VE','VEN','Venezuela','South America'),
+ ('Viet Nam','VN','VNM','Viet Nam','Asia'),
+ ('Virgin Islands (British)','VG','VGB','British Virgin Islands','North America'),
+ ('Virgin Islands (U.S.)','VI','VIR','Virgin Islands of the United States','North America'),
+ ('Wallis and Futuna','WF','WLF','Wallis-et-Futuna','Oceania'),
+ ('Western Sahara','EH','ESH','As-Sahrawiya','Africa'),
+ ('Yemen','YE','YEM','Al-Yaman','Asia'),
+ ('Zambia','ZM','ZMB','Zambia','Africa'),
+ ('Zimbabwe','ZW','ZWE','Zimbabwe','Africa'),
+ ('Afghanistan','AF','AFG','Afganistan/Afqanestan','Asia'),
+ ('Algeria','DZ','DZA','Al-Jazair/Algerie','Africa');
+ 
\ No newline at end of file
diff --git a/tests/pg_upgrade/tests/99-fixtures.sql b/tests/pg_upgrade/tests/99-fixtures.sql
new file mode 100644
index 0000000..2b93d45
--- /dev/null
+++ b/tests/pg_upgrade/tests/99-fixtures.sql
@@ -0,0 +1,12 @@
+-- enable JIT and md5 passwords so the upgrade must reset them (see 03-settings.sql)
+alter system set jit = on;
+alter system set password_encryption = 'md5';
+select pg_reload_conf();
+
+-- create materialized view
+create materialized view public.european_countries as
+    select * from public.countries where continent = 'Europe'
+with no data;
+refresh materialized view public.european_countries;
+
+select count(*) from public.european_countries;
diff --git a/user-data-cloudimg b/user-data-cloudimg
new file mode 100644
index 0000000..9a74d23
--- /dev/null
+++ b/user-data-cloudimg
@@ -0,0 +1,18 @@
+#cloud-config
+users:
+  - name: root
+    lock_passwd: false
+    ssh_redirect_user: true
+    hashed_passwd: "$6$canonical.$0zWaW71A9ke9ASsaOcFTdQ2tx1gSmLxMPrsH0rF0Yb.2AEKNPV1lrF94n6YuPJmnUy2K2/JSDtxuiBDey6Lpa/"
+  - name: ubuntu
+    lock_passwd: false
+    hashed_passwd: "$6$canonical.$0zWaW71A9ke9ASsaOcFTdQ2tx1gSmLxMPrsH0rF0Yb.2AEKNPV1lrF94n6YuPJmnUy2K2/JSDtxuiBDey6Lpa/"
+    ssh_redirect_user: false
+    sudo: "ALL=(ALL) NOPASSWD:ALL"
+    shell: /usr/bin/bash
+    groups: [adm, audio, cdrom, dialout, dip, floppy, lxd, netdev, plugdev, sudo, video]
+ssh_pwauth: True
+disable_root: false
+preserve_hostname: true
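+# NOTE: password SSH auth, an unlocked root account, and passwordless sudo
+# above are intended only for disposable local test images.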