From 813c39cc13b20d38c789e980595a44025dd5cab4 Mon Sep 17 00:00:00 2001 From: Ekjot Singh Date: Tue, 15 Jul 2025 16:58:17 +0530 Subject: [PATCH 1/9] v13.3.0 --- .DS_Store | Bin 6148 -> 0 bytes .github/workflows/provision.yml | 59 -- .gitignore | 3 + README.md | 87 +- amazon-arm.json | 60 ++ amazon.json | 64 +- ansible/files/docker_mnt/init.sh | 3 + .../fail2ban_config/filter-pgbouncer.conf.j2 | 2 + .../fail2ban_config/filter-postgresql.conf.j2 | 3 + .../fail2ban_config/jail-pgbouncer.conf.j2 | 7 + .../fail2ban_config/jail-postgresql.conf.j2 | 7 + ansible/files/logrotate-postgres | 9 + ansible/files/node_exporter.service.j2 | 16 + .../files/pgbouncer_config/pgbouncer.ini.j2 | 360 ++++++++ .../pgbouncer_config/pgbouncer.service.j2 | 40 + .../pgbouncer_auth_schema.sql | 18 + .../tmpfiles.d-pgbouncer.conf.j2 | 2 + ansible/files/postgres_exporter.service.j2 | 16 + .../files/postgresql_config/pg_hba.conf.j2 | 99 +++ .../files/postgresql_config/pg_ident.conf.j2 | 44 + .../postgresql_config/postgresql.conf.j2 | 794 ++++++++++++++++++ .../postgresql_config/postgresql.service.j2 | 15 + ansible/files/postgrest.service.j2 | 15 + ansible/files/queries.yml.j2 | 194 +++++ ansible/files/stat_extension.sql | 2 + ansible/install_roles.yml | 5 - ansible/playbook-docker.yml | 52 ++ ansible/playbook.yml | 104 ++- ansible/requirements.yml | 0 ansible/tasks/docker/cleanup.yml | 67 ++ ansible/tasks/docker/setup.yml | 69 ++ ansible/tasks/internal/node-exporter.yml | 46 + ansible/tasks/internal/optimizations.yml | 15 + ansible/tasks/internal/postgres-exporter.yml | 46 + ansible/tasks/internal/supautils.yml | 45 + .../tasks/postgres-extensions/01-postgis.yml | 77 ++ .../postgres-extensions/02-pgrouting.yml | 36 + .../tasks/postgres-extensions/03-pgtap.yml | 19 + .../tasks/postgres-extensions/04-pg_cron.yml | 31 + .../tasks/postgres-extensions/05-pgaudit.yml | 37 + .../tasks/postgres-extensions/06-pgjwt.yml | 12 + .../postgres-extensions/07-pgsql-http.yml | 37 + 
.../postgres-extensions/08-plpgsql_check.yml | 32 + .../postgres-extensions/09-pg-safeupdate.yml | 24 + .../postgres-extensions/10-timescaledb.yml | 31 + .../tasks/postgres-extensions/11-wal2json.yml | 24 + .../tasks/postgres-extensions/12-pljava.yml | 62 ++ ansible/tasks/postgres-extensions/13-plv8.yml | 48 ++ ansible/tasks/setup-extensions.yml | 285 +------ ansible/tasks/setup-fail2ban.yml | 41 + ansible/tasks/setup-misc.yml | 24 - ansible/tasks/setup-pgbouncer.yml | 111 +++ ansible/tasks/setup-postgres.yml | 165 ++++ ansible/tasks/setup-postgrest.yml | 39 + ansible/tasks/setup-system.yml | 65 +- ansible/tasks/setup-tealbase-internal.yml | 40 + ansible/tasks/setup-wal-g.yml | 45 + ansible/vars.yml | 83 +- digitalOcean.json | 37 +- docker/Dockerfile | 180 ---- docker/docker-compose.yml | 4 +- docker/mnt/init-permissions.sh | 16 - rfcs/0001-connection-pooling.md | 71 ++ scripts/01-postgres_check.sh | 72 ++ scripts/01-test | 9 - scripts/11-lemp.sh | 14 + scripts/12-ufw-nginx.sh | 10 + scripts/13-force-ssh-logout.sh | 10 + scripts/90-cleanup.sh | 30 +- scripts/91-log_cleanup.sh | 6 + scripts/99-img_check.sh | 58 +- 71 files changed, 3542 insertions(+), 711 deletions(-) delete mode 100644 .DS_Store delete mode 100644 .github/workflows/provision.yml create mode 100644 amazon-arm.json create mode 100644 ansible/files/docker_mnt/init.sh create mode 100644 ansible/files/fail2ban_config/filter-pgbouncer.conf.j2 create mode 100644 ansible/files/fail2ban_config/filter-postgresql.conf.j2 create mode 100644 ansible/files/fail2ban_config/jail-pgbouncer.conf.j2 create mode 100644 ansible/files/fail2ban_config/jail-postgresql.conf.j2 create mode 100644 ansible/files/logrotate-postgres create mode 100644 ansible/files/node_exporter.service.j2 create mode 100644 ansible/files/pgbouncer_config/pgbouncer.ini.j2 create mode 100644 ansible/files/pgbouncer_config/pgbouncer.service.j2 create mode 100644 ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql create mode 100644 
ansible/files/pgbouncer_config/tmpfiles.d-pgbouncer.conf.j2 create mode 100644 ansible/files/postgres_exporter.service.j2 create mode 100755 ansible/files/postgresql_config/pg_hba.conf.j2 create mode 100755 ansible/files/postgresql_config/pg_ident.conf.j2 create mode 100644 ansible/files/postgresql_config/postgresql.conf.j2 create mode 100644 ansible/files/postgresql_config/postgresql.service.j2 create mode 100644 ansible/files/postgrest.service.j2 create mode 100644 ansible/files/queries.yml.j2 create mode 100644 ansible/files/stat_extension.sql delete mode 100644 ansible/install_roles.yml create mode 100644 ansible/playbook-docker.yml delete mode 100644 ansible/requirements.yml create mode 100644 ansible/tasks/docker/cleanup.yml create mode 100644 ansible/tasks/docker/setup.yml create mode 100644 ansible/tasks/internal/node-exporter.yml create mode 100644 ansible/tasks/internal/optimizations.yml create mode 100644 ansible/tasks/internal/postgres-exporter.yml create mode 100644 ansible/tasks/internal/supautils.yml create mode 100644 ansible/tasks/postgres-extensions/01-postgis.yml create mode 100644 ansible/tasks/postgres-extensions/02-pgrouting.yml create mode 100644 ansible/tasks/postgres-extensions/03-pgtap.yml create mode 100644 ansible/tasks/postgres-extensions/04-pg_cron.yml create mode 100644 ansible/tasks/postgres-extensions/05-pgaudit.yml create mode 100644 ansible/tasks/postgres-extensions/06-pgjwt.yml create mode 100644 ansible/tasks/postgres-extensions/07-pgsql-http.yml create mode 100644 ansible/tasks/postgres-extensions/08-plpgsql_check.yml create mode 100644 ansible/tasks/postgres-extensions/09-pg-safeupdate.yml create mode 100644 ansible/tasks/postgres-extensions/10-timescaledb.yml create mode 100644 ansible/tasks/postgres-extensions/11-wal2json.yml create mode 100644 ansible/tasks/postgres-extensions/12-pljava.yml create mode 100644 ansible/tasks/postgres-extensions/13-plv8.yml create mode 100644 ansible/tasks/setup-fail2ban.yml delete mode 100644 
ansible/tasks/setup-misc.yml create mode 100644 ansible/tasks/setup-pgbouncer.yml create mode 100644 ansible/tasks/setup-postgres.yml create mode 100644 ansible/tasks/setup-postgrest.yml create mode 100644 ansible/tasks/setup-tealbase-internal.yml create mode 100644 ansible/tasks/setup-wal-g.yml delete mode 100644 docker/Dockerfile delete mode 100644 docker/mnt/init-permissions.sh create mode 100644 rfcs/0001-connection-pooling.md create mode 100644 scripts/01-postgres_check.sh delete mode 100644 scripts/01-test create mode 100644 scripts/11-lemp.sh create mode 100644 scripts/12-ufw-nginx.sh create mode 100644 scripts/13-force-ssh-logout.sh diff --git a/.DS_Store b/.DS_Store deleted file mode 100644 index 834b874743279b8eeeb26c1610d337d8966b2cab..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHKOKQVF43!!$422N7>T(XyO>Qs^&IxjXk{H@W%mhmI-gTX;p+G3`cNIX- z7OQU!V+{pDfl%PAfP5bkTrfNKhWhEiq9XuMe&f|}t+NC$NdTA~dqYHEY*e68*+&dE zI^xOevSV*(bYeXj_wke2CluDx5l>o9oDE|Q1ww&q1tzwgN&jEMN9O-)QtU#3P~cxF zz*Vzq7I-D^t%H}7UYp=g@J~anmSfRY4AfSPjkMzXi@GG&$Y;miQ0a&(9T-0XqDxpP I@EZzz09G_7m;e9( diff --git a/.github/workflows/provision.yml b/.github/workflows/provision.yml deleted file mode 100644 index 9e67c6a..0000000 --- a/.github/workflows/provision.yml +++ /dev/null @@ -1,59 +0,0 @@ -name: CD - -on: - push: - branches: [ master ] - -jobs: - provision: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - - name: Install essentials - run: | - sudo apt-get update -y - sudo apt-get install software-properties-common -y - sudo apt-get install unzip -y - sudo apt-get install git -y - - - name: Install Packer - run: | - export VER="1.5.5" - wget https://releases.hashicorp.com/packer/${VER}/packer_${VER}_linux_amd64.zip - sudo unzip packer_${VER}_linux_amd64.zip - sudo mv packer /usr/local/bin - - - name: Install Ansible - run: | - sudo apt-add-repository --yes ppa:ansible/ansible - sudo apt-get install ansible -y - ansible --version 
- - - name: Install Ansible role - run: | - sudo ansible-galaxy install anxs.postgresql -r ansible/install_roles.yml --force -vvv - ansible-galaxy list anxs.postgresql - - - name: Build Digital Ocean Snapshot - run: | - export REGION=sgp1 - export SNAPSHOT_REGIONS="nyc1,nyc3,ams3,sfo2,sfo3,sgp1,lon1,fra1,tor1" - sudo packer build \ - -var "do_token=${{ secrets.DO_TOKEN }}" \ - -var "region=$REGION" \ - -var "snapshot_regions=$SNAPSHOT_REGIONS" \ - digitalOcean.json - - # - name: Build Digital Ocean Snapshot for Marketplace - # run: | - # export REGION=sgp1 - # export IMAGE_NAME="supabase-supabasepostgres-18-04" - # export ARGS="--tags,update" - # sudo packer build \ - # -var "do_token=${{ secrets.DO_TOKEN }}" \ - # -var "region=$REGION" \ - # -var "image_name=$IMAGE_NAME" \ - # -var "ansible_arguments=$ARGS" \ - # digitalOcean.json diff --git a/.gitignore b/.gitignore index e69de29..8c1f8fa 100644 --- a/.gitignore +++ b/.gitignore @@ -0,0 +1,3 @@ +.DS_Store +.python-version +venv/ diff --git a/README.md b/README.md index d89caee..68ade93 100644 --- a/README.md +++ b/README.md @@ -2,21 +2,44 @@ Unmodified Postgres with some useful plugins. Our goal with this repo is not to modify Postgres, but to provide some of the most common extensions with a one-click install. -## Features - -- ✅ Postgres [12](https://www.postgresql.org/about/news/1976/). Includes [generated columns](https://www.postgresql.org/docs/12/ddl-generated-columns.html) and [JSON path](https://www.postgresql.org/docs/12/functions-json.html#FUNCTIONS-SQLJSON-PATH) support. -- ✅ Ubuntu 18.04 (Bionic). -- ✅ [pg-contrib-12](https://www.postgresql.org/docs/12/contrib.html). Because everyone should enable `pg_stat_statements`. +## Primary Features +- ✅ Postgres [13](https://www.postgresql.org/about/news/postgresql-13-released-2077/). +- ✅ Ubuntu 20.04 (Focal Fossa). 
- ✅ [wal_level](https://www.postgresql.org/docs/current/runtime-config-wal.html) = logical and [max_replication_slots](https://www.postgresql.org/docs/current/runtime-config-replication.html) = 5. Ready for replication. -- ✅ [PostGIS](https://postgis.net/). Postgres' most popular extension - support for geographic objects. -- ✅ [pgTAP](https://pgtap.org/). Unit Testing for Postgres. -- ✅ [pgAudit](https://www.pgaudit.org/). Generate highly compliant audit logs. -- ✅ [pgjwt](https://github.com/michelp/pgjwt). Generate JSON Web Tokens (JWT) in Postgres. -- ✅ [pgsql-http](https://github.com/pramsey/pgsql-http). HTTP client for Postgres. -- ✅ [plpgsql_check](https://github.com/okbob/plpgsql_check). Linter tool for PL/pgSQL. -- ✅ [plv8](https://github.com/plv8/plv8). Write in Javascript functions in Postgres. -- ✅ [plpython3u](https://www.postgresql.org/docs/current/plpython-python23.html). Python3 enabled by default. Write in Python functions in Postgres. -- ✅ [PL/Java](https://github.com/tada/pljaval). Write in Java functions in Postgres. +- ✅ [Large Systems Extensions](https://github.com/aws/aws-graviton-getting-started#building-for-graviton-and-graviton2). Enabled for ARM images. + +## Extensions +| Extension | Description | +| ------------- | ------------- | +| [Postgres contrib modules](https://www.postgresql.org/docs/current/contrib.html) | Because everyone should enable `pg_stat_statements`. | +| [PostGIS](https://postgis.net/) | Postgres' most popular extension - support for geographic objects. | +| [pgRouting](https://pgrouting.org/) | Extension of PostGIS - provides geospatial routing functionalities. | +| [pgTAP](https://pgtap.org/) | Unit Testing for Postgres. | +| [pg_cron](https://github.com/citusdata/pg_cron) | Run CRON jobs inside Postgres. | +| [pgAudit](https://www.pgaudit.org/) | Generate highly compliant audit logs. | +| [pgjwt](https://github.com/michelp/pgjwt) | Generate JSON Web Tokens (JWT) in Postgres. 
| +| [pgsql-http](https://github.com/pramsey/pgsql-http) | HTTP client for Postgres. | +| [plpgsql_check](https://github.com/okbob/plpgsql_check) | Linter tool for PL/pgSQL. | +| [pg-safeupdate](https://github.com/eradman/pg-safeupdate) | Protect your data from accidental updates or deletes. | +| [wal2json](https://github.com/eulerto/wal2json) | JSON output plugin for logical replication decoding. | +| [PL/Java](https://github.com/tada/pljava) | Write in Java functions in Postgres. | +| [plv8](https://github.com/plv8/plv8) | Write in Javascript functions in Postgres. | + +Can't find your favorite extension? Suggest for it to be added into future versions [here](https://github.com/tealbase/tealbase/discussions/679)! + +## Enhanced Security +Aside from having [ufw](https://help.ubuntu.com/community/UFW),[fail2ban](https://www.fail2ban.org/wiki/index.php/Main_Page), and [unattended-upgrades](https://wiki.debian.org/UnattendedUpgrades) installed, we also have the following enhancements in place: +| Enhancement | Description | +| ------------- | ------------- | +| [fail2ban filter](https://github.com/tealbase/postgres/blob/develop/ansible/files/fail2ban_config/filter-postgresql.conf.j2) for PostgreSQL access | Monitors for brute force attempts over at port `5432`. | +| [fail2ban filter](https://github.com/tealbase/postgres/blob/develop/ansible/files/fail2ban_config/filter-pgbouncer.conf.j2) for PgBouncer access | Monitors for brute force attempts over at port `6543`. | + +## Additional Goodies +| Goodie | Description | +| ------------- | ------------- | +| [PgBouncer](https://postgis.net/) | Set up Connection Pooling. | +| [PostgREST](https://postgrest.org/en/stable/) | Instantly transform your database into an RESTful API. | +| [WAL-G](https://github.com/wal-g/wal-g#wal-g) | Tool for physical database backup and recovery. 
| ## Install @@ -26,13 +49,39 @@ See all installation instructions in the [repo wiki](https://github.com/tealbase [![Digital Ocean](https://github.com/tealbase/postgres/blob/master/docs/img/digital-ocean.png)](https://github.com/tealbase/postgres/wiki/Digital-Ocean) [![AWS](https://github.com/tealbase/postgres/blob/master/docs/img/aws.png)](https://github.com/tealbase/postgres/wiki/AWS-EC2) +### Marketplace Images +| | Postgres & Extensions | PgBouncer | PostgREST | WAL-G | +|---|:---:|:---:|:---:|:---:| +| tealbase Postgres | ✔️ | ❌ | ❌ | ✔️ | +| tealbase Postgres: PgBouncer Bundle | ✔️ | ✔️ | ❌ | ✔️ | +| tealbase Postgres: PostgREST Bundle | ✔️ | ❌ | ✔️ | ✔️ | +| tealbase Postgres: Complete Bundle | ✔️ | ✔️ | ✔️ | ✔️ | + +#### Availability +| | AWS ARM | AWS x86 | Digital Ocean x86 | +|---|:---:|:---:|:---:| +| tealbase Postgres | Coming Soon | Coming Soon | Coming Soon | +| tealbase Postgres: PgBouncer Bundle | Coming Soon | Coming Soon | Coming Soon | +| tealbase Postgres: PostgREST Bundle | Coming Soon | Coming Soon | Coming Soon | +| tealbase Postgres: Complete Bundle | Coming Soon | Coming Soon | Coming Soon | + +### Quick Build + +```bash +$ time packer build -timestamp-ui \ + --var "aws_access_key=" \ + --var "aws_secret_key=" \ + --var "ami_regions=" \ + amazon-arm.json +``` + ## Motivation -After talking to a lot of techies, we've found that most believe Postgres is the best (operational) database but they *still* choose other databases. This is overwhelmingly because "the other one was quicker/easier". Our goal is to make it fast and simple to get started with Postgres, so that we never hear that excuse again. +After talking to a lot of techies, we've found that most believe Postgres is the best (operational) database but they _still_ choose other databases. This is overwhelmingly because "the other one was quicker/easier". Our goal is to make it fast and simple to get started with Postgres, so that we never hear that excuse again. 
Our secondary goal is to show off a few of Postgres' most exciting features. This is to convince new developers to choose it over other database (a decision we hope they'll appreciate once they start scaling). -Finally, this is the same build we offer at [tealbase](https://tealbase.io), and everything we do is opensource. This repo makes it easy to *install* Postgres, tealbase makes it easy to *use* Postgres. +Finally, this is the same build we offer at [tealbase](https://tealbase.io), and everything we do is opensource. This repo makes it easy to _install_ Postgres, tealbase makes it easy to _use_ Postgres. ## Roadmap @@ -43,3 +92,9 @@ Finally, this is the same build we offer at [tealbase](https://tealbase.io), and ## License [The PostgreSQL License](https://opensource.org/licenses/postgresql). We realize that licensing is tricky since we are bundling all the various plugins. If we have infringed on any license, let us know and we will make the necessary changes (or remove that extension from this repo). + +## Sponsors + +We are building the features of Firebase using enterprise-grade, open source products. We support existing communities wherever possible, and if the products don’t exist we build them and open source them ourselves. 
+ +[![New Sponsor](https://user-images.githubusercontent.com/10214025/90518111-e74bbb00-e198-11ea-8f88-c9e3c1aa4b5b.png)](https://github.com/sponsors/tealbase) diff --git a/amazon-arm.json b/amazon-arm.json new file mode 100644 index 0000000..bf717cf --- /dev/null +++ b/amazon-arm.json @@ -0,0 +1,60 @@ +{ + "variables": { + "aws_access_key": "", + "aws_secret_key": "", + "region": "ap-northeast-1", + "ami_regions": "eu-central-1,eu-west-1,eu-west-2,ap-south-1,ap-southeast-1,ap-southeast-2,us-west-1,us-east-1,ca-central-1,sa-east-1,ap-northeast-1", + "ami": "ami-076d8ebdd0e1ec091", + "ami_name": "tealbase-postgres-13.3.0", + "environment": "prod", + "ansible_arguments": "--skip-tags,update-only,--skip-tags,install-postgrest,--skip-tags,install-pgbouncer,--skip-tags,install-tealbase-internal" + }, + "builders": [ + { + "type": "amazon-ebs", + "access_key": "{{user `aws_access_key`}}", + "secret_key": "{{user `aws_secret_key`}}", + "region": "{{user `region`}}", + "ami_regions": "{{user `ami_regions`}}", + "source_ami": "{{user `ami`}}", + "instance_type": "r6g.2xlarge", + "ssh_username": "ubuntu", + "ami_name": "{{user `ami_name`}}", + "tags": { + "environment": "{{user `environment`}}", + "appType": "postgres" + }, + "launch_block_device_mappings": [ + { + "device_name": "/dev/sda1", + "volume_size": 16, + "volume_type": "gp2", + "delete_on_termination": true + } + ] + } + ], + "provisioners": [ + { + "type": "shell", + "inline": [ + "while [ ! 
-f /var/lib/cloud/instance/boot-finished ]; do echo 'Waiting for cloud-init...'; sleep 1; done" + ] + }, + { + "type": "ansible", + "user": "ubuntu", + "playbook_file": "ansible/playbook.yml", + "extra_arguments": "{{user `ansible_arguments`}}" + }, + { + "execute_command": "echo 'packer' | sudo -S sh -c '{{ .Vars }} {{ .Path }}'", + "type": "shell", + "scripts": [ + "scripts/02-credentials_cleanup.sh", + "scripts/90-cleanup.sh", + "scripts/91-log_cleanup.sh" + ] + } + ] +} diff --git a/amazon.json b/amazon.json index 17d9d43..2fabed7 100644 --- a/amazon.json +++ b/amazon.json @@ -2,31 +2,45 @@ "variables": { "aws_access_key": "", "aws_secret_key": "", - "region": "ap-southeast-1", - "ami_regions": "ap-southeast-1", - "ami": "ami-0f7719e8b7ba25c61", - "ansible_arguments": "--skip-tags,update-only" + "region": "af-south-1", + "ami_regions": "af-south-1", + "ami": "ami-08a4b40f2fe1e4b35", + "ami_name": "tealbase-postgres-13.3.0.4", + "environment": "prod", + "ansible_arguments": "--skip-tags,update-only,--skip-tags,install-postgrest,--skip-tags,install-pgbouncer,--skip-tags,install-tealbase-internal" }, - "builders": [{ - "type": "amazon-ebs", - "access_key": "{{user `aws_access_key`}}", - "secret_key": "{{user `aws_secret_key`}}", - "region": "{{user `region`}}", - "ami_regions": "{{user `ami_regions`}}", - "source_ami": "{{user `ami`}}", - "instance_type": "m5.2xlarge", - "ssh_username": "ubuntu", - "ami_name": "tealbase-postgres-0.13.0", - "launch_block_device_mappings": [ - { - "device_name": "/dev/sda1", - "volume_size": 16, - "volume_type": "gp2", - "delete_on_termination": true - } - ] - }], + "builders": [ + { + "type": "amazon-ebs", + "access_key": "{{user `aws_access_key`}}", + "secret_key": "{{user `aws_secret_key`}}", + "region": "{{user `region`}}", + "ami_regions": "{{user `ami_regions`}}", + "source_ami": "{{user `ami`}}", + "instance_type": "m5.2xlarge", + "ssh_username": "ubuntu", + "ami_name": "{{user `ami_name`}}", + "tags": { + "environment": 
"{{user `environment`}}", + "appType": "postgres" + }, + "launch_block_device_mappings": [ + { + "device_name": "/dev/sda1", + "volume_size": 16, + "volume_type": "gp2", + "delete_on_termination": true + } + ] + } + ], "provisioners": [ + { + "type": "shell", + "inline": [ + "while [ ! -f /var/lib/cloud/instance/boot-finished ]; do echo 'Waiting for cloud-init...'; sleep 1; done" + ] + }, { "type": "ansible", "user": "ubuntu", @@ -37,11 +51,9 @@ "execute_command": "echo 'packer' | sudo -S sh -c '{{ .Vars }} {{ .Path }}'", "type": "shell", "scripts": [ - "scripts/01-test", "scripts/02-credentials_cleanup.sh", "scripts/90-cleanup.sh", - "scripts/91-log_cleanup.sh", - "scripts/99-img_check.sh" + "scripts/91-log_cleanup.sh" ] } ] diff --git a/ansible/files/docker_mnt/init.sh b/ansible/files/docker_mnt/init.sh new file mode 100644 index 0000000..fd12bbe --- /dev/null +++ b/ansible/files/docker_mnt/init.sh @@ -0,0 +1,3 @@ +cat /etc/postgresql/postgresql.conf > $PGDATA/postgresql.conf +echo "host replication $POSTGRES_USER 0.0.0.0/0 trust" >> $PGDATA/pg_hba.conf +echo "host all all 127.0.0.1/32 trust" >> $PGDATA/pg_hba.conf \ No newline at end of file diff --git a/ansible/files/fail2ban_config/filter-pgbouncer.conf.j2 b/ansible/files/fail2ban_config/filter-pgbouncer.conf.j2 new file mode 100644 index 0000000..50326da --- /dev/null +++ b/ansible/files/fail2ban_config/filter-pgbouncer.conf.j2 @@ -0,0 +1,2 @@ +[Definition] +failregex = ^.+@:.+error: password authentication failed$ \ No newline at end of file diff --git a/ansible/files/fail2ban_config/filter-postgresql.conf.j2 b/ansible/files/fail2ban_config/filter-postgresql.conf.j2 new file mode 100644 index 0000000..fd0895a --- /dev/null +++ b/ansible/files/fail2ban_config/filter-postgresql.conf.j2 @@ -0,0 +1,3 @@ +[Definition] +failregex = ^.*,.*,.*,.*,":.*password authentication failed for user.*$ +ignoreregex = ^.*,.*,.*,.*,"127\.0\.0\.1.*password authentication failed for user.*$ \ No newline at end of file diff --git 
a/ansible/files/fail2ban_config/jail-pgbouncer.conf.j2 b/ansible/files/fail2ban_config/jail-pgbouncer.conf.j2 new file mode 100644 index 0000000..77c5530 --- /dev/null +++ b/ansible/files/fail2ban_config/jail-pgbouncer.conf.j2 @@ -0,0 +1,7 @@ +[pgbouncer] +enabled = true +port = 6543 +protocol = tcp +filter = pgbouncer +logpath = /var/log/pgbouncer.log +maxretry = 3 \ No newline at end of file diff --git a/ansible/files/fail2ban_config/jail-postgresql.conf.j2 b/ansible/files/fail2ban_config/jail-postgresql.conf.j2 new file mode 100644 index 0000000..516f532 --- /dev/null +++ b/ansible/files/fail2ban_config/jail-postgresql.conf.j2 @@ -0,0 +1,7 @@ +[postgresql] +enabled = true +port = 5432 +protocol = tcp +filter = postgresql +logpath = /var/lib/postgresql/data/pg_log/postgresql.csv +maxretry = 3 \ No newline at end of file diff --git a/ansible/files/logrotate-postgres b/ansible/files/logrotate-postgres new file mode 100644 index 0000000..3266dbd --- /dev/null +++ b/ansible/files/logrotate-postgres @@ -0,0 +1,9 @@ +/var/lib/postgresql/data/pg_log/postgresql.log { + size 50M + rotate 3 + copytruncate + delaycompress + compress + notifempty + missingok +} diff --git a/ansible/files/node_exporter.service.j2 b/ansible/files/node_exporter.service.j2 new file mode 100644 index 0000000..4af7195 --- /dev/null +++ b/ansible/files/node_exporter.service.j2 @@ -0,0 +1,16 @@ +[Unit] +Description=Node Exporter +After=network-online.target + +[Service] +Type=simple +ExecStart=/opt/node_exporter/node_exporter --web.disable-exporter-metrics --collector.disable-defaults {% for collector in collectors %} --collector.{{ collector }} {% endfor %} + +User=root +StandardOutput=file:/var/log/node_exporter.stdout +StandardError=file:/var/log/node_exporter.error +Restart=on-failure +RestartSec=3 + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/pgbouncer_config/pgbouncer.ini.j2 b/ansible/files/pgbouncer_config/pgbouncer.ini.j2 new file mode 100644 index 0000000..c0c5dd0 --- 
/dev/null +++ b/ansible/files/pgbouncer_config/pgbouncer.ini.j2 @@ -0,0 +1,360 @@ +;;; +;;; PgBouncer configuration file +;;; + +;; database name = connect string +;; +;; connect string params: +;; dbname= host= port= user= password= auth_user= +;; client_encoding= datestyle= timezone= +;; pool_size= reserve_pool= max_db_connections= +;; pool_mode= connect_query= application_name= +[databases] +* = host=localhost auth_user=pgbouncer + +;; foodb over Unix socket +;foodb = + +;; redirect bardb to bazdb on localhost +;bardb = host=localhost dbname=bazdb + +;; access to dest database will go with single user +;forcedb = host=localhost port=300 user=baz password=foo client_encoding=UNICODE datestyle=ISO connect_query='SELECT 1' + +;; use custom pool sizes +;nondefaultdb = pool_size=50 reserve_pool=10 + +;; use auth_user with auth_query if user not present in auth_file +;; auth_user must exist in auth_file +; foodb = auth_user=bar + +;; fallback connect string +;* = host=testserver + +;; User-specific configuration +[users] + +;user1 = pool_mode=transaction max_user_connections=10 + +;; Configuration section +[pgbouncer] + +;;; +;;; Administrative settings +;;; + +logfile = /var/log/pgbouncer.log +pidfile = /var/run/pgbouncer/pgbouncer.pid + +;;; +;;; Where to wait for clients +;;; + +;; IP address or * which means all IPs +listen_addr = * +listen_port = 6543 + +;; Unix socket is also used for -R. +;; On Debian it should be /var/run/postgresql +unix_socket_dir = /tmp +;unix_socket_mode = 0777 +;unix_socket_group = + +;;; +;;; TLS settings for accepting clients +;;; + +;; disable, allow, require, verify-ca, verify-full +;client_tls_sslmode = disable + +;; Path to file that contains trusted CA certs +;client_tls_ca_file = + +;; Private key and cert to present to clients. +;; Required for accepting TLS connections from clients. 
+;client_tls_key_file = +;client_tls_cert_file = + +;; fast, normal, secure, legacy, +;client_tls_ciphers = fast + +;; all, secure, tlsv1.0, tlsv1.1, tlsv1.2, tlsv1.3 +;client_tls_protocols = secure + +;; none, auto, legacy +;client_tls_dheparams = auto + +;; none, auto, +;client_tls_ecdhcurve = auto + +;;; +;;; TLS settings for connecting to backend databases +;;; + +;; disable, allow, require, verify-ca, verify-full +;server_tls_sslmode = disable + +;; Path to that contains trusted CA certs +;server_tls_ca_file = + +;; Private key and cert to present to backend. +;; Needed only if backend server require client cert. +;server_tls_key_file = +;server_tls_cert_file = + +;; all, secure, tlsv1.0, tlsv1.1, tlsv1.2, tlsv1.3 +;server_tls_protocols = secure + +;; fast, normal, secure, legacy, +;server_tls_ciphers = fast + +;;; +;;; Authentication settings +;;; + +;; any, trust, plain, md5, cert, hba, pam +auth_type = md5 +auth_file = /etc/pgbouncer/userlist.txt + +;; Path to HBA-style auth config +;auth_hba_file = + +;; Query to use to fetch password from database. Result +;; must have 2 columns - username and password hash. +auth_query = SELECT * FROM pgbouncer.get_auth($1) + +;;; +;;; Users allowed into database 'pgbouncer' +;;; + +;; comma-separated list of users who are allowed to change settings +admin_users = pgbouncer + +;; comma-separated list of users who are just allowed to use SHOW command +stats_users = pgbouncer + +;;; +;;; Pooler personality questions +;;; + +;; When server connection is released back to pool: +;; session - after client disconnects (default) +;; transaction - after transaction finishes +;; statement - after statement finishes +pool_mode = transaction + +;; Query for cleaning connection immediately after releasing from +;; client. No need to put ROLLBACK here, pgbouncer does not reuse +;; connections where transaction is left open. +;server_reset_query = DISCARD ALL + +;; Whether server_reset_query should run in all pooling modes. 
If it +;; is off, server_reset_query is used only for session-pooling. +;server_reset_query_always = 0 + +;; Comma-separated list of parameters to ignore when given in startup +;; packet. Newer JDBC versions require the extra_float_digits here. +ignore_startup_parameters = extra_float_digits + +;; When taking idle server into use, this query is run first. +;server_check_query = select 1 + +;; If server was used more recently that this many seconds ago, +; skip the check query. Value 0 may or may not run in immediately. +;server_check_delay = 30 + +;; Close servers in session pooling mode after a RECONNECT, RELOAD, +;; etc. when they are idle instead of at the end of the session. +;server_fast_close = 0 + +;; Use as application_name on server. +;application_name_add_host = 0 + +;; Period for updating aggregated stats. +;stats_period = 60 + +;;; +;;; Connection limits +;;; + +;; Total number of clients that can connect +;max_client_conn = 100 + +;; Default pool size. 20 is good number when transaction pooling +;; is in use, in session pooling it needs to be the number of +;; max clients you want to handle at any moment +default_pool_size = 15 + +;; Minimum number of server connections to keep in pool. +;min_pool_size = 0 + +; how many additional connection to allow in case of trouble +;reserve_pool_size = 0 + +;; If a clients needs to wait more than this many seconds, use reserve +;; pool. 
+;reserve_pool_timeout = 5 + +;; Maximum number of server connections for a database +;max_db_connections = 0 + +;; Maximum number of server connections for a user +;max_user_connections = 0 + +;; If off, then server connections are reused in LIFO manner +;server_round_robin = 0 + +;;; +;;; Logging +;;; + +;; Syslog settings +;syslog = 0 +;syslog_facility = daemon +;syslog_ident = pgbouncer + +;; log if client connects or server connection is made +;log_connections = 1 + +;; log if and why connection was closed +;log_disconnections = 1 + +;; log error messages pooler sends to clients +;log_pooler_errors = 1 + +;; write aggregated stats into log +;log_stats = 1 + +;; Logging verbosity. Same as -v switch on command line. +;verbose = 0 + +;;; +;;; Timeouts +;;; + +;; Close server connection if its been connected longer. +;server_lifetime = 3600 + +;; Close server connection if its not been used in this time. Allows +;; to clean unnecessary connections from pool after peak. +;server_idle_timeout = 600 + +;; Cancel connection attempt if server does not answer takes longer. +;server_connect_timeout = 15 + +;; If server login failed (server_connect_timeout or auth failure) +;; then wait this many second. +;server_login_retry = 15 + +;; Dangerous. Server connection is closed if query does not return in +;; this time. Should be used to survive network problems, _not_ as +;; statement_timeout. (default: 0) +;query_timeout = 0 + +;; Dangerous. Client connection is closed if the query is not +;; assigned to a server in this time. Should be used to limit the +;; number of queued queries in case of a database or network +;; failure. (default: 120) +;query_wait_timeout = 120 + +;; Dangerous. Client connection is closed if no activity in this +;; time. Should be used to survive network problems. (default: 0) +;client_idle_timeout = 0 + +;; Disconnect clients who have not managed to log in after connecting +;; in this many seconds. 
+;client_login_timeout = 60 + +;; Clean automatically created database entries (via "*") if they stay +;; unused in this many seconds. +; autodb_idle_timeout = 3600 + +;; Close connections which are in "IDLE in transaction" state longer +;; than this many seconds. +;idle_transaction_timeout = 0 + +;; How long SUSPEND/-R waits for buffer flush before closing +;; connection. +;suspend_timeout = 10 + +;;; +;;; Low-level tuning options +;;; + +;; buffer for streaming packets +;pkt_buf = 4096 + +;; man 2 listen +;listen_backlog = 128 + +;; Max number pkt_buf to process in one event loop. +;sbuf_loopcnt = 5 + +;; Maximum PostgreSQL protocol packet size. +;max_packet_size = 2147483647 + +;; Set SO_REUSEPORT socket option +;so_reuseport = 0 + +;; networking options, for info: man 7 tcp + +;; Linux: Notify program about new connection only if there is also +;; data received. (Seconds to wait.) On Linux the default is 45, on +;; other OS'es 0. +;tcp_defer_accept = 0 + +;; In-kernel buffer size (Linux default: 4096) +;tcp_socket_buffer = 0 + +;; whether tcp keepalive should be turned on (0/1) +;tcp_keepalive = 1 + +;; The following options are Linux-specific. They also require +;; tcp_keepalive=1. + +;; Count of keepalive packets +;tcp_keepcnt = 0 + +;; How long the connection can be idle before sending keepalive +;; packets +;tcp_keepidle = 0 + +;; The time between individual keepalive probes +;tcp_keepintvl = 0 + +;; How long may transmitted data remain unacknowledged before TCP +;; connection is closed (in milliseconds) +;tcp_user_timeout = 0 + +;; DNS lookup caching time +;dns_max_ttl = 15 + +;; DNS zone SOA lookup period +;dns_zone_check_period = 0 + +;; DNS negative result caching time +;dns_nxdomain_ttl = 15 + +;; Custom resolv.conf file, to set custom DNS servers or other options +;; (default: empty = use OS settings) +;resolv_conf = /etc/pgbouncer/resolv.conf + +;;; +;;; Random stuff +;;; + +;; Hackish security feature. 
Helps against SQL injection: when PQexec +;; is disabled, multi-statement cannot be made. +;disable_pqexec = 0 + +;; Config file to use for next RELOAD/SIGHUP +;; By default contains config file from command line. +;conffile + +;; Windows service name to register as. job_name is alias for +;; service_name, used by some Skytools scripts. +;service_name = pgbouncer +;job_name = pgbouncer + +;; Read additional config from other file +;%include /etc/pgbouncer/pgbouncer-other.ini diff --git a/ansible/files/pgbouncer_config/pgbouncer.service.j2 b/ansible/files/pgbouncer_config/pgbouncer.service.j2 new file mode 100644 index 0000000..96273cb --- /dev/null +++ b/ansible/files/pgbouncer_config/pgbouncer.service.j2 @@ -0,0 +1,40 @@ +# Example systemd service unit for PgBouncer +# +# - Adjust the paths in ExecStart for your installation. +# +# - The User setting requires careful consideration. PgBouncer needs +# to be able to place a Unix-domain socket file where PostgreSQL +# clients will look for it. In the olden days, this was in /tmp, +# but systems using systemd now prefer something like +# /var/run/postgresql/. But then some systems also lock down that +# directory so that only the postgres user can write to it. That +# means you need to either +# +# - run PgBouncer as the postgres user, or +# +# - create a separate user and add it to the postgres group and +# make /var/run/postgresql/ group-writable, or +# +# - use systemd to create the sockets; see pgbouncer.socket nearby. +# +# For packagers and deployment systems, this requires some +# coordination between the PgBouncer and the PostgreSQL +# packages/components. 
+# +[Unit] +Description=connection pooler for PostgreSQL +Documentation=man:pgbouncer(1) +Documentation=https://www.pgbouncer.org/ +After=network.target +#Requires=pgbouncer.socket + +[Service] +Type=notify +User=postgres +ExecStart=/usr/local/bin/pgbouncer /etc/pgbouncer/pgbouncer.ini +ExecReload=/bin/kill -HUP $MAINPID +KillSignal=SIGINT +#LimitNOFILE=1024 + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql b/ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql new file mode 100644 index 0000000..bc1342f --- /dev/null +++ b/ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql @@ -0,0 +1,18 @@ +CREATE USER pgbouncer; + +CREATE SCHEMA pgbouncer AUTHORIZATION pgbouncer; + +CREATE OR REPLACE FUNCTION pgbouncer.get_auth(p_usename TEXT) +RETURNS TABLE(username TEXT, password TEXT) AS +$$ +BEGIN + RAISE WARNING 'PgBouncer auth request: %', p_usename; + + RETURN QUERY + SELECT usename::TEXT, passwd::TEXT FROM pg_catalog.pg_shadow + WHERE usename = p_usename; +END; +$$ LANGUAGE plpgsql SECURITY DEFINER; + +REVOKE ALL ON FUNCTION pgbouncer.get_auth(p_usename TEXT) FROM PUBLIC; +GRANT EXECUTE ON FUNCTION pgbouncer.get_auth(p_usename TEXT) TO pgbouncer; diff --git a/ansible/files/pgbouncer_config/tmpfiles.d-pgbouncer.conf.j2 b/ansible/files/pgbouncer_config/tmpfiles.d-pgbouncer.conf.j2 new file mode 100644 index 0000000..3889ed2 --- /dev/null +++ b/ansible/files/pgbouncer_config/tmpfiles.d-pgbouncer.conf.j2 @@ -0,0 +1,2 @@ +# Directory for PgBouncer sockets, lockfiles and stats tempfiles +d /run/pgbouncer 2775 postgres postgres - - \ No newline at end of file diff --git a/ansible/files/postgres_exporter.service.j2 b/ansible/files/postgres_exporter.service.j2 new file mode 100644 index 0000000..65d2120 --- /dev/null +++ b/ansible/files/postgres_exporter.service.j2 @@ -0,0 +1,16 @@ +[Unit] +Description=Postgres Exporter + +[Service] +Type=simple
+ExecStart=/opt/postgres_exporter/postgres_exporter --auto-discover-databases --disable-settings-metrics --extend.query-path="/opt/postgres_exporter/queries.yml" --disable-default-metrics +User=root +StandardOutput=file:/var/log/postgres_exporter.stdout +StandardError=file:/var/log/postgres_exporter.error +Restart=always +RestartSec=3 +Environment="DATA_SOURCE_URI=localhost/postgres?sslmode=disable" +Environment="DATA_SOURCE_USER=tealbase_admin" + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/postgresql_config/pg_hba.conf.j2 b/ansible/files/postgresql_config/pg_hba.conf.j2 new file mode 100755 index 0000000..1e4c866 --- /dev/null +++ b/ansible/files/postgresql_config/pg_hba.conf.j2 @@ -0,0 +1,99 @@ +# PostgreSQL Client Authentication Configuration File +# =================================================== +# +# Refer to the "Client Authentication" section in the PostgreSQL +# documentation for a complete description of this file. A short +# synopsis follows. +# +# This file controls: which hosts are allowed to connect, how clients +# are authenticated, which PostgreSQL user names they can use, which +# databases they can access. Records take one of these forms: +# +# local DATABASE USER METHOD [OPTIONS] +# host DATABASE USER ADDRESS METHOD [OPTIONS] +# hostssl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostgssenc DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnogssenc DATABASE USER ADDRESS METHOD [OPTIONS] +# +# (The uppercase items must be replaced by actual values.) +# +# The first field is the connection type: "local" is a Unix-domain +# socket, "host" is either a plain or SSL-encrypted TCP/IP socket, +# "hostssl" is an SSL-encrypted TCP/IP socket, and "hostnossl" is a +# non-SSL TCP/IP socket. Similarly, "hostgssenc" uses a +# GSSAPI-encrypted TCP/IP socket, while "hostnogssenc" uses a +# non-GSSAPI socket. 
+# +# DATABASE can be "all", "sameuser", "samerole", "replication", a +# database name, or a comma-separated list thereof. The "all" +# keyword does not match "replication". Access to replication +# must be enabled in a separate record (see example below). +# +# USER can be "all", a user name, a group name prefixed with "+", or a +# comma-separated list thereof. In both the DATABASE and USER fields +# you can also write a file name prefixed with "@" to include names +# from a separate file. +# +# ADDRESS specifies the set of hosts the record matches. It can be a +# host name, or it is made up of an IP address and a CIDR mask that is +# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that +# specifies the number of significant bits in the mask. A host name +# that starts with a dot (.) matches a suffix of the actual host name. +# Alternatively, you can write an IP address and netmask in separate +# columns to specify the set of hosts. Instead of a CIDR-address, you +# can write "samehost" to match any of the server's own IP addresses, +# or "samenet" to match any address in any subnet that the server is +# directly connected to. +# +# METHOD can be "trust", "reject", "md5", "password", "scram-sha-256", +# "gss", "sspi", "ident", "peer", "pam", "ldap", "radius" or "cert". +# Note that "password" sends passwords in clear text; "md5" or +# "scram-sha-256" are preferred since they send encrypted passwords. +# +# OPTIONS are a set of options for the authentication in the format +# NAME=VALUE. The available options depend on the different +# authentication methods -- refer to the "Client Authentication" +# section in the documentation for a list of which options are +# available for which authentication methods. +# +# Database and user names containing spaces, commas, quotes and other +# special characters must be quoted. 
Quoting one of the keywords +# "all", "sameuser", "samerole" or "replication" makes the name lose +# its special character, and just match a database or username with +# that name. +# +# This file is read on server startup and when the server receives a +# SIGHUP signal. If you edit the file on a running system, you have to +# SIGHUP the server for the changes to take effect, run "pg_ctl reload", +# or execute "SELECT pg_reload_conf()". +# +# Put your actual configuration here +# ---------------------------------- +# +# If you want to allow non-local connections, you need to add more +# "host" records. In that case you will also need to make PostgreSQL +# listen on a non-local interface via the listen_addresses +# configuration parameter, or via the -i or -h command line switches. + +# TYPE DATABASE USER ADDRESS METHOD + +# Default: +# "local" is for Unix domain socket connections only +local all all peer +# IPv4 local connections: +host all all 127.0.0.1/32 trust +# IPv6 local connections: +host all all ::1/128 md5 +# Local root Unix user, passwordless access +local all postgres peer map=root_as_postgres +# IPv4 external connections +host all all 0.0.0.0/0 md5 + +# MD5 hashed password hosts + +# Password hosts + +# Trusted hosts + +# User custom \ No newline at end of file diff --git a/ansible/files/postgresql_config/pg_ident.conf.j2 b/ansible/files/postgresql_config/pg_ident.conf.j2 new file mode 100755 index 0000000..a5c8de7 --- /dev/null +++ b/ansible/files/postgresql_config/pg_ident.conf.j2 @@ -0,0 +1,44 @@ +# PostgreSQL User Name Maps +# ========================= +# +# Refer to the PostgreSQL documentation, chapter "Client +# Authentication" for a complete description. A short synopsis +# follows. +# +# This file controls PostgreSQL user name mapping. It maps external +# user names to their corresponding PostgreSQL user names. 
Records +# are of the form: +# +# MAPNAME SYSTEM-USERNAME PG-USERNAME +# +# (The uppercase quantities must be replaced by actual values.) +# +# MAPNAME is the (otherwise freely chosen) map name that was used in +# pg_hba.conf. SYSTEM-USERNAME is the detected user name of the +# client. PG-USERNAME is the requested PostgreSQL user name. The +# existence of a record specifies that SYSTEM-USERNAME may connect as +# PG-USERNAME. +# +# If SYSTEM-USERNAME starts with a slash (/), it will be treated as a +# regular expression. Optionally this can contain a capture (a +# parenthesized subexpression). The substring matching the capture +# will be substituted for \1 (backslash-one) if present in +# PG-USERNAME. +# +# Multiple maps may be specified in this file and used by pg_hba.conf. +# +# No map names are defined in the default configuration. If all +# system user names and PostgreSQL user names are the same, you don't +# need anything in this file. +# +# This file is read on server startup and when the postmaster receives +# a SIGHUP signal. If you edit the file on a running system, you have +# to SIGHUP the postmaster for the changes to take effect. You can +# use "pg_ctl reload" to do that. + +# Put your actual configuration here +# ---------------------------------- + +# MAPNAME SYSTEM-USERNAME PG-USERNAME +# root is allowed to login as postgres +root_as_postgres root postgres diff --git a/ansible/files/postgresql_config/postgresql.conf.j2 b/ansible/files/postgresql_config/postgresql.conf.j2 new file mode 100644 index 0000000..8b4fec8 --- /dev/null +++ b/ansible/files/postgresql_config/postgresql.conf.j2 @@ -0,0 +1,794 @@ +# ----------------------------- +# PostgreSQL configuration file +# ----------------------------- +# +# This file consists of lines of the form: +# +# name = value +# +# (The "=" is optional.) Whitespace may be used. Comments are introduced with +# "#" anywhere on a line.
The complete list of parameter names and allowed +# values can be found in the PostgreSQL documentation. +# +# The commented-out settings shown in this file represent the default values. +# Re-commenting a setting is NOT sufficient to revert it to the default value; +# you need to reload the server. +# +# This file is read on server startup and when the server receives a SIGHUP +# signal. If you edit the file on a running system, you have to SIGHUP the +# server for the changes to take effect, run "pg_ctl reload", or execute +# "SELECT pg_reload_conf()". Some parameters, which are marked below, +# require a server shutdown and restart to take effect. +# +# Any parameter can also be given as a command-line option to the server, e.g., +# "postgres -c log_connections=on". Some parameters can be changed at run time +# with the "SET" SQL command. +# +# Memory units: kB = kilobytes Time units: ms = milliseconds +# MB = megabytes s = seconds +# GB = gigabytes min = minutes +# TB = terabytes h = hours +# d = days + + +#------------------------------------------------------------------------------ +# FILE LOCATIONS +#------------------------------------------------------------------------------ + +# The default values of these variables are driven from the -D command-line +# option or PGDATA environment variable, represented here as ConfigDir. + +data_directory = '/var/lib/postgresql/data' # use data in another directory + # (change requires restart) +hba_file = '/etc/postgresql/pg_hba.conf' # host-based authentication file + # (change requires restart) +ident_file = '/etc/postgresql/pg_ident.conf' # ident configuration file + # (change requires restart) + +# If external_pid_file is not explicitly set, no extra PID file is written. 
+#external_pid_file = '' # write an extra PID file + # (change requires restart) + + +#------------------------------------------------------------------------------ +# CONNECTIONS AND AUTHENTICATION +#------------------------------------------------------------------------------ + +# - Connection Settings - + +listen_addresses = '*' # what IP address(es) to listen on; + # comma-separated list of addresses; + # defaults to 'localhost'; use '*' for all + # (change requires restart) +#port = 5432 # (change requires restart) +#max_connections = 100 # (change requires restart) +#superuser_reserved_connections = 3 # (change requires restart) +#unix_socket_directories = '/tmp' # comma-separated list of directories + # (change requires restart) +#unix_socket_group = '' # (change requires restart) +#unix_socket_permissions = 0777 # begin with 0 to use octal notation + # (change requires restart) +#bonjour = off # advertise server via Bonjour + # (change requires restart) +#bonjour_name = '' # defaults to the computer name + # (change requires restart) + +# - TCP settings - +# see "man tcp" for details + +#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; + # 0 selects the system default +#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; + # 0 selects the system default +#tcp_keepalives_count = 0 # TCP_KEEPCNT; + # 0 selects the system default +#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds; + # 0 selects the system default + +#client_connection_check_interval = 0 # time between checks for client + # disconnection while running queries; + # 0 for never + +# - Authentication - + +authentication_timeout = 1min # 1s-600s +password_encryption = md5 # scram-sha-256 or md5 +db_user_namespace = off + +# GSSAPI using Kerberos +#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab' +#krb_caseins_users = off + +# - SSL - + +ssl = off +ssl_ca_file = '' +ssl_cert_file = '' +ssl_crl_file = '' +ssl_key_file = '' +ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL 
ciphers +ssl_prefer_server_ciphers = on +ssl_ecdh_curve = 'prime256v1' +ssl_min_protocol_version = 'TLSv1.2' +ssl_max_protocol_version = '' +ssl_dh_params_file = '' +ssl_passphrase_command = '' +ssl_passphrase_command_supports_reload = off + + +#------------------------------------------------------------------------------ +# RESOURCE USAGE (except WAL) +#------------------------------------------------------------------------------ + +# - Memory - + +shared_buffers = 128MB # min 128kB + # (change requires restart) +# huge_pages = try # on, off, or try + # (change requires restart) +# huge_page_size = 0 # zero for system default + # (change requires restart) +# temp_buffers = 8MB # min 800kB +# max_prepared_transactions = 0 # zero disables the feature + # (change requires restart) +# Caution: it is not advisable to set max_prepared_transactions nonzero unless +# you actively intend to use prepared transactions. +# work_mem = 4MB # min 64kB +# hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem +# maintenance_work_mem = 64MB # min 1MB +# autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem +# logical_decoding_work_mem = 64MB # min 64kB +# max_stack_depth = 2MB # min 100kB +# shared_memory_type = mmap # the default is the first option + # supported by the operating system: + # mmap + # sysv + # windows + # (change requires restart) +# dynamic_shared_memory_type = posix # the default is the first option + # supported by the operating system: + # posix + # sysv + # windows + # mmap + # (change requires restart) +#min_dynamic_shared_memory = 0MB # (change requires restart) + +# - Disk - + +#temp_file_limit = -1 # limits per-process temp file space + # in kilobytes, or -1 for no limit + +# - Kernel Resources - + +#max_files_per_process = 1000 # min 64 + # (change requires restart) + +# - Cost-Based Vacuum Delay - + +#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables) +#vacuum_cost_page_hit = 1 # 0-10000 credits 
+#vacuum_cost_page_miss = 2 # 0-10000 credits +#vacuum_cost_page_dirty = 20 # 0-10000 credits +#vacuum_cost_limit = 200 # 1-10000 credits + +# - Background Writer - + +#bgwriter_delay = 200ms # 10-10000ms between rounds +#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables +#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round +#bgwriter_flush_after = 0 # measured in pages, 0 disables + +# - Asynchronous Behavior - + +#backend_flush_after = 0 # measured in pages, 0 disables +#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching +#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching +#max_worker_processes = 8 # (change requires restart) +#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers +#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers +#max_parallel_workers = 8 # maximum number of max_worker_processes that + # can be used in parallel operations +#parallel_leader_participation = on +#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate + # (change requires restart) + + +#------------------------------------------------------------------------------ +# WRITE-AHEAD LOG +#------------------------------------------------------------------------------ + +# - Settings - + +wal_level = logical # minimal, replica, or logical + # (change requires restart) +#fsync = on # flush data to disk for crash safety + # (turning this off can cause + # unrecoverable data corruption) +#synchronous_commit = on # synchronization level; + # off, local, remote_write, remote_apply, or on +#wal_sync_method = fsync # the default is the first option + # supported by the operating system: + # open_datasync + # fdatasync (default on Linux and FreeBSD) + # fsync + # fsync_writethrough + # open_sync +#full_page_writes = on # recover from partial page writes +#wal_log_hints = off # also do full page writes of non-critical updates + # (change requires restart) +#wal_compression = off # 
enable compression of full-page writes +#wal_init_zero = on # zero-fill new WAL files +#wal_recycle = on # recycle WAL files +#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers + # (change requires restart) +#wal_writer_delay = 200ms # 1-10000 milliseconds +#wal_writer_flush_after = 1MB # measured in pages, 0 disables +#wal_skip_threshold = 2MB + +#commit_delay = 0 # range 0-100000, in microseconds +#commit_siblings = 5 # range 1-1000 + +# - Checkpoints - + +#checkpoint_timeout = 5min # range 30s-1d +checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0 +checkpoint_flush_after = 256kB # measured in pages, 0 disables +#checkpoint_warning = 30s # 0 disables +#max_wal_size = 1GB +#min_wal_size = 80MB + +# - Archiving - + +#archive_mode = off # enables archiving; off, on, or always + # (change requires restart) +#archive_command = '' # command to use to archive a logfile segment + # placeholders: %p = path of file to archive + # %f = file name only + # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' +#archive_timeout = 0 # force a logfile segment switch after this + # number of seconds; 0 disables + +# - Archive Recovery - + +# These are only used in recovery mode. + +#restore_command = '' # command to use to restore an archived logfile segment + # placeholders: %p = path of file to restore + # %f = file name only + # e.g. 'cp /mnt/server/archivedir/%f %p' +#archive_cleanup_command = '' # command to execute at every restartpoint +#recovery_end_command = '' # command to execute at completion of recovery + +# - Recovery Target - + +# Set these only when performing a targeted recovery. 
+ +#recovery_target = '' # 'immediate' to end recovery as soon as a + # consistent state is reached + # (change requires restart) +#recovery_target_name = '' # the named restore point to which recovery will proceed + # (change requires restart) +#recovery_target_time = '' # the time stamp up to which recovery will proceed + # (change requires restart) +#recovery_target_xid = '' # the transaction ID up to which recovery will proceed + # (change requires restart) +#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed + # (change requires restart) +#recovery_target_inclusive = on # Specifies whether to stop: + # just after the specified recovery target (on) + # just before the recovery target (off) + # (change requires restart) +#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID + # (change requires restart) +#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown' + # (change requires restart) + + +#------------------------------------------------------------------------------ +# REPLICATION +#------------------------------------------------------------------------------ + +# - Sending Servers - + +# Set these on the primary and on any standby that will send replication data. + +max_wal_senders = 10 # max number of walsender processes + # (change requires restart) +max_replication_slots = 5 # max number of replication slots + # (change requires restart) +#wal_keep_size = 0 # in megabytes; 0 disables +#max_slot_wal_keep_size = -1 # in megabytes; -1 disables +#wal_sender_timeout = 60s # in milliseconds; 0 disables +#track_commit_timestamp = off # collect timestamp of transaction commit + # (change requires restart) + +# - Primary Server - + +# These settings are ignored on a standby server. 
+ +#synchronous_standby_names = '' # standby servers that provide sync rep + # method to choose sync standbys, number of sync standbys, + # and comma-separated list of application_name + # from standby(s); '*' = all +#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed + +# - Standby Servers - + +# These settings are ignored on a primary server. + +#primary_conninfo = '' # connection string to sending server +#primary_slot_name = '' # replication slot on sending server +#promote_trigger_file = '' # file name whose presence ends recovery +#hot_standby = on # "off" disallows queries during recovery + # (change requires restart) +#max_standby_archive_delay = 30s # max delay before canceling queries + # when reading WAL from archive; + # -1 allows indefinite delay +#max_standby_streaming_delay = 30s # max delay before canceling queries + # when reading streaming WAL; + # -1 allows indefinite delay +#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name + # is not set +#wal_receiver_status_interval = 10s # send replies at least this often + # 0 disables +#hot_standby_feedback = off # send info from standby to prevent + # query conflicts +#wal_receiver_timeout = 60s # time that receiver waits for + # communication from primary + # in milliseconds; 0 disables +#wal_retrieve_retry_interval = 5s # time to wait before retrying to + # retrieve WAL after a failed attempt +#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery + +# - Subscribers - + +# These settings are ignored on a publisher. 
+ +#max_logical_replication_workers = 4 # taken from max_worker_processes + # (change requires restart) +#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers + + +#------------------------------------------------------------------------------ +# QUERY TUNING +#------------------------------------------------------------------------------ + +# - Planner Method Configuration - + +#enable_async_append = on +#enable_bitmapscan = on +#enable_gathermerge = on +#enable_hashagg = on +#enable_hashjoin = on +#enable_incremental_sort = on +#enable_indexscan = on +#enable_indexonlyscan = on +#enable_material = on +#enable_resultcache = on +#enable_mergejoin = on +#enable_nestloop = on +#enable_parallel_append = on +#enable_parallel_hash = on +#enable_partition_pruning = on +#enable_partitionwise_join = off +#enable_partitionwise_aggregate = off +#enable_seqscan = on +#enable_sort = on +#enable_tidscan = on + +# - Planner Cost Constants - + +#seq_page_cost = 1.0 # measured on an arbitrary scale +#random_page_cost = 4.0 # same scale as above +#cpu_tuple_cost = 0.01 # same scale as above +#cpu_index_tuple_cost = 0.005 # same scale as above +#cpu_operator_cost = 0.0025 # same scale as above +#parallel_setup_cost = 1000.0 # same scale as above +#parallel_tuple_cost = 0.1 # same scale as above +#min_parallel_table_scan_size = 8MB +#min_parallel_index_scan_size = 512kB +effective_cache_size = 128MB + +#jit_above_cost = 100000 # perform JIT compilation if available + # and query more expensive than this; + # -1 disables +#jit_inline_above_cost = 500000 # inline small functions if query is + # more expensive than this; -1 disables +#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if + # query is more expensive than this; + # -1 disables + +# - Genetic Query Optimizer - + +#geqo = on +#geqo_threshold = 12 +#geqo_effort = 5 # range 1-10 +#geqo_pool_size = 0 # selects default based on effort +#geqo_generations = 0 # selects default based 
on effort +#geqo_selection_bias = 2.0 # range 1.5-2.0 +#geqo_seed = 0.0 # range 0.0-1.0 + +# - Other Planner Options - + +#default_statistics_target = 100 # range 1-10000 +#constraint_exclusion = partition # on, off, or partition +#cursor_tuple_fraction = 0.1 # range 0.0-1.0 +#from_collapse_limit = 8 +#jit = on # allow JIT compilation +#join_collapse_limit = 8 # 1 disables collapsing of explicit + # JOIN clauses +#plan_cache_mode = auto # auto, force_generic_plan or + # force_custom_plan + + +#------------------------------------------------------------------------------ +# REPORTING AND LOGGING +#------------------------------------------------------------------------------ + +# - Where to Log - + +log_destination = 'csvlog' # Valid values are combinations of + # stderr, csvlog, syslog, and eventlog, + # depending on platform. csvlog + # requires logging_collector to be on. + +# This is used when logging to stderr: +logging_collector = on # Enable capturing of stderr and csvlog + # into log files. Required to be on for + # csvlogs. + # (change requires restart) + +# These are only used if logging_collector is on: +log_directory = 'pg_log' # directory where log files are written, + # can be absolute or relative to PGDATA +log_filename = 'postgresql.log' # log file name pattern, + # can include strftime() escapes +# log_file_mode = 0600 # creation mode for log files, + # begin with 0 to use octal notation +log_rotation_age = 0 # Automatic rotation of logfiles will + # happen after that time. 0 disables. +log_rotation_size = 0 # Automatic rotation of logfiles will + # happen after that much log output. + # 0 disables. +#log_truncate_on_rotation = off # If on, an existing log file with the + # same name as the new log file will be + # truncated rather than appended to. + # But such truncation only occurs on + # time-driven rotation, not on restarts + # or size-driven rotation. Default is + # off, meaning append to existing files + # in all cases. 
+ +# These are relevant when logging to syslog: +#syslog_facility = 'LOCAL0' +#syslog_ident = 'postgres' +#syslog_sequence_numbers = on +#syslog_split_messages = on + +# This is only relevant when logging to eventlog (Windows): +# (change requires restart) +#event_source = 'PostgreSQL' + +# - When to Log - + +#log_min_messages = warning # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic + +#log_min_error_statement = error # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic (effectively off) + +#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements + # and their durations, > 0 logs only + # statements running at least this number + # of milliseconds + +#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements + # and their durations, > 0 logs only a sample of + # statements running at least this number + # of milliseconds; + # sample fraction is determined by log_statement_sample_rate + +#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding + # log_min_duration_sample to be logged; + # 1.0 logs all such statements, 0.0 never logs + + +#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements + # are logged regardless of their duration; 1.0 logs all + # statements from all transactions, 0.0 never logs + +# - What to Log - + +#debug_print_parse = off +#debug_print_rewritten = off +#debug_print_plan = off +#debug_pretty_print = on +#log_autovacuum_min_duration = -1 # log autovacuum activity; + # -1 disables, 0 logs all actions and + # their durations, > 0 logs only + # actions running at least this number + # of milliseconds. 
+#log_checkpoints = off +#log_connections = off +#log_disconnections = off +#log_duration = off +#log_error_verbosity = default # terse, default, or verbose messages +#log_hostname = off +log_line_prefix = '%h %m [%p] %q%u@%d ' # special values: + # %a = application name + # %u = user name + # %d = database name + # %r = remote host and port + # %h = remote host + # %b = backend type + # %p = process ID + # %P = process ID of parallel group leader + # %t = timestamp without milliseconds + # %m = timestamp with milliseconds + # %n = timestamp with milliseconds (as a Unix epoch) + # %Q = query ID (0 if none or not computed) + # %i = command tag + # %e = SQL state + # %c = session ID + # %l = session line number + # %s = session start timestamp + # %v = virtual transaction ID + # %x = transaction ID (0 if none) + # %q = stop here in non-session + # processes + # %% = '%' + # e.g. '<%u%%%d> ' +#log_lock_waits = off # log lock waits >= deadlock_timeout +#log_recovery_conflict_waits = off # log standby recovery conflict waits + # >= deadlock_timeout +#log_parameter_max_length = -1 # when logging statements, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +#log_parameter_max_length_on_error = 0 # when logging an error, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +#log_statement = 'none' # none, ddl, mod, all +#log_replication_commands = off +#log_temp_files = -1 # log temporary files equal or larger + # than the specified size in kilobytes; + # -1 disables, 0 logs all temp files +log_timezone = 'UTC' + +#------------------------------------------------------------------------------ +# PROCESS TITLE +#------------------------------------------------------------------------------ + +cluster_name = 'main' # added to process titles if nonempty + # (change requires restart) +#update_process_title = on + + +#------------------------------------------------------------------------------ +# 
STATISTICS +#------------------------------------------------------------------------------ + +# - Query and Index Statistics Collector - + +#track_activities = on +#track_activity_query_size = 1024 # (change requires restart) +#track_counts = on +#track_io_timing = off +#track_wal_io_timing = off +#track_functions = none # none, pl, all +#stats_temp_directory = 'pg_stat_tmp' + + +# - Monitoring - + +#compute_query_id = auto +#log_statement_stats = off +#log_parser_stats = off +#log_planner_stats = off +#log_executor_stats = off + + +#------------------------------------------------------------------------------ +# AUTOVACUUM +#------------------------------------------------------------------------------ + +#autovacuum = on # Enable autovacuum subprocess? 'on' + # requires track_counts to also be on. +#autovacuum_max_workers = 3 # max number of autovacuum subprocesses + # (change requires restart) +#autovacuum_naptime = 1min # time between autovacuum runs +#autovacuum_vacuum_threshold = 50 # min number of row updates before + # vacuum +#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts + # before vacuum; -1 disables insert + # vacuums +#autovacuum_analyze_threshold = 50 # min number of row updates before + # analyze +#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum +#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table + # size before insert vacuum +#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze +#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum + # (change requires restart) +#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age + # before forced vacuum + # (change requires restart) +#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for + # autovacuum, in milliseconds; + # -1 means use vacuum_cost_delay +#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for + # autovacuum, -1 means use + # 
vacuum_cost_limit + + +#------------------------------------------------------------------------------ +# CLIENT CONNECTION DEFAULTS +#------------------------------------------------------------------------------ + +# - Statement Behavior - + +#client_min_messages = notice # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # log + # notice + # warning + # error +#search_path = '"$user", public' # schema names +row_security = on +#default_table_access_method = 'heap' +#default_tablespace = '' # a tablespace name, '' uses the default +#default_toast_compression = 'pglz' # 'pglz' or 'lz4' +#temp_tablespaces = '' # a list of tablespace names, '' uses + # only default tablespace +#check_function_bodies = on +#default_transaction_isolation = 'read committed' +#default_transaction_read_only = off +#default_transaction_deferrable = off +#session_replication_role = 'origin' +#statement_timeout = 0 # in milliseconds, 0 is disabled +#lock_timeout = 0 # in milliseconds, 0 is disabled +#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled +#idle_session_timeout = 0 # in milliseconds, 0 is disabled +#vacuum_freeze_table_age = 150000000 +#vacuum_freeze_min_age = 50000000 +#vacuum_failsafe_age = 1600000000 +#vacuum_multixact_freeze_table_age = 150000000 +#vacuum_multixact_freeze_min_age = 5000000 +#vacuum_multixact_failsafe_age = 1600000000 +#bytea_output = 'hex' # hex, escape +#xmlbinary = 'base64' +#xmloption = 'content' +#gin_pending_list_limit = 4MB + +# - Locale and Formatting - + +#datestyle = 'iso, mdy' +#intervalstyle = 'postgres' +timezone = 'UTC' +#timezone_abbreviations = 'Default' # Select the set of available time zone + # abbreviations. Currently, there are + # Default + # Australia (historical usage) + # India + # You can create your own file in + # share/timezonesets/. 
+extra_float_digits = 0 # min -15, max 3; any value >0 actually + # selects precise output mode +#client_encoding = sql_ascii # actually, defaults to database + # encoding + +# These settings are initialized by initdb, but they can be changed. +lc_messages = 'en_US.UTF-8' # locale for system error message + # strings +lc_monetary = 'en_US.UTF-8' # locale for monetary formatting +lc_numeric = 'en_US.UTF-8' # locale for number formatting +lc_time = 'en_US.UTF-8' # locale for time formatting + +# default configuration for text search +default_text_search_config = 'pg_catalog.english' + +# - Shared Library Preloading - + +#local_preload_libraries = '' +#session_preload_libraries = '' +shared_preload_libraries = 'pg_stat_statements, pgaudit, plpgsql, plpgsql_check, pg_cron' # (change requires restart) +jit_provider = 'llvmjit' # JIT library to use + +# - Other Defaults - + +#dynamic_library_path = '$libdir' +#gin_fuzzy_search_limit = 0 + +#------------------------------------------------------------------------------ +# LOCK MANAGEMENT +#------------------------------------------------------------------------------ + +#deadlock_timeout = 1s +#max_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_relation = -2 # negative values mean + # (max_pred_locks_per_transaction + # / -max_pred_locks_per_relation) - 1 +#max_pred_locks_per_page = 2 # min 0 + + +#------------------------------------------------------------------------------ +# VERSION AND PLATFORM COMPATIBILITY +#------------------------------------------------------------------------------ + +# - Previous PostgreSQL Versions - + +#array_nulls = on +#backslash_quote = safe_encoding # on, off, or safe_encoding +#escape_string_warning = on +#lo_compat_privileges = off +#quote_all_identifiers = off +#standard_conforming_strings = on +#synchronize_seqscans = on + +# - Other Platforms and Clients - + 
+#transform_null_equals = off + + +#------------------------------------------------------------------------------ +# ERROR HANDLING +#------------------------------------------------------------------------------ + +#exit_on_error = off # terminate session on any error? +#restart_after_crash = on # reinitialize after backend crash? +#remove_temp_files_after_crash = on # remove temporary files after + # backend crash? +#data_sync_retry = off # retry or panic on failure to fsync + # data? + # (change requires restart) +#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+) + + +#------------------------------------------------------------------------------ +# CONFIG FILE INCLUDES +#------------------------------------------------------------------------------ + +# These options allow settings to be loaded from files other than the +# default postgresql.conf. Note that these are directives, not variable +# assignments, so they can usefully be given more than once. + +#include_dir = '...' # include files ending in '.conf' from + # a directory, e.g., 'conf.d' +#include_if_exists = '...' # include file only if it exists +#include = '...' 
# include file + + +#------------------------------------------------------------------------------ +# CUSTOMIZED OPTIONS +#------------------------------------------------------------------------------ + +# Add settings for extensions here \ No newline at end of file diff --git a/ansible/files/postgresql_config/postgresql.service.j2 b/ansible/files/postgresql_config/postgresql.service.j2 new file mode 100644 index 0000000..1a3544b --- /dev/null +++ b/ansible/files/postgresql_config/postgresql.service.j2 @@ -0,0 +1,15 @@ +[Unit] +Description=PostgreSQL database server +Documentation=man:postgres(1) + +[Service] +Type=notify +User=postgres +ExecStart=/usr/lib/postgresql/bin/postgres -D /etc/postgresql +ExecReload=/bin/kill -HUP $MAINPID +KillMode=mixed +KillSignal=SIGINT +TimeoutSec=0 + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/ansible/files/postgrest.service.j2 b/ansible/files/postgrest.service.j2 new file mode 100644 index 0000000..8018a03 --- /dev/null +++ b/ansible/files/postgrest.service.j2 @@ -0,0 +1,15 @@ +[Unit] +Description=PostgREST + +[Service] +Type=simple +ExecStart=/opt/postgrest /etc/postgrest.conf +User=postgrest +StandardOutput=file:/var/log/postgrest.stdout +StandardError=file:/var/log/postgrest.error +Slice=services.slice +Restart=always +RestartSec=3 + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/queries.yml.j2 b/ansible/files/queries.yml.j2 new file mode 100644 index 0000000..0de4ca2 --- /dev/null +++ b/ansible/files/queries.yml.j2 @@ -0,0 +1,194 @@ +pg_database: + query: "SELECT SUM(pg_database_size(pg_database.datname)) / (1024 * 1024) as size_mb FROM pg_database" + master: true + cache_seconds: 30 + metrics: + - size_mb: + usage: "GAUGE" + description: "Disk space used by the database" + +pg_stat_bgwriter: + query: | + select checkpoints_timed as checkpoints_timed_total, + checkpoints_req as checkpoints_req_total, + checkpoint_write_time as checkpoint_write_time_total, + 
checkpoint_sync_time as checkpoint_sync_time_total, + buffers_checkpoint as buffers_checkpoint_total, + buffers_clean as buffers_clean_total, + maxwritten_clean as maxwritten_clean_total, + buffers_backend as buffers_backend_total, + buffers_backend_fsync as buffers_backend_fsync_total, + buffers_alloc as buffers_alloc_total, + stats_reset + from pg_stat_bgwriter + cache_seconds: 30 + master: true + metrics: + - checkpoints_timed_total: + usage: "COUNTER" + description: "Scheduled checkpoints performed" + - checkpoints_req_total: + usage: "COUNTER" + description: "Requested checkpoints performed" + - checkpoint_write_time_total: + usage: "COUNTER" + description: "Time spent writing checkpoint files to disk" + - checkpoint_sync_time_total: + usage: "COUNTER" + description: "Time spent synchronizing checkpoint files to disk" + - buffers_checkpoint_total: + usage: "COUNTER" + description: "Buffers written during checkpoints" + - buffers_clean_total: + usage: "COUNTER" + description: "Buffers written by bg writter" + - maxwritten_clean_total: + usage: "COUNTER" + description: "Number of times bg writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend_total: + usage: "COUNTER" + description: "Buffers written directly by a backend" + - buffers_backend_fsync_total: + usage: "COUNTER" + description: "fsync calls executed by a backend directly" + - buffers_alloc_total: + usage: "COUNTER" + description: "Buffers allocated" + - stats_reset: + usage: "COUNTER" + description: "Most recent stat reset time" + + +pg_stat_database: + cache_seconds: 30 + query: | + SELECT sum(numbackends) as num_backends, + sum(xact_commit) as xact_commit_total, + sum(xact_rollback) as xact_rollback_total, + sum(blks_read) as blks_read_total, + sum(blks_hit) as blks_hit_total, + sum(tup_returned) as tup_returned_total, + sum(tup_fetched) as tup_fetched_total, + sum(tup_inserted) as tup_inserted_total, + sum(tup_updated) as tup_updated_total, + sum(tup_deleted) as 
tup_deleted_total, + sum(conflicts) as conflicts_total, + sum(temp_files) as temp_files_total, + sum(temp_bytes) as temp_bytes_total, + sum(deadlocks) as deadlocks_total, + max(stats_reset) as most_recent_reset + FROM pg_stat_database + master: true + metrics: + - num_backends: + usage: "GAUGE" + description: "The number of active backends" + - xact_commit_total: + usage: "COUNTER" + description: "Transactions committed" + - xact_rollback_total: + usage: "COUNTER" + description: "Transactions rolled back" + - blks_read_total: + usage: "COUNTER" + description: "Number of disk blocks read" + - blks_hit_total: + usage: "COUNTER" + description: "Disk blocks found in buffer cache" + - tup_returned_total: + usage: "COUNTER" + description: "Rows returned by queries" + - tup_fetched_total: + usage: "COUNTER" + description: "Rows fetched by queries" + - tup_inserted_total: + usage: "COUNTER" + description: "Rows inserted" + - tup_updated_total: + usage: "COUNTER" + description: "Rows updated" + - tup_deleted_total: + usage: "COUNTER" + description: "Rows deleted" + - conflicts_total: + usage: "COUNTER" + description: "Queries canceled due to conflicts with recovery" + - temp_files_total: + usage: "COUNTER" + description: "Temp files created by queries" + - temp_bytes_total: + usage: "COUNTER" + description: "Temp data written by queries" + - deadlocks_total: + usage: "COUNTER" + description: "Deadlocks detected" + - most_recent_reset: + usage: "COUNTER" + description: "The most recent time one of the databases had its statistics reset" + +pg_stat_database_conflicts: + query: | + SELECT sum(confl_tablespace) as confl_tablespace_total, + sum(confl_lock) as confl_lock_total, + sum(confl_snapshot) as confl_snapshot_total, + sum(confl_bufferpin) as confl_bufferpin_total, + sum(confl_deadlock) as confl_deadlock_total + from pg_stat_database_conflicts + cache_seconds: 30 + master: true + metrics: + - confl_tablespace_total: + usage: "COUNTER" + description: "Queries cancelled due 
to dropped tablespaces" + - confl_lock_total: + usage: "COUNTER" + description: "Queries cancelled due to lock timeouts" + - confl_snapshot_total: + usage: "COUNTER" + description: "Queries cancelled due to old snapshots" + - confl_bufferpin_total: + usage: "COUNTER" + description: "Queries cancelled due to pinned buffers" + - confl_deadlock_total: + usage: "COUNTER" + description: "Queries cancelled due to deadlocks" + +pg_stat_statements: + query: "SELECT sum(calls) as total_queries, sum(total_exec_time / 1000) as total_time_seconds FROM extensions.pg_stat_statements t1 JOIN pg_database t3 ON (t1.dbid=t3.oid)" + master: true + metrics: + - total_queries: + usage: "COUNTER" + description: "Number of times executed" + - total_time_seconds: + usage: "COUNTER" + description: "Total time spent, in seconds" + +auth_users: + query: "select count(id) as user_count from auth.users" + master: true + cache_seconds: 30 + metrics: + - user_count: + usage: "GAUGE" + description: "Number of users in the project db" + +replication: + query: "SELECT pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn) AS realtime_lag_bytes, active AS realtime_slot_status FROM pg_replication_slots where slot_name = 'realtime'" + master: true + metrics: + - realtime_lag_bytes: + usage: "GAUGE" + description: "Replication Lag for Realtime" + - realtime_slot_status: + usage: "GAUGE" + description: "Replication Slot active status" + +storage: + query: "select sum(size) / (1024 * 1024) as storage_size_mb from storage.get_size_by_bucket()" + master: true + cache_seconds: 30 + metrics: + - storage_size_mb: + usage: "GAUGE" + description: "The total size used for all storage buckets, in mb" diff --git a/ansible/files/stat_extension.sql b/ansible/files/stat_extension.sql new file mode 100644 index 0000000..9378340 --- /dev/null +++ b/ansible/files/stat_extension.sql @@ -0,0 +1,2 @@ +CREATE SCHEMA IF NOT exists extensions; +CREATE EXTENSION IF NOT EXISTS pg_stat_statements with schema extensions; diff --git 
a/ansible/install_roles.yml b/ansible/install_roles.yml deleted file mode 100644 index b8fbce9..0000000 --- a/ansible/install_roles.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- - -- name: anxs.postgresql - src: https://github.com/anxs/postgresql - version: v1.12.0 \ No newline at end of file diff --git a/ansible/playbook-docker.yml b/ansible/playbook-docker.yml new file mode 100644 index 0000000..f4a937d --- /dev/null +++ b/ansible/playbook-docker.yml @@ -0,0 +1,52 @@ +- name: Preparing Docker container + hosts: localhost + tasks: + - name: Pull Postgres Image + docker_container: + name: "tealbase-postgres-build" + image: "postgres:13.3" + env: + LANGUAGE: "en_US.UTF-8" + LANG: "en_US.UTF-8" + LC_ALL: "en_US.UTF-8" + state: started + memory: 4G + memory_swap: 6G + command: tail -f /dev/null + - name: Add Postgres Image to Ansible Hosts + add_host: + name: "tealbase-postgres-build" + ansible_connection: docker + ansible_ssh_user: root + +- name: Build tealbase Postgres + hosts: "tealbase-postgres-build" + gather_facts: false + + vars_files: + - ./vars.yml + + tasks: + - name: Setup container + import_tasks: tasks/docker/setup.yml + + - name: Install Postgres extensions + import_tasks: tasks/setup-extensions.yml + + - name: Cleanup container + import_tasks: tasks/docker/cleanup.yml + +- name: Create tealbase/postgres docker image + hosts: localhost + tasks: + - name: Commit Docker image + command: docker commit --change='CMD ["postgres"]' "tealbase-postgres-build" "tealbase/postgres" + +- name: Clean Up Postgres Image + hosts: localhost + tasks: + - name: Remove Running Base Image + docker_container: + name: tealbase-postgres-build + state: absent + force_kill: yes \ No newline at end of file diff --git a/ansible/playbook.yml b/ansible/playbook.yml index e38930e..5362de9 100644 --- a/ansible/playbook.yml +++ b/ansible/playbook.yml @@ -3,25 +3,71 @@ pre_tasks: - import_tasks: tasks/setup-system.yml - + vars_files: - ./vars.yml - - roles: - - role: anxs.postgresql + + vars: 
+ sql_files: + - { source: "stat_extension.sql", dest: "01-extension.sql" } + + environment: + PATH: /usr/lib/postgresql/bin:{{ ansible_env.PATH }} tasks: - - name: Install non-Postgres extensions - import_tasks: tasks/setup-misc.yml + - name: Install Postgres from source + import_tasks: tasks/setup-postgres.yml - name: Install Postgres extensions import_tasks: tasks/setup-extensions.yml + - name: Start Postgres Database + systemd: + name: postgresql + state: started + + - name: Install WAL-G + import_tasks: tasks/setup-wal-g.yml + + - name: Install PgBouncer + import_tasks: tasks/setup-pgbouncer.yml + tags: + - install-pgbouncer + + - name: Install PostgREST + import_tasks: tasks/setup-postgrest.yml + tags: + - install-postgrest + + - name: Install tealbase specific content + import_tasks: tasks/setup-tealbase-internal.yml + tags: + - install-tealbase-internal + - name: Adjust APT update intervals - copy: + copy: src: files/apt_periodic dest: /etc/apt/apt.conf.d/10periodic + - name: Transfer init SQL files + copy: + src: files/{{ item.source }} + dest: /tmp/{{ item.dest }} + loop: "{{ sql_files }}" + + - name: Execute init SQL files + become: yes + become_user: postgres + shell: + cmd: /usr/lib/postgresql/bin/psql -f /tmp/{{ item.dest }} + loop: "{{ sql_files }}" + + - name: Delete SQL scripts + file: + path: /tmp/{{ item.dest }} + state: absent + loop: "{{ sql_files }}" + - name: UFW - Allow SSH connections ufw: rule: allow @@ -30,10 +76,46 @@ - name: UFW - Allow connections to postgreSQL (5432) ufw: rule: allow - port: '5432' + port: "5432" - - name: UFW - Deny all other incoming traffix by default - ufw: + - name: UFW - Allow connections to postgreSQL (6543) + ufw: + rule: allow + port: "6543" + tags: + - install-pgbouncer + + - name: UFW - Deny all other incoming traffic by default + ufw: state: enabled policy: deny - direction: incoming \ No newline at end of file + direction: incoming + + - name: Setup logrotate for postgres logs + copy: + src: 
files/logrotate-postgres + dest: /etc/logrotate.d/postgres + + - name: Configure logrotation to run every hour + shell: + cmd: mv /etc/cron.daily/logrotate /etc/cron.hourly/ + become: yes + + - name: restart crond + systemd: + state: restarted + name: cron + become: yes + + - name: Enhance fail2ban + import_tasks: tasks/setup-fail2ban.yml + + # Install EC2 instance connect + # Only for AWS images + - name: install EC2 instance connect + become: yes + apt: + pkg: + - ec2-instance-connect + tags: + - aws-only diff --git a/ansible/requirements.yml b/ansible/requirements.yml deleted file mode 100644 index e69de29..0000000 diff --git a/ansible/tasks/docker/cleanup.yml b/ansible/tasks/docker/cleanup.yml new file mode 100644 index 0000000..2ccc2af --- /dev/null +++ b/ansible/tasks/docker/cleanup.yml @@ -0,0 +1,67 @@ + + +- name: Cleanup - remove build dependencies + apt: + pkg: + - python3 + - rsync + - ca-certificates + - build-essential + - postgresql-server-dev-13 + - curl + - git-core + - gpp + - cpp + - pkg-config + - apt-transport-https + - cmake + - ninja-build + - python + state: absent + +- name: Cleanup - apt update and apt upgrade + apt: update_cache=yes upgrade=yes + # SEE http://archive.vn/DKJjs#parameter-upgrade + +- name: Cleanup - remove dependencies that are no longer required + apt: + autoremove: yes + +- name: Cleanup - remove useless packages from the cache + apt: + autoclean: yes + +- name: Cleanup - reinstall headless jdk + apt: + pkg: + - default-jdk-headless + update_cache: yes + install_recommends: no + +- name: Cleanup - find all files in /tmp + find: + paths: /tmp + file_type: any + register: tmp_items_to_delete + +- name: Cleanup - delete all items in /tmp + file: + path: "/tmp/{{ item.path | basename }}" + state: absent + force: yes + with_items: "{{ tmp_items_to_delete.files }}" + +- name: Cleanup - find all files in /var/lib/apt/lists/* + find: + paths: /var/lib/apt/lists + file_type: any + register: var_items_to_delete + +- name: Cleanup - 
delete all items in /tmp + file: + path: "/var/lib/apt/lists/{{ item.path | basename }}" + state: absent + force: yes + with_items: "{{ var_items_to_delete.files }}" + + \ No newline at end of file diff --git a/ansible/tasks/docker/setup.yml b/ansible/tasks/docker/setup.yml new file mode 100644 index 0000000..70a54d2 --- /dev/null +++ b/ansible/tasks/docker/setup.yml @@ -0,0 +1,69 @@ +- name: Install Python3 + raw: apt update && apt upgrade -y && apt install python3 -y + +- name: Setup - install common dependencies + apt: + pkg: + - rsync + - ca-certificates + - build-essential + - postgresql-server-dev-13 + - curl + - git-core + - gpp + - cpp + - pkg-config + - apt-transport-https + - cmake + - ninja-build + - python + update_cache: yes + install_recommends: no + +# Find platform architecture and set as a variable +- name: Setup - finding platform architecture + shell: if [ $(uname -m) = "aarch64" ]; then echo "arm64"; else echo "amd64"; fi + register: platform_output +- set_fact: + platform: "{{ platform_output.stdout }}" + +- name: Setup - import postgresql.conf + synchronize: + src: files/postgresql_config/postgresql.conf.j2 + dest: etc/postgresql/postgresql.conf + +- set_fact: + regex_string: "#unix_socket_directories = '/tmp'" + +- name: Setup - modify unix_socket_directories + become: yes + replace: + path: /etc/postgresql/postgresql.conf + regexp: '{{ regex_string }}' + replace: unix_socket_directories = '/var/run/postgresql' + +- name: Setup - modify unix_socket_directories + become: yes + replace: + path: /etc/postgresql/postgresql.conf + regexp: '{{ regex_string }}' + replace: unix_socket_directories = '/var/run/postgresql' + +- name: Setup - modify hba_file directory + become: yes + replace: + path: /etc/postgresql/postgresql.conf + regexp: hba_file = '/etc/postgresql/pg_hba.conf' + replace: hba_file = '/var/lib/postgresql/data/pg_hba.conf' + +- name: Setup - modify ident_file directory + become: yes + replace: + path: /etc/postgresql/postgresql.conf + 
regexp: ident_file = '/etc/postgresql/pg_ident.conf' + replace: ident_file = '/var/lib/postgresql/data/pg_ident.conf' + +- name: Setup - add init script to /docker-entrypoint-initdb.d + synchronize: + src: files/docker_mnt/init.sh + dest: /docker-entrypoint-initdb.d/init.sh \ No newline at end of file diff --git a/ansible/tasks/internal/node-exporter.yml b/ansible/tasks/internal/node-exporter.yml new file mode 100644 index 0000000..355dcdb --- /dev/null +++ b/ansible/tasks/internal/node-exporter.yml @@ -0,0 +1,46 @@ +- name: UFW - Allow connections to node exporter ports + ufw: + rule: allow + port: "9100" + +- name: Node Exporter - download binary archive + get_url: + url: "https://github.com/prometheus/node_exporter/releases/download/v{{ node_exporter_release }}/node_exporter-{{ node_exporter_release }}.linux-{{ platform }}.tar.gz" + dest: /tmp/node_exporter.tar.gz + checksum: "{{ node_exporter_release_checksum[platform] }}" + +- name: create directories + file: + state: directory + owner: root + path: "/opt/node_exporter" + +- name: node_exporter - unpack archives in /opt + unarchive: + remote_src: yes + src: "/tmp/node_exporter.tar.gz" + dest: /opt/node_exporter + owner: root + extra_opts: [--strip-components=1] + +- name: node_exporter - create service files + template: + src: files/node_exporter.service.j2 + dest: /etc/systemd/system/node_exporter.service + vars: + collectors: + - cpu + - diskstats + - filesystem + - filesystem.ignored-mount-points='^/.+' + - loadavg + - meminfo + - netdev + - netdev.device-exclude='lo' + +- name: node_exporter - reload systemd + systemd: + daemon_reload: yes + enabled: no + state: stopped + name: node_exporter diff --git a/ansible/tasks/internal/optimizations.yml b/ansible/tasks/internal/optimizations.yml new file mode 100644 index 0000000..8ee6551 --- /dev/null +++ b/ansible/tasks/internal/optimizations.yml @@ -0,0 +1,15 @@ +- name: ensure services are stopped + community.general.snap: + name: amazon-ssm-agent + state: 
absent + +- name: ensure services are stopped and disabled for first boot + systemd: + enabled: no + name: '{{ item }}' + state: stopped + with_items: + - snapd + - postgresql + - pgbouncer + - fail2ban diff --git a/ansible/tasks/internal/postgres-exporter.yml b/ansible/tasks/internal/postgres-exporter.yml new file mode 100644 index 0000000..4fecaea --- /dev/null +++ b/ansible/tasks/internal/postgres-exporter.yml @@ -0,0 +1,46 @@ +- name: UFW - Allow connections to exporter for prometheus + ufw: + rule: allow + port: "9187" + +- name: create directories + file: + state: directory + path: "{{ item }}" + owner: root + mode: '0700' + become: yes + with_items: + - /opt/postgres_exporter + - /etc/systemd/system/postgres_exporter.service.d + +- name: download postgres exporter + get_url: + url: "https://github.com/prometheus-community/postgres_exporter/releases/download/v{{ postgres_exporter_release }}/postgres_exporter-{{ postgres_exporter_release }}.linux-{{ platform }}.tar.gz" + dest: /tmp/postgres_exporter.tar.gz + checksum: "{{ postgres_exporter_release_checksum[platform] }}" + +- name: expand postgres exporter + unarchive: + remote_src: yes + src: /tmp/postgres_exporter.tar.gz + dest: /opt/postgres_exporter + extra_opts: [--strip-components=1] + become: yes + +- name: exporter create a service + template: + src: files/postgres_exporter.service.j2 + dest: /etc/systemd/system/postgres_exporter.service + +- name: exporter copy over queries + template: + src: files/queries.yml.j2 + dest: /opt/postgres_exporter/queries.yml + +- name: exporter ensure service is present + systemd: + enabled: no + name: postgres_exporter + daemon_reload: yes + state: stopped diff --git a/ansible/tasks/internal/supautils.yml b/ansible/tasks/internal/supautils.yml new file mode 100644 index 0000000..45419ad --- /dev/null +++ b/ansible/tasks/internal/supautils.yml @@ -0,0 +1,45 @@ +# supautils +- name: supautils - download latest release + get_url: + url: 
"https://github.com/tealbase/supautils/archive/refs/tags/v{{ supautils_release }}.tar.gz" + dest: /tmp/supautils-{{ supautils_release }}.tar.gz + checksum: "{{ supautils_release_checksum }}" + +- name: supautils - unpack archive + unarchive: + remote_src: yes + src: /tmp/supautils-{{ supautils_release }}.tar.gz + dest: /tmp + become: yes + +- name: supautils - build + make: + chdir: /tmp/supautils-{{ supautils_release }} + become: yes + +- name: supautils - install + make: + chdir: /tmp/supautils-{{ supautils_release }} + target: install + become: yes + +- name: supautils - set supautils.reserved_roles + become: yes + lineinfile: + path: /etc/postgresql/postgresql.conf + state: present + line: supautils.reserved_roles = 'tealbase_admin, tealbase_auth_admin, tealbase_storage_admin, dashboard_user, pgbouncer, service_role, authenticator, authenticated, anon' + +- name: supautils - set supautils.reserved_memberships + become: yes + lineinfile: + path: /etc/postgresql/postgresql.conf + state: present + line: supautils.reserved_memberships = 'pg_read_server_files, pg_write_server_files, pg_execute_server_program' + +- name: supautils - add supautils to shared_preload_libraries + become: yes + replace: + path: /etc/postgresql/postgresql.conf + regexp: shared_preload_libraries = ' + replace: shared_preload_libraries = 'supautils, diff --git a/ansible/tasks/postgres-extensions/01-postgis.yml b/ansible/tasks/postgres-extensions/01-postgis.yml new file mode 100644 index 0000000..9aaad6d --- /dev/null +++ b/ansible/tasks/postgres-extensions/01-postgis.yml @@ -0,0 +1,77 @@ +# postgis +- name: postgis - download & install dependencies + apt: + pkg: + - libgeos-dev + - libproj-dev + - libgdal-dev + - libjson-c-dev + - libxml2-dev + - libboost-all-dev + - libcgal-dev + - libmpfr-dev + - libgmp-dev + - cmake + update_cache: yes + cache_valid_time: 3600 + install_recommends: no + +- name: postgis - download SFCGAL dependency + get_url: + url: 
"https://gitlab.com/Oslandia/SFCGAL/-/archive/v{{ sfcgal_release }}/SFCGAL-v{{ sfcgal_release }}.tar.gz" + dest: /tmp/SFCGAL-v{{ sfcgal_release }}.tar.gz + checksum: "{{ sfcgal_release_checksum }}" + +- name: postgis - unpack SFCGAL + unarchive: + remote_src: yes + src: /tmp/SFCGAL-v{{ sfcgal_release }}.tar.gz + dest: /tmp + become: yes + +- name: postgis - compile SFCGAL + shell: + cmd: "cmake ." + chdir: /tmp/SFCGAL-v{{ sfcgal_release }} + become: yes + +- name: postgis - build SFCGAL + make: + chdir: /tmp/SFCGAL-v{{ sfcgal_release }} + become: yes + +- name: postgis - install SFCGAL + make: + chdir: /tmp/SFCGAL-v{{ sfcgal_release }} + target: install + become: yes + +- name: postgis - download latest release + get_url: + url: "https://download.osgeo.org/postgis/source/postgis-{{ postgis_release }}.tar.gz" + dest: /tmp/postgis-{{ postgis_release }}.tar.gz + checksum: "{{ postgis_release_checksum }}" + +- name: postgis - unpack archive + unarchive: + remote_src: yes + src: /tmp/postgis-{{ postgis_release }}.tar.gz + dest: /tmp + become: yes + +- name: postgis - configure + shell: + cmd: "./configure --without-protobuf --with-sfcgal" + chdir: /tmp/postgis-{{ postgis_release }} + become: yes + +- name: postgis - build + make: + chdir: /tmp/postgis-{{ postgis_release }} + become: yes + +- name: postgis - install + make: + chdir: /tmp/postgis-{{ postgis_release }} + target: install + become: yes \ No newline at end of file diff --git a/ansible/tasks/postgres-extensions/02-pgrouting.yml b/ansible/tasks/postgres-extensions/02-pgrouting.yml new file mode 100644 index 0000000..9020d00 --- /dev/null +++ b/ansible/tasks/postgres-extensions/02-pgrouting.yml @@ -0,0 +1,36 @@ +# pgRouting +- name: pgRouting - download latest release + get_url: + url: "https://github.com/pgRouting/pgrouting/releases/download/v{{ pgrouting_release }}/pgrouting-{{ pgrouting_release }}.tar.gz" + dest: /tmp/pgrouting-{{ pgrouting_release }}.tar.gz + checksum: "{{ pgrouting_release_checksum }}" + +- 
name: pgRouting - unpack archive + unarchive: + remote_src: yes + src: /tmp/pgrouting-{{ pgrouting_release }}.tar.gz + dest: /tmp + become: yes + +- name: pgRouting - create build directory + file: + path: /tmp/pgrouting-{{ pgrouting_release }}/build + state: directory + become: yes + +- name: pgRouting - compile + shell: + cmd: "cmake -DBUILD_HTML=OFF -DBUILD_DOXY=OFF .." + chdir: /tmp/pgrouting-{{ pgrouting_release }}/build + become: yes + +- name: pgRouting - build + make: + chdir: /tmp/pgrouting-{{ pgrouting_release }}/build + become: yes + +- name: pgRouting - install + make: + chdir: /tmp/pgrouting-{{ pgrouting_release }}/build + target: install + become: yes \ No newline at end of file diff --git a/ansible/tasks/postgres-extensions/03-pgtap.yml b/ansible/tasks/postgres-extensions/03-pgtap.yml new file mode 100644 index 0000000..6dc11f0 --- /dev/null +++ b/ansible/tasks/postgres-extensions/03-pgtap.yml @@ -0,0 +1,19 @@ +# pgTAP +- name: pgTAP - download latest release + get_url: + url: "https://github.com/theory/pgtap/archive/v{{ pgtap_release }}.tar.gz" + dest: /tmp/pgtap-{{ pgtap_release }}.tar.gz + checksum: "{{ pgtap_release_checksum }}" + +- name: pgTAP - unpack archive + unarchive: + remote_src: yes + src: /tmp/pgtap-{{ pgtap_release }}.tar.gz + dest: /tmp + become: yes + +- name: pgTAP - install + make: + chdir: /tmp/pgtap-{{ pgtap_release }} + target: install + become: yes \ No newline at end of file diff --git a/ansible/tasks/postgres-extensions/04-pg_cron.yml b/ansible/tasks/postgres-extensions/04-pg_cron.yml new file mode 100644 index 0000000..5bdf294 --- /dev/null +++ b/ansible/tasks/postgres-extensions/04-pg_cron.yml @@ -0,0 +1,31 @@ +# pg_cron +- name: pg_cron - download latest release + get_url: + url: "https://github.com/citusdata/pg_cron/archive/refs/tags/v{{ pg_cron_release }}.tar.gz" + dest: /tmp/pg_cron-{{ pg_cron_release }}.tar.gz + checksum: "{{ pg_cron_release_checksum }}" + +- name: pg_cron - unpack archive + unarchive: + remote_src: 
yes + src: /tmp/pg_cron-{{ pg_cron_release }}.tar.gz + dest: /tmp + become: yes + +- name: pg_cron - build + make: + chdir: /tmp/pg_cron-{{ pg_cron_release }} + become: yes + +- name: pg_cron - install + make: + chdir: /tmp/pg_cron-{{ pg_cron_release }} + target: install + become: yes + +- name: pg_cron - set cron.database_name + become: yes + lineinfile: + path: /etc/postgresql/postgresql.conf + state: present + line: cron.database_name = 'postgres' \ No newline at end of file diff --git a/ansible/tasks/postgres-extensions/05-pgaudit.yml b/ansible/tasks/postgres-extensions/05-pgaudit.yml new file mode 100644 index 0000000..6d3b2bc --- /dev/null +++ b/ansible/tasks/postgres-extensions/05-pgaudit.yml @@ -0,0 +1,37 @@ +# pgAudit +- name: pgAudit - download & install dependencies + apt: + pkg: + - libssl-dev + - libkrb5-dev + update_cache: yes + install_recommends: no + +- name: pgAudit - download latest release + get_url: + url: "https://github.com/pgaudit/pgaudit/archive/refs/tags/{{ pgaudit_release }}.tar.gz" + dest: /tmp/pgaudit-{{ pgaudit_release }}.tar.gz + checksum: "{{ pgaudit_release_checksum }}" + +- name: pgAudit - unpack archive + unarchive: + remote_src: yes + src: /tmp/pgaudit-{{ pgaudit_release }}.tar.gz + dest: /tmp + become: yes + +- name: pgAudit - build + make: + chdir: /tmp/pgaudit-{{ pgaudit_release }} + target: check + params: + USE_PGXS: 1 + become: yes + +- name: pgAudit - install + make: + chdir: /tmp/pgaudit-{{ pgaudit_release }} + target: install + params: + USE_PGXS: 1 + become: yes \ No newline at end of file diff --git a/ansible/tasks/postgres-extensions/06-pgjwt.yml b/ansible/tasks/postgres-extensions/06-pgjwt.yml new file mode 100644 index 0000000..b2734e1 --- /dev/null +++ b/ansible/tasks/postgres-extensions/06-pgjwt.yml @@ -0,0 +1,12 @@ +# pgjwt +- name: pgjwt - download from master branch + git: + repo: https://github.com/michelp/pgjwt.git + dest: /tmp/pgjwt + version: master + +- name: pgjwt - install + make: + chdir: /tmp/pgjwt + 
target: install + become: yes \ No newline at end of file diff --git a/ansible/tasks/postgres-extensions/07-pgsql-http.yml b/ansible/tasks/postgres-extensions/07-pgsql-http.yml new file mode 100644 index 0000000..6fd5cf9 --- /dev/null +++ b/ansible/tasks/postgres-extensions/07-pgsql-http.yml @@ -0,0 +1,37 @@ +# pgsql-http +- name: pgsql-http - libcurl4 package + apt: + pkg: + - libcurl4 + state: absent + +- name: pgsql-http - download & install dependencies + apt: + pkg: + - libcurl4-gnutls-dev + update_cache: yes + install_recommends: no + +- name: pgsql-http - download latest release + get_url: + url: "https://github.com/pramsey/pgsql-http/archive/refs/tags/v{{ pgsql_http_release }}.tar.gz" + dest: /tmp/pgsql_http-{{ pgsql_http_release }}.tar.gz + checksum: "{{ pgsql_http_release_checksum }}" + +- name: pgsql-http - unpack archive + unarchive: + remote_src: yes + src: /tmp/pgsql_http-{{ pgsql_http_release }}.tar.gz + dest: /tmp + become: yes + +- name: pgsql-http - build + make: + chdir: /tmp/pgsql-http-{{ pgsql_http_release }} + become: yes + +- name: pgsql-http - install + make: + chdir: /tmp/pgsql-http-{{ pgsql_http_release }} + target: install + become: yes \ No newline at end of file diff --git a/ansible/tasks/postgres-extensions/08-plpgsql_check.yml b/ansible/tasks/postgres-extensions/08-plpgsql_check.yml new file mode 100644 index 0000000..16fb5aa --- /dev/null +++ b/ansible/tasks/postgres-extensions/08-plpgsql_check.yml @@ -0,0 +1,32 @@ +# plpgsql_check +- name: plpgsql_check - download & install dependencies + apt: + pkg: + - libicu-dev + update_cache: yes + install_recommends: no + +- name: plpgsql_check - download latest release + get_url: + url: "https://github.com/okbob/plpgsql_check/archive/refs/tags/v{{ plpgsql_check_release }}.tar.gz" + dest: /tmp/plpgsql_check-{{ plpgsql_check_release }}.tar.gz + checksum: "{{ plpgsql_check_release_checksum }}" + +- name: plpgsql_check - unpack archive + unarchive: + remote_src: yes + src: /tmp/plpgsql_check-{{ 
plpgsql_check_release }}.tar.gz + dest: /tmp + become: yes + +- name: plpgsql_check - clean + make: + chdir: /tmp/plpgsql_check-{{ plpgsql_check_release }} + target: clean + become: yes + +- name: plpgsql_check - install + make: + chdir: /tmp/plpgsql_check-{{ plpgsql_check_release }} + target: install + become: yes \ No newline at end of file diff --git a/ansible/tasks/postgres-extensions/09-pg-safeupdate.yml b/ansible/tasks/postgres-extensions/09-pg-safeupdate.yml new file mode 100644 index 0000000..e27cfd9 --- /dev/null +++ b/ansible/tasks/postgres-extensions/09-pg-safeupdate.yml @@ -0,0 +1,24 @@ +# pg-safeupdate +- name: pg-safeupdate - download latest release + get_url: + url: "https://github.com/eradman/pg-safeupdate/archive/refs/tags/{{ pg_safeupdate_release }}.tar.gz" + dest: /tmp/pg_safeupdate-{{ pg_safeupdate_release }}.tar.gz + checksum: "{{ pg_safeupdate_release_checksum }}" + +- name: pg-safeupdate - unpack archive + unarchive: + remote_src: yes + src: /tmp/pg_safeupdate-{{ pg_safeupdate_release }}.tar.gz + dest: /tmp + become: yes + +- name: pg-safeupdate - build + make: + chdir: /tmp/pg-safeupdate-{{ pg_safeupdate_release }} + become: yes + +- name: pg-safeupdate - install + make: + chdir: /tmp/pg-safeupdate-{{ pg_safeupdate_release }} + target: install + become: yes \ No newline at end of file diff --git a/ansible/tasks/postgres-extensions/10-timescaledb.yml b/ansible/tasks/postgres-extensions/10-timescaledb.yml new file mode 100644 index 0000000..4679899 --- /dev/null +++ b/ansible/tasks/postgres-extensions/10-timescaledb.yml @@ -0,0 +1,31 @@ +# timescaledb +- name: timescaledb - download & install dependencies + apt: + pkg: + - cmake + update_cache: yes + install_recommends: no + +- name: timescaledb - download latest release + git: + repo: https://github.com/timescale/timescaledb.git + dest: /tmp/timescaledb + version: "{{ timescaledb_release }}" + become: yes + +- name: timescaledb - bootstrap + shell: + cmd: "./bootstrap -DAPACHE_ONLY=1 
-DREGRESS_CHECKS=OFF" + chdir: /tmp/timescaledb + become: yes + +- name: timescaledb - build + make: + chdir: /tmp/timescaledb/build + become: yes + +- name: timescaledb - install + make: + chdir: /tmp/timescaledb/build + target: install + become: yes \ No newline at end of file diff --git a/ansible/tasks/postgres-extensions/11-wal2json.yml b/ansible/tasks/postgres-extensions/11-wal2json.yml new file mode 100644 index 0000000..8fabbdd --- /dev/null +++ b/ansible/tasks/postgres-extensions/11-wal2json.yml @@ -0,0 +1,24 @@ +# wal2json +- name: wal2json - download latest release + get_url: + url: "https://github.com/eulerto/wal2json/archive/refs/tags/wal2json_{{ wal2json_release }}.tar.gz" + dest: /tmp/wal2json-{{ wal2json_release }}.tar.gz + checksum: "{{ wal2json_release_checksum }}" + +- name: wal2json - unpack archive + unarchive: + remote_src: yes + src: /tmp/wal2json-{{ wal2json_release }}.tar.gz + dest: /tmp + become: yes + +- name: wal2json - build + make: + chdir: /tmp/wal2json-wal2json_{{ wal2json_release }} + become: yes + +- name: wal2json - install + make: + chdir: /tmp/wal2json-wal2json_{{ wal2json_release }} + target: install + become: yes \ No newline at end of file diff --git a/ansible/tasks/postgres-extensions/12-pljava.yml b/ansible/tasks/postgres-extensions/12-pljava.yml new file mode 100644 index 0000000..3bea59c --- /dev/null +++ b/ansible/tasks/postgres-extensions/12-pljava.yml @@ -0,0 +1,62 @@ +# pljava +- name: pljava - download & install dependencies + apt: + pkg: + - maven + - default-jre + - default-jdk + update_cache: yes + install_recommends: no + +- name: pljava - download latest release + get_url: + url: https://github.com/tada/pljava/archive/V{{ pljava_release }}.tar.gz + dest: /tmp/pljava-{{ pljava_release }}.tar.gz + checksum: "{{ pljava_release_checksum }}" + +- name: pljava - unpack archive + unarchive: + remote_src: yes + src: /tmp/pljava-{{ pljava_release }}.tar.gz + dest: /tmp + become: yes + +- name: pljava - build + become: yes 
+ shell: + cmd: mvn clean install + chdir: /tmp/pljava-{{ pljava_release }} + +- name: pljava - install + become: yes + shell: + cmd: java -jar pljava-packaging/target/pljava-pg13.jar + chdir: /tmp/pljava-{{ pljava_release }} + +- name: pljava - remove build dependencies + apt: + pkg: + - maven + - default-jre + - default-jdk + state: absent + +- name: pljava - install headless jdk + apt: + pkg: + - default-jdk-headless + update_cache: yes + install_recommends: no + +- name: pljava - set pljava.libjvm_location + become: yes + lineinfile: + path: /etc/postgresql/postgresql.conf + state: present + line: pljava.libjvm_location = '/usr/lib/jvm/java-11-openjdk-{{ platform }}/lib/server/libjvm.so' + +- name: pljava - remove ~/.m2 directory + become: yes + file: + path: ~/.m2 + state: absent \ No newline at end of file diff --git a/ansible/tasks/postgres-extensions/13-plv8.yml b/ansible/tasks/postgres-extensions/13-plv8.yml new file mode 100644 index 0000000..1966649 --- /dev/null +++ b/ansible/tasks/postgres-extensions/13-plv8.yml @@ -0,0 +1,48 @@ +# plv8 +- name: plv8 - download & install dependencies + apt: + pkg: + - build-essential + - ca-certificates + - curl + - git-core + - gpp + - cpp + - pkg-config + - apt-transport-https + - cmake + - libc++-dev + - libc++abi-dev + - libc++1 + - libglib2.0-dev + - libtinfo5 + - libc++abi1 + - ninja-build + - python + update_cache: yes + install_recommends: no + +- name: plv8 - download latest release + git: + repo: https://github.com/plv8/plv8.git + dest: /tmp/plv8 + version: 3656177d384e3e02b74faa8e2931600f3690ab59 + become: yes + +- name: Create a symbolic link + file: + src: /lib/aarch64-linux-gnu/libc++.so.1 + dest: /lib/aarch64-linux-gnu/libc++.so + state: link + when: platform == "arm64" + +- name: plv8 - build + make: + chdir: /tmp/plv8 + become: yes + +- name: plv8 - install + make: + chdir: /tmp/plv8 + target: install + become: yes \ No newline at end of file diff --git a/ansible/tasks/setup-extensions.yml 
b/ansible/tasks/setup-extensions.yml index 85cd462..a9f2d3f 100644 --- a/ansible/tasks/setup-extensions.yml +++ b/ansible/tasks/setup-extensions.yml @@ -1,271 +1,38 @@ -# postgis -- name: Install postgis for postgreSQL versions < 10 - apt: - pkg: - - libgeos-c1v5 - - "postgresql-{{ postgresql_version }}-postgis-{{ postgresql_ext_postgis_version }}" - - "postgresql-{{ postgresql_version }}-postgis-scripts" - update_cache: yes - cache_valid_time: 3600 - when: postgresql_version < 10 +- name: Install postgis + import_tasks: tasks/postgres-extensions/01-postgis.yml -- name: Install postgis for postgreSQL versions >= 10 - apt: - pkg: - - libgeos-c1v5 - - "postgresql-{{ postgresql_version }}-postgis-{{ postgresql_ext_postgis_version }}" - - "postgresql-{{ postgresql_version }}-postgis-{{ postgresql_ext_postgis_version }}-scripts" - update_cache: yes - cache_valid_time: 3600 - when: postgresql_version >= 10 +- name: Install pgrouting + import_tasks: tasks/postgres-extensions/02-pgrouting.yml -# pgTAP -- name: pgTAP - download latest release - get_url: - url: "https://github.com/theory/pgtap/archive/v{{ pgtap_release }}.tar.gz" - dest: /tmp - checksum: "{{ pgtap_release_checksum }}" +- name: Install pgtap + import_tasks: tasks/postgres-extensions/03-pgtap.yml -- name: pgTAP - unpack archive - unarchive: - remote_src: yes - src: /tmp/pgtap-{{ pgtap_release }}.tar.gz - dest: /tmp - become: yes +- name: Install pg_cron + import_tasks: tasks/postgres-extensions/04-pg_cron.yml -- name: pgTAP - install - make: - chdir: /tmp/pgtap-{{ pgtap_release }} - target: install - become: yes +- name: Install pgaudit + import_tasks: tasks/postgres-extensions/05-pgaudit.yml -# plpython -- name: Install plpython - apt: - pkg: postgresql-plpython3-12 - update_cache: yes - cache_valid_time: 3600 +- name: Install pgjwt + import_tasks: tasks/postgres-extensions/06-pgjwt.yml -# pgAudit -- name: pgAudit - download & install dependencies - apt: - pkg: - - postgresql-server-dev-12 - - libssl-dev - - 
libkrb5-dev - update_cache: yes - install_recommends: no +- name: Install pgsql-http + import_tasks: tasks/postgres-extensions/07-pgsql-http.yml -- name: pgAudit - download latest release - get_url: - url: "https://github.com/pgaudit/pgaudit/archive/{{ pgaudit_release }}.tar.gz" - dest: /tmp - checksum: "{{ pgaudit_release_checksum }}" +- name: Install plpgsql_check + import_tasks: tasks/postgres-extensions/08-plpgsql_check.yml -- name: pgAudit - unpack archive - unarchive: - remote_src: yes - src: /tmp/pgaudit-{{ pgaudit_release }}.tar.gz - dest: /tmp - become: yes +- name: Install pg-safeupdate + import_tasks: tasks/postgres-extensions/09-pg-safeupdate.yml -- name: pgAudit - build - make: - chdir: /tmp/pgaudit-{{ pgaudit_release }} - target: check - params: - USE_PGXS: 1 - become: yes +# - name: Install timescaledb +# import_tasks: tasks/postgres-extensions/10-timescaledb.yml -- name: pgAudit - install - make: - chdir: /tmp/pgaudit-{{ pgaudit_release }} - target: install - params: - USE_PGXS: 1 - become: yes +- name: Install wal2json + import_tasks: tasks/postgres-extensions/11-wal2json.yml -# pgjwt -- name: pgjwt - download from master branch - git: - repo: https://github.com/michelp/pgjwt.git - dest: /tmp/pgjwt - version: master +- name: Install pljava + import_tasks: tasks/postgres-extensions/12-pljava.yml -- name: pgjwt - install - make: - chdir: /tmp/pgjwt - target: install - become: yes - -- name: Remove libcurl4 package - apt: - pkg: - - libcurl4 - state: absent - -# pgsql-http -- name: pgsql-http - download & install dependencies - apt: - pkg: - - libcurl4-gnutls-dev - update_cache: yes - install_recommends: yes - -- name: pgsql-http - download latest release - get_url: - url: "https://github.com/pramsey/pgsql-http/archive/v{{ pgsql_http_release }}.tar.gz" - dest: /tmp - checksum: "{{ pgsql_http_release_checksum }}" - -- name: pgsql-http - unpack archive - unarchive: - remote_src: yes - src: /tmp/pgsql-http-{{ pgsql_http_release }}.tar.gz - dest: /tmp - 
become: yes - -- name: pgsql-http - build - make: - chdir: /tmp/pgsql-http-{{ pgsql_http_release }} - become: yes - -- name: pgsql-http - install - make: - chdir: /tmp/pgsql-http-{{ pgsql_http_release }} - target: install - become: yes - -# plpgsql_check -- name: plpgsql_check - download & install dependencies - apt: - pkg: - - libicu-dev - update_cache: yes - install_recommends: no - -- name: plpgsql_check - download latest release - get_url: - url: https://github.com/okbob/plpgsql_check/archive/v{{ plpgsql_check_release }}.tar.gz - dest: /tmp - checksum: "{{ plpgsql_check_release_checksum }}" - -- name: plpgsql_check - unpack archive - unarchive: - remote_src: yes - src: /tmp/plpgsql_check-{{ plpgsql_check_release }}.tar.gz - dest: /tmp - become: yes - -- name: plpgsql_check - clean - make: - chdir: /tmp/plpgsql_check-{{ plpgsql_check_release }} - target: clean - become: yes - -- name: plpgsql_check - install - make: - chdir: /tmp/plpgsql_check-{{ plpgsql_check_release }} - target: install - become: yes - -# pljava -- name: pljava - download & install dependencies - apt: - pkg: - - maven - - default-jre - - default-jdk - update_cache: yes - install_recommends: yes - -- name: pljava - download latest release - get_url: - url: https://github.com/tada/pljava/archive/V{{ pljava_release }}.tar.gz - dest: /tmp - checksum: "{{ pljava_release_checksum }}" - -- name: pljava - unpack archive - unarchive: - remote_src: yes - src: /tmp/pljava-{{ pljava_release }}.tar.gz - dest: /tmp - become: yes - -- name: pljava - build - become: yes - shell: - cmd: mvn clean install - chdir: /tmp/pljava-{{ pljava_release }} - -- name: pljava - install - become: yes - shell: - cmd: java -jar pljava-packaging/target/pljava-pg12.3-amd64-Linux-gpp.jar - chdir: /tmp/pljava-{{ pljava_release }} - -- name: pljava - remove build dependencies - apt: - pkg: - - maven - - default-jre - - default-jdk - state: absent - -- name: pljava - install headless jdk - apt: - pkg: - - default-jdk-headless - 
update_cache: yes - install_recommends: no - -- name: pljava - set pljava.libjvm_location - become: yes - shell: - cmd: echo "pljava.libjvm_location = '/usr/lib/jvm/java-11-openjdk-amd64/lib/server/libjvm.so'" >> /etc/postgresql/12/main/postgresql.conf - -- name: pljava - remove ~/.m2 directory - become: yes - file: - path: ~/.m2 - state: absent - -# plv8 -- name: plv8 - download & install dependencies - apt: - pkg: - - build-essential - - ca-certificates - - curl - - git-core - - gpp - - cpp - - pkg-config - - apt-transport-https - - cmake - - libc++-dev - - libc++abi-dev - - postgresql-server-dev-12 - - libc++1 - - libtinfo5 - - libc++abi1 - update_cache: yes - install_recommends: no - -- name: plv8 - download latest release - git: - repo: https://github.com/plv8/plv8.git - dest: /tmp/plv8 - version: r3.0alpha - become: yes - -- name: plv8 - build - make: - chdir: /tmp/plv8 - become: yes - -- name: plv8 - install - make: - chdir: /tmp/plv8 - target: install - become: yes +- name: Install plv8 + import_tasks: tasks/postgres-extensions/13-plv8.yml \ No newline at end of file diff --git a/ansible/tasks/setup-fail2ban.yml b/ansible/tasks/setup-fail2ban.yml new file mode 100644 index 0000000..2d901d0 --- /dev/null +++ b/ansible/tasks/setup-fail2ban.yml @@ -0,0 +1,41 @@ +# set default bantime to 30 minutes +- name: supautils - add supautils to shared_preload_libraries + become: yes + replace: + path: /etc/fail2ban/jail.conf + regexp: bantime = 10m + replace: bantime = 1800 + +# postgresql +- name: import jail.d/postgresql.conf + template: + src: files/fail2ban_config/jail-postgresql.conf.j2 + dest: /etc/fail2ban/jail.d/postgresql.conf + become: yes + +- name: import filter.d/postgresql.conf + template: + src: files/fail2ban_config/filter-postgresql.conf.j2 + dest: /etc/fail2ban/filter.d/postgresql.conf + become: yes + +- name: add in tealbase specific ignore filters + lineinfile: + path: /etc/fail2ban/filter.d/postgresql.conf + state: present + line: "{{ item.line }}" 
+ loop: + - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""tealbase_admin".*$' } + - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""tealbase_auth_admin".*$' } + - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""tealbase_storage_admin".*$' } + - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""authenticator".*$' } + - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""pgbouncer".*$' } + become: yes + tags: + - install-tealbase-internal + +# Restart +- name: fail2ban - restart + systemd: + name: fail2ban + state: restarted \ No newline at end of file diff --git a/ansible/tasks/setup-misc.yml b/ansible/tasks/setup-misc.yml deleted file mode 100644 index e67d6a3..0000000 --- a/ansible/tasks/setup-misc.yml +++ /dev/null @@ -1,24 +0,0 @@ -# WAL-G -- name: Install daemontools - become: yes - apt: - pkg: - - daemontools - -- name: WAL-G - download latest release - get_url: - url: https://github.com/wal-g/wal-g/releases/download/v{{ wal_g_release }}/wal-g.linux-amd64.tar.gz - dest: /tmp - checksum: "{{ wal_g_release_checksum }}" - -- name: WAL-G - unpack archive - unarchive: - remote_src: yes - src: /tmp/wal-g.linux-amd64.tar.gz - dest: /tmp - become: yes - -- name: WAL-G - install - become: yes - shell: - cmd: mv /tmp/wal-g /usr/local/bin/ diff --git a/ansible/tasks/setup-pgbouncer.yml b/ansible/tasks/setup-pgbouncer.yml new file mode 100644 index 0000000..7400b19 --- /dev/null +++ b/ansible/tasks/setup-pgbouncer.yml @@ -0,0 +1,111 @@ +# PgBouncer +- name: PgBouncer - download & install dependencies + apt: + pkg: + - libssl-dev + - pkg-config + - libevent-dev + update_cache: yes + cache_valid_time: 3600 + +- name: PgBouncer - download latest release + get_url: + url: "https://www.pgbouncer.org/downloads/files/{{ pgbouncer_release }}/pgbouncer-{{ pgbouncer_release }}.tar.gz" + dest: /tmp/pgbouncer-{{ pgbouncer_release }}.tar.gz + checksum: "{{ pgbouncer_release_checksum 
}}" + +- name: PgBouncer - unpack archive + unarchive: + remote_src: yes + src: /tmp/pgbouncer-{{ pgbouncer_release }}.tar.gz + dest: /tmp + become: yes + +- name: PgBouncer - configure + shell: + cmd: "./configure --prefix=/usr/local --with-systemd" + chdir: /tmp/pgbouncer-{{ pgbouncer_release }} + become: yes + +- name: PgBouncer - build + make: + chdir: /tmp/pgbouncer-{{ pgbouncer_release }} + become: yes + +- name: PgBouncer - install + make: + chdir: /tmp/pgbouncer-{{ pgbouncer_release }} + target: install + become: yes + +# Create /etc/postgresql directory and make sure postgres group owns it +- name: PgBouncer - create a directory if it does not exist + file: + path: /etc/pgbouncer + state: directory + group: postgres + +- name: PgBouncer - adjust pgbouncer.ini + copy: + src: files/pgbouncer_config/pgbouncer.ini.j2 + dest: /etc/pgbouncer/pgbouncer.ini + +- name: PgBouncer - create a directory if it does not exist + file: + path: /etc/pgbouncer/userlist.txt + state: touch + group: postgres + owner: postgres + +- name: import /etc/tmpfiles.d/pgbouncer.conf + template: + src: files/pgbouncer_config/tmpfiles.d-pgbouncer.conf.j2 + dest: /etc/tmpfiles.d/pgbouncer.conf + become: yes + +- name: PgBouncer - add permissions for pgbouncer user + become: yes + lineinfile: + path: /etc/postgresql/pg_hba.conf + state: present + insertafter: '# Default:' + line: "{{ item }}" + with_items: + - "host all pgbouncer 127.0.0.1/32 md5" + - "# Allow connection by pgbouncer user" + +# Run PgBouncer SQL script +- name: Transfer init SQL files + copy: + src: files/pgbouncer_config/pgbouncer_auth_schema.sql + dest: /tmp/00-schema.sql + +- name: Execute init SQL files + become: yes + become_user: postgres + shell: + cmd: /usr/lib/postgresql/bin/psql -f /tmp/00-schema.sql + +# Add fail2ban filter +- name: import jail.d/pgbouncer.conf + template: + src: files/fail2ban_config/jail-pgbouncer.conf.j2 + dest: /etc/fail2ban/jail.d/pgbouncer.conf + become: yes + +- name: import 
filter.d/pgbouncer.conf + template: + src: files/fail2ban_config/filter-pgbouncer.conf.j2 + dest: /etc/fail2ban/filter.d/pgbouncer.conf + become: yes + +# Add systemd file for PgBouncer +- name: PgBouncer - import postgresql.service + template: + src: files/pgbouncer_config/pgbouncer.service.j2 + dest: /etc/systemd/system/pgbouncer.service + become: yes + +- name: PgBouncer - reload systemd + systemd: + daemon_reload: yes \ No newline at end of file diff --git a/ansible/tasks/setup-postgres.yml b/ansible/tasks/setup-postgres.yml new file mode 100644 index 0000000..679c3ce --- /dev/null +++ b/ansible/tasks/setup-postgres.yml @@ -0,0 +1,165 @@ +# Downloading dependencies +- name: Postgres dependencies + become: yes + apt: + pkg: + - build-essential + - libreadline-dev + - zlib1g-dev + - flex + - bison + - libxml2-dev + - libxslt-dev + - libssl-dev + - libsystemd-dev + - libpq-dev + - libxml2-utils + - uuid-dev + - xsltproc + - ssl-cert + +- name: Download LLVM & Clang + become: yes + apt: + pkg: + - llvm-11-dev + - clang-11 + +- name: Download GCC 10 + become: yes + apt: + pkg: + - gcc-10 + - g++-10 + +- name: Switch to GCC 10 + shell: + cmd: update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 --slave /usr/bin/g++ g++ /usr/bin/g++-10 --slave /usr/bin/gcov gcov /usr/bin/gcov-10 + become: yes + +# Building Postgres from source +- name: Postgres - download latest release + get_url: + url: https://ftp.postgresql.org/pub/source/v{{ postgresql_release }}/postgresql-{{ postgresql_release }}.tar.gz + dest: /tmp + checksum: "{{ postgresql_release_checksum }}" + +- name: Postgres - unpack archive + unarchive: + remote_src: yes + src: /tmp/postgresql-{{ postgresql_release }}.tar.gz + dest: /tmp + become: yes + +- name: Setting CFLAGS (arm) + set_fact: + cflags: "-moutline-atomics -mtune=neoverse-n1 -fsigned-char" + when: platform == "arm64" + +- name: Setting CFLAGS (x86) + set_fact: + cflags: "-fsigned-char" + when: platform == "amd64" + +- name: Postgres - 
configure + shell: + cmd: CFLAGS='{{ cflags }}' LLVM_CONFIG=/usr/bin/llvm-config-11 CLANG=/usr/bin/clang-11 ./configure --with-llvm --with-openssl --with-systemd --with-uuid=e2fs --exec-prefix=/usr/lib/postgresql --datarootdir=/var/lib/postgresql + chdir: /tmp/postgresql-{{ postgresql_release }} + become: yes + +- name: Postgres - build + make: + target: world + chdir: /tmp/postgresql-{{ postgresql_release }} + become: yes + +- name: Postgres - install + make: + target: install-world + chdir: /tmp/postgresql-{{ postgresql_release }} + become: yes + +# Create postgres user +- name: Create postgres user + user: + name: postgres + shell: /bin/false + comment: Postgres user + groups: ssl-cert + +- name: Recursively change ownership of a directory + file: + path: /var/lib/postgresql + state: directory + recurse: yes + owner: postgres + group: postgres + +# Create /etc/postgresql directory and make sure postgres group owns it +- name: Create a directory if it does not exist + file: + path: /etc/postgresql + state: directory + owner: postgres + group: postgres + +# Move Postgres configuration files into /etc/postgresql +# Add postgresql.conf +- name: import postgresql.conf + template: + src: files/postgresql_config/postgresql.conf.j2 + dest: /etc/postgresql/postgresql.conf + group: postgres + +# Add pg_hba.conf +- name: import pg_hba.conf + template: + src: files/postgresql_config/pg_hba.conf.j2 + dest: /etc/postgresql/pg_hba.conf + group: postgres + +# Add pg_ident.conf +- name: import pg_ident.conf + template: + src: files/postgresql_config/pg_ident.conf.j2 + dest: /etc/postgresql/pg_ident.conf + group: postgres + +- name: Find all files in /usr/lib/postgresql/bin + find: + paths: /usr/lib/postgresql/bin + register: postgresql_bin + +- name: Create symbolic links for Postgres binaries to /usr/bin/ + become: True + file: + src: "{{ item.path }}" + path: "/usr/bin/{{ item.path | basename }}" + state: link + force: yes + with_items: "{{ postgresql_bin.files }}" + +# init 
DB +- name: Initialize the database + become: yes + become_user: postgres + shell: + cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data initdb + vars: + ansible_command_timeout: 60 + # Circumvents the following error: + # "Timeout (12s) waiting for privilege escalation prompt" + +# Add systemd file for Postgres +- name: import postgresql.service + template: + src: files/postgresql_config/postgresql.service.j2 + dest: /etc/systemd/system/postgresql.service + become: yes + +# Reload +- name: System - systemd reload + systemd: + enabled: yes + name: postgresql + daemon_reload: yes diff --git a/ansible/tasks/setup-postgrest.yml b/ansible/tasks/setup-postgrest.yml new file mode 100644 index 0000000..16a10e8 --- /dev/null +++ b/ansible/tasks/setup-postgrest.yml @@ -0,0 +1,39 @@ +- name: PostgREST - system user + user: name=postgrest + +# libpq is a C library that enables user programs to communicate with +# the PostgreSQL database server. +- name: PostgREST - system dependencies + apt: + pkg: + - libpq5 + +- name: PostgREST - download ubuntu binary archive (arm) + get_url: + url: "https://github.com/PostgREST/postgrest/releases/download/nightly/postgrest-nightly-{{ postgrest_arm_release }}.tar.xz" + dest: /tmp/postgrest.tar.xz + checksum: "{{ postgrest_arm_release_checksum }}" + when: platform == "arm64" + +- name: PostgREST - download ubuntu binary archive (x86) + get_url: + url: "https://github.com/PostgREST/postgrest/releases/download/nightly/postgrest-nightly-{{ postgrest_x86_release }}.tar.xz" + dest: /tmp/postgrest.tar.xz + checksum: "{{ postgrest_x86_release_checksum }}" + when: platform == "amd64" + +- name: PostgREST - unpack archive in /opt + unarchive: + remote_src: yes + src: /tmp/postgrest.tar.xz + dest: /opt + owner: postgrest + +- name: PostgREST - create service file + template: + src: files/postgrest.service.j2 + dest: /etc/systemd/system/postgrest.service + +- name: PostgREST - reload systemd + systemd: + daemon_reload: yes \ No newline at 
end of file diff --git a/ansible/tasks/setup-system.yml b/ansible/tasks/setup-system.yml index 05ff498..3a3db1a 100644 --- a/ansible/tasks/setup-system.yml +++ b/ansible/tasks/setup-system.yml @@ -1,41 +1,34 @@ # DigitalOcean's ubuntu droplet isn't up to date with installed packages, and on # a fresh install I see 71 security upgrades available. +- name: Terminate any ongoing updates + become: yes + shell: killall apt apt-get + ignore_errors: yes + tags: + - update + - update-only + - name: System - apt update and apt upgrade apt: update_cache=yes upgrade=yes # SEE http://archive.vn/DKJjs#parameter-upgrade -- name: Wait for /var/lib/apt/lists/lock +- name: Install required security updates become: yes - shell: while sudo fuser /var/lib/apt/lists/lock; do sleep 10; done; - tags: - - update - - update-only + apt: + pkg: + - tzdata + - linux-libc-dev -- name: Wait for /var/lib/dpkg/lock-frontend +# SEE https://github.com/georchestra/ansible/issues/55#issuecomment-588313638 +# Without this, a similar error is faced +- name: Install Ansible dependencies become: yes - shell: while sudo fuser /var/lib/dpkg/lock-frontend; do sleep 10; done; - tags: - - update - - update-only - -- name: add universe repository for bionic - apt_repository: - repo: deb http://archive.ubuntu.com/ubuntu bionic universe - state: present - -- name: Install python - become: yes apt: pkg: - - python - - python-pip - - python3 - - python3-pip - update_cache: yes - cache_valid_time: 3600 + - acl - name: Install security tools - become: yes + become: yes apt: pkg: - ufw @@ -43,15 +36,24 @@ - unattended-upgrades update_cache: yes cache_valid_time: 3600 - + - name: Adjust APT update intervals - copy: + copy: src: files/apt_periodic dest: /etc/apt/apt.conf.d/10periodic -- name: Install psycopg2 to enable ansible postgreSQL features - pip: - name: psycopg2-binary +# Find platform architecture and set as a variable +- name: finding platform architecture + shell: if [ $(uname -m) = "aarch64" ]; then echo 
"arm64"; else echo "amd64"; fi + register: platform_output + tags: + - update + - update-only +- set_fact: + platform: "{{ platform_output.stdout }}" + tags: + - update + - update-only - name: System - Create services.slice template: @@ -59,5 +61,4 @@ dest: /etc/systemd/system/services.slice - name: System - systemd reload - systemd: daemon_reload=yes - + systemd: daemon_reload=yes \ No newline at end of file diff --git a/ansible/tasks/setup-tealbase-internal.yml b/ansible/tasks/setup-tealbase-internal.yml new file mode 100644 index 0000000..52b6958 --- /dev/null +++ b/ansible/tasks/setup-tealbase-internal.yml @@ -0,0 +1,40 @@ +- name: AWS CLI dep + apt: + pkg: + - unzip + - jq + install_recommends: no + +- name: AWS CLI (arm) + get_url: + url: "https://awscli.amazonaws.com/awscli-exe-linux-aarch64-{{ aws_cli_release }}.zip" + dest: "/tmp/awscliv2.zip" + when: platform == "arm64" + +- name: AWS CLI (x86) + get_url: + url: "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-{{ aws_cli_release }}.zip" + dest: "/tmp/awscliv2.zip" + when: platform == "amd64" + +- name: AWS CLI - expand + unarchive: + remote_src: yes + src: "/tmp/awscliv2.zip" + dest: "/tmp" + +- name: AWS CLI - install + shell: "/tmp/aws/install" + become: true + +- name: Install Postgres exporter + import_tasks: internal/postgres-exporter.yml + +- name: Install node exporter + import_tasks: internal/node-exporter.yml + +- name: Install supautils + import_tasks: internal/supautils.yml + +- name: Boot time optimizations + import_tasks: internal/optimizations.yml diff --git a/ansible/tasks/setup-wal-g.yml b/ansible/tasks/setup-wal-g.yml new file mode 100644 index 0000000..3f2bc5a --- /dev/null +++ b/ansible/tasks/setup-wal-g.yml @@ -0,0 +1,45 @@ +# Downloading dependencies +- name: Postgres dependencies + become: yes + apt: + pkg: + - liblzo2-dev + - cmake + +# install go dependency for WAL-G +- name: wal-g go dependency + get_url: + url: "https://golang.org/dl/go{{ golang_version }}.linux-{{ platform 
}}.tar.gz" + dest: /tmp + +- name: unpack go archive + unarchive: + remote_src: yes + src: "/tmp/go{{ golang_version }}.linux-{{ platform }}.tar.gz" + dest: /usr/local + +# Download WAL-G +- name: download wal-g + shell: + cmd: go get github.com/wal-g/wal-g; + environment: + PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin" + ignore_errors: yes + # ignore error https://github.com/wal-g/wal-g/issues/343#issuecomment-514544288 + +# Install WAL-G +- name: install wal-g + become: yes + shell: + cmd: make install && make deps && make pg_install + chdir: "{{ ansible_env.HOME }}/go/src/github.com/wal-g/wal-g" + environment: + GOBIN: "/usr/local/bin" + PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin" + +# Clean up Go +- name: Uninstall Go + become: yes + file: + path: /usr/local/go + state: absent \ No newline at end of file diff --git a/ansible/vars.yml b/ansible/vars.yml index a804557..a6f823a 100644 --- a/ansible/vars.yml +++ b/ansible/vars.yml @@ -1,42 +1,71 @@ -postgresql_version: 12 -postgresql_wal_level: "logical" -postgresql_max_wal_senders: 10 -postgresql_max_replication_slots: 5 -postgresql_row_security: on -postgresql_listen_addresses: - - "*" +tealbase_internal: true -postgresql_ext_install_contrib: yes -postgresql_ext_install_dev_headers: yes +postgresql_major: "13" +postgresql_release: "13.3" +postgresql_release_checksum: sha1:aeb645988b1ec9ffbb2fc0a49d9055d3ab17affa -# Warning: Make sure the postgresql & postgis versions are compatible with one another -postgresql_ext_postgis_version: 3 +# Non Postgres Extensions +pgbouncer_release: "1.15.0" +pgbouncer_release_checksum: sha1:ea7e9dbcab178f439a0fa402a78a7f1e4f43e6d4 -postgresql_shared_preload_libraries: [pg_stat_statements, pgaudit, plpgsql, plpgsql_check ] +postgrest_arm_release: 2021-03-05-19-03-d3a8b5f-ubuntu-aarch64 +postgrest_arm_release_checksum: sha1:b9e9b06ead7230b75033e8ae17912714bf463a33 -postgresql_pg_hba_custom: - - {type: "host", database: "all", user: "all", address: "0.0.0.0/0", method: "md5" } 
+postgrest_x86_release: 2021-03-05-19-03-d3a8b5f-linux-x64-static +postgrest_x86_release_checksum: sha1:4b4adde15f0d41d65a9136d1f8c0d9cd6fe79326 + +aws_cli_release: "2.2.7" + +golang_version: "1.15.4" +wal_g_release: "0.2.15" +wal_g_release_checksum: sha1:e82d405121e0ccc322a323b9824e60c102b14004 + +sfcgal_release: "1.3.10" +sfcgal_release_checksum: sha1:f4add34a00afb0b5f594685fc646565a2bda259b + +postgres_exporter_release: "0.9.0" +postgres_exporter_release_checksum: + arm64: sha256:d869c16791481dc8475487ad84ae4371a63f9b399898ca1c666eead5cccf7182 + amd64: sha256:ff541bd3ee19c0ae003d71424a75edfcc8695e828dd20d5b4555ce433c89d60b + +node_exporter_release: 1.1.2 +node_exporter_release_checksum: + arm64: sha256:eb5e7d16f18bb3272d0d832986fc8ac6cb0b6c42d487c94e15dabb10feae8e04 + amd64: sha256:8c1f6a317457a658e0ae68ad710f6b4098db2cad10204649b51e3c043aa3e70d + +# Postgres Extensions +postgis_release: "3.1.2" +postgis_release_checksum: sha1:622f52f3bf338c8e51ea6d73d30d6a5d3140c517 + +pgrouting_release: "3.2.0" +pgrouting_release_checksum: sha1:d902d449ebc96b6cdcb2fac09434d0098467cda5 pgtap_release: "1.1.0" pgtap_release_checksum: sha1:cca57708e723de18735a723b774577dc52f6f31e -pgaudit_release: "1.4.0" -pgaudit_release_checksum: sha1:ea085fbf227b5c461331ab33b99579f37db299a6 +pg_cron_release: "1.3.1" +pg_cron_release_checksum: sha1:679b6ff54e0b1070a5fd713c5d25c3378f371fac + +pgaudit_release: "1.5.0" +pgaudit_release_checksum: sha1:8429125e8f70fcaa2c2f5a0e22b910a4afb821a4 pgsql_http_release: "1.3.1" pgsql_http_release_checksum: sha1:816a3fff53e05301b176cf0696799fc5a00f54e8 -plpgsql_check_release: "1.11.0" -plpgsql_check_release_checksum: sha1:395313b6ef9c10c4fc182817d6f0040b171147b8 +plpgsql_check_release: "1.16.0" +plpgsql_check_release_checksum: sha1:626553fc2746fe10aa5a776a1229baf2af3365fc -pljava_release: "1_5_5" -pljava_release_checksum: sha1:5277433030fdeed8528c7c0154163b54aedbd842 +pg_safeupdate_release: "1.3" +pg_safeupdate_release_checksum: 
sha1:34a0353611bfd63f7ea760aac2afcb518bf3ba7c -postgresql_log_destination: "csvlog" -postgresql_logging_collector: on -postgresql_log_filename: "postgresql.log" -postgresql_log_rotation_age: 0 -postgresql_log_rotation_size: 0 +timescaledb_release: "2.3.0" + +wal2json_release: "2_3" +wal2json_release_checksum: sha1:923f9bbcd0505a1f0b6eac1d371e4ff2d266a958 + +supautils_release: "1.1.0" +supautils_release_checksum: sha1:326ac5c1933bd30d4a50da7568b27629a9ec544b + +pljava_release: "1_6_2" +pljava_release_checksum: sha1:9610b80cbd13d4d43bcdaa2928365dbfd1bf6e94 -wal_g_release: "0.2.15" -wal_g_release_checksum: sha1:e82d405121e0ccc322a323b9824e60c102b14004 \ No newline at end of file diff --git a/digitalOcean.json b/digitalOcean.json index e712228..378973d 100644 --- a/digitalOcean.json +++ b/digitalOcean.json @@ -1,22 +1,31 @@ { "variables": { "do_token": "", - "image_name": "ubuntu-18-04-x64", + "image_name": "ubuntu-20-04-x64", "region": "sgp1", "snapshot_regions": "sgp1", - "ansible_arguments": "--skip-tags,update-only" + "snapshot_name": "tealbase-postgres-13.3.0", + "ansible_arguments": "--skip-tags,update-only,--skip-tags,aws-only,-e,tealbase_internal='false'" }, - "builders": [{ - "type": "digitalocean", - "api_token": "{{user `do_token`}}", - "image": "{{user `image_name`}}", - "region": "{{user `region`}}", - "snapshot_regions": "{{user `snapshot_regions`}}", - "size": "s-1vcpu-1gb", - "ssh_username": "root", - "snapshot_name": "tealbase-postgres-0.13.0" - }], + "builders": [ + { + "type": "digitalocean", + "api_token": "{{user `do_token`}}", + "image": "{{user `image_name`}}", + "region": "{{user `region`}}", + "snapshot_regions": "{{user `snapshot_regions`}}", + "size": "s-1vcpu-1gb", + "ssh_username": "root", + "snapshot_name": "{{user `snapshot_name`}}" + } + ], "provisioners": [ + { + "type": "shell", + "inline": [ + "while [ ! 
-f /var/lib/cloud/instance/boot-finished ]; do echo 'Waiting for cloud-init...'; sleep 1; done" + ] + }, { "type": "ansible", "user": "root", @@ -26,11 +35,11 @@ { "type": "shell", "scripts": [ - "scripts/01-test", + "scripts/01-postgres_check.sh", "scripts/90-cleanup.sh", "scripts/91-log_cleanup.sh", "scripts/99-img_check.sh" ] } ] -} \ No newline at end of file +} diff --git a/docker/Dockerfile b/docker/Dockerfile deleted file mode 100644 index b0b2856..0000000 --- a/docker/Dockerfile +++ /dev/null @@ -1,180 +0,0 @@ -FROM postgres:12 - -# install postgis -ENV POSTGIS_MAJOR 3 -ENV POSTGIS_VERSION 3.0.0+dfsg-2~exp1.pgdg100+1 -RUN apt-get update \ - && apt-cache showpkg postgresql-$PG_MAJOR-postgis-$POSTGIS_MAJOR \ - && apt-get install -y --no-install-recommends \ - postgresql-$PG_MAJOR-postgis-$POSTGIS_MAJOR \ - postgresql-$PG_MAJOR-postgis-$POSTGIS_MAJOR-scripts \ - && rm -rf /var/lib/apt/lists/* /var/tmp/* - -# install pgtap -ENV PGTAP_VERSION=v1.1.0 - -RUN pgtapDependencies="git \ - ca-certificates \ - build-essential" \ - && apt-get update \ - && apt-get install -y --no-install-recommends ${pgtapDependencies} \ - && cd /tmp \ - && git clone git://github.com/theory/pgtap.git \ - && cd pgtap \ - && git checkout tags/$PGTAP_VERSION \ - && make install \ - && apt-get clean \ - && apt-get remove -y ${pgtapDependencies} \ - && apt-get autoremove -y \ - && rm -rf /tmp/pgtap /var/lib/apt/lists/* /var/tmp/* - -# install plpython3 -RUN apt-get update \ - && apt-get install postgresql-plpython3-12 -y - -# install pgAudit -ENV PGAUDIT_VERSION=1.4.0 - -RUN pgAuditDependencies="git \ - ca-certificates \ - build-essential \ - postgresql-server-dev-$PG_MAJOR \ - libssl-dev \ - libkrb5-dev" \ - && apt-get update \ - && apt-get install -y --no-install-recommends ${pgAuditDependencies} \ - && cd /tmp \ - && git clone https://github.com/pgaudit/pgaudit.git \ - && cd pgaudit \ - && git checkout ${PGAUDIT_VERSION} \ - && make check USE_PGXS=1 \ - && make install USE_PGXS=1 \ - && 
apt-get clean \ - && apt-get remove -y ${pgAuditDependencies} \ - && apt-get autoremove -y \ - && rm -rf /tmp/pgaudit /var/lib/apt/lists/* /var/tmp/* - -# install pgjwt -RUN pgjwtDependencies="git \ - ca-certificates \ - build-essential" \ - && apt-get update \ - && apt-get install -y --no-install-recommends ${pgjwtDependencies} \ - && cd /tmp \ - && git clone https://github.com/michelp/pgjwt.git \ - && cd pgjwt \ - && git checkout master \ - && make install \ - && apt-get clean \ - && apt-get remove -y ${pgtapDependencies} \ - && apt-get autoremove -y \ - && rm -rf /tmp/pgjwt /var/lib/apt/lists/* /var/tmp/* - -# install pgsql-http -ENV PGSQL_HTTP_VERSION=v1.3.1 - -RUN pgsqlHttpDependencies="git \ - ca-certificates \ - build-essential \ - postgresql-server-dev-$PG_MAJOR" \ - && pgsqlHttpRuntimeDependencies="libcurl4-gnutls-dev" \ - && apt-get update \ - && apt-get install -y --no-install-recommends ${pgsqlHttpDependencies} ${pgsqlHttpRuntimeDependencies} \ - && cd /tmp \ - && git clone https://github.com/pramsey/pgsql-http.git \ - && cd pgsql-http \ - && git checkout ${PGSQL_HTTP_VERSION} \ - && make \ - && make install \ - && apt-get clean \ - && apt-get remove -y ${pgsqlHttpDependencies} \ - && apt-get autoremove -y \ - && rm -rf /tmp/pgsql-http /var/lib/apt/lists/* /var/tmp/* - -# install plpgsql_check -ENV PLPGSQL_CHECK_VERSION=v1.11.3 - -RUN plpgsqlCheckDependencies="git \ - ca-certificates \ - build-essential \ - postgresql-server-dev-$PG_MAJOR" \ - && plpgsqlCheckRuntimeDependencies="libicu-dev" \ - && apt-get update \ - && apt-get install -y --no-install-recommends ${plpgsqlCheckDependencies} ${plpgsqlCheckRuntimeDependencies} \ - && cd /tmp \ - && git clone https://github.com/okbob/plpgsql_check.git \ - && cd plpgsql_check \ - && git checkout ${PLPGSQL_CHECK_VERSION} \ - && make clean \ - && make install \ - && apt-get clean \ - && apt-get remove -y ${pgsqlHttpDependencies} \ - && apt-get autoremove -y \ - && rm -rf /tmp/plpgsql_check /var/lib/apt/lists/* 
/var/tmp/* - -# install plv8 -ENV PLV8_VERSION=r3.0alpha - -RUN plv8Dependencies="build-essential \ - ca-certificates \ - curl \ - git-core \ - python \ - gpp \ - cpp \ - pkg-config \ - apt-transport-https \ - cmake \ - libc++-dev \ - libc++abi-dev \ - postgresql-server-dev-$PG_MAJOR" \ - && plv8RuntimeDependencies="libc++1 \ - libtinfo5 \ - libc++abi1" \ - && apt-get update \ - && apt-get install -y --no-install-recommends ${plv8Dependencies} ${plv8RuntimeDependencies} \ - && mkdir -p /tmp/build \ - && cd /tmp/build \ - && git clone https://github.com/plv8/plv8.git \ - && cd plv8 \ - && git checkout ${PLV8_VERSION} \ - && make static \ - && make install \ - && rm -rf /root/.vpython_cipd_cache /root/.vpython-root \ - && apt-get clean \ - && apt-get remove -y ${plv8Dependencies} \ - && apt-get autoremove -y \ - && rm -rf /tmp/build /var/lib/apt/lists/* /var/tmp/* - -# install pljava -ENV PLJAVA_VERSION=V1_5_5 - -RUN pljavaDependencies="git \ - ca-certificates \ - g++ \ - maven \ - postgresql-server-dev-$PG_MAJOR \ - libpq-dev \ - libecpg-dev \ - libkrb5-dev \ - default-jdk \ - libssl-dev" \ - && apt-get update \ - && apt-get install -y --no-install-recommends ${pljavaDependencies} \ - && cd /tmp \ - && git clone https://github.com/tada/pljava.git \ - && cd pljava \ - && git checkout ${PLJAVA_VERSION} \ - && mvn clean install \ - && java -jar pljava-packaging/target/pljava-pg12.3-amd64-Linux-gpp.jar \ - && apt-get clean \ - && apt-get remove -y ${pljavaDependencies} \ - && apt-get autoremove -y \ - && rm -rf ~/.m2 /tmp/pljava /var/lib/apt/lists/* /var/tmp/* - -RUN apt-get update \ - && apt-get install -y --no-install-recommends default-jdk-headless \ - && rm -rf /var/lib/apt/lists/* /var/tmp/* - -RUN mkdir -p /docker-entrypoint-initdb.d -ADD ./mnt /docker-entrypoint-initdb.d/ \ No newline at end of file diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 03e0a1a..4f00aa9 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ 
-1,4 +1,4 @@ -version: '3' +version: "3" services: db: @@ -6,4 +6,4 @@ services: ports: - "5432:5432" environment: - POSTGRES_PASSWORD: postgres \ No newline at end of file + POSTGRES_PASSWORD: postgres diff --git a/docker/mnt/init-permissions.sh b/docker/mnt/init-permissions.sh deleted file mode 100644 index 314d387..0000000 --- a/docker/mnt/init-permissions.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -set -e - -echo "host replication $POSTGRES_USER 0.0.0.0/0 trust" >> $PGDATA/pg_hba.conf -echo "shared_preload_libraries = 'pg_stat_statements, pgaudit'" >> $PGDATA/postgresql.conf -echo "pg_stat_statements.max = 10000" >> $PGDATA/postgresql.conf -echo "pg_stat_statements.track = all" >> $PGDATA/postgresql.conf -echo "wal_level=logical" >> $PGDATA/postgresql.conf -echo "max_replication_slots=5" >> $PGDATA/postgresql.conf -echo "max_wal_senders=10" >> $PGDATA/postgresql.conf -echo "log_destination='csvlog'" >> $PGDATA/postgresql.conf -echo "logging_collector=on" >> $PGDATA/postgresql.conf -echo "log_filename='postgresql.log'" >> $PGDATA/postgresql.conf -echo "log_rotation_age=0" >> $PGDATA/postgresql.conf -echo "log_rotation_size=0" >> $PGDATA/postgresql.conf -echo "pljava.libjvm_location = '/usr/lib/jvm/java-11-openjdk-amd64/lib/server/libjvm.so'" >> $PGDATA/postgresql.conf diff --git a/rfcs/0001-connection-pooling.md b/rfcs/0001-connection-pooling.md new file mode 100644 index 0000000..f787711 --- /dev/null +++ b/rfcs/0001-connection-pooling.md @@ -0,0 +1,71 @@ +--- +feature: Connection Pooling +start-date: 2021-02-04 +author: kiwicopple +co-authors: steve-chavez, dragarcia +related-issues: (will contain links to implementation PRs) +--- + +# Summary +[summary]: #summary + +We would like to explore connection pooling on tealbase. This RFC is intended to decide: + +- Whether we should provide a pooler +- Which connection pooler we should use +- Where in the stack it would be installed - i.e. 
if should bundle it with the Postgres build + + +# Motivation +[motivation]: #motivation + +In Postgres, every connection is a process. Because of this, a lot of connections to the database can be very expensive on memory. + +When connecting to Postgres database from serverless functions, there is no connection pooling, and so the server needs to maintain hundreds/thousands of connections. + + +# Detailed design +[design]: #detailed-design + +This is still in the "Gather Feedback" stage. To start the discussion: + + +### 1. Decide on a PG Pooler + +- `pg_bouncer` - https://www.pgbouncer.org/ +- `PG Pool II` - https://www.pgpool.net/mediawiki/index.php/Main_Page +- `odyssey` - https://github.com/yandex/odyssey +- others? + +### 2. Decide on configuration + +Most poolers allow different configurations. We would need to decide on how we would configure the pooler by default + +### 3. Decide if the user should be able re-configure the pooler + +Should a user be able to change the configuration? If so, how would they do it? + + +# Drawbacks +[drawbacks]: #drawbacks + +- Security +- Not directly relevant to the "tealbase" stack, so it's additional non-core support + +# Alternatives +[alternatives]: #alternatives + +1. Since we already offer [PostgREST](https://github.com/postgrest/postgrest) and [postgres-meta](https://github.com/tealbase/pg-api), this isn't entirely necessary for the tealbase stack. Bundling this is only beneficial for connecting external tools. +2. We could hold back on this implementation until we move to a full Postgres Operator, which would include a pooler. It would be nice to have something for local development though. 
+ + +# Unresolved questions +[unresolved]: #unresolved-questions + +- Add any unresolved questions here + + +# Future work +[future]: #future-work + +- Add any future work here \ No newline at end of file diff --git a/scripts/01-postgres_check.sh b/scripts/01-postgres_check.sh new file mode 100644 index 0000000..d131528 --- /dev/null +++ b/scripts/01-postgres_check.sh @@ -0,0 +1,72 @@ +#!/bin/bash +# +# Scripts in this directory are run during the build process. +# each script will be uploaded to /tmp on your build droplet, +# given execute permissions and run. The cleanup process will +# remove the scripts from your build system after they have run +# if you use the build_image task. +# +echo "Commencing Checks" + +function check_database_is_ready { + echo -e "\nChecking if database is ready and accepting connections:" + if [ "$(pg_isready)" = "/tmp:5432 - accepting connections" ]; then + echo "Database is ready" + else + echo "Error: Database is not ready. Exiting" + exit 1 + fi +} + +function check_postgres_owned_dir_exists { + DIR=$1 + USER="postgres" + + echo -e "\nChecking if $DIR exists and owned by postgres user:" + + if [ -d "$DIR" ]; then + echo "$DIR exists" + if [ $(stat -c '%U' $DIR) = "$USER" ]; then + echo "$DIR is owned by $USER" + else + echo "Error: $DIR is not owned by $USER" + exit 1 + fi + else + echo "Error: ${DIR} not found. Exiting." + exit 1 + fi +} + +function check_lse_enabled { + ARCH=$(uname -m) + if [ $ARCH = "aarch64" ]; then + echo -e "\nArchitecture is $ARCH. Checking for LSE:" + + LSE_COUNT=$(objdump -d /usr/lib/postgresql/bin/postgres | grep -i 'ldxr\|ldaxr\|stxr\|stlxr' | wc -l) + MOUTLINE_ATOMICS_COUNT=$(nm /usr/lib/postgresql/bin/postgres | grep __aarch64_have_lse_atomics | wc -l) + + # Checking for load and store exclusives + if [ $LSE_COUNT -gt 0 ]; then + echo "Postgres has LSE enabled" + else + echo "Error: Postgres failed to be compiled with LSE. 
Exiting" + exit 1 + fi + + # Checking if successfully compiled with -moutline-atomics + if [ $MOUTLINE_ATOMICS_COUNT -gt 0 ]; then + echo "Postgres has been compiled with -moutline-atomics" + else + echo "Error: Postgres failed to be compiled with -moutline-atomics. Exiting" + exit 1 + fi + else + echo "Architecture is $ARCH. Not checking for LSE." + fi +} + +check_database_is_ready +check_postgres_owned_dir_exists "/var/lib/postgresql" +check_postgres_owned_dir_exists "/etc/postgresql" +check_lse_enabled \ No newline at end of file diff --git a/scripts/01-test b/scripts/01-test deleted file mode 100644 index e5b3e05..0000000 --- a/scripts/01-test +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -# -# Scripts in this directory are run during the build process. -# each script will be uploaded to /tmp on your build droplet, -# given execute permissions and run. The cleanup process will -# remove the scripts from your build system after they have run -# if you use the build_image task. -# -echo "Commencing Digital Ocean Checks" diff --git a/scripts/11-lemp.sh b/scripts/11-lemp.sh new file mode 100644 index 0000000..c340f5e --- /dev/null +++ b/scripts/11-lemp.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# DigitalOcean Marketplace Image Validation Tool +# © 2021 DigitalOcean LLC. +# This code is licensed under Apache 2.0 license (see LICENSE.md for details) + +rm -rvf /etc/nginx/sites-enabled/default + +ln -s /etc/nginx/sites-available/digitalocean \ + /etc/nginx/sites-enabled/digitalocean + +rm -rf /var/www/html/index*debian.html + +chown -R www-data: /var/www \ No newline at end of file diff --git a/scripts/12-ufw-nginx.sh b/scripts/12-ufw-nginx.sh new file mode 100644 index 0000000..7c47366 --- /dev/null +++ b/scripts/12-ufw-nginx.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +# DigitalOcean Marketplace Image Validation Tool +# © 2021 DigitalOcean LLC. 
+# This code is licensed under Apache 2.0 license (see LICENSE.md for details) + +ufw limit ssh +ufw allow 'Nginx Full' + +ufw --force enable \ No newline at end of file diff --git a/scripts/13-force-ssh-logout.sh b/scripts/13-force-ssh-logout.sh new file mode 100644 index 0000000..99e28c1 --- /dev/null +++ b/scripts/13-force-ssh-logout.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +# DigitalOcean Marketplace Image Validation Tool +# © 2021 DigitalOcean LLC. +# This code is licensed under Apache 2.0 license (see LICENSE.md for details) + +cat >> /etc/ssh/sshd_config < /root/.bash_history unset HISTFILE -apt-get -y autoremove -apt-get -y autoclean find /var/log -mtime -1 -type f -exec truncate -s 0 {} \; rm -rf /var/log/*.gz /var/log/*.[0-9] /var/log/*-???????? rm -rf /var/lib/cloud/instances/* @@ -33,4 +53,4 @@ dd if=/dev/zero of=/zerofile & sleep 5 done sync; rm /zerofile; sync -cat /dev/null > /var/log/lastlog; cat /dev/null > /var/log/wtmp +cat /dev/null > /var/log/lastlog; cat /dev/null > /var/log/wtmp \ No newline at end of file diff --git a/scripts/91-log_cleanup.sh b/scripts/91-log_cleanup.sh index 26f5fbc..8521164 100644 --- a/scripts/91-log_cleanup.sh +++ b/scripts/91-log_cleanup.sh @@ -3,3 +3,9 @@ # echo "Clearing all log files" rm -rf /var/log/* + +# https://github.com/fail2ban/fail2ban/issues/1593 +touch /var/log/auth.log + +touch /var/log/pgbouncer.log +chown postgres:postgres /var/log/pgbouncer.log \ No newline at end of file diff --git a/scripts/99-img_check.sh b/scripts/99-img_check.sh index 6daee68..00b5476 100755 --- a/scripts/99-img_check.sh +++ b/scripts/99-img_check.sh @@ -1,10 +1,10 @@ #!/bin/bash -# + # DigitalOcean Marketplace Image Validation Tool -# © 2018 DigitalOcean LLC. -# This code is licensed under MIT license (see LICENSE.txt for details) -# -VERSION="v. 1.2" +# © 2021 DigitalOcean LLC. +# This code is licensed under Apache 2.0 license (see LICENSE.md for details) + +VERSION="v. 
1.6" RUNDATE=$( date ) # Script should be run with SUDO @@ -110,18 +110,12 @@ function checkLogs { [[ -e $f ]] || break if [[ "${f}" = '/var/log/lfd.log' && "$( cat "${f}" | egrep -v '/var/log/messages has been reset| Watching /var/log/messages' | wc -c)" -gt 50 ]]; then if [ $f != $cp_ignore ]; then - echo -en "\e[93m[WARN]\e[0m un-cleared log file, ${f} found\n" - ((WARN++)) - if [[ $STATUS != 2 ]]; then - STATUS=1 - fi - fi - elif [[ "${f}" == '/var/log/cloud-init-output.log' ]]; then - if cat '/var/log/cloud-init-output.log' | grep -q SHA256; then - echo -en "\e[41m[FAIL]\e[0m log containing SHA256 value found in log file ${f}\n" - ((FAIL++)) - STATUS=1 + echo -en "\e[93m[WARN]\e[0m un-cleared log file, ${f} found\n" + ((WARN++)) + if [[ $STATUS != 2 ]]; then + STATUS=1 fi + fi elif [[ "${f}" != '/var/log/lfd.log' && "$( cat "${f}" | wc -c)" -gt 50 ]]; then if [ $f != $cp_ignore ]; then echo -en "\e[93m[WARN]\e[0m un-cleared log file, ${f} found\n" @@ -252,7 +246,7 @@ function checkUsers { echo -en "\e[32m[PASS]\e[0m User ${user} has no password set.\n" ((PASS++)) else - echo -en "\e[41m[FAIL]\e[0m User ${user} has a password set on their account.\n" + echo -en "\e[41m[FAIL]\e[0m User ${user} has a password set on their account. Only system users are allowed on the image.\n" ((FAIL++)) STATUS=2 fi @@ -385,7 +379,7 @@ function checkFirewall { # we will check some of the most common if cmdExists 'ufw'; then fw="ufw" - ufwa=$(ufw status | sed -e "s/^Status:\ //") + ufwa=$(ufw status |head -1| sed -e "s/^Status:\ //") if [[ $ufwa == "active" ]]; then FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n" ((PASS++)) @@ -418,6 +412,14 @@ function checkFirewall { } function checkUpdates { if [[ $OS == "Ubuntu" ]] || [[ "$OS" =~ Debian.* ]]; then + # Ensure /tmp exists and has the proper permissions before + # checking for security updates + # https://github.com/digitalocean/marketplace-partners/issues/94 + if [[ ! 
-d /tmp ]]; then + mkdir /tmp + fi + chmod 1777 /tmp + echo -en "\nUpdating apt package database to check for security updates, this may take a minute...\n\n" apt-get -y update > /dev/null @@ -441,11 +443,11 @@ function checkUpdates { echo -en "\e[32m[PASS]\e[0m There are no pending security updates for this image.\n\n" fi elif [[ $OS == "CentOS Linux" ]]; then - echo -en "\nChecking for available updates with yum, this may take a minute...\n\n" + echo -en "\nChecking for available security updates, this may take a minute...\n\n" - update_count=$(yum list updates -q | grep -vc "Updated Packages") + update_count=$(yum check-update --security --quiet | wc -l) if [[ $update_count -gt 0 ]]; then - echo -en "\e[41m[FAIL]\e[0m There are ${update_count} updates available for this image that have not been installed.\n" + echo -en "\e[41m[FAIL]\e[0m There are ${update_count} security updates available for this image that have not been installed.\n" ((FAIL++)) STATUS=2 else @@ -567,7 +569,9 @@ osv=0 if [[ $OS == "Ubuntu" ]]; then ost=1 - if [[ $VER == "18.04" ]]; then + if [[ $VER == "20.04" ]]; then + osv=1 + elif [[ $VER == "18.04" ]]; then osv=1 elif [[ $VER == "16.04" ]]; then osv=1 @@ -591,7 +595,9 @@ elif [[ "$OS" =~ Debian.* ]]; then elif [[ $OS == "CentOS Linux" ]]; then ost=1 - if [[ $VER == "7" ]]; then + if [[ $VER == "8" ]]; then + osv=1 + elif [[ $VER == "7" ]]; then osv=1 elif [[ $VER == "6" ]]; then osv=1 @@ -669,8 +675,8 @@ if [[ $STATUS == 0 ]]; then exit 0 elif [[ $STATUS == 1 ]]; then echo -en "Please review all [WARN] items above and ensure they are intended or resolved. If you do not have a specific requirement, we recommend resolving these items before image submission\n\n" - exit 1 + exit 0 else - echo -en "Some critical tests failed. These items must be resolved and this scan re-run before you submit your image to the marketplace.\n\n" + echo -en "Some critical tests failed. 
These items must be resolved and this scan re-run before you submit your image to the DigitalOcean Marketplace.\n\n" exit 1 -fi +fi \ No newline at end of file From 98f4b76b760e75d2f9b96113ae55cb6eb335b984 Mon Sep 17 00:00:00 2001 From: Ekjot Singh Date: Tue, 15 Jul 2025 17:02:32 +0530 Subject: [PATCH 2/9] postgres-15.1.0.34-rc0 --- .github/CODEOWNERS | 2 + .github/workflows/ami-release.yml | 51 + .github/workflows/dockerhub-release.yml | 195 ++++ .github/workflows/mirror.yml | 37 + .github/workflows/publish-migrations.yml | 43 + .github/workflows/test.yml | 107 ++ .gitignore | 2 + Dockerfile | 39 + README.md | 67 +- amazon-arm.json | 60 -- amazon-arm64.pkr.hcl | 253 +++++ amazon.json | 60 -- ansible.cfg | 3 + ansible/files/admin_api_scripts/grow_fs.sh | 23 + .../admin_api_scripts/manage_readonly_mode.sh | 45 + .../admin_api_scripts/pg_egress_collect.pl | 126 +++ .../admin_api_scripts/pg_upgrade_check.sh | 16 + .../admin_api_scripts/pg_upgrade_complete.sh | 45 + .../admin_api_scripts/pg_upgrade_initiate.sh | 124 +++ .../pg_upgrade_pgsodium_getkey.sh | 12 + .../admin_api_scripts/pg_upgrade_prepare.sh | 14 + ansible/files/adminapi.service.j2 | 12 + ansible/files/adminapi.sudoers.conf | 24 + ansible/files/ansible-pull.service | 20 + ansible/files/ansible-pull.timer | 11 + ansible/files/cron.deny | 2 + .../files/database-optimizations.service.j2 | 12 + ansible/files/docker_mnt/init.sh | 3 - .../fail2ban_config/fail2ban.service.conf | 6 + .../fail2ban_config/jail-postgresql.conf.j2 | 4 +- ansible/files/fail2ban_config/jail-ssh.conf | 4 + ansible/files/fail2ban_config/jail.local | 4 + ansible/files/gotrue.service.j2 | 20 + ansible/files/journald.conf | 6 + ansible/files/kong_config/kong.conf.j2 | 7 + ansible/files/kong_config/kong.env.j2 | 8 + ansible/files/kong_config/kong.service.j2 | 24 + ansible/files/logind.conf | 2 + .../logrotate-postgres-auth.conf | 8 + .../logrotate-postgres-csv.conf | 11 + .../logrotate-postgres.conf} | 2 +- 
.../logrotate_config/logrotate-walg.conf | 9 + ansible/files/nginx.service.j2 | 22 + ansible/files/node_exporter.service.j2 | 16 - ansible/files/pg_egress_collect.service.j2 | 13 + .../files/pgbouncer_config/pgbouncer.ini.j2 | 8 +- .../pgbouncer_config/pgbouncer.service.j2 | 34 +- .../pgbouncer_auth_schema.sql | 2 + .../tmpfiles.d-pgbouncer.conf.j2 | 2 +- ansible/files/pgsodium_getkey_readonly.sh.j2 | 14 + ansible/files/pgsodium_getkey_urandom.sh.j2 | 10 + ansible/files/postgres_exporter.service.j2 | 9 +- .../postgresql_config/custom_walg.conf.j2 | 17 + .../files/postgresql_config/pg_hba.conf.j2 | 26 +- .../files/postgresql_config/pg_ident.conf.j2 | 10 +- .../postgresql_config/postgresql-csvlog.conf | 33 + .../postgresql-stdout-log.conf | 4 + .../postgresql_config/postgresql.conf.j2 | 85 +- .../postgresql_config/postgresql.service.j2 | 11 +- .../files/postgresql_config/supautils.conf.j2 | 12 + .../pgsodium/after-create.sql | 3 + .../files/postgrest-optimizations.service.j2 | 11 + ansible/files/postgrest.service.j2 | 9 +- ansible/files/queries.yml.j2 | 194 ---- ansible/files/sodium_extension.sql | 6 + ansible/files/tealbase_facts.ini | 2 + ansible/files/ufw.service.conf | 4 + ansible/files/vector.service.j2 | 20 + .../wal_change_ownership.sh | 42 + .../files/walg_helper_scripts/wal_fetch.sh | 12 + ansible/playbook-docker.yml | 41 +- ansible/playbook.yml | 133 ++- ansible/tasks/clean-build-dependencies.yml | 17 + ansible/tasks/docker/cleanup.yml | 51 +- ansible/tasks/docker/finalize.yml | 24 + ansible/tasks/docker/setup.yml | 47 +- ansible/tasks/finalize-ami.yml | 75 ++ ansible/tasks/internal/admin-api.yml | 76 ++ ansible/tasks/internal/admin-mgr.yml | 22 + ansible/tasks/internal/node-exporter.yml | 46 - ansible/tasks/internal/optimizations.yml | 32 + ansible/tasks/internal/pg_egress_collect.yml | 15 + ansible/tasks/internal/postgres-exporter.yml | 6 +- ansible/tasks/internal/setup-ansible-pull.yml | 29 + ansible/tasks/internal/setup-nftables.yml | 34 + 
ansible/tasks/internal/supautils.yml | 56 +- .../tasks/postgres-extensions/01-postgis.yml | 35 +- .../postgres-extensions/02-pgrouting.yml | 17 +- .../tasks/postgres-extensions/03-pgtap.yml | 8 +- .../tasks/postgres-extensions/04-pg_cron.yml | 8 +- .../tasks/postgres-extensions/05-pgaudit.yml | 8 +- .../tasks/postgres-extensions/06-pgjwt.yml | 7 +- .../postgres-extensions/07-pgsql-http.yml | 8 +- .../postgres-extensions/08-plpgsql_check.yml | 8 +- .../postgres-extensions/09-pg-safeupdate.yml | 8 +- .../postgres-extensions/10-timescaledb.yml | 9 +- .../tasks/postgres-extensions/11-wal2json.yml | 31 +- .../tasks/postgres-extensions/12-pljava.yml | 48 +- ansible/tasks/postgres-extensions/13-plv8.yml | 22 +- .../postgres-extensions/14-pg_plan_filter.yml | 23 + .../tasks/postgres-extensions/15-pg_net.yml | 37 + ansible/tasks/postgres-extensions/16-rum.yml | 34 + .../postgres-extensions/17-pg_hashids.yml | 22 + .../tasks/postgres-extensions/18-pgsodium.yml | 81 ++ .../postgres-extensions/19-pg_graphql.yml | 3 + .../20-pg_stat_monitor.yml | 23 + .../postgres-extensions/21-auto_explain.yml | 7 + .../postgres-extensions/22-pg_jsonschema.yml | 3 + .../tasks/postgres-extensions/23-vault.yml | 31 + .../tasks/postgres-extensions/24-pgroonga.yml | 83 ++ .../tasks/postgres-extensions/25-wrappers.yml | 3 + .../tasks/postgres-extensions/26-hypopg.yml | 17 + .../postgres-extensions/27-pg_repack.yml | 37 + .../tasks/postgres-extensions/28-pgvector.yml | 23 + .../99-finish_async_tasks.yml | 19 + ansible/tasks/setup-extensions.yml | 57 +- ansible/tasks/setup-fail2ban.yml | 37 +- ansible/tasks/setup-gotrue.yml | 54 + ansible/tasks/setup-kong.yml | 62 ++ ansible/tasks/setup-migrations.yml | 13 + ansible/tasks/setup-nginx.yml | 81 ++ ansible/tasks/setup-pgbouncer.yml | 68 +- ansible/tasks/setup-postgres.yml | 104 +- ansible/tasks/setup-postgrest.yml | 57 +- ansible/tasks/setup-system.yml | 72 +- ansible/tasks/setup-tealbase-internal.yml | 68 +- ansible/tasks/setup-wal-g.yml | 117 +- 
ansible/tasks/test-image.yml | 88 ++ ansible/vars.yml | 130 ++- common.vars.pkr.hcl | 1 + development-arm.vars.pkr.hcl | 7 + docker-compose.yaml | 28 + docs/.DS_Store | Bin 6148 -> 0 bytes ebssurrogate/USAGE.md | 50 + ebssurrogate/files/70-ec2-nvme-devices.rules | 25 + .../files/apparmor_profiles/opt.gotrue.gotrue | 15 + .../files/apparmor_profiles/opt.postgrest | 12 + .../files/apparmor_profiles/usr.bin.vector | 35 + .../usr.lib.postgresql.bin.postgres | 55 + .../apparmor_profiles/usr.local.bin.pgbouncer | 20 + ebssurrogate/files/cloud.cfg | 137 +++ ebssurrogate/files/ebsnvme-id | 173 +++ ebssurrogate/files/sources-arm64.cfg | 10 + ebssurrogate/files/sources.cfg | 10 + .../files/unit-tests/test-extensions.sql | 20 + .../files/unit-tests/unit-test-01.sql | 31 + .../files/unit-tests/verify-extensions.sql | 12 + ebssurrogate/files/vector.timer | 9 + ebssurrogate/scripts/chroot-bootstrap.sh | 213 ++++ ebssurrogate/scripts/surrogate-bootstrap.sh | 312 ++++++ migrations/.env | 2 + migrations/README.md | 77 ++ .../00000000000000-initial-schema.sql | 54 + .../00000000000001-auth-schema.sql | 123 +++ .../00000000000002-storage-schema.sql | 120 +++ .../00000000000003-post-setup.sql | 119 +++ migrations/db/migrate.sh | 58 + .../10000000000000_demote-postgres.sql | 19 + ...20211115181400_update-auth-permissions.sql | 22 + .../20211118015519_create-realtime-schema.sql | 6 + ...1122051245_update-realtime-permissions.sql | 9 + .../20211124212715_update-auth-owner.sql | 8 + ...1130151719_update-realtime-permissions.sql | 8 + ...0118070449_enable-safeupdate-postgrest.sql | 4 + ...0220126121436_finer-postgrest-triggers.sql | 70 ++ ...20220224211803_fix-postgrest-supautils.sql | 21 + .../migrations/20220317095840_pg_graphql.sql | 146 +++ ...fix-postgrest-alter-type-event-trigger.sql | 70 ++ .../20220322085208_gotrue-session-limit.sql | 4 + ...0220404205710_pg_graphql-on-by-default.sql | 161 +++ ...and-tealbase-storage-admin-to-postgres.sql | 10 + 
...0220613123923_pg_graphql-pg-dump-perms.sql | 74 ++ ...13082019_pg_cron-pg_net-temp-perms-fix.sql | 73 ++ ...221028101028_set_authenticator_timeout.sql | 5 + .../20221103090837_revoke_admin.sql | 5 + ...221207154255_create_pgsodium_and_vault.sql | 15 + ...201083204_grant_auth_roles_to_postgres.sql | 5 + migrations/docker-compose.yaml | 49 + migrations/schema.sql | 996 ++++++++++++++++++ migrations/tests/database/exists.sql | 10 + migrations/tests/database/privs.sql | 8 + migrations/tests/database/test.sql | 3 + migrations/tests/fixtures.sql | 67 ++ migrations/tests/storage/exists.sql | 13 + migrations/tests/storage/test.sql | 2 + migrations/tests/test.sql | 13 + scripts/02-credentials_cleanup.sh | 2 +- scripts/90-cleanup.sh | 23 +- scripts/91-log_cleanup.sh | 12 +- 189 files changed, 7310 insertions(+), 880 deletions(-) create mode 100644 .github/CODEOWNERS create mode 100644 .github/workflows/ami-release.yml create mode 100644 .github/workflows/dockerhub-release.yml create mode 100644 .github/workflows/mirror.yml create mode 100644 .github/workflows/publish-migrations.yml create mode 100644 .github/workflows/test.yml create mode 100644 Dockerfile delete mode 100644 amazon-arm.json create mode 100644 amazon-arm64.pkr.hcl delete mode 100644 amazon.json create mode 100644 ansible.cfg create mode 100644 ansible/files/admin_api_scripts/grow_fs.sh create mode 100644 ansible/files/admin_api_scripts/manage_readonly_mode.sh create mode 100644 ansible/files/admin_api_scripts/pg_egress_collect.pl create mode 100644 ansible/files/admin_api_scripts/pg_upgrade_check.sh create mode 100644 ansible/files/admin_api_scripts/pg_upgrade_complete.sh create mode 100644 ansible/files/admin_api_scripts/pg_upgrade_initiate.sh create mode 100644 ansible/files/admin_api_scripts/pg_upgrade_pgsodium_getkey.sh create mode 100644 ansible/files/admin_api_scripts/pg_upgrade_prepare.sh create mode 100644 ansible/files/adminapi.service.j2 create mode 100644 ansible/files/adminapi.sudoers.conf 
create mode 100644 ansible/files/ansible-pull.service create mode 100644 ansible/files/ansible-pull.timer create mode 100644 ansible/files/cron.deny create mode 100644 ansible/files/database-optimizations.service.j2 delete mode 100644 ansible/files/docker_mnt/init.sh create mode 100644 ansible/files/fail2ban_config/fail2ban.service.conf create mode 100644 ansible/files/fail2ban_config/jail-ssh.conf create mode 100644 ansible/files/fail2ban_config/jail.local create mode 100644 ansible/files/gotrue.service.j2 create mode 100644 ansible/files/journald.conf create mode 100644 ansible/files/kong_config/kong.conf.j2 create mode 100644 ansible/files/kong_config/kong.env.j2 create mode 100644 ansible/files/kong_config/kong.service.j2 create mode 100644 ansible/files/logind.conf create mode 100644 ansible/files/logrotate_config/logrotate-postgres-auth.conf create mode 100644 ansible/files/logrotate_config/logrotate-postgres-csv.conf rename ansible/files/{logrotate-postgres => logrotate_config/logrotate-postgres.conf} (68%) create mode 100644 ansible/files/logrotate_config/logrotate-walg.conf create mode 100644 ansible/files/nginx.service.j2 delete mode 100644 ansible/files/node_exporter.service.j2 create mode 100644 ansible/files/pg_egress_collect.service.j2 create mode 100644 ansible/files/pgsodium_getkey_readonly.sh.j2 create mode 100755 ansible/files/pgsodium_getkey_urandom.sh.j2 create mode 100644 ansible/files/postgresql_config/custom_walg.conf.j2 create mode 100644 ansible/files/postgresql_config/postgresql-csvlog.conf create mode 100644 ansible/files/postgresql_config/postgresql-stdout-log.conf create mode 100644 ansible/files/postgresql_config/supautils.conf.j2 create mode 100644 ansible/files/postgresql_extension_custom_scripts/pgsodium/after-create.sql create mode 100644 ansible/files/postgrest-optimizations.service.j2 delete mode 100644 ansible/files/queries.yml.j2 create mode 100644 ansible/files/sodium_extension.sql create mode 100644 
ansible/files/tealbase_facts.ini create mode 100644 ansible/files/ufw.service.conf create mode 100644 ansible/files/vector.service.j2 create mode 100644 ansible/files/walg_helper_scripts/wal_change_ownership.sh create mode 100644 ansible/files/walg_helper_scripts/wal_fetch.sh create mode 100644 ansible/tasks/clean-build-dependencies.yml create mode 100644 ansible/tasks/docker/finalize.yml create mode 100644 ansible/tasks/finalize-ami.yml create mode 100644 ansible/tasks/internal/admin-api.yml create mode 100644 ansible/tasks/internal/admin-mgr.yml delete mode 100644 ansible/tasks/internal/node-exporter.yml create mode 100644 ansible/tasks/internal/pg_egress_collect.yml create mode 100644 ansible/tasks/internal/setup-ansible-pull.yml create mode 100644 ansible/tasks/internal/setup-nftables.yml create mode 100644 ansible/tasks/postgres-extensions/14-pg_plan_filter.yml create mode 100644 ansible/tasks/postgres-extensions/15-pg_net.yml create mode 100644 ansible/tasks/postgres-extensions/16-rum.yml create mode 100644 ansible/tasks/postgres-extensions/17-pg_hashids.yml create mode 100644 ansible/tasks/postgres-extensions/18-pgsodium.yml create mode 100644 ansible/tasks/postgres-extensions/19-pg_graphql.yml create mode 100644 ansible/tasks/postgres-extensions/20-pg_stat_monitor.yml create mode 100644 ansible/tasks/postgres-extensions/21-auto_explain.yml create mode 100644 ansible/tasks/postgres-extensions/22-pg_jsonschema.yml create mode 100644 ansible/tasks/postgres-extensions/23-vault.yml create mode 100644 ansible/tasks/postgres-extensions/24-pgroonga.yml create mode 100644 ansible/tasks/postgres-extensions/25-wrappers.yml create mode 100644 ansible/tasks/postgres-extensions/26-hypopg.yml create mode 100644 ansible/tasks/postgres-extensions/27-pg_repack.yml create mode 100644 ansible/tasks/postgres-extensions/28-pgvector.yml create mode 100644 ansible/tasks/postgres-extensions/99-finish_async_tasks.yml create mode 100644 ansible/tasks/setup-gotrue.yml create mode 
100644 ansible/tasks/setup-kong.yml create mode 100644 ansible/tasks/setup-migrations.yml create mode 100644 ansible/tasks/setup-nginx.yml create mode 100644 ansible/tasks/test-image.yml create mode 100644 common.vars.pkr.hcl create mode 100644 development-arm.vars.pkr.hcl create mode 100644 docker-compose.yaml delete mode 100644 docs/.DS_Store create mode 100644 ebssurrogate/USAGE.md create mode 100644 ebssurrogate/files/70-ec2-nvme-devices.rules create mode 100644 ebssurrogate/files/apparmor_profiles/opt.gotrue.gotrue create mode 100644 ebssurrogate/files/apparmor_profiles/opt.postgrest create mode 100644 ebssurrogate/files/apparmor_profiles/usr.bin.vector create mode 100644 ebssurrogate/files/apparmor_profiles/usr.lib.postgresql.bin.postgres create mode 100644 ebssurrogate/files/apparmor_profiles/usr.local.bin.pgbouncer create mode 100644 ebssurrogate/files/cloud.cfg create mode 100644 ebssurrogate/files/ebsnvme-id create mode 100644 ebssurrogate/files/sources-arm64.cfg create mode 100644 ebssurrogate/files/sources.cfg create mode 100644 ebssurrogate/files/unit-tests/test-extensions.sql create mode 100644 ebssurrogate/files/unit-tests/unit-test-01.sql create mode 100644 ebssurrogate/files/unit-tests/verify-extensions.sql create mode 100644 ebssurrogate/files/vector.timer create mode 100755 ebssurrogate/scripts/chroot-bootstrap.sh create mode 100755 ebssurrogate/scripts/surrogate-bootstrap.sh create mode 100644 migrations/.env create mode 100644 migrations/README.md create mode 100644 migrations/db/init-scripts/00000000000000-initial-schema.sql create mode 100644 migrations/db/init-scripts/00000000000001-auth-schema.sql create mode 100644 migrations/db/init-scripts/00000000000002-storage-schema.sql create mode 100644 migrations/db/init-scripts/00000000000003-post-setup.sql create mode 100755 migrations/db/migrate.sh create mode 100644 migrations/db/migrations/10000000000000_demote-postgres.sql create mode 100644 
migrations/db/migrations/20211115181400_update-auth-permissions.sql create mode 100644 migrations/db/migrations/20211118015519_create-realtime-schema.sql create mode 100644 migrations/db/migrations/20211122051245_update-realtime-permissions.sql create mode 100644 migrations/db/migrations/20211124212715_update-auth-owner.sql create mode 100644 migrations/db/migrations/20211130151719_update-realtime-permissions.sql create mode 100644 migrations/db/migrations/20220118070449_enable-safeupdate-postgrest.sql create mode 100644 migrations/db/migrations/20220126121436_finer-postgrest-triggers.sql create mode 100644 migrations/db/migrations/20220224211803_fix-postgrest-supautils.sql create mode 100644 migrations/db/migrations/20220317095840_pg_graphql.sql create mode 100644 migrations/db/migrations/20220321174452_fix-postgrest-alter-type-event-trigger.sql create mode 100644 migrations/db/migrations/20220322085208_gotrue-session-limit.sql create mode 100644 migrations/db/migrations/20220404205710_pg_graphql-on-by-default.sql create mode 100644 migrations/db/migrations/20220609081115_grant-tealbase-auth-admin-and-tealbase-storage-admin-to-postgres.sql create mode 100644 migrations/db/migrations/20220613123923_pg_graphql-pg-dump-perms.sql create mode 100644 migrations/db/migrations/20220713082019_pg_cron-pg_net-temp-perms-fix.sql create mode 100644 migrations/db/migrations/20221028101028_set_authenticator_timeout.sql create mode 100644 migrations/db/migrations/20221103090837_revoke_admin.sql create mode 100644 migrations/db/migrations/20221207154255_create_pgsodium_and_vault.sql create mode 100644 migrations/db/migrations/20230201083204_grant_auth_roles_to_postgres.sql create mode 100644 migrations/docker-compose.yaml create mode 100644 migrations/schema.sql create mode 100644 migrations/tests/database/exists.sql create mode 100644 migrations/tests/database/privs.sql create mode 100644 migrations/tests/database/test.sql create mode 100644 migrations/tests/fixtures.sql create 
mode 100644 migrations/tests/storage/exists.sql create mode 100644 migrations/tests/storage/test.sql create mode 100644 migrations/tests/test.sql diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000..2f1c9a1 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,2 @@ +* @supabase/backend +migrations/ @supabase/cli @supabase/backend diff --git a/.github/workflows/ami-release.yml b/.github/workflows/ami-release.yml new file mode 100644 index 0000000..e078af1 --- /dev/null +++ b/.github/workflows/ami-release.yml @@ -0,0 +1,51 @@ +name: Release AMI + +on: + push: + branches: + - develop + workflow_dispatch: + +jobs: + build: + runs-on: [self-hosted, linux] + timeout-minutes: 150 + + steps: + - name: Checkout Repo + uses: actions/checkout@v2 + + - name: Build AMI + run: | + GIT_SHA=$(git rev-parse HEAD) + packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common.vars.pkr.hcl" amazon-arm64.pkr.hcl + + - name: Grab release version + id: process_release_version + run: | + VERSION=$(sed -e 's/postgres-version = "\(.*\)"/\1/g' common.vars.pkr.hcl) + GIT_SHA=$(git rev-parse HEAD) + echo "version=$VERSION" >> "$GITHUB_OUTPUT" + echo "git_sha=$GIT_SHA" >> "$GITHUB_OUTPUT" + + - name: Create release + uses: softprops/action-gh-release@v1 + with: + name: ${{ steps.process_release_version.outputs.version }} + tag_name: ${{ steps.process_release_version.outputs.version }} + target_commitish: ${{ steps.process_release_version.outputs.git_sha }} + + - name: Slack Notification on Failure + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: 'gha-failures-notifier' + SLACK_COLOR: 'danger' + SLACK_MESSAGE: 'Building Postgres AMI failed' + SLACK_FOOTER: '' + + - name: Cleanup resources on build cancellation + if: ${{ cancelled() }} + run: | + aws ec2 describe-instances
--filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -I {} aws ec2 terminate-instances --instance-ids {} diff --git a/.github/workflows/dockerhub-release.yml b/.github/workflows/dockerhub-release.yml new file mode 100644 index 0000000..eee4ae1 --- /dev/null +++ b/.github/workflows/dockerhub-release.yml @@ -0,0 +1,195 @@ +name: Release on Dockerhub + +on: + push: + branches: + - develop + paths: + - ".github/workflows/dockerhub-release.yml" + - "common.vars*" + +jobs: + settings: + runs-on: ubuntu-20.04 + outputs: + docker_version: ${{ steps.settings.outputs.postgres-version }} + steps: + - uses: actions/checkout@v3 + + - id: settings + # Remove spaces and quotes to get the raw version string + run: sed -r 's/(\s|\")+//g' common.vars.pkr.hcl >> $GITHUB_OUTPUT + + docker_x86_release: + needs: settings + runs-on: [self-hosted, X64] + timeout-minutes: 120 + env: + arch: amd64 + outputs: + image_digest: ${{ steps.build.outputs.digest }} + steps: + - uses: actions/checkout@v3 + + - id: meta + uses: docker/metadata-action@v4 + with: + images: | + supabase/postgres + tags: | + type=raw,value=${{ needs.settings.outputs.docker_version }}_${{ env.arch }} + + - id: buildx-context + run: | + docker context create builders + + - uses: docker/setup-buildx-action@v2 + with: + endpoint: builders + + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - id: copy-cache + name: Copy Buildcache + run: | + docker rm -f buildcache + docker create --name buildcache public.ecr.aws/t3w2s2c9/postgres-buildcache:latest ls + docker cp buildcache:/ccache/. ./docker/cache + docker rm -f buildcache + + - id: build + uses: docker/build-push-action@v3 + with: + context: . 
+ push: true + tags: ${{ steps.meta.outputs.tags }} + platforms: linux/${{ env.arch }} + no-cache: true + + - name: Slack Notification + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: "gha-failures-notifier" + SLACK_COLOR: "danger" + SLACK_MESSAGE: "Building Postgres x86 image failed" + SLACK_FOOTER: "" + + docker_arm_release: + needs: settings + runs-on: [arm-runner] + timeout-minutes: 120 + env: + arch: arm64 + outputs: + image_digest: ${{ steps.build.outputs.digest }} + steps: + - uses: actions/checkout@v3 + + - id: meta + uses: docker/metadata-action@v4 + with: + images: | + supabase/postgres + tags: | + type=raw,value=${{ needs.settings.outputs.docker_version }}_${{ env.arch }} + + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - id: copy-cache + name: Copy Buildcache + run: | + docker rm -f buildcache + docker create --name buildcache public.ecr.aws/t3w2s2c9/postgres-buildcache:latest ls + docker cp buildcache:/ccache/. ./docker/cache/ + docker rm -f buildcache + + - uses: docker/setup-buildx-action@v2 + with: + driver: docker + driver-opts: | + image=moby/buildkit:master + network=host + + - id: build + uses: docker/build-push-action@v3 + with: + context: . 
+ push: true + tags: ${{ steps.meta.outputs.tags }} + platforms: linux/${{ env.arch }} + no-cache: true + + - name: Slack Notification + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: "gha-failures-notifier" + SLACK_COLOR: "danger" + SLACK_MESSAGE: "Building Postgres arm image failed" + SLACK_FOOTER: "" + + merge_manifest: + needs: [settings, docker_x86_release, docker_arm_release] + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + id-token: write + steps: + - uses: docker/setup-buildx-action@v2 + + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Merge multi-arch manifests + run: | + docker buildx imagetools create -t supabase/postgres:${{ needs.settings.outputs.docker_version }} \ + supabase/postgres@${{ needs.docker_x86_release.outputs.image_digest }} \ + supabase/postgres@${{ needs.docker_arm_release.outputs.image_digest }} + + - name: configure aws credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ secrets.PROD_AWS_ROLE }} + aws-region: us-east-1 + + - name: Login to ECR + uses: docker/login-action@v2 + with: + registry: public.ecr.aws + + - name: Login to GHCR + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Mirror Images + uses: akhilerm/tag-push-action@v2.1.0 + with: + src: docker.io/supabase/postgres:${{ needs.settings.outputs.docker_version }} + dst: | + public.ecr.aws/supabase/postgres:${{ needs.settings.outputs.docker_version }} + ghcr.io/supabase/postgres:${{ needs.settings.outputs.docker_version }} + + - name: Slack Notification + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: "gha-failures-notifier" + SLACK_COLOR: 
"danger" + SLACK_MESSAGE: "Building Postgres image failed" + SLACK_FOOTER: "" diff --git a/.github/workflows/mirror.yml b/.github/workflows/mirror.yml new file mode 100644 index 0000000..b4c2c9b --- /dev/null +++ b/.github/workflows/mirror.yml @@ -0,0 +1,37 @@ +name: Mirror Image + +on: + workflow_dispatch: + inputs: + version: + description: "Image tag" + required: true + type: string + +jobs: + mirror: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + id-token: write + steps: + - name: configure aws credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ secrets.PROD_AWS_ROLE }} + aws-region: us-east-1 + - uses: docker/login-action@v2 + with: + registry: public.ecr.aws + - uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - uses: akhilerm/tag-push-action@v2.1.0 + with: + src: docker.io/supabase/postgres:${{ inputs.version }} + dst: | + public.ecr.aws/supabase/postgres:${{ inputs.version }} + ghcr.io/supabase/postgres:${{ inputs.version }} diff --git a/.github/workflows/publish-migrations.yml b/.github/workflows/publish-migrations.yml new file mode 100644 index 0000000..fea3b9b --- /dev/null +++ b/.github/workflows/publish-migrations.yml @@ -0,0 +1,43 @@ +name: Release Migrations + +on: + push: + branches: + - develop + +jobs: + build: + runs-on: [self-hosted, linux] + timeout-minutes: 15 + + steps: + - name: Checkout Repo + uses: actions/checkout@v2 + + - name: Merging migration files + run: cat $(ls -1) > ../migration-output.sql + working-directory: ${{ github.workspace }}/migrations/db/migrations + + - name: Push migration files to S3 + uses: jakejarvis/s3-sync-action@master + with: + args: --delete + env: + AWS_S3_BUCKET: ${{ secrets.PG_INIT_SCRIPT_S3_BUCKET_STAGING }} + AWS_ACCESS_KEY_ID: ${{ secrets.PG_INIT_SCRIPT_ACCESS_KEY_ID_STAGING }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.PG_INIT_SCRIPT_SECRET_ACCESS_KEY_STAGING }} 
+ AWS_REGION: ap-southeast-1 + SOURCE_DIR: migrations/db + DEST_DIR: migrations/db + + - name: Push migration files to S3 + uses: jakejarvis/s3-sync-action@master + with: + args: --delete + env: + AWS_S3_BUCKET: ${{ secrets.PG_INIT_SCRIPT_S3_BUCKET_PROD }} + AWS_ACCESS_KEY_ID: ${{ secrets.PG_INIT_SCRIPT_ACCESS_KEY_ID_PROD }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.PG_INIT_SCRIPT_SECRET_ACCESS_KEY_PROD }} + AWS_REGION: ap-southeast-1 + SOURCE_DIR: migrations/db + DEST_DIR: migrations/db diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000..13f47d5 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,107 @@ +name: test + +on: + push: + branches: + - develop + pull_request: + workflow_dispatch: + +jobs: + build: + if: ${{ github.event_name != 'pull_request' }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - run: docker compose up --abort-on-container-exit + + migrate: + runs-on: ubuntu-latest + env: + POSTGRES_PASSWORD: password + + strategy: + matrix: + supabase-version: ["15.1.0.11"] + timeout-minutes: 10 + + services: + postgres: + image: supabase/postgres:${{ matrix.supabase-version }} + ports: + - 5478:5432 + # Set health checks to wait until postgres has started + options: >- + --health-cmd "pg_isready -U postgres -h localhost" + --health-interval 5s + --health-timeout 5s + --health-retries 10 + env: + POSTGRES_PASSWORD: ${{ env.POSTGRES_PASSWORD }} + volumes: + # Disable migration by removing from entrypoint + - /dev/null:/docker-entrypoint-initdb.d/migrate.sh + + steps: + - name: checkout + uses: actions/checkout@v3 + + - name: install dbmate + run: | + curl -fsSL -o /usr/local/bin/dbmate https://github.com/amacneil/dbmate/releases/latest/download/dbmate-linux-amd64 + sudo chmod +x /usr/local/bin/dbmate + + - name: migrate schema + run: ./migrations/db/migrate.sh + env: + USE_DBMATE: 1 + POSTGRES_PORT: 5478 + POSTGRES_PASSWORD: ${{ env.POSTGRES_PASSWORD }} + + - name: install pg_prove + 
run: sudo cpan TAP::Parser::SourceHandler::pgTAP + env: + SHELL: /bin/bash + + - name: run tests + run: pg_prove migrations/tests/test.sql + env: + PGHOST: localhost + PGPORT: 5478 + PGUSER: postgres + PGPASSWORD: ${{ env.POSTGRES_PASSWORD }} + + - name: migrations should be idempotent + run: | + for sql in ./migrations/db/migrations/*.sql; do + echo "$0: running $sql" + psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -f "$sql" + done + env: + PGHOST: localhost + PGPORT: 5478 + PGDATABASE: postgres + PGUSER: supabase_admin + PGPASSWORD: ${{ env.POSTGRES_PASSWORD }} + + - name: run tests + run: pg_prove migrations/tests/test.sql + env: + PGHOST: localhost + PGPORT: 5478 + PGUSER: postgres + PGPASSWORD: ${{ env.POSTGRES_PASSWORD }} + + schema: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: verify schema.sql is committed + run: | + docker compose -f migrations/docker-compose.yaml up db dbmate --abort-on-container-exit + if ! git diff --ignore-space-at-eol --exit-code --quiet migrations/schema.sql; then + echo "Detected uncommitted changes after build. 
See status below:" + git diff + exit 1 + fi diff --git a/.gitignore b/.gitignore index 8c1f8fa..4dafd3b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ .DS_Store .python-version venv/ +*.swp +docker/cache/ \ No newline at end of file diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..4ebb436 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,39 @@ +ARG VERSION=15.1 + +FROM postgres:$VERSION + +COPY ansible/ /tmp/ansible/ + +# needed for plv8 Makefile selection +ENV DOCKER true +ENV CCACHE_DIR=/ccache +ENV PATH=/usr/lib/ccache:$PATH +ENV DEBIAN_FRONTEND noninteractive + +RUN apt update && \ + apt install -y ansible sudo git ccache && \ + apt -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" upgrade + +RUN --mount=type=bind,source=docker/cache,target=/ccache,rw \ + ccache -s && \ + ansible-galaxy collection install community.general && \ + cd /tmp/ansible && \ + ansible-playbook -e '{"async_mode": false}' playbook-docker.yml && \ + apt -y autoremove && \ + apt -y autoclean && \ + ccache -s && \ + apt install -y default-jdk-headless locales && \ + sed -i '/en_US.UTF-8/s/^# //g' /etc/locale.gen && \ + locale-gen && \ + rm -rf /tmp/* /var/lib/apt/lists/* /var/tmp/* /usr/lib/python3/dist-packages/ansible_collections/* + +ENV LANGUAGE en_US.UTF-8 +ENV LANG en_US.UTF-8 +ENV LC_ALL en_US.UTF-8 + +COPY ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql /docker-entrypoint-initdb.d/00-schema.sql +COPY ansible/files/stat_extension.sql /docker-entrypoint-initdb.d/01-extension.sql +# COPY ansible/files/sodium_extension.sql /docker-entrypoint-initdb.d/02-sodium-extension.sql +COPY migrations/db/ /docker-entrypoint-initdb.d/ + +CMD ["postgres", "-c", "config_file=/etc/postgresql/postgresql.conf"] diff --git a/README.md b/README.md index 68ade93..cf657d5 100644 --- a/README.md +++ b/README.md @@ -3,31 +3,42 @@ Unmodified Postgres with some useful plugins. 
Our goal with this repo is not to modify Postgres, but to provide some of the most common extensions with a one-click install. ## Primary Features -- ✅ Postgres [13](https://www.postgresql.org/about/news/postgresql-13-released-2077/). +- ✅ Postgres [15](https://www.postgresql.org/about/news/postgresql-15-released-2526/). - ✅ Ubuntu 20.04 (Focal Fossa). - ✅ [wal_level](https://www.postgresql.org/docs/current/runtime-config-wal.html) = logical and [max_replication_slots](https://www.postgresql.org/docs/current/runtime-config-replication.html) = 5. Ready for replication. - ✅ [Large Systems Extensions](https://github.com/aws/aws-graviton-getting-started#building-for-graviton-and-graviton2). Enabled for ARM images. ## Extensions -| Extension | Description | -| ------------- | ------------- | -| [Postgres contrib modules](https://www.postgresql.org/docs/current/contrib.html) | Because everyone should enable `pg_stat_statements`. | -| [PostGIS](https://postgis.net/) | Postgres' most popular extension - support for geographic objects. | -| [pgRouting](https://pgrouting.org/) | Extension of PostGIS - provides geospatial routing functionalities. | -| [pgTAP](https://pgtap.org/) | Unit Testing for Postgres. | -| [pg_cron](https://github.com/citusdata/pg_cron) | Run CRON jobs inside Postgres. | -| [pgAudit](https://www.pgaudit.org/) | Generate highly compliant audit logs. | -| [pgjwt](https://github.com/michelp/pgjwt) | Generate JSON Web Tokens (JWT) in Postgres. | -| [pgsql-http](https://github.com/pramsey/pgsql-http) | HTTP client for Postgres. | -| [plpgsql_check](https://github.com/okbob/plpgsql_check) | Linter tool for PL/pgSQL. | -| [pg-safeupdate](https://github.com/eradman/pg-safeupdate) | Protect your data from accidental updates or deletes. | -| [wal2json](https://github.com/eulerto/wal2json) | JSON output plugin for logical replication decoding. | -| [PL/Java](https://github.com/tada/pljava) | Write in Java functions in Postgres. 
| -| [plv8](https://github.com/plv8/plv8) | Write in Javascript functions in Postgres. | - -Can't find your favorite extension? Suggest for it to be added into future versions [here](https://github.com/tealbase/tealbase/discussions/679)! +| Extension | Version | Description | +| ------------- | :-------------: | ------------- | +| [Postgres contrib modules](https://www.postgresql.org/docs/current/contrib.html) | - | Because everyone should enable `pg_stat_statements`. | +| [PostGIS](https://postgis.net/) | [3.3.2](https://git.osgeo.org/gitea/postgis/postgis/raw/tag/3.3.2/NEWS) | Postgres' most popular extension - support for geographic objects. | +| [pgRouting](https://pgrouting.org/) | [v3.4.1](https://github.com/pgRouting/pgrouting/releases/tag/v3.4.1) | Extension of PostGIS - provides geospatial routing functionalities. | +| [pgTAP](https://pgtap.org/) | [v1.2.0](https://github.com/theory/pgtap/releases/tag/v1.2.0) | Unit Testing for Postgres. | +| [pg_cron](https://github.com/citusdata/pg_cron) | [v1.4.2](https://github.com/citusdata/pg_cron/releases/tag/v1.4.2) | Run CRON jobs inside Postgres. | +| [pgAudit](https://www.pgaudit.org/) | [1.7.0](https://github.com/pgaudit/pgaudit/releases/tag/1.7.0) | Generate highly compliant audit logs. | +| [pgjwt](https://github.com/michelp/pgjwt) | [commit](https://github.com/michelp/pgjwt/commit/9742dab1b2f297ad3811120db7b21451bca2d3c9) | Generate JSON Web Tokens (JWT) in Postgres. | +| [pgsql-http](https://github.com/pramsey/pgsql-http) | [1.5.0](https://github.com/pramsey/pgsql-http/releases/tag/v1.5.0) | HTTP client for Postgres. | +| [plpgsql_check](https://github.com/okbob/plpgsql_check) | [2.2.3](https://github.com/okbob/plpgsql_check/releases/tag/v2.2.3) | Linter tool for PL/pgSQL. | +| [pg-safeupdate](https://github.com/eradman/pg-safeupdate) | [1.4](https://github.com/eradman/pg-safeupdate/releases/tag/1.4) | Protect your data from accidental updates or deletes. 
| +| [wal2json](https://github.com/eulerto/wal2json) | [commit](https://github.com/eulerto/wal2json/commit/53b548a29ebd6119323b6eb2f6013d7c5fe807ec) | JSON output plugin for logical replication decoding. | +| [PL/Java](https://github.com/tada/pljava) | [1.6.4](https://github.com/tada/pljava/releases/tag/V1_6_4) | Write in Java functions in Postgres. | +| [plv8](https://github.com/plv8/plv8) | [commit](https://github.com/plv8/plv8/commit/bcddd92f71530e117f2f98b92d206dafe824f73a) | Write in Javascript functions in Postgres. | +| [pg_plan_filter](https://github.com/pgexperts/pg_plan_filter) | [commit](https://github.com/pgexperts/pg_plan_filter/commit/5081a7b5cb890876e67d8e7486b6a64c38c9a492) | Only allow statements that fulfill set criteria to be executed. | +| [pg_net](https://github.com/tealbase/pg_net) | [v0.6.1](https://github.com/tealbase/pg_net/releases/tag/v0.6.1) | Expose the SQL interface for async networking. | +| [rum](https://github.com/postgrespro/rum) | [1.3.13](https://github.com/postgrespro/rum/releases/tag/1.3.13) | An alternative to the GIN index. | +| [pg_hashids](https://github.com/iCyberon/pg_hashids) | [commit](https://github.com/iCyberon/pg_hashids/commit/83398bcbb616aac2970f5e77d93a3200f0f28e74) | Generate unique identifiers from numbers. | +| [pgsodium](https://github.com/michelp/pgsodium) | [3.1.0](https://github.com/michelp/pgsodium/releases/tag/v3.1.0) | Modern encryption API using libsodium.
| +| [pg_stat_monitor](https://github.com/percona/pg_stat_monitor) | [1.0.1](https://github.com/percona/pg_stat_monitor/releases/tag/1.0.1) | Query Performance Monitoring Tool for PostgreSQL +| [pgvector](https://github.com/pgvector/pgvector) | [v0.4.0](https://github.com/pgvector/pgvector/releases/tag/v0.4.0) | Open-source vector similarity search for Postgres +| [pg_repack](https://github.com/reorg/pg_repack) | [ver_1.4.8](https://github.com/reorg/pg_repack/releases/tag/ver_1.4.8) | Tool to remove bloat from tables and indexes + + +Can't find your favorite extension? Suggest for it to be added into future releases [here](https://github.com/tealbase/tealbase/discussions/679)! ## Enhanced Security +*This is only available for our AWS EC2/ DO Droplet images* + Aside from having [ufw](https://help.ubuntu.com/community/UFW),[fail2ban](https://www.fail2ban.org/wiki/index.php/Main_Page), and [unattended-upgrades](https://wiki.debian.org/UnattendedUpgrades) installed, we also have the following enhancements in place: | Enhancement | Description | | ------------- | ------------- | @@ -35,11 +46,13 @@ Aside from having [ufw](https://help.ubuntu.com/community/UFW),[fail2ban](https: | [fail2ban filter](https://github.com/tealbase/postgres/blob/develop/ansible/files/fail2ban_config/filter-pgbouncer.conf.j2) for PgBouncer access | Monitors for brute force attempts over at port `6543`. | ## Additional Goodies -| Goodie | Description | -| ------------- | ------------- | -| [PgBouncer](https://postgis.net/) | Set up Connection Pooling. | -| [PostgREST](https://postgrest.org/en/stable/) | Instantly transform your database into an RESTful API. | -| [WAL-G](https://github.com/wal-g/wal-g#wal-g) | Tool for physical database backup and recovery. 
| +*This is only available for our AWS EC2/ DO Droplet images* + +| Goodie | Version | Description | +| ------------- | :-------------: | ------------- | +| [PgBouncer](https://www.pgbouncer.org/) | [1.16.1](http://www.pgbouncer.org/changelog.html#pgbouncer-116x) | Set up Connection Pooling. | +| [PostgREST](https://postgrest.org/en/stable/) | [v10.1.1](https://github.com/PostgREST/postgrest/releases/tag/v10.1.1) | Instantly transform your database into an RESTful API. | +| [WAL-G](https://github.com/wal-g/wal-g#wal-g) | [v2.0.1](https://github.com/wal-g/wal-g/releases/tag/v2.0.1) | Tool for physical database backup and recovery. | ## Install @@ -77,11 +90,9 @@ $ time packer build -timestamp-ui \ ## Motivation -After talking to a lot of techies, we've found that most believe Postgres is the best (operational) database but they _still_ choose other databases. This is overwhelmingly because "the other one was quicker/easier". Our goal is to make it fast and simple to get started with Postgres, so that we never hear that excuse again. - -Our secondary goal is to show off a few of Postgres' most exciting features. This is to convince new developers to choose it over other database (a decision we hope they'll appreciate once they start scaling). - -Finally, this is the same build we offer at [tealbase](https://tealbase.io), and everything we do is opensource. This repo makes it easy to _install_ Postgres, tealbase makes it easy to _use_ Postgres. +- Make it fast and simple to get started with Postgres. +- Show off a few of Postgres' most exciting features. +- This is the same build we offer at [tealbase](https://tealbase.io). 
## Roadmap diff --git a/amazon-arm.json b/amazon-arm.json deleted file mode 100644 index bf717cf..0000000 --- a/amazon-arm.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "variables": { - "aws_access_key": "", - "aws_secret_key": "", - "region": "ap-northeast-1", - "ami_regions": "eu-central-1,eu-west-1,eu-west-2,ap-south-1,ap-southeast-1,ap-southeast-2,us-west-1,us-east-1,ca-central-1,sa-east-1,ap-northeast-1", - "ami": "ami-076d8ebdd0e1ec091", - "ami_name": "tealbase-postgres-13.3.0", - "environment": "prod", - "ansible_arguments": "--skip-tags,update-only,--skip-tags,install-postgrest,--skip-tags,install-pgbouncer,--skip-tags,install-tealbase-internal" - }, - "builders": [ - { - "type": "amazon-ebs", - "access_key": "{{user `aws_access_key`}}", - "secret_key": "{{user `aws_secret_key`}}", - "region": "{{user `region`}}", - "ami_regions": "{{user `ami_regions`}}", - "source_ami": "{{user `ami`}}", - "instance_type": "r6g.2xlarge", - "ssh_username": "ubuntu", - "ami_name": "{{user `ami_name`}}", - "tags": { - "environment": "{{user `environment`}}", - "appType": "postgres" - }, - "launch_block_device_mappings": [ - { - "device_name": "/dev/sda1", - "volume_size": 16, - "volume_type": "gp2", - "delete_on_termination": true - } - ] - } - ], - "provisioners": [ - { - "type": "shell", - "inline": [ - "while [ ! 
-f /var/lib/cloud/instance/boot-finished ]; do echo 'Waiting for cloud-init...'; sleep 1; done" - ] - }, - { - "type": "ansible", - "user": "ubuntu", - "playbook_file": "ansible/playbook.yml", - "extra_arguments": "{{user `ansible_arguments`}}" - }, - { - "execute_command": "echo 'packer' | sudo -S sh -c '{{ .Vars }} {{ .Path }}'", - "type": "shell", - "scripts": [ - "scripts/02-credentials_cleanup.sh", - "scripts/90-cleanup.sh", - "scripts/91-log_cleanup.sh" - ] - } - ] -} diff --git a/amazon-arm64.pkr.hcl b/amazon-arm64.pkr.hcl new file mode 100644 index 0000000..e306d97 --- /dev/null +++ b/amazon-arm64.pkr.hcl @@ -0,0 +1,253 @@ +variable "ami" { + type = string + default = "ubuntu/images/hvm-ssd/ubuntu-focal-20.04-arm64-server-*" +} + +variable "profile" { + type = string + default = "${env("AWS_PROFILE")}" +} + +variable "ami_name" { + type = string + default = "tealbase-postgres" +} + +variable "ami_regions" { + type = list(string) + default = ["ap-southeast-2"] +} + +variable "ansible_arguments" { + type = string + default = "--skip-tags,install-postgrest,--skip-tags,install-pgbouncer,--skip-tags,install-tealbase-internal,ebssurrogate_mode='true'" +} + +variable "aws_access_key" { + type = string + default = "" +} + +variable "aws_secret_key" { + type = string + default = "" +} + +variable "environment" { + type = string + default = "prod" +} + +variable "region" { + type = string +} + +variable "build-vol" { + type = string + default = "xvdc" +} + +# ccache docker image details +variable "docker_user" { + type = string + default = "" +} + +variable "docker_passwd" { + type = string + default = "" +} + +variable "docker_image" { + type = string + default = "" +} + +variable "docker_image_tag" { + type = string + default = "latest" +} + +locals { + creator = "packer" +} + +variable "postgres-version" { + type = string + default = "" +} + +variable "git-head-version" { + type = string + default = "unknown" +} + +variable "packer-execution-id" { + type = string 
+ default = "unknown" +} + +# source block +source "amazon-ebssurrogate" "source" { + profile = "${var.profile}" + #access_key = "${var.aws_access_key}" + #ami_name = "${var.ami_name}-arm64-${formatdate("YYYY-MM-DD-hhmm", timestamp())}" + ami_name = "${var.ami_name}-${var.postgres-version}" + ami_virtualization_type = "hvm" + ami_architecture = "arm64" + ami_regions = "${var.ami_regions}" + instance_type = "c6g.4xlarge" + region = "${var.region}" + #secret_key = "${var.aws_secret_key}" + + # Use latest official ubuntu focal ami owned by Canonical. + source_ami_filter { + filters = { + virtualization-type = "hvm" + name = "${var.ami}" + root-device-type = "ebs" + } + owners = [ "099720109477" ] + most_recent = true + } + ena_support = true + launch_block_device_mappings { + device_name = "/dev/xvdf" + delete_on_termination = true + volume_size = 10 + volume_type = "gp3" + } + + launch_block_device_mappings { + device_name = "/dev/xvdh" + delete_on_termination = true + volume_size = 8 + volume_type = "gp3" + } + + launch_block_device_mappings { + device_name = "/dev/${var.build-vol}" + delete_on_termination = true + volume_size = 16 + volume_type = "gp2" + omit_from_artifact = true + } + + run_tags = { + creator = "packer" + appType = "postgres" + packerExecutionId = "${var.packer-execution-id}" + } + run_volume_tags = { + creator = "packer" + appType = "postgres" + } + snapshot_tags = { + creator = "packer" + appType = "postgres" + } + tags = { + creator = "packer" + appType = "postgres" + postgresVersion = "${var.postgres-version}" + sourceSha = "${var.git-head-version}" + } + + communicator = "ssh" + ssh_pty = true + ssh_username = "ubuntu" + ssh_timeout = "5m" + + ami_root_device { + source_device_name = "/dev/xvdf" + device_name = "/dev/xvda" + delete_on_termination = true + volume_size = 10 + volume_type = "gp2" + } +} + +# a build block invokes sources and runs provisioning steps on them. 
+build { + sources = ["source.amazon-ebssurrogate.source"] + + provisioner "file" { + source = "ebssurrogate/files/sources-arm64.cfg" + destination = "/tmp/sources.list" + } + + provisioner "file" { + source = "ebssurrogate/files/ebsnvme-id" + destination = "/tmp/ebsnvme-id" + } + + provisioner "file" { + source = "ebssurrogate/files/70-ec2-nvme-devices.rules" + destination = "/tmp/70-ec2-nvme-devices.rules" + } + + provisioner "file" { + source = "ebssurrogate/scripts/chroot-bootstrap.sh" + destination = "/tmp/chroot-bootstrap.sh" + } + + provisioner "file" { + source = "ebssurrogate/files/cloud.cfg" + destination = "/tmp/cloud.cfg" + } + + provisioner "file" { + source = "ebssurrogate/files/vector.timer" + destination = "/tmp/vector.timer" + } + + provisioner "file" { + source = "ebssurrogate/files/apparmor_profiles" + destination = "/tmp" + } + + provisioner "file" { + source = "migrations" + destination = "/tmp" + } + + provisioner "file" { + source = "ebssurrogate/files/unit-tests" + destination = "/tmp" + } + + # Copy ansible playbook + provisioner "shell" { + inline = ["mkdir /tmp/ansible-playbook"] + } + + provisioner "file" { + source = "ansible" + destination = "/tmp/ansible-playbook" + } + + provisioner "file" { + source = "scripts" + destination = "/tmp/ansible-playbook" + } + + provisioner "shell" { + environment_vars = [ + "ARGS=${var.ansible_arguments}", + "DOCKER_USER=${var.docker_user}", + "DOCKER_PASSWD=${var.docker_passwd}", + "DOCKER_IMAGE=${var.docker_image}", + "DOCKER_IMAGE_TAG=${var.docker_image_tag}" + ] + script = "ebssurrogate/scripts/surrogate-bootstrap.sh" + execute_command = "sudo -S sh -c '{{ .Vars }} {{ .Path }}'" + start_retry_timeout = "5m" + skip_clean = true + } + + provisioner "file" { + source = "/tmp/ansible.log" + destination = "/tmp/ansible.log" + direction = "download" + } +} diff --git a/amazon.json b/amazon.json deleted file mode 100644 index 2fabed7..0000000 --- a/amazon.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - 
"variables": { - "aws_access_key": "", - "aws_secret_key": "", - "region": "af-south-1", - "ami_regions": "af-south-1", - "ami": "ami-08a4b40f2fe1e4b35", - "ami_name": "tealbase-postgres-13.3.0.4", - "environment": "prod", - "ansible_arguments": "--skip-tags,update-only,--skip-tags,install-postgrest,--skip-tags,install-pgbouncer,--skip-tags,install-tealbase-internal" - }, - "builders": [ - { - "type": "amazon-ebs", - "access_key": "{{user `aws_access_key`}}", - "secret_key": "{{user `aws_secret_key`}}", - "region": "{{user `region`}}", - "ami_regions": "{{user `ami_regions`}}", - "source_ami": "{{user `ami`}}", - "instance_type": "m5.2xlarge", - "ssh_username": "ubuntu", - "ami_name": "{{user `ami_name`}}", - "tags": { - "environment": "{{user `environment`}}", - "appType": "postgres" - }, - "launch_block_device_mappings": [ - { - "device_name": "/dev/sda1", - "volume_size": 16, - "volume_type": "gp2", - "delete_on_termination": true - } - ] - } - ], - "provisioners": [ - { - "type": "shell", - "inline": [ - "while [ ! -f /var/lib/cloud/instance/boot-finished ]; do echo 'Waiting for cloud-init...'; sleep 1; done" - ] - }, - { - "type": "ansible", - "user": "ubuntu", - "playbook_file": "ansible/playbook.yml", - "extra_arguments": "{{user `ansible_arguments`}}" - }, - { - "execute_command": "echo 'packer' | sudo -S sh -c '{{ .Vars }} {{ .Path }}'", - "type": "shell", - "scripts": [ - "scripts/02-credentials_cleanup.sh", - "scripts/90-cleanup.sh", - "scripts/91-log_cleanup.sh" - ] - } - ] -} diff --git a/ansible.cfg b/ansible.cfg new file mode 100644 index 0000000..5410ed8 --- /dev/null +++ b/ansible.cfg @@ -0,0 +1,3 @@ +[defaults] + +callback_whitelist = profile_tasks diff --git a/ansible/files/admin_api_scripts/grow_fs.sh b/ansible/files/admin_api_scripts/grow_fs.sh new file mode 100644 index 0000000..6d2a4e5 --- /dev/null +++ b/ansible/files/admin_api_scripts/grow_fs.sh @@ -0,0 +1,23 @@ +#! 
/usr/bin/env bash + +set -euo pipefail + +VOLUME_TYPE=${1:-data} + +if [ -b /dev/nvme1n1 ] ; then + if [[ "${VOLUME_TYPE}" == "data" ]]; then + resize2fs /dev/nvme1n1 + + elif [[ "${VOLUME_TYPE}" == "root" ]] ; then + growpart /dev/nvme0n1 2 + resize2fs /dev/nvme0n1p2 + + else + echo "Invalid disk specified: ${VOLUME_TYPE}" + exit 1 + fi +else + growpart /dev/nvme0n1 2 + resize2fs /dev/nvme0n1p2 +fi +echo "Done resizing disk" diff --git a/ansible/files/admin_api_scripts/manage_readonly_mode.sh b/ansible/files/admin_api_scripts/manage_readonly_mode.sh new file mode 100644 index 0000000..1e258bd --- /dev/null +++ b/ansible/files/admin_api_scripts/manage_readonly_mode.sh @@ -0,0 +1,45 @@ +#! /usr/bin/env bash + +set -euo pipefail + +SUBCOMMAND=$1 + +function set_mode { + MODE=$1 + psql -h localhost -U tealbase_admin -d postgres -c "ALTER SYSTEM SET default_transaction_read_only to ${MODE};" + psql -h localhost -U tealbase_admin -d postgres -c "SELECT pg_reload_conf();" +} + +function check_override { + COMMAND=$(cat < 220.235.16.223.62599: Flags [S.], cksum 0x5de3 (incorrect -> 0x63da), seq 2314200657, ack 2071735457, win 62643, options [mss 8961,sackOK,TS val 3358598837 ecr 1277499190,nop,wscale 7], length 0 +# 1674013833.989257 IP (tos 0x0, ttl 64, id 24975, offset 0, flags [DF], proto TCP (6), length 52) +# 10.112.101.122.5432 > 220.235.16.223.62599: Flags [.], cksum 0x5ddb (incorrect -> 0xa25b), seq 1, ack 9, win 490, options [nop,nop,TS val 3358598885 ecr 1277499232], length 0 +sub extract_packet_length { + my ($line) = @_; + + #print("debug: >> " . 
$line); + + if ($line =~ /^\s+\d+\.\d+\.\d+\.\d+\..*, length (\d+)$/) { + # extract tcp packet length and add it up + my $len = $1; + $captured_len += $len; + } +} + +# write total length to file +sub write_file { + my ($output) = @_; + + my $now = strftime "%F %T", localtime time; + print "[$now] write captured len $captured_len to $output\n"; + + open(my $fh, "+>", $output) or die "Could not open file '$output' $!"; + print $fh "$captured_len"; + close($fh) or die "Could not write file '$output' $!"; +} + +# main +sub main { + # get arguments + GetOptions( + "interval:i" => \(my $interval = 60), + "output:s" => \(my $output = "/tmp/pg_egress_collect.txt"), + "help" => sub { HelpMessage(0) }, + ) or HelpMessage(1); + + my $loop = IO::Async::Loop->new; + + # tcpdump extractor + my $extractor = IO::Async::Stream->new_for_stdin( + on_read => sub { + my ($self, $buffref, $eof) = @_; + + while($$buffref =~ s/^(.*\n)//) { + my $line = $1; + extract_packet_length($line); + } + + return 0; + }, + ); + + # schedule file writer per minute + my $writer = IO::Async::Timer::Periodic->new( + interval => $interval, + on_tick => sub { + write_file($output); + + # reset total captured length + $captured_len = 0; + }, + ); + $writer->start; + + print "pg_egress_collect started, egress data will be saved to $output at interval $interval seconds.\n"; + + $loop->add($extractor); + $loop->add($writer); + $loop->run; +} + +main(); + +__END__ + +=head1 NAME + +pg_egress_collect.pl - collect egress from tcpdump output, extract TCP packet length, aggregate in specified interval and write to output file. 
+ +=head1 SYNOPSIS + +pg_egress_collect.pl [-i interval] [-o output] + +Options: + + -i, --interval interval + output file write interval, in seconds, default is 60 seconds + + -o, --output output + output file path, default is /tmp/pg_egress_collect.txt + + -h, --help + print this help message + +=cut diff --git a/ansible/files/admin_api_scripts/pg_upgrade_check.sh b/ansible/files/admin_api_scripts/pg_upgrade_check.sh new file mode 100644 index 0000000..1e5dd14 --- /dev/null +++ b/ansible/files/admin_api_scripts/pg_upgrade_check.sh @@ -0,0 +1,16 @@ +#! /usr/bin/env bash +## This script provides a method to check the status of the database upgrade +## process, which is updated in /tmp/pg-upgrade-status by pg_upgrade_initiate.sh +## This runs on the old (source) instance. + +set -euo pipefail + +STATUS_FILE="/tmp/pg-upgrade-status" + +if [ -f "${STATUS_FILE}" ]; then + STATUS=$(cat "${STATUS_FILE}") + echo -n "${STATUS}" +else + echo -n "unknown" +fi + diff --git a/ansible/files/admin_api_scripts/pg_upgrade_complete.sh b/ansible/files/admin_api_scripts/pg_upgrade_complete.sh new file mode 100644 index 0000000..2c90c36 --- /dev/null +++ b/ansible/files/admin_api_scripts/pg_upgrade_complete.sh @@ -0,0 +1,45 @@ +#! /usr/bin/env bash + +## This script is run on the newly launched instance which is to be promoted to +## become the primary database instance once the upgrade successfully completes. +## The following commands copy custom PG configs and enable previously disabled +## extensions, containing regtypes referencing system OIDs. + +# Extensions to be reenabled after pg_upgrade. +# Running an upgrade with these extensions enabled will result in errors due to +# them depending on regtypes referencing system OIDs. Thus they have been disabled +# beforehand. 
+EXTENSIONS_TO_REENABLE=( + "pg_graphql" +) + + +run_sql() { + STATEMENT=$1 + psql -h localhost -U tealbase_admin -d postgres -c "$STATEMENT" +} + +function complete_pg_upgrade { + mount -a -v + + # copying custom configurations + cp -R /data/conf/* /etc/postgresql-custom/ + + service postgresql start + su -c 'vacuumdb --all --analyze-in-stages' -s $SHELL postgres + + for EXTENSION in "${EXTENSIONS_TO_REENABLE[@]}"; do + run_sql "CREATE EXTENSION IF NOT EXISTS ${EXTENSION} CASCADE;" + done + + sleep 5 + service postgresql restart + + sleep 5 + service postgresql restart +} + +set -euo pipefail + +complete_pg_upgrade >> /var/log/pg-upgrade-complete.log 2>&1 +echo "Upgrade job completed" diff --git a/ansible/files/admin_api_scripts/pg_upgrade_initiate.sh b/ansible/files/admin_api_scripts/pg_upgrade_initiate.sh new file mode 100644 index 0000000..6e21919 --- /dev/null +++ b/ansible/files/admin_api_scripts/pg_upgrade_initiate.sh @@ -0,0 +1,124 @@ +#! /usr/bin/env bash + +## This script is run on the old (source) instance, mounting the data disk +## of the newly launched instance, disabling extensions containing regtypes, +## and running pg_upgrade. +## It reports the current status of the upgrade process to /tmp/pg-upgrade-status, +## which can then be subsequently checked through pg_upgrade_check.sh. + +# Extensions to disable before running pg_upgrade. +# Running an upgrade with these extensions enabled will result in errors due to +# them depending on regtypes referencing system OIDs. 
+EXTENSIONS_TO_DISABLE=( + "pg_graphql" +) + +set -eEuo pipefail + +PGVERSION=$1 + +MOUNT_POINT="/data_migration" + +run_sql() { + STATEMENT=$1 + psql -h localhost -U tealbase_admin -d postgres -c "$STATEMENT" +} + +cleanup() { + UPGRADE_STATUS=${1:-"failed"} + EXIT_CODE=${?:-0} + + if [ -L /var/lib/postgresql ]; then + rm /var/lib/postgresql + mv /var/lib/postgresql.bak /var/lib/postgresql + fi + + systemctl restart postgresql + sleep 10 + systemctl restart postgresql + + for EXTENSION in "${EXTENSIONS_TO_DISABLE[@]}"; do + run_sql "CREATE EXTENSION IF NOT EXISTS ${EXTENSION} CASCADE;" + done + + run_sql "ALTER USER postgres WITH NOSUPERUSER;" + if [ -d "${MOUNT_POINT}/pgdata/pg_upgrade_output.d/" ]; then + cp -R "${MOUNT_POINT}/pgdata/pg_upgrade_output.d/" /var/log/ + fi + + umount $MOUNT_POINT + echo "${UPGRADE_STATUS}" > /tmp/pg-upgrade-status + + exit $EXIT_CODE +} + +function initiate_upgrade { + BLOCK_DEVICE=$(lsblk -dpno name | grep -v "/dev/nvme[0-1]") + echo "running" > /tmp/pg-upgrade-status + + mkdir -p "$MOUNT_POINT" + mount "$BLOCK_DEVICE" "$MOUNT_POINT" + + SHARED_PRELOAD_LIBRARIES=$(cat /etc/postgresql/postgresql.conf | grep shared_preload_libraries | sed "s/shared_preload_libraries = '\(.*\)'.*/\1/") + PGDATAOLD=$(cat /etc/postgresql/postgresql.conf | grep data_directory | sed "s/data_directory = '\(.*\)'.*/\1/") + + PGDATANEW="$MOUNT_POINT/pgdata" + PGBINNEW="/tmp/pg_upgrade_bin/$PGVERSION/bin" + PGSHARENEW="/tmp/pg_upgrade_bin/$PGVERSION/share" + + mkdir -p "/tmp/pg_upgrade_bin" + tar zxvf "/tmp/persistent/pg_upgrade_bin.tar.gz" -C "/tmp/pg_upgrade_bin" + + # copy upgrade-specific pgsodium_getkey script into the share dir + cp /root/pg_upgrade_pgsodium_getkey.sh "$PGSHARENEW/extension/pgsodium_getkey" + chmod +x "$PGSHARENEW/extension/pgsodium_getkey" + + chown -R postgres:postgres "/tmp/pg_upgrade_bin/$PGVERSION" + + for EXTENSION in "${EXTENSIONS_TO_DISABLE[@]}"; do + run_sql "DROP EXTENSION IF EXISTS ${EXTENSION} CASCADE;" + done + + run_sql 
"ALTER USER postgres WITH SUPERUSER;" + + + chown -R postgres:postgres "$MOUNT_POINT/" + rm -rf "$PGDATANEW/" + su -c "$PGBINNEW/initdb -L $PGSHARENEW -D $PGDATANEW/" -s $SHELL postgres + + # running upgrade using at least 1 cpu core + WORKERS=$(nproc | awk '{ print ($1 == 1 ? 1 : $1 - 1) }') + + # upgrade job outputs a log in the cwd; needs write permissions + cd /tmp + + UPGRADE_COMMAND=$(cat <> /var/log/pg-upgrade-initiate.log 2>&1 +echo "Upgrade initiate job completed " diff --git a/ansible/files/admin_api_scripts/pg_upgrade_pgsodium_getkey.sh b/ansible/files/admin_api_scripts/pg_upgrade_pgsodium_getkey.sh new file mode 100644 index 0000000..5a5a90e --- /dev/null +++ b/ansible/files/admin_api_scripts/pg_upgrade_pgsodium_getkey.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +set -euo pipefail + +KEY_FILE=/etc/postgresql-custom/pgsodium_root.key + +# if key file doesn't exist (project previously didn't use pgsodium), generate a new key +if [[ ! -f "${KEY_FILE}" ]]; then + head -c 32 /dev/urandom | od -A n -t x1 | tr -d ' \n' > $KEY_FILE +fi + +cat $KEY_FILE diff --git a/ansible/files/admin_api_scripts/pg_upgrade_prepare.sh b/ansible/files/admin_api_scripts/pg_upgrade_prepare.sh new file mode 100644 index 0000000..9754726 --- /dev/null +++ b/ansible/files/admin_api_scripts/pg_upgrade_prepare.sh @@ -0,0 +1,14 @@ +#! /usr/bin/env bash +## This script is runs in advance of the database version upgrade, on the newly +## launched instance which will eventually be promoted to become the primary +## database instance once the upgrade successfully completes, terminating the +## previous (source) instance. +## The following commands safely stop the Postgres service and unmount +## the data disk off the newly launched instance, to be re-attached to the +## source instance and run the upgrade there. 
+ +set -euo pipefail + +systemctl stop postgresql +umount /data + diff --git a/ansible/files/adminapi.service.j2 b/ansible/files/adminapi.service.j2 new file mode 100644 index 0000000..7db04ea --- /dev/null +++ b/ansible/files/adminapi.service.j2 @@ -0,0 +1,12 @@ +[Unit] +Description=AdminAPI + +[Service] +Type=simple +ExecStart=/opt/tealbase-admin-api +User=adminapi +Restart=always +RestartSec=3 + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/adminapi.sudoers.conf b/ansible/files/adminapi.sudoers.conf new file mode 100644 index 0000000..97c4571 --- /dev/null +++ b/ansible/files/adminapi.sudoers.conf @@ -0,0 +1,24 @@ +Cmnd_Alias KONG = /bin/systemctl start kong.service, /bin/systemctl stop kong.service, /bin/systemctl restart kong.service, /bin/systemctl disable kong.service, /bin/systemctl enable kong.service, /bin/systemctl reload kong.service +Cmnd_Alias POSTGREST = /bin/systemctl start postgrest.service, /bin/systemctl stop postgrest.service, /bin/systemctl restart postgrest.service, /bin/systemctl disable postgrest.service, /bin/systemctl enable postgrest.service +Cmnd_Alias GOTRUE = /bin/systemctl start gotrue.service, /bin/systemctl stop gotrue.service, /bin/systemctl restart gotrue.service, /bin/systemctl disable gotrue.service, /bin/systemctl enable gotrue.service +Cmnd_Alias PGBOUNCER = /bin/systemctl start pgbouncer.service, /bin/systemctl stop pgbouncer.service, /bin/systemctl restart pgbouncer.service, /bin/systemctl disable pgbouncer.service, /bin/systemctl enable pgbouncer.service, /bin/systemctl reload pgbouncer.service + +%adminapi ALL= NOPASSWD: /root/grow_fs.sh +%adminapi ALL= NOPASSWD: /root/manage_readonly_mode.sh +%adminapi ALL= NOPASSWD: /root/pg_upgrade_prepare.sh +%adminapi ALL= NOPASSWD: /root/pg_upgrade_initiate.sh +%adminapi ALL= NOPASSWD: /root/pg_upgrade_complete.sh +%adminapi ALL= NOPASSWD: /root/pg_upgrade_check.sh +%adminapi ALL= NOPASSWD: /root/pg_upgrade_pgsodium_getkey.sh +%adminapi ALL= NOPASSWD: 
/usr/bin/systemctl daemon-reload +%adminapi ALL= NOPASSWD: /usr/bin/systemctl reload postgresql.service +%adminapi ALL= NOPASSWD: /usr/bin/systemctl restart postgresql.service +%adminapi ALL= NOPASSWD: /usr/bin/systemctl restart adminapi.service +%adminapi ALL= NOPASSWD: /bin/systemctl daemon-reload +%adminapi ALL= NOPASSWD: /bin/systemctl restart services.slice +%adminapi ALL= NOPASSWD: /usr/sbin/nft -f /etc/nftables/tealbase_managed.conf +%adminapi ALL= NOPASSWD: /usr/bin/admin-mgr +%adminapi ALL= NOPASSWD: KONG +%adminapi ALL= NOPASSWD: POSTGREST +%adminapi ALL= NOPASSWD: GOTRUE +%adminapi ALL= NOPASSWD: PGBOUNCER diff --git a/ansible/files/ansible-pull.service b/ansible/files/ansible-pull.service new file mode 100644 index 0000000..3e061b3 --- /dev/null +++ b/ansible/files/ansible-pull.service @@ -0,0 +1,20 @@ +[Unit] +Description=Ansible pull + +[Service] +Type=simple +User=ubuntu + +ExecStart=/usr/bin/ansible-pull --private-key "$SSH_READ_KEY_FILE" -U "$REPO" --accept-host-key -t "$REGION,db-all" -i localhost --clean --full "$PLAYBOOK" -v -o -C "$REPO_BRANCH" + +# --verify-commit +# temporarily disable commit verification, while we figure out how we want to balance commit signatures +# and PR reviews; an --ff-only merge options would have allowed us to use this pretty nicely + +MemoryAccounting=true +MemoryMax=30% + +StandardOutput=append:/var/log/ansible-pull.stdout +StandardError=append:/var/log/ansible-pull.error + +TimeoutStopSec=600 diff --git a/ansible/files/ansible-pull.timer b/ansible/files/ansible-pull.timer new file mode 100644 index 0000000..27ce24b --- /dev/null +++ b/ansible/files/ansible-pull.timer @@ -0,0 +1,11 @@ +[Unit] +Description=Run ansible roughly every 3 hours + +[Timer] +OnBootSec=1h +OnUnitActiveSec=3h +RandomizedDelaySec=1h +Persistent=true + +[Install] +WantedBy=timers.target diff --git a/ansible/files/cron.deny b/ansible/files/cron.deny new file mode 100644 index 0000000..3b5199b --- /dev/null +++ b/ansible/files/cron.deny @@ -0,0 
+1,2 @@ +ubuntu +postgres diff --git a/ansible/files/database-optimizations.service.j2 b/ansible/files/database-optimizations.service.j2 new file mode 100644 index 0000000..599a17d --- /dev/null +++ b/ansible/files/database-optimizations.service.j2 @@ -0,0 +1,12 @@ +[Unit] +Description=Postgresql optimizations + +[Service] +Type=oneshot +# we do not want failures from these commands to cause downstream service startup to fail +ExecStart=-/opt/tealbase-admin-api optimize db --destination-config-file-path /etc/postgresql-custom/generated-optimizations.conf +ExecStart=-/opt/tealbase-admin-api optimize pgbouncer --destination-config-file-path /etc/pgbouncer-custom/generated-optimizations.ini +User=adminapi + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/docker_mnt/init.sh b/ansible/files/docker_mnt/init.sh deleted file mode 100644 index fd12bbe..0000000 --- a/ansible/files/docker_mnt/init.sh +++ /dev/null @@ -1,3 +0,0 @@ -cat /etc/postgresql/postgresql.conf > $PGDATA/postgresql.conf -echo "host replication $POSTGRES_USER 0.0.0.0/0 trust" >> $PGDATA/pg_hba.conf -echo "host all all 127.0.0.1/32 trust" >> $PGDATA/pg_hba.conf \ No newline at end of file diff --git a/ansible/files/fail2ban_config/fail2ban.service.conf b/ansible/files/fail2ban_config/fail2ban.service.conf new file mode 100644 index 0000000..431d1db --- /dev/null +++ b/ansible/files/fail2ban_config/fail2ban.service.conf @@ -0,0 +1,6 @@ +[Unit] +After=nftables.service +Wants=nftables.service + +[Service] +ExecStartPost=/bin/bash -c "sleep 5 && chmod g+w /var/run/fail2ban/fail2ban.sock" diff --git a/ansible/files/fail2ban_config/jail-postgresql.conf.j2 b/ansible/files/fail2ban_config/jail-postgresql.conf.j2 index 516f532..9822d4f 100644 --- a/ansible/files/fail2ban_config/jail-postgresql.conf.j2 +++ b/ansible/files/fail2ban_config/jail-postgresql.conf.j2 @@ -3,5 +3,5 @@ enabled = true port = 5432 protocol = tcp filter = postgresql -logpath = /var/lib/postgresql/data/pg_log/postgresql.csv 
-maxretry = 3 \ No newline at end of file +logpath = /var/log/postgresql/postgresql.csv +maxretry = 3 diff --git a/ansible/files/fail2ban_config/jail-ssh.conf b/ansible/files/fail2ban_config/jail-ssh.conf new file mode 100644 index 0000000..5476c30 --- /dev/null +++ b/ansible/files/fail2ban_config/jail-ssh.conf @@ -0,0 +1,4 @@ +[sshd] + +backend = systemd +mode = aggressive diff --git a/ansible/files/fail2ban_config/jail.local b/ansible/files/fail2ban_config/jail.local new file mode 100644 index 0000000..44e8210 --- /dev/null +++ b/ansible/files/fail2ban_config/jail.local @@ -0,0 +1,4 @@ +[DEFAULT] + +banaction = nftables-multiport +banaction_allports = nftables-allports diff --git a/ansible/files/gotrue.service.j2 b/ansible/files/gotrue.service.j2 new file mode 100644 index 0000000..c37a236 --- /dev/null +++ b/ansible/files/gotrue.service.j2 @@ -0,0 +1,20 @@ +[Unit] +Description=Gotrue + +[Service] +Type=simple +WorkingDirectory=/opt/gotrue +ExecStart=/opt/gotrue/gotrue +User=gotrue +Restart=always +RestartSec=3 + +MemoryAccounting=true +MemoryMax=50% + +EnvironmentFile=/etc/gotrue.env + +Slice=services.slice + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/journald.conf b/ansible/files/journald.conf new file mode 100644 index 0000000..2eb89f9 --- /dev/null +++ b/ansible/files/journald.conf @@ -0,0 +1,6 @@ +[Journal] +Storage=persistent +SystemMaxUse=3G +SystemKeepFree=3G +SystemMaxFileSize=200M +ForwardToSyslog=no diff --git a/ansible/files/kong_config/kong.conf.j2 b/ansible/files/kong_config/kong.conf.j2 new file mode 100644 index 0000000..54ce718 --- /dev/null +++ b/ansible/files/kong_config/kong.conf.j2 @@ -0,0 +1,7 @@ +database = off +declarative_config = /etc/kong/kong.yml + +# plugins defined in the dockerfile +plugins = request-transformer,cors,key-auth,http-log + +proxy_listen = 0.0.0.0:80 reuseport backlog=16384, 0.0.0.0:443 http2 ssl reuseport backlog=16834 diff --git a/ansible/files/kong_config/kong.env.j2 
b/ansible/files/kong_config/kong.env.j2 new file mode 100644 index 0000000..57613fd --- /dev/null +++ b/ansible/files/kong_config/kong.env.j2 @@ -0,0 +1,8 @@ +KONG_NGINX_HTTP_GZIP=on +KONG_NGINX_HTTP_GZIP_COMP_LEVEL=6 +KONG_NGINX_HTTP_GZIP_MIN_LENGTH=256 +KONG_NGINX_HTTP_GZIP_PROXIED=any +KONG_NGINX_HTTP_GZIP_VARY=on +KONG_NGINX_HTTP_GZIP_TYPES=text/plain application/xml application/openapi+json application/json +KONG_PROXY_ERROR_LOG=syslog:server=unix:/dev/log +KONG_ADMIN_ERROR_LOG=syslog:server=unix:/dev/log diff --git a/ansible/files/kong_config/kong.service.j2 b/ansible/files/kong_config/kong.service.j2 new file mode 100644 index 0000000..6df4b55 --- /dev/null +++ b/ansible/files/kong_config/kong.service.j2 @@ -0,0 +1,24 @@ +[Unit] +Description=Kong server +After=postgrest.service gotrue.service adminapi.service +Wants=postgrest.service gotrue.service adminapi.service + +[Service] +Type=forking +ExecStart=/usr/local/bin/kong start -c /etc/kong/kong.conf +ExecReload=/usr/local/bin/kong reload -c /etc/kong/kong.conf +ExecStop=/usr/local/bin/kong stop +User=kong +EnvironmentFile=/etc/kong/kong.env +Slice=services.slice +Restart=always +RestartSec=3 +LimitNOFILE=100000 + +# The kong user is unpriviledged and thus not permited to bind on ports < 1024 +# Via systemd we grant the process a set of priviledges to bind to 80/443 +# See http://archive.vn/36zJU +AmbientCapabilities=CAP_NET_BIND_SERVICE + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/logind.conf b/ansible/files/logind.conf new file mode 100644 index 0000000..732900f --- /dev/null +++ b/ansible/files/logind.conf @@ -0,0 +1,2 @@ +[Login] +RemoveIPC=no diff --git a/ansible/files/logrotate_config/logrotate-postgres-auth.conf b/ansible/files/logrotate_config/logrotate-postgres-auth.conf new file mode 100644 index 0000000..050210e --- /dev/null +++ b/ansible/files/logrotate_config/logrotate-postgres-auth.conf @@ -0,0 +1,8 @@ +/var/log/postgresql/auth-failures.csv { + size 10M + rotate 5 + 
compress + delaycompress + notifempty + missingok +} diff --git a/ansible/files/logrotate_config/logrotate-postgres-csv.conf b/ansible/files/logrotate_config/logrotate-postgres-csv.conf new file mode 100644 index 0000000..d77a98d --- /dev/null +++ b/ansible/files/logrotate_config/logrotate-postgres-csv.conf @@ -0,0 +1,11 @@ +/var/log/postgresql/postgresql.csv { + size 50M + rotate 9 + compress + delaycompress + notifempty + missingok + postrotate + sudo -u postgres pg_ctl -D /var/lib/postgresql/data logrotate + endscript +} diff --git a/ansible/files/logrotate-postgres b/ansible/files/logrotate_config/logrotate-postgres.conf similarity index 68% rename from ansible/files/logrotate-postgres rename to ansible/files/logrotate_config/logrotate-postgres.conf index 3266dbd..c802320 100644 --- a/ansible/files/logrotate-postgres +++ b/ansible/files/logrotate_config/logrotate-postgres.conf @@ -1,4 +1,4 @@ -/var/lib/postgresql/data/pg_log/postgresql.log { +/var/log/postgresql/postgresql.log { size 50M rotate 3 copytruncate diff --git a/ansible/files/logrotate_config/logrotate-walg.conf b/ansible/files/logrotate_config/logrotate-walg.conf new file mode 100644 index 0000000..49eeb59 --- /dev/null +++ b/ansible/files/logrotate_config/logrotate-walg.conf @@ -0,0 +1,9 @@ +/var/log/wal-g/*.log { + size 50M + rotate 3 + copytruncate + delaycompress + compress + notifempty + missingok +} diff --git a/ansible/files/nginx.service.j2 b/ansible/files/nginx.service.j2 new file mode 100644 index 0000000..872e334 --- /dev/null +++ b/ansible/files/nginx.service.j2 @@ -0,0 +1,22 @@ +[Unit] +Description=nginx server +After=postgrest.service gotrue.service adminapi.service +Wants=postgrest.service gotrue.service adminapi.service + +[Service] +Type=forking +ExecStart=/usr/local/nginx/sbin/nginx -c /etc/nginx/nginx.conf +ExecReload=/usr/local/nginx/sbin/nginx -s reload -c /etc/nginx/nginx.conf +ExecStop=/usr/local/nginx/sbin/nginx -s quit +User=nginx +Slice=services.slice +Restart=always 
+RestartSec=3 +LimitNOFILE=100000 + +# Via systemd we grant the process a set of privileges to bind to 80/443 +# See http://archive.vn/36zJU +AmbientCapabilities=CAP_NET_BIND_SERVICE + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/node_exporter.service.j2 b/ansible/files/node_exporter.service.j2 deleted file mode 100644 index 4af7195..0000000 --- a/ansible/files/node_exporter.service.j2 +++ /dev/null @@ -1,16 +0,0 @@ -[Unit] -Description=Node Exporter -After=network-online.target - -[Service] -Type=simple -ExecStart=/opt/node_exporter/node_exporter --web.disable-exporter-metrics --collector.disable-defaults {% for collector in collectors %} --collector.{{ collector }} {% endfor %} - -User=root -StandardOutput=file:/var/log/node_exporter.stdout -StandardError=file:/var/log/node_exporter.error -Restart=on-failure -RestartSec=3 - -[Install] -WantedBy=multi-user.target diff --git a/ansible/files/pg_egress_collect.service.j2 b/ansible/files/pg_egress_collect.service.j2 new file mode 100644 index 0000000..377b6e3 --- /dev/null +++ b/ansible/files/pg_egress_collect.service.j2 @@ -0,0 +1,13 @@ +[Unit] +Description=Postgres Egress Collector + +[Service] +Type=simple +ExecStart=/bin/bash -c "tcpdump -s 128 -Q out -i any -nn -tt -vv -p -l 'tcp and (port 5432 or port 6543)' | perl /root/pg_egress_collect.pl" +User=root +Slice=services.slice +Restart=always +RestartSec=3 + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/pgbouncer_config/pgbouncer.ini.j2 b/ansible/files/pgbouncer_config/pgbouncer.ini.j2 index c0c5dd0..1d83b25 100644 --- a/ansible/files/pgbouncer_config/pgbouncer.ini.j2 +++ b/ansible/files/pgbouncer_config/pgbouncer.ini.j2 @@ -51,7 +51,7 @@ pidfile = /var/run/pgbouncer/pgbouncer.pid ;;; ;; IP address or * which means all IPs -listen_addr = * +listen_addr = 0.0.0.0 listen_port = 6543 ;; Unix socket is also used for -R. 
@@ -113,7 +113,7 @@ unix_socket_dir = /tmp ;;; ;; any, trust, plain, md5, cert, hba, pam -auth_type = md5 +auth_type = scram-sha-256 auth_file = /etc/pgbouncer/userlist.txt ;; Path to HBA-style auth config @@ -358,3 +358,7 @@ default_pool_size = 15 ;; Read additional config from other file ;%include /etc/pgbouncer/pgbouncer-other.ini + +%include /etc/pgbouncer-custom/generated-optimizations.ini +%include /etc/pgbouncer-custom/custom-overrides.ini +%include /etc/pgbouncer-custom/ssl-config.ini diff --git a/ansible/files/pgbouncer_config/pgbouncer.service.j2 b/ansible/files/pgbouncer_config/pgbouncer.service.j2 index 96273cb..5a0447b 100644 --- a/ansible/files/pgbouncer_config/pgbouncer.service.j2 +++ b/ansible/files/pgbouncer_config/pgbouncer.service.j2 @@ -1,40 +1,20 @@ -# Example systemd service unit for PgBouncer -# -# - Adjust the paths in ExecStart for your installation. -# -# - The User setting requires careful consideration. PgBouncer needs -# to be able to place a Unix-domain socket file where PostgreSQL -# clients will look for it. In the olden days, this was in /tmp, -# but systems using systemd now prefer something like -# /var/run/postgresql/. But then some systems also lock down that -# directory so that only the postgres user can write to it. That -# means you need to either -# -# - run PgBouncer as the postgres user, or -# -# - create a separate user and add it to the postgres group and -# make /var/run/postgresql/ group-writable, or -# -# - use systemd to create the sockets; see pgbouncer.socket nearby. -# -# For packagers and deployment systems, this requires some -# coordination between the PgBouncer and the PostgreSQL -# packages/components. 
-# [Unit] Description=connection pooler for PostgreSQL Documentation=man:pgbouncer(1) Documentation=https://www.pgbouncer.org/ After=network.target -#Requires=pgbouncer.socket +{% if tealbase_internal is defined %} +Requires=database-optimizations.service +After=database-optimizations.service +{% endif %} [Service] Type=notify -User=postgres +User=pgbouncer ExecStart=/usr/local/bin/pgbouncer /etc/pgbouncer/pgbouncer.ini ExecReload=/bin/kill -HUP $MAINPID KillSignal=SIGINT -#LimitNOFILE=1024 +LimitNOFILE=65536 [Install] -WantedBy=multi-user.target \ No newline at end of file +WantedBy=multi-user.target diff --git a/ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql b/ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql index bc1342f..c10ce44 100644 --- a/ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql +++ b/ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql @@ -1,5 +1,7 @@ CREATE USER pgbouncer; +REVOKE ALL PRIVILEGES ON SCHEMA public FROM pgbouncer; + CREATE SCHEMA pgbouncer AUTHORIZATION pgbouncer; CREATE OR REPLACE FUNCTION pgbouncer.get_auth(p_usename TEXT) diff --git a/ansible/files/pgbouncer_config/tmpfiles.d-pgbouncer.conf.j2 b/ansible/files/pgbouncer_config/tmpfiles.d-pgbouncer.conf.j2 index 3889ed2..d5d2cd4 100644 --- a/ansible/files/pgbouncer_config/tmpfiles.d-pgbouncer.conf.j2 +++ b/ansible/files/pgbouncer_config/tmpfiles.d-pgbouncer.conf.j2 @@ -1,2 +1,2 @@ # Directory for PostgreSQL sockets, lockfiles and stats tempfiles -d /run/pgbouncer 2775 postgres postgres - - \ No newline at end of file +d /run/pgbouncer 2775 pgbouncer postgres - - \ No newline at end of file diff --git a/ansible/files/pgsodium_getkey_readonly.sh.j2 b/ansible/files/pgsodium_getkey_readonly.sh.j2 new file mode 100644 index 0000000..e0a7273 --- /dev/null +++ b/ansible/files/pgsodium_getkey_readonly.sh.j2 @@ -0,0 +1,14 @@ +#!/bin/bash + +set -euo pipefail + +KEY_FILE=/etc/postgresql-custom/pgsodium_root.key + +# On the hosted platform, the root key is 
generated and managed for each project +# If for some reason the key is missing, we want to fail loudly, +# rather than generating a new one. +if [[ ! -f "${KEY_FILE}" ]]; then + echo "Key file ${KEY_FILE} does not exist." >&2 + exit 1 +fi +cat $KEY_FILE diff --git a/ansible/files/pgsodium_getkey_urandom.sh.j2 b/ansible/files/pgsodium_getkey_urandom.sh.j2 new file mode 100755 index 0000000..e8039d0 --- /dev/null +++ b/ansible/files/pgsodium_getkey_urandom.sh.j2 @@ -0,0 +1,10 @@ +#!/bin/bash + +set -euo pipefail + +KEY_FILE=/etc/postgresql-custom/pgsodium_root.key + +if [[ ! -f "${KEY_FILE}" ]]; then + head -c 32 /dev/urandom | od -A n -t x1 | tr -d ' \n' > "${KEY_FILE}" +fi +cat $KEY_FILE diff --git a/ansible/files/postgres_exporter.service.j2 b/ansible/files/postgres_exporter.service.j2 index 65d2120..7ddb5be 100644 --- a/ansible/files/postgres_exporter.service.j2 +++ b/ansible/files/postgres_exporter.service.j2 @@ -3,14 +3,13 @@ Description=Postgres Exporter [Service] Type=simple -ExecStart=/opt/postgres_exporter/postgres_exporter --auto-discover-databases --disable-settings-metrics --extend.query-path="/opt/postgres_exporter/queries.yml" --disable-default-metrics +ExecStart=/opt/postgres_exporter/postgres_exporter --disable-settings-metrics --extend.query-path="/opt/postgres_exporter/queries.yml" --disable-default-metrics User=root -StandardOutput=file:/var/log/postgres_exporter.stdout -StandardError=file:/var/log/postgres_exporter.error +StandardOutput=append:/var/log/postgres_exporter.stdout +StandardError=append:/var/log/postgres_exporter.error Restart=always RestartSec=3 -Environment="DATA_SOURCE_URI=localhost/postgres?sslmode=disable" -Environment="DATA_SOURCE_USER=tealbase_admin" +Environment="DATA_SOURCE_NAME=host=localhost dbname=postgres sslmode=disable user=tealbase_admin pg_stat_statements.track=none application_name=postgres_exporter" [Install] WantedBy=multi-user.target diff --git a/ansible/files/postgresql_config/custom_walg.conf.j2 
b/ansible/files/postgresql_config/custom_walg.conf.j2 new file mode 100644 index 0000000..7c9c1bb --- /dev/null +++ b/ansible/files/postgresql_config/custom_walg.conf.j2 @@ -0,0 +1,17 @@ +# - Archiving - + +#archive_mode = on +#archive_command = '/usr/bin/admin-mgr wal-push %p >> /var/log/wal-g/wal-push.log 2>&1' +#archive_timeout = 120 + + +# - Archive Recovery - + +#restore_command = '/usr/bin/admin-mgr wal-fetch %f %p >> /var/log/wal-g/wal-fetch.log 2>&1' + +# - Recovery Target - + +#recovery_target_time = '' +#recovery_target_action = 'promote' +#recovery_target_timeline = 'current' +#recovery_target_inclusive = off diff --git a/ansible/files/postgresql_config/pg_hba.conf.j2 b/ansible/files/postgresql_config/pg_hba.conf.j2 index 1e4c866..ebb1767 100755 --- a/ansible/files/postgresql_config/pg_hba.conf.j2 +++ b/ansible/files/postgresql_config/pg_hba.conf.j2 @@ -78,22 +78,14 @@ # TYPE DATABASE USER ADDRESS METHOD -# Default: -# "local" is for Unix domain socket connections only -local all all peer -# IPv4 local connections: +# trust local connections +local all tealbase_admin scram-sha-256 +local all all peer map=tealbase_map host all all 127.0.0.1/32 trust -# IPv6 local connections: -host all all ::1/128 md5 -# Local root Unix user, passwordless access -local all postgres peer map=root_as_postgres -# IPv4 external connections -host all all 0.0.0.0/0 md5 - -# MD5 hashed password hosts - -# Password hosts +host all all ::1/128 trust -# Trusted hosts - -# User custom \ No newline at end of file +# IPv4 external connections +host all all 10.0.0.0/8 scram-sha-256 +host all all 172.16.0.0/12 scram-sha-256 +host all all 192.168.0.0/16 scram-sha-256 +host all all 0.0.0.0/0 scram-sha-256 diff --git a/ansible/files/postgresql_config/pg_ident.conf.j2 b/ansible/files/postgresql_config/pg_ident.conf.j2 index a5c8de7..1430bc8 100755 --- a/ansible/files/postgresql_config/pg_ident.conf.j2 +++ b/ansible/files/postgresql_config/pg_ident.conf.j2 @@ -40,5 +40,11 @@ # 
---------------------------------- # MAPNAME SYSTEM-USERNAME PG-USERNAME -# root is allowed to login as postgres -root_as_postgres postgres postgres +tealbase_map postgres postgres +tealbase_map root postgres +tealbase_map ubuntu postgres + +# tealbase-specific users +tealbase_map gotrue tealbase_auth_admin +tealbase_map postgrest authenticator +tealbase_map adminapi postgres diff --git a/ansible/files/postgresql_config/postgresql-csvlog.conf b/ansible/files/postgresql_config/postgresql-csvlog.conf new file mode 100644 index 0000000..b8d64da --- /dev/null +++ b/ansible/files/postgresql_config/postgresql-csvlog.conf @@ -0,0 +1,33 @@ +# - Where to Log - + +log_destination = 'csvlog' # Valid values are combinations of + # stderr, csvlog, syslog, and eventlog, + # depending on platform. csvlog + # requires logging_collector to be on. + +# This is used when logging to stderr: +logging_collector = on # Enable capturing of stderr and csvlog + # into log files. Required to be on for + # csvlogs. + # (change requires restart) + +# These are only used if logging_collector is on: +log_directory = '/var/log/postgresql' # directory where log files are written, + # can be absolute or relative to PGDATA +log_filename = 'postgresql.log' # log file name pattern, + # can include strftime() escapes +log_file_mode = 0640 # creation mode for log files, + # begin with 0 to use octal notation +log_rotation_age = 0 # Automatic rotation of logfiles will + # happen after that time. 0 disables. +log_rotation_size = 0 # Automatic rotation of logfiles will + # happen after that much log output. + # 0 disables. +#log_truncate_on_rotation = off # If on, an existing log file with the + # same name as the new log file will be + # truncated rather than appended to. + # But such truncation only occurs on + # time-driven rotation, not on restarts + # or size-driven rotation. Default is + # off, meaning append to existing files + # in all cases. 
diff --git a/ansible/files/postgresql_config/postgresql-stdout-log.conf b/ansible/files/postgresql_config/postgresql-stdout-log.conf new file mode 100644 index 0000000..6ae4ff4 --- /dev/null +++ b/ansible/files/postgresql_config/postgresql-stdout-log.conf @@ -0,0 +1,4 @@ +logging_collector = off # Enable capturing of stderr and csvlog + # into log files. Required to be on for + # csvlogs. + # (change requires restart) diff --git a/ansible/files/postgresql_config/postgresql.conf.j2 b/ansible/files/postgresql_config/postgresql.conf.j2 index 8b4fec8..b46068a 100644 --- a/ansible/files/postgresql_config/postgresql.conf.j2 +++ b/ansible/files/postgresql_config/postgresql.conf.j2 @@ -24,7 +24,8 @@ # "postgres -c log_connections=on". Some parameters can be changed at run time # with the "SET" SQL command. # -# Memory units: kB = kilobytes Time units: ms = milliseconds +# Memory units: B = bytes Time units: us = microseconds +# kB = kilobytes ms = milliseconds # MB = megabytes s = seconds # GB = gigabytes min = minutes # TB = terabytes h = hours @@ -92,7 +93,7 @@ listen_addresses = '*' # what IP address(es) to listen on; # - Authentication - authentication_timeout = 1min # 1s-600s -password_encryption = md5 # scram-sha-256 or md5 +password_encryption = scram-sha-256 # scram-sha-256 or md5 db_user_namespace = off # GSSAPI using Kerberos @@ -105,6 +106,7 @@ ssl = off ssl_ca_file = '' ssl_cert_file = '' ssl_crl_file = '' +ssl_crl_dir = '' ssl_key_file = '' ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers ssl_prefer_server_ciphers = on @@ -124,28 +126,28 @@ ssl_passphrase_command_supports_reload = off shared_buffers = 128MB # min 128kB # (change requires restart) -# huge_pages = try # on, off, or try +#huge_pages = try # on, off, or try # (change requires restart) -# huge_page_size = 0 # zero for system default +#huge_page_size = 0 # zero for system default # (change requires restart) -# temp_buffers = 8MB # min 800kB -# max_prepared_transactions = 0 # zero 
disables the feature +#temp_buffers = 8MB # min 800kB +#max_prepared_transactions = 0 # zero disables the feature # (change requires restart) # Caution: it is not advisable to set max_prepared_transactions nonzero unless # you actively intend to use prepared transactions. -# work_mem = 4MB # min 64kB -# hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem -# maintenance_work_mem = 64MB # min 1MB -# autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem -# logical_decoding_work_mem = 64MB # min 64kB -# max_stack_depth = 2MB # min 100kB -# shared_memory_type = mmap # the default is the first option +#work_mem = 4MB # min 64kB +#hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem +#maintenance_work_mem = 64MB # min 1MB +#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem +#logical_decoding_work_mem = 64MB # min 64kB +#max_stack_depth = 2MB # min 100kB +#shared_memory_type = mmap # the default is the first option # supported by the operating system: # mmap # sysv # windows # (change requires restart) -# dynamic_shared_memory_type = posix # the default is the first option +#dynamic_shared_memory_type = posix # the default is the first option # supported by the operating system: # posix # sysv @@ -298,7 +300,7 @@ max_wal_senders = 10 # max number of walsender processes max_replication_slots = 5 # max number of replication slots # (change requires restart) #wal_keep_size = 0 # in megabytes; 0 disables -#max_slot_wal_keep_size = -1 # in megabytes; -1 disables +max_slot_wal_keep_size = 1024 # in megabytes; -1 disables #wal_sender_timeout = 60s # in milliseconds; 0 disables #track_commit_timestamp = off # collect timestamp of transaction commit # (change requires restart) @@ -426,39 +428,7 @@ effective_cache_size = 128MB # REPORTING AND LOGGING #------------------------------------------------------------------------------ -# - Where to Log - - -log_destination = 'csvlog' # Valid values are combinations 
of - # stderr, csvlog, syslog, and eventlog, - # depending on platform. csvlog - # requires logging_collector to be on. - -# This is used when logging to stderr: -logging_collector = on # Enable capturing of stderr and csvlog - # into log files. Required to be on for - # csvlogs. - # (change requires restart) - -# These are only used if logging_collector is on: -log_directory = 'pg_log' # directory where log files are written, - # can be absolute or relative to PGDATA -log_filename = 'postgresql.log' # log file name pattern, - # can include strftime() escapes -# log_file_mode = 0600 # creation mode for log files, - # begin with 0 to use octal notation -log_rotation_age = 0 # Automatic rotation of logfiles will - # happen after that time. 0 disables. -log_rotation_size = 0 # Automatic rotation of logfiles will - # happen after that much log output. - # 0 disables. -#log_truncate_on_rotation = off # If on, an existing log file with the - # same name as the new log file will be - # truncated rather than appended to. - # But such truncation only occurs on - # time-driven rotation, not on restarts - # or size-driven rotation. Default is - # off, meaning append to existing files - # in all cases. 
+include = '/etc/postgresql/logging.conf' # These are relevant when logging to syslog: #syslog_facility = 'LOCAL0' @@ -570,7 +540,7 @@ log_line_prefix = '%h %m [%p] %q%u@%d ' # special values: #log_parameter_max_length_on_error = 0 # when logging an error, limit logged # bind-parameter values to N bytes; # -1 means print in full, 0 disables -#log_statement = 'none' # none, ddl, mod, all +log_statement = 'none' # none, ddl, mod, all #log_replication_commands = off #log_temp_files = -1 # log temporary files equal or larger # than the specified size in kilobytes; @@ -717,7 +687,8 @@ default_text_search_config = 'pg_catalog.english' #local_preload_libraries = '' #session_preload_libraries = '' -shared_preload_libraries = 'pg_stat_statements, pgaudit, plpgsql, plpgsql_check, pg_cron' # (change requires restart) + +shared_preload_libraries = 'pg_stat_statements, pg_stat_monitor, pgaudit, plpgsql, plpgsql_check, pg_cron, pg_net, pgsodium, timescaledb, auto_explain' # (change requires restart) jit_provider = 'llvmjit' # JIT library to use # - Other Defaults - @@ -765,8 +736,6 @@ jit_provider = 'llvmjit' # JIT library to use #exit_on_error = off # terminate session on any error? #restart_after_crash = on # reinitialize after backend crash? -#remove_temp_files_after_crash = on # remove temporary files after - # backend crash? #data_sync_retry = off # retry or panic on failure to fsync # data? # (change requires restart) @@ -786,9 +755,19 @@ jit_provider = 'llvmjit' # JIT library to use #include_if_exists = '...' # include file only if it exists #include = '...' 
# include file +# Automatically generated optimizations +#include = '/etc/postgresql-custom/generated-optimizations.conf' +# User-supplied custom parameters, override any automatically generated ones +#include = '/etc/postgresql-custom/custom-overrides.conf' + +# WAL-G specific configurations +#include = '/etc/postgresql-custom/wal-g.conf' + +# supautils specific configurations +#include = '/etc/postgresql-custom/supautils.conf' #------------------------------------------------------------------------------ # CUSTOMIZED OPTIONS #------------------------------------------------------------------------------ -# Add settings for extensions here \ No newline at end of file +# Add settings for extensions here diff --git a/ansible/files/postgresql_config/postgresql.service.j2 b/ansible/files/postgresql_config/postgresql.service.j2 index 1a3544b..0be175b 100644 --- a/ansible/files/postgresql_config/postgresql.service.j2 +++ b/ansible/files/postgresql_config/postgresql.service.j2 @@ -1,6 +1,10 @@ [Unit] Description=PostgreSQL database server Documentation=man:postgres(1) +{% if tealbase_internal is defined %} +Requires=database-optimizations.service +After=database-optimizations.service +{% endif %} [Service] Type=notify @@ -9,7 +13,10 @@ ExecStart=/usr/lib/postgresql/bin/postgres -D /etc/postgresql ExecReload=/bin/kill -HUP $MAINPID KillMode=mixed KillSignal=SIGINT -TimeoutSec=0 +TimeoutStopSec=90 +Restart=always +RestartSec=5 +OOMScoreAdjust=-1000 [Install] -WantedBy=multi-user.target \ No newline at end of file +WantedBy=multi-user.target diff --git a/ansible/files/postgresql_config/supautils.conf.j2 b/ansible/files/postgresql_config/supautils.conf.j2 new file mode 100644 index 0000000..c6ef185 --- /dev/null +++ b/ansible/files/postgresql_config/supautils.conf.j2 @@ -0,0 +1,12 @@ +supautils.placeholders = 'response.headers' +supautils.placeholders_disallowed_values = '"content-type"' +# full list: address_standardizer, address_standardizer_data_us, adminpack, amcheck, 
autoinc, bloom, btree_gin, btree_gist, citext, cube, dblink, dict_int, dict_xsyn, earthdistance, file_fdw, fuzzystrmatch, hstore, http, insert_username, intagg, intarray, isn, lo, ltree, moddatetime, old_snapshot, pageinspect, pg_buffercache, pg_cron, pg_freespacemap, pg_graphql, pg_hashids, pg_jsonschema, pg_net, pg_prewarm, pg_stat_monitor, pg_stat_statements, pg_surgery, pg_trgm, pg_visibility, pg_walinspect, pgaudit, pgcrypto, pgjwt, pgroonga, pgroonga_database, pgrouting, pgrowlocks, pgsodium, pgstattuple, pgtap, vector, plcoffee, pljava, plls, plpgsql, plpgsql_check, plv8, postgis, postgis_raster, postgis_sfcgal, postgis_tiger_geocoder, postgis_topology, postgres_fdw, refint, rum, seg, sslinfo, tealbase_vault, supautils, tablefunc, tcn, timescaledb, tsm_system_rows, tsm_system_time, unaccent, uuid-ossp, wrappers, xml2 +# omitted because may be unsafe: adminpack, amcheck, file_fdw, lo, old_snapshot, pageinspect, pg_buffercache, pg_freespacemap, pg_prewarm, pg_surgery, pg_visibility, pgstattuple +# omitted because deprecated: intagg, xml2 +supautils.privileged_extensions = 'address_standardizer, address_standardizer_data_us, autoinc, bloom, btree_gin, btree_gist, citext, cube, dblink, dict_int, dict_xsyn, earthdistance, fuzzystrmatch, hstore, http, insert_username, intarray, isn, ltree, moddatetime, pg_cron, pg_graphql, pg_hashids, pg_jsonschema, pg_net, pg_stat_monitor, pg_stat_statements, pg_trgm, pg_walinspect, pgaudit, pgcrypto, pgjwt, pgroonga, pgroonga_database, pgrouting, pgrowlocks, pgsodium, pgtap, vector, plcoffee, pljava, plls, plpgsql, plpgsql_check, plv8, postgis, postgis_raster, postgis_sfcgal, postgis_tiger_geocoder, postgis_topology, postgres_fdw, refint, rum, seg, sslinfo, tealbase_vault, supautils, tablefunc, tcn, timescaledb, tsm_system_rows, tsm_system_time, unaccent, uuid-ossp, wrappers' +supautils.privileged_extensions_custom_scripts_path = '/etc/postgresql-custom/extension-custom-scripts' +supautils.privileged_extensions_superuser = 
'tealbase_admin' +supautils.privileged_role = 'postgres' +supautils.privileged_role_allowed_configs = 'pgaudit.log, pgaudit.log_catalog, pgaudit.log_client, pgaudit.log_level, pgaudit.log_relation, pgaudit.log_rows, pgaudit.log_statement, pgaudit.log_statement_once, pgaudit.role, session_replication_role, track_io_timing' +supautils.reserved_memberships = 'pg_read_server_files, pg_write_server_files, pg_execute_server_program, authenticator' +supautils.reserved_roles = 'tealbase_admin, tealbase_auth_admin, tealbase_storage_admin, tealbase_replication_admin, dashboard_user, pgbouncer, service_role, authenticator, authenticated, anon' diff --git a/ansible/files/postgresql_extension_custom_scripts/pgsodium/after-create.sql b/ansible/files/postgresql_extension_custom_scripts/pgsodium/after-create.sql new file mode 100644 index 0000000..907c67e --- /dev/null +++ b/ansible/files/postgresql_extension_custom_scripts/pgsodium/after-create.sql @@ -0,0 +1,3 @@ +grant execute on function pgsodium.crypto_aead_det_decrypt(bytea, bytea, uuid, bytea) to service_role; +grant execute on function pgsodium.crypto_aead_det_encrypt(bytea, bytea, uuid, bytea) to service_role; +grant execute on function pgsodium.crypto_aead_det_keygen to service_role; diff --git a/ansible/files/postgrest-optimizations.service.j2 b/ansible/files/postgrest-optimizations.service.j2 new file mode 100644 index 0000000..c671e0d --- /dev/null +++ b/ansible/files/postgrest-optimizations.service.j2 @@ -0,0 +1,11 @@ +[Unit] +Description=Postgrest optimizations + +[Service] +Type=oneshot +# we don't want failures from this command to cause PG startup to fail +ExecStart=/bin/bash -c "/opt/tealbase-admin-api optimize postgrest --destination-config-file-path /etc/postgrest/generated.conf ; exit 0" +User=postgrest + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/postgrest.service.j2 b/ansible/files/postgrest.service.j2 index 8018a03..290f077 100644 --- a/ansible/files/postgrest.service.j2 +++ 
b/ansible/files/postgrest.service.j2 @@ -1,15 +1,18 @@ [Unit] Description=PostgREST +Requires=postgrest-optimizations.service +After=postgrest-optimizations.service [Service] Type=simple -ExecStart=/opt/postgrest /etc/postgrest.conf +# We allow the base config (sent from the worker) to override the generated config +ExecStartPre=/etc/postgrest/merge.sh /etc/postgrest/generated.conf /etc/postgrest/base.conf +ExecStart=/opt/postgrest /etc/postgrest/merged.conf User=postgrest -StandardOutput=file:/var/log/postgrest.stdout -StandardError=file:/var/log/postgrest.error Slice=services.slice Restart=always RestartSec=3 +LimitNOFILE=100000 [Install] WantedBy=multi-user.target diff --git a/ansible/files/queries.yml.j2 b/ansible/files/queries.yml.j2 deleted file mode 100644 index 0de4ca2..0000000 --- a/ansible/files/queries.yml.j2 +++ /dev/null @@ -1,194 +0,0 @@ -pg_database: - query: "SELECT SUM(pg_database_size(pg_database.datname)) / (1024 * 1024) as size_mb FROM pg_database" - master: true - cache_seconds: 30 - metrics: - - size_mb: - usage: "GAUGE" - description: "Disk space used by the database" - -pg_stat_bgwriter: - query: | - select checkpoints_timed as checkpoints_timed_total, - checkpoints_req as checkpoints_req_total, - checkpoint_write_time as checkpoint_write_time_total, - checkpoint_sync_time as checkpoint_sync_time_total, - buffers_checkpoint as buffers_checkpoint_total, - buffers_clean as buffers_clean_total, - maxwritten_clean as maxwritten_clean_total, - buffers_backend as buffers_backend_total, - buffers_backend_fsync as buffers_backend_fsync_total, - buffers_alloc as buffers_alloc_total, - stats_reset - from pg_stat_bgwriter - cache_seconds: 30 - master: true - metrics: - - checkpoints_timed_total: - usage: "COUNTER" - description: "Scheduled checkpoints performed" - - checkpoints_req_total: - usage: "COUNTER" - description: "Requested checkpoints performed" - - checkpoint_write_time_total: - usage: "COUNTER" - description: "Time spent writing checkpoint 
files to disk" - - checkpoint_sync_time_total: - usage: "COUNTER" - description: "Time spent synchronizing checkpoint files to disk" - - buffers_checkpoint_total: - usage: "COUNTER" - description: "Buffers written during checkpoints" - - buffers_clean_total: - usage: "COUNTER" - description: "Buffers written by bg writter" - - maxwritten_clean_total: - usage: "COUNTER" - description: "Number of times bg writer stopped a cleaning scan because it had written too many buffers" - - buffers_backend_total: - usage: "COUNTER" - description: "Buffers written directly by a backend" - - buffers_backend_fsync_total: - usage: "COUNTER" - description: "fsync calls executed by a backend directly" - - buffers_alloc_total: - usage: "COUNTER" - description: "Buffers allocated" - - stats_reset: - usage: "COUNTER" - description: "Most recent stat reset time" - - -pg_stat_database: - cache_seconds: 30 - query: | - SELECT sum(numbackends) as num_backends, - sum(xact_commit) as xact_commit_total, - sum(xact_rollback) as xact_rollback_total, - sum(blks_read) as blks_read_total, - sum(blks_hit) as blks_hit_total, - sum(tup_returned) as tup_returned_total, - sum(tup_fetched) as tup_fetched_total, - sum(tup_inserted) as tup_inserted_total, - sum(tup_updated) as tup_updated_total, - sum(tup_deleted) as tup_deleted_total, - sum(conflicts) as conflicts_total, - sum(temp_files) as temp_files_total, - sum(temp_bytes) as temp_bytes_total, - sum(deadlocks) as deadlocks_total, - max(stats_reset) as most_recent_reset - FROM pg_stat_database - master: true - metrics: - - num_backends: - usage: "GAUGE" - description: "The number of active backends" - - xact_commit_total: - usage: "COUNTER" - description: "Transactions committed" - - xact_rollback_total: - usage: "COUNTER" - description: "Transactions rolled back" - - blks_read_total: - usage: "COUNTER" - description: "Number of disk blocks read" - - blks_hit_total: - usage: "COUNTER" - description: "Disk blocks found in buffer cache" - - 
tup_returned_total: - usage: "COUNTER" - description: "Rows returned by queries" - - tup_fetched_total: - usage: "COUNTER" - description: "Rows fetched by queries" - - tup_inserted_total: - usage: "COUNTER" - description: "Rows inserted" - - tup_updated_total: - usage: "COUNTER" - description: "Rows updated" - - tup_deleted_total: - usage: "COUNTER" - description: "Rows deleted" - - conflicts_total: - usage: "COUNTER" - description: "Queries canceled due to conflicts with recovery" - - temp_files_total: - usage: "COUNTER" - description: "Temp files created by queries" - - temp_bytes_total: - usage: "COUNTER" - description: "Temp data written by queries" - - deadlocks_total: - usage: "COUNTER" - description: "Deadlocks detected" - - most_recent_reset: - usage: "COUNTER" - description: "The most recent time one of the databases had its statistics reset" - -pg_stat_database_conflicts: - query: | - SELECT sum(confl_tablespace) as confl_tablespace_total, - sum(confl_lock) as confl_lock_total, - sum(confl_snapshot) as confl_snapshot_total, - sum(confl_bufferpin) as confl_bufferpin_total, - sum(confl_deadlock) as confl_deadlock_total - from pg_stat_database_conflicts - cache_seconds: 30 - master: true - metrics: - - confl_tablespace_total: - usage: "COUNTER" - description: "Queries cancelled due to dropped tablespaces" - - confl_lock_total: - usage: "COUNTER" - description: "Queries cancelled due to lock timeouts" - - confl_snapshot_total: - usage: "COUNTER" - description: "Queries cancelled due to old snapshots" - - confl_bufferpin_total: - usage: "COUNTER" - description: "Queries cancelled due to pinned buffers" - - confl_deadlock_total: - usage: "COUNTER" - description: "Queries cancelled due to deadlocks" - -pg_stat_statements: - query: "SELECT sum(calls) as total_queries, sum(total_exec_time / 1000) as total_time_seconds FROM extensions.pg_stat_statements t1 JOIN pg_database t3 ON (t1.dbid=t3.oid)" - master: true - metrics: - - total_queries: - usage: "COUNTER" - 
description: "Number of times executed" - - total_time_seconds: - usage: "COUNTER" - description: "Total time spent, in seconds" - -auth_users: - query: "select count(id) as user_count from auth.users" - master: true - cache_seconds: 30 - metrics: - - user_count: - usage: "GAUGE" - description: "Number of users in the project db" - -replication: - query: "SELECT pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn) AS realtime_lag_bytes, active AS realtime_slot_status FROM pg_replication_slots where slot_name = 'realtime'" - master: true - metrics: - - realtime_lag_bytes: - usage: "GAUGE" - description: "Replication Lag for Realtime" - - realtime_slot_status: - usage: "GAUGE" - description: "Replication Slot active status" - -storage: - query: "select sum(size) / (1024 * 1024) as storage_size_mb from storage.get_size_by_bucket()" - master: true - cache_seconds: 30 - metrics: - - storage_size_mb: - usage: "GAUGE" - description: "The total size used for all storage buckets, in mb" diff --git a/ansible/files/sodium_extension.sql b/ansible/files/sodium_extension.sql new file mode 100644 index 0000000..a19cabf --- /dev/null +++ b/ansible/files/sodium_extension.sql @@ -0,0 +1,6 @@ +create schema if not exists pgsodium; +create extension if not exists pgsodium with schema pgsodium cascade; + +grant pgsodium_keyiduser to postgres with admin option; +grant pgsodium_keyholder to postgres with admin option; +grant pgsodium_keymaker to postgres with admin option; diff --git a/ansible/files/tealbase_facts.ini b/ansible/files/tealbase_facts.ini new file mode 100644 index 0000000..44e01b4 --- /dev/null +++ b/ansible/files/tealbase_facts.ini @@ -0,0 +1,2 @@ +[general] +postgres_version=15 diff --git a/ansible/files/ufw.service.conf b/ansible/files/ufw.service.conf new file mode 100644 index 0000000..83b82ef --- /dev/null +++ b/ansible/files/ufw.service.conf @@ -0,0 +1,4 @@ +[Unit] +After=nftables.service +Requires=nftables.service +PartOf=nftables.service diff --git 
a/ansible/files/vector.service.j2 b/ansible/files/vector.service.j2 new file mode 100644 index 0000000..1c88baa --- /dev/null +++ b/ansible/files/vector.service.j2 @@ -0,0 +1,20 @@ +[Unit] +Description=Vector +Documentation=https://vector.dev +After=network-online.target +Requires=network-online.target + +[Service] +User=vector +Group=vector +ExecStartPre=/usr/bin/vector validate --config-yaml /etc/vector/vector.yaml +ExecStart=/usr/bin/vector --config-yaml /etc/vector/vector.yaml +ExecReload=/usr/bin/vector validate --config-yaml /etc/vector/vector.yaml +ExecReload=/bin/kill -HUP $MAINPID +Restart=always +RestartSec=3 +AmbientCapabilities=CAP_NET_BIND_SERVICE +EnvironmentFile=-/etc/default/vector + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/walg_helper_scripts/wal_change_ownership.sh b/ansible/files/walg_helper_scripts/wal_change_ownership.sh new file mode 100644 index 0000000..3f0112d --- /dev/null +++ b/ansible/files/walg_helper_scripts/wal_change_ownership.sh @@ -0,0 +1,42 @@ +#! /usr/bin/env bash + +set -euo pipefail + +filename=$1 + +if [[ -z "$filename" ]]; then + echo "Nothing supplied. Exiting." + exit 1 +fi + +full_path=/tmp/wal_fetch_dir/$filename + +num_paths=$(readlink -f "$full_path" | wc -l) + +# Checks if supplied filename string contains multiple paths +# For example, "correct/path /var/lib/injected/path /var/lib/etc" +if [[ "$num_paths" -gt 1 ]]; then + echo "Multiple paths supplied. Exiting." + exit 1 +fi + +base_dir=$(readlink -f "$full_path" | cut -d'/' -f2) + +# Checks if directory/ file to be manipulated +# is indeed within the /tmp directory +# For example, "/tmp/../var/lib/postgresql/..." +# will return "var" as the value for $base_dir +if [[ "$base_dir" != "tmp" ]]; then + echo "Attempt to manipulate a file not in /tmp. Exiting." + exit 1 +fi + +# Checks if change of ownership will be applied to a file +# If not, exit +if [[ ! -f $full_path ]]; then + echo "Either file does not exist or is a directory. Exiting." 
+ exit 1 +fi + +# once valid, proceed to change ownership +chown postgres:postgres "$full_path" diff --git a/ansible/files/walg_helper_scripts/wal_fetch.sh b/ansible/files/walg_helper_scripts/wal_fetch.sh new file mode 100644 index 0000000..33448ac --- /dev/null +++ b/ansible/files/walg_helper_scripts/wal_fetch.sh @@ -0,0 +1,12 @@ +#! /usr/bin/env bash + +set -euo pipefail + +# Fetch the WAL file and temporarily store them in /tmp +sudo -u wal-g wal-g wal-fetch "$1" /tmp/wal_fetch_dir/"$1" --config /etc/wal-g/config.json + +# Ensure WAL file is owned by the postgres Linux user +sudo -u root /root/wal_change_ownership.sh "$1" + +# Move file to its final destination +mv /tmp/wal_fetch_dir/"$1" /var/lib/postgresql/data/"$2" diff --git a/ansible/playbook-docker.yml b/ansible/playbook-docker.yml index f4a937d..c5fc60e 100644 --- a/ansible/playbook-docker.yml +++ b/ansible/playbook-docker.yml @@ -1,26 +1,5 @@ -- name: Preparing Docker container - hosts: localhost - tasks: - - name: Pull Postgres Image - docker_container: - name: "tealbase-postgres-build" - image: "postgres:13.3" - env: - LANGUAGE: "en_US.UTF-8" - LANG: "en_US.UTF-8" - LC_ALL: "en_US.UTF-8" - state: started - memory: 4G - memory_swap: 6G - command: tail -f /dev/null - - name: Add Postgres Image to Ansible Hosts - add_host: - name: "tealbase-postgres-build" - ansible_connection: docker - ansible_ssh_user: root - - name: Build tealbase Postgres - hosts: "tealbase-postgres-build" + hosts: localhost gather_facts: false vars_files: @@ -33,20 +12,8 @@ - name: Install Postgres extensions import_tasks: tasks/setup-extensions.yml + - name: Finalize docker + import_tasks: tasks/docker/finalize.yml + - name: Cleanup container import_tasks: tasks/docker/cleanup.yml - -- name: Create tealbase/postgres docker image - hosts: localhost - tasks: - - name: Commit Docker image - command: docker commit --change='CMD ["postgres"]' "tealbase-postgres-build" "tealbase/postgres" - -- name: Clean Up Postgres Image - hosts: 
localhost - tasks: - - name: Remove Running Base Image - docker_container: - name: tealbase-postgres-build - state: absent - force_kill: yes \ No newline at end of file diff --git a/ansible/playbook.yml b/ansible/playbook.yml index 5362de9..77f4cbb 100644 --- a/ansible/playbook.yml +++ b/ansible/playbook.yml @@ -9,41 +9,79 @@ vars: sql_files: + - { + source: "pgbouncer_config/pgbouncer_auth_schema.sql", + dest: "00-schema.sql", + } - { source: "stat_extension.sql", dest: "01-extension.sql" } environment: PATH: /usr/lib/postgresql/bin:{{ ansible_env.PATH }} tasks: + - set_fact: + tealbase_internal: true + tags: + - install-tealbase-internal + + - set_fact: + parallel_jobs: 16 + - name: Install Postgres from source import_tasks: tasks/setup-postgres.yml - name: Install Postgres extensions import_tasks: tasks/setup-extensions.yml - - name: Start Postgres Database - systemd: - name: postgresql - state: started + - name: Install PgBouncer + import_tasks: tasks/setup-pgbouncer.yml + tags: + - install-pgbouncer + - install-tealbase-internal - name: Install WAL-G import_tasks: tasks/setup-wal-g.yml - - name: Install PgBouncer - import_tasks: tasks/setup-pgbouncer.yml + - name: Install Gotrue + import_tasks: tasks/setup-gotrue.yml tags: - - install-pgbouncer + - install-gotrue + - install-tealbase-internal - name: Install PostgREST import_tasks: tasks/setup-postgrest.yml tags: - install-postgrest - + - install-tealbase-internal + + - name: Install Kong + import_tasks: tasks/setup-kong.yml + tags: + - install-tealbase-internal + + - name: Install nginx + import_tasks: tasks/setup-nginx.yml + tags: + - install-tealbase-internal + - name: Install tealbase specific content import_tasks: tasks/setup-tealbase-internal.yml tags: - install-tealbase-internal + - name: Start Postgres Database + systemd: + name: postgresql + state: started + when: not ebssurrogate_mode + + - name: Start Postgres Database without Systemd + become: yes + become_user: postgres + shell: + cmd: 
/usr/bin/pg_ctl -D /var/lib/postgresql/data start + when: ebssurrogate_mode + - name: Adjust APT update intervals copy: src: files/apt_periodic @@ -68,44 +106,15 @@ state: absent loop: "{{ sql_files }}" - - name: UFW - Allow SSH connections - ufw: - rule: allow - name: OpenSSH - - - name: UFW - Allow connections to postgreSQL (5432) - ufw: - rule: allow - port: "5432" - - - name: UFW - Allow connections to postgreSQL (6543) - ufw: - rule: allow - port: "6543" + - name: First boot optimizations + import_tasks: tasks/internal/optimizations.yml tags: - - install-pgbouncer - - - name: UFW - Deny all other incoming traffic by default - ufw: - state: enabled - policy: deny - direction: incoming - - - name: Setup logrotate for postgres logs - copy: - src: files/logrotate-postgres - dest: /etc/logrotate.d/postgres - - - name: Configure logrotation to run every hour - shell: - cmd: mv /etc/cron.daily/logrotate /etc/cron.hourly/ - become: yes + - install-tealbase-internal - - name: restart crond - systemd: - state: restarted - name: cron - become: yes + - name: Finalize AMI + import_tasks: tasks/finalize-ami.yml + tags: + - install-tealbase-internal - name: Enhance fail2ban import_tasks: tasks/setup-fail2ban.yml @@ -119,3 +128,39 @@ - ec2-instance-connect tags: - aws-only + + # Install this at the end to prevent it from kicking in during the apt process, causing conflicts + - name: Install security tools + become: yes + apt: + pkg: + - unattended-upgrades + update_cache: yes + cache_valid_time: 3600 + + # Put PG binaries in a directory under $PATH + - name: Find all files in /usr/lib/postgresql/bin + find: + paths: /usr/lib/postgresql/bin + register: postgresql_bin + + - name: Clean out build dependencies + import_tasks: tasks/clean-build-dependencies.yml + + - name: Create symbolic links for Postgres binaries to /usr/bin/ + become: yes + shell: + cmd: "for fl in /usr/lib/postgresql/bin/* ; do ln -sf $fl /usr/bin/$(basename $fl) ; done" + + - name: Run migrations + 
import_tasks: tasks/setup-migrations.yml + + - name: Stop Postgres Database without Systemd + become: yes + become_user: postgres + shell: + cmd: /usr/bin/pg_ctl -D /var/lib/postgresql/data stop + when: ebssurrogate_mode + + - name: Run unit tests + import_tasks: tasks/test-image.yml diff --git a/ansible/tasks/clean-build-dependencies.yml b/ansible/tasks/clean-build-dependencies.yml new file mode 100644 index 0000000..5bbcc80 --- /dev/null +++ b/ansible/tasks/clean-build-dependencies.yml @@ -0,0 +1,17 @@ +- name: Remove build dependencies + apt: + pkg: + - bison + - build-essential + - clang-11 + - cmake + - cpp + - flex + - g++ + - g++-10 + - g++-9 + - gcc-10 + - ninja-build + - python2 + state: absent + autoremove: yes diff --git a/ansible/tasks/docker/cleanup.yml b/ansible/tasks/docker/cleanup.yml index 2ccc2af..aca7cc8 100644 --- a/ansible/tasks/docker/cleanup.yml +++ b/ansible/tasks/docker/cleanup.yml @@ -1,5 +1,3 @@ - - - name: Cleanup - remove build dependencies apt: pkg: @@ -7,7 +5,7 @@ - rsync - ca-certificates - build-essential - - postgresql-server-dev-13 + - postgresql-server-dev-{{ postgresql_major }} - curl - git-core - gpp @@ -18,50 +16,3 @@ - ninja-build - python state: absent - -- name: Cleanup - apt update and apt upgrade - apt: update_cache=yes upgrade=yes - # SEE http://archive.vn/DKJjs#parameter-upgrade - -- name: Cleanup - remove dependencies that are no longer required - apt: - autoremove: yes - -- name: Cleanup - remove useless packages from the cache - apt: - autoclean: yes - -- name: Cleanup - reinstall headless jdk - apt: - pkg: - - default-jdk-headless - update_cache: yes - install_recommends: no - -- name: Cleanup - find all files in /tmp - find: - paths: /tmp - file_type: any - register: tmp_items_to_delete - -- name: Cleanup - delete all items in /tmp - file: - path: "/tmp/{{ item.path | basename }}" - state: absent - force: yes - with_items: "{{ tmp_items_to_delete.files }}" - -- name: Cleanup - find all files in /var/lib/apt/lists/* 
- find: - paths: /var/lib/apt/lists - file_type: any - register: var_items_to_delete - -- name: Cleanup - delete all items in /tmp - file: - path: "/var/lib/apt/lists/{{ item.path | basename }}" - state: absent - force: yes - with_items: "{{ var_items_to_delete.files }}" - - \ No newline at end of file diff --git a/ansible/tasks/docker/finalize.yml b/ansible/tasks/docker/finalize.yml new file mode 100644 index 0000000..017a1d7 --- /dev/null +++ b/ansible/tasks/docker/finalize.yml @@ -0,0 +1,24 @@ +- name: PG logging conf + template: + src: files/postgresql_config/postgresql-stdout-log.conf + dest: /etc/postgresql/logging.conf + group: postgres + +- name: import pgsodium_getkey_urandom.sh + template: + src: files/pgsodium_getkey_urandom.sh.j2 + dest: "{{ pg_bindir }}/pgsodium_getkey.sh" + owner: postgres + group: postgres + mode: 0700 + +- name: allow pgsodium key generation + become: yes + file: + path: '{{ item }}' + recurse: yes + owner: postgres + group: postgres + mode: '0774' + with_items: + - /etc/postgresql-custom diff --git a/ansible/tasks/docker/setup.yml b/ansible/tasks/docker/setup.yml index 70a54d2..9d0062e 100644 --- a/ansible/tasks/docker/setup.yml +++ b/ansible/tasks/docker/setup.yml @@ -1,5 +1,6 @@ - name: Install Python3 - raw: apt update && apt upgrade -y && apt install python3 -y + raw: export DEBIAN_FRONTEND=noninteractive ; sh -c "apt update && apt upgrade -y && apt install python3 -y" + timeout: 300 - name: Setup - install common dependencies apt: @@ -7,7 +8,7 @@ - rsync - ca-certificates - build-essential - - postgresql-server-dev-13 + - postgresql-server-dev-{{ postgresql_major }} - curl - git-core - gpp @@ -17,6 +18,9 @@ - cmake - ninja-build - python + - systemtap-sdt-dev + - ccache + - clang update_cache: yes install_recommends: no @@ -27,10 +31,26 @@ - set_fact: platform: "{{ platform_output.stdout }}" +- name: determining number of processors + shell: nproc + register: nproc_output +- set_fact: + parallel_jobs: "{{ nproc_output.stdout 
}}" + - name: Setup - import postgresql.conf - synchronize: + template: src: files/postgresql_config/postgresql.conf.j2 - dest: etc/postgresql/postgresql.conf + dest: /etc/postgresql/postgresql.conf + +- name: Setup - import postgresql.conf + synchronize: + src: files/postgresql_config/pg_hba.conf.j2 + dest: /etc/postgresql/pg_hba.conf + +- name: Setup - import postgresql.conf + synchronize: + src: files/postgresql_config/pg_ident.conf.j2 + dest: /etc/postgresql/pg_ident.conf - set_fact: regex_string: "#unix_socket_directories = '/tmp'" @@ -48,22 +68,3 @@ path: /etc/postgresql/postgresql.conf regexp: '{{ regex_string }}' replace: unix_socket_directories = '/var/run/postgresql' - -- name: Setup - modify hba_file directory - become: yes - replace: - path: /etc/postgresql/postgresql.conf - regexp: hba_file = '/etc/postgresql/pg_hba.conf' - replace: hba_file = '/var/lib/postgresql/data/pg_hba.conf' - -- name: Setup - modify ident_file directory - become: yes - replace: - path: /etc/postgresql/postgresql.conf - regexp: ident_file = '/etc/postgresql/pg_ident.conf' - replace: ident_file = '/var/lib/postgresql/data/pg_ident.conf' - -- name: Setup - add init script to /docker-entrypoint-initdb.d - synchronize: - src: files/docker_mnt/init.sh - dest: /docker-entrypoint-initdb.d/init.sh \ No newline at end of file diff --git a/ansible/tasks/finalize-ami.yml b/ansible/tasks/finalize-ami.yml new file mode 100644 index 0000000..269cde1 --- /dev/null +++ b/ansible/tasks/finalize-ami.yml @@ -0,0 +1,75 @@ +- name: PG logging conf + template: + src: files/postgresql_config/postgresql-csvlog.conf + dest: /etc/postgresql/logging.conf + group: postgres + +- name: UFW - Allow SSH connections + ufw: + rule: allow + name: OpenSSH + +- name: UFW - Allow connections to postgreSQL (5432) + ufw: + rule: allow + port: "5432" + +- name: UFW - Allow connections to postgreSQL (6543) + ufw: + rule: allow + port: "6543" + tags: + - install-pgbouncer + +- name: UFW - Allow connections to http (80) + 
ufw: + rule: allow + port: http + tags: + - install-tealbase-internal + +- name: UFW - Allow connections to https (443) + ufw: + rule: allow + port: https + tags: + - install-tealbase-internal + +- name: UFW - Deny all other incoming traffic by default + ufw: + state: enabled + policy: deny + direction: incoming + +- name: Move logrotate files to /etc/logrotate.d/ + copy: + src: "files/logrotate_config/{{ item.file }}" + dest: "/etc/logrotate.d/{{ item.file }}" + mode: "0700" + owner: root + loop: + - { file: "logrotate-postgres-csv.conf" } + - { file: "logrotate-postgres.conf" } + - { file: "logrotate-walg.conf" } + - { file: "logrotate-postgres-auth.conf" } + +- name: Disable cron access + copy: + src: files/cron.deny + dest: /etc/cron.deny + +- name: Configure logrotation to run every hour + shell: + cmd: | + cp /usr/lib/systemd/system/logrotate.timer /etc/systemd/system/logrotate.timer + sed -i -e 's;daily;*:0/10;' /etc/systemd/system/logrotate.timer + systemctl reenable logrotate.timer + become: yes + +- name: import pgsodium_getkey script + template: + src: files/pgsodium_getkey_readonly.sh.j2 + dest: "{{ pg_bindir }}/pgsodium_getkey.sh" + owner: postgres + group: postgres + mode: 0700 diff --git a/ansible/tasks/internal/admin-api.yml b/ansible/tasks/internal/admin-api.yml new file mode 100644 index 0000000..05c2f06 --- /dev/null +++ b/ansible/tasks/internal/admin-api.yml @@ -0,0 +1,76 @@ +- name: adminapi - system user + user: + name: adminapi + groups: root,admin,kong,pgbouncer,postgres,postgrest,systemd-journal,wal-g + append: yes + +- name: Move shell scripts to /root dir + copy: + src: "files/admin_api_scripts/{{ item.file }}" + dest: "/root/{{ item.file }}" + mode: "0700" + owner: root + loop: + - { file: "grow_fs.sh" } + - { file: "manage_readonly_mode.sh" } + - { file: "pg_upgrade_check.sh" } + - { file: "pg_upgrade_complete.sh" } + - { file: "pg_upgrade_initiate.sh" } + - { file: "pg_upgrade_prepare.sh" } + - { file: "pg_upgrade_pgsodium_getkey.sh" } 
+ - { file: "pg_egress_collect.pl" } + +- name: give adminapi user permissions + copy: + src: files/adminapi.sudoers.conf + dest: /etc/sudoers.d/adminapi + +- name: perms for adminapi + shell: | + chmod g+w /etc + +- name: Setting arch (x86) + set_fact: + arch: "x86" + when: platform == "amd64" + +- name: Setting arch (arm) + set_fact: + arch: "arm64" + when: platform == "arm64" + +- name: Download adminapi archive + get_url: + url: "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/tealbase-admin-api/v{{ adminapi_release }}/tealbase-admin-api_{{ adminapi_release }}_linux_{{ arch }}.tar.gz" + dest: "/tmp/adminapi.tar.gz" + timeout: 90 + +- name: adminapi - unpack archive in /opt + unarchive: + remote_src: yes + src: /tmp/adminapi.tar.gz + dest: /opt + owner: adminapi + +- name: adminapi - config dir + file: + path: /etc/adminapi + owner: adminapi + state: directory + +- name: adminapi - create service file + template: + src: files/adminapi.service.j2 + dest: /etc/systemd/system/adminapi.service + +- name: UFW - Allow connections to adminapi ports + ufw: + rule: allow + port: "8085" + +- name: adminapi - reload systemd + systemd: + daemon_reload: yes + +- name: adminapi - grant extra priviliges to user + shell: chmod 775 /etc && chmod 775 /etc/kong diff --git a/ansible/tasks/internal/admin-mgr.yml b/ansible/tasks/internal/admin-mgr.yml new file mode 100644 index 0000000..79b848e --- /dev/null +++ b/ansible/tasks/internal/admin-mgr.yml @@ -0,0 +1,22 @@ +- name: Setting arch (x86) + set_fact: + arch: "x86" + when: platform == "amd64" + +- name: Setting arch (arm) + set_fact: + arch: "arm64" + when: platform == "arm64" + +- name: Download admin-mgr archive + get_url: + url: "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/admin-mgr/v{{ adminmgr_release }}/admin-mgr_{{ adminmgr_release }}_linux_{{ arch }}.tar.gz" + dest: "/tmp/admin-mgr.tar.gz" + timeout: 90 + +- name: admin-mgr - unpack archive in /usr/bin/ + unarchive: + remote_src: yes + src: 
/tmp/admin-mgr.tar.gz + dest: /usr/bin/ + owner: root diff --git a/ansible/tasks/internal/node-exporter.yml b/ansible/tasks/internal/node-exporter.yml deleted file mode 100644 index 355dcdb..0000000 --- a/ansible/tasks/internal/node-exporter.yml +++ /dev/null @@ -1,46 +0,0 @@ -- name: UFW - Allow connections to node exporter ports - ufw: - rule: allow - port: "9100" - -- name: Node Exporter - download binary archive - get_url: - url: "https://github.com/prometheus/node_exporter/releases/download/v{{ node_exporter_release }}/node_exporter-{{ node_exporter_release }}.linux-{{ platform }}.tar.gz" - dest: /tmp/node_exporter.tar.gz - checksum: "{{ node_exporter_release_checksum[platform] }}" - -- name: create directories - file: - state: directory - owner: root - path: "/opt/node_exporter" - -- name: node_exporter - unpack archives in /opt - unarchive: - remote_src: yes - src: "/tmp/node_exporter.tar.gz" - dest: /opt/node_exporter - owner: root - extra_opts: [--strip-components=1] - -- name: node_exporter - create service files - template: - src: files/node_exporter.service.j2 - dest: /etc/systemd/system/node_exporter.service - vars: - collectors: - - cpu - - diskstats - - filesystem - - filesystem.ignored-mount-points='^/.+' - - loadavg - - meminfo - - netdev - - netdev.device-exclude='lo' - -- name: node_exporter - reload systemd - systemd: - daemon_reload: yes - enabled: no - state: stopped - name: node_exporter diff --git a/ansible/tasks/internal/optimizations.yml b/ansible/tasks/internal/optimizations.yml index 8ee6551..895accc 100644 --- a/ansible/tasks/internal/optimizations.yml +++ b/ansible/tasks/internal/optimizations.yml @@ -2,6 +2,7 @@ community.general.snap: name: amazon-ssm-agent state: absent + failed_when: not ebssurrogate_mode - name: ensure services are stopped and disabled for first boot systemd: @@ -13,3 +14,34 @@ - postgresql - pgbouncer - fail2ban + - motd-news + - vector + failed_when: not ebssurrogate_mode + +- name: Remove snapd + apt: + state: 
absent + pkg: + - snapd + failed_when: not ebssurrogate_mode + +- name: ensure services are stopped and disabled for first boot + systemd: + enabled: no + name: '{{ item }}' + state: stopped + masked: yes + with_items: + - lvm2-monitor + failed_when: not ebssurrogate_mode + +- name: disable man-db + become: yes + file: + state: absent + path: "/etc/cron.daily/{{ item }}" + with_items: + - man-db + - popularity-contest + - ubuntu-advantage-tools + failed_when: not ebssurrogate_mode diff --git a/ansible/tasks/internal/pg_egress_collect.yml b/ansible/tasks/internal/pg_egress_collect.yml new file mode 100644 index 0000000..be9fefe --- /dev/null +++ b/ansible/tasks/internal/pg_egress_collect.yml @@ -0,0 +1,15 @@ +- name: pg_egress_collect - install tcpdump and perl async lib + apt: + pkg: + - tcpdump + - libio-async-perl + +- name: pg_egress_collect - create service file + template: + src: files/pg_egress_collect.service.j2 + dest: /etc/systemd/system/pg_egress_collect.service + +- name: pg_egress_collect - reload systemd + systemd: + daemon_reload: yes + diff --git a/ansible/tasks/internal/postgres-exporter.yml b/ansible/tasks/internal/postgres-exporter.yml index 4fecaea..b4c1aed 100644 --- a/ansible/tasks/internal/postgres-exporter.yml +++ b/ansible/tasks/internal/postgres-exporter.yml @@ -19,6 +19,7 @@ url: "https://github.com/prometheus-community/postgres_exporter/releases/download/v{{ postgres_exporter_release }}/postgres_exporter-{{ postgres_exporter_release }}.linux-{{ platform }}.tar.gz" dest: /tmp/postgres_exporter.tar.gz checksum: "{{ postgres_exporter_release_checksum[platform] }}" + timeout: 60 - name: expand postgres exporter unarchive: @@ -33,11 +34,6 @@ src: files/postgres_exporter.service.j2 dest: /etc/systemd/system/postgres_exporter.service -- name: exporter copy over queries - template: - src: files/queries.yml.j2 - dest: /opt/postgres_exporter/queries.yml - - name: exporter ensure service is present systemd: enabled: no diff --git 
a/ansible/tasks/internal/setup-ansible-pull.yml b/ansible/tasks/internal/setup-ansible-pull.yml new file mode 100644 index 0000000..ad1aa78 --- /dev/null +++ b/ansible/tasks/internal/setup-ansible-pull.yml @@ -0,0 +1,29 @@ +- name: install ansible + shell: + cmd: | + apt install -y software-properties-common + add-apt-repository --yes --update ppa:ansible/ansible + apt install -y ansible + sed -i -e 's/#callback_whitelist.*/callback_whitelist = profile_tasks/' /etc/ansible/ansible.cfg + +- name: ansible pull systemd units + copy: + src: files/{{ item }} + dest: /etc/systemd/system/{{ item }} + with_items: + - ansible-pull.service + - ansible-pull.timer + +- name: create facts dir + file: + path: /etc/ansible/facts.d + state: directory + +- name: ansible facts + copy: + src: files/tealbase_facts.ini + dest: /etc/ansible/facts.d/tealbase.fact + +- name: reload systemd + systemd: + daemon_reload: yes diff --git a/ansible/tasks/internal/setup-nftables.yml b/ansible/tasks/internal/setup-nftables.yml new file mode 100644 index 0000000..e0f0c72 --- /dev/null +++ b/ansible/tasks/internal/setup-nftables.yml @@ -0,0 +1,34 @@ +- name: nftables overrides + file: + state: directory + path: /etc/nftables + owner: adminapi + +- name: nftables empty config + file: + state: touch + path: /etc/nftables/tealbase_managed.conf + owner: adminapi + +- name: include managed config + shell: | + cat >> "/etc/nftables.conf" << EOF + table inet tealbase_managed { } + include "/etc/nftables/tealbase_managed.conf"; + + EOF + +- name: ufw overrides dir + file: + state: directory + path: /etc/systemd/system/ufw.service.d + owner: root + +- name: Custom systemd overrides + copy: + src: files/ufw.service.conf + dest: /etc/systemd/system/ufw.service.d/overrides.conf + +- name: reload systemd + systemd: + daemon_reload: yes diff --git a/ansible/tasks/internal/supautils.yml b/ansible/tasks/internal/supautils.yml index 45419ad..d4b266b 100644 --- a/ansible/tasks/internal/supautils.yml +++ 
b/ansible/tasks/internal/supautils.yml @@ -1,9 +1,18 @@ # supautils +- name: supautils - download & install dependencies + apt: + pkg: + - build-essential + - clang-11 + update_cache: yes + cache_valid_time: 3600 + - name: supautils - download latest release get_url: url: "https://github.com/tealbase/supautils/archive/refs/tags/v{{ supautils_release }}.tar.gz" dest: /tmp/supautils-{{ supautils_release }}.tar.gz checksum: "{{ supautils_release_checksum }}" + timeout: 60 - name: supautils - unpack archive unarchive: @@ -23,23 +32,46 @@ target: install become: yes -- name: supautils - set supautils.reserved_roles +- name: supautils - add supautils to session_preload_libraries become: yes - lineinfile: + replace: path: /etc/postgresql/postgresql.conf - state: present - line: supautils.reserved_roles = 'tealbase_admin, tealbase_auth_admin, tealbase_storage_admin, dashboard_user, pgbouncer, service_role, authenticator, authenticated, anon' + regexp: "#session_preload_libraries = ''" + replace: session_preload_libraries = 'supautils' -- name: supautils - set supautils.reserved_memberships +- name: supautils - write custom supautils.conf + template: + src: "files/postgresql_config/supautils.conf.j2" + dest: /etc/postgresql-custom/supautils.conf + mode: 0664 + owner: postgres + group: postgres + +- name: supautils - copy extension custom scripts + copy: + src: files/postgresql_extension_custom_scripts/ + dest: /etc/postgresql-custom/extension-custom-scripts become: yes - lineinfile: - path: /etc/postgresql/postgresql.conf - state: present - line: supautils.reserved_memberships = 'pg_read_server_files, pg_write_server_files, pg_execute_server_program' -- name: supautils - add supautils to shared_preload_libraries +- name: supautils - chown extension custom scripts + file: + mode: 0775 + owner: postgres + group: postgres + path: /etc/postgresql-custom/extension-custom-scripts + recurse: yes + become: yes + +- name: supautils - include /etc/postgresql-custom/supautils.conf in 
postgresql.conf become: yes replace: path: /etc/postgresql/postgresql.conf - regexp: shared_preload_libraries = ' - replace: shared_preload_libraries = 'supautils, + regexp: "#include = '/etc/postgresql-custom/supautils.conf'" + replace: "include = '/etc/postgresql-custom/supautils.conf'" + +- name: supautils - remove build dependencies + apt: + pkg: + - build-essential + - clang-11 + state: absent diff --git a/ansible/tasks/postgres-extensions/01-postgis.yml b/ansible/tasks/postgres-extensions/01-postgis.yml index 9aaad6d..1381740 100644 --- a/ansible/tasks/postgres-extensions/01-postgis.yml +++ b/ansible/tasks/postgres-extensions/01-postgis.yml @@ -12,15 +12,28 @@ - libmpfr-dev - libgmp-dev - cmake + - libprotobuf-c-dev + - protobuf-c-compiler update_cache: yes cache_valid_time: 3600 install_recommends: no +- name: postgis - ensure dependencies do not get autoremoved + shell: | + set -e + apt-mark manual libgeos* libproj* libgdal* libjson-c* libxml2* libboost* libcgal* libmpfr* libgmp* + apt-mark auto libgeos*-dev libproj*-dev libgdal*-dev libjson-c*-dev libxml2*-dev libboost*-dev libcgal*-dev libmpfr*-dev libgmp*-dev + + become: yes + args: + executable: /bin/bash + - name: postgis - download SFCGAL dependency get_url: url: "https://gitlab.com/Oslandia/SFCGAL/-/archive/v{{ sfcgal_release }}/SFCGAL-v{{ sfcgal_release }}.tar.gz" dest: /tmp/SFCGAL-v{{ sfcgal_release }}.tar.gz checksum: "{{ sfcgal_release_checksum }}" + timeout: 60 - name: postgis - unpack SFCGAL unarchive: @@ -38,6 +51,7 @@ - name: postgis - build SFCGAL make: chdir: /tmp/SFCGAL-v{{ sfcgal_release }} + jobs: "{{ parallel_jobs | default(omit) }}" become: yes - name: postgis - install SFCGAL @@ -47,10 +61,8 @@ become: yes - name: postgis - download latest release - get_url: - url: "https://download.osgeo.org/postgis/source/postgis-{{ postgis_release }}.tar.gz" - dest: /tmp/postgis-{{ postgis_release }}.tar.gz - checksum: "{{ postgis_release_checksum }}" + shell: + cmd: "curl -sf -L 
https://tealbase-public-artifacts-bucket.s3.amazonaws.com/postgis-{{ postgis_release }}.tar.gz -o /tmp/postgis-{{ postgis_release }}.tar.gz" - name: postgis - unpack archive unarchive: @@ -61,17 +73,28 @@ - name: postgis - configure shell: - cmd: "./configure --without-protobuf --with-sfcgal" + cmd: "./configure --with-sfcgal" chdir: /tmp/postgis-{{ postgis_release }} become: yes - name: postgis - build make: chdir: /tmp/postgis-{{ postgis_release }} + jobs: "{{ parallel_jobs | default(omit) }}" become: yes - name: postgis - install make: chdir: /tmp/postgis-{{ postgis_release }} target: install - become: yes \ No newline at end of file + become: yes + +- name: postgis - SFCGAL cleanup + file: + state: absent + path: /tmp/SFCGAL-v{{ sfcgal_release }} + +- name: postgis - cleanup + file: + state: absent + path: /tmp/postgis-{{ postgis_release }} diff --git a/ansible/tasks/postgres-extensions/02-pgrouting.yml b/ansible/tasks/postgres-extensions/02-pgrouting.yml index 9020d00..328d3e8 100644 --- a/ansible/tasks/postgres-extensions/02-pgrouting.yml +++ b/ansible/tasks/postgres-extensions/02-pgrouting.yml @@ -1,9 +1,18 @@ # pgRouting +- name: pgRouting - download & install dependencies + apt: + pkg: + - libboost-all-dev + update_cache: yes + cache_valid_time: 3600 + install_recommends: no + - name: pgRouting - download latest release get_url: url: "https://github.com/pgRouting/pgrouting/releases/download/v{{ pgrouting_release }}/pgrouting-{{ pgrouting_release }}.tar.gz" dest: /tmp/pgrouting-{{ pgrouting_release }}.tar.gz checksum: "{{ pgrouting_release_checksum }}" + timeout: 60 - name: pgRouting - unpack archive unarchive: @@ -27,10 +36,16 @@ - name: pgRouting - build make: chdir: /tmp/pgrouting-{{ pgrouting_release }}/build + jobs: "{{ parallel_jobs | default(omit) }}" become: yes - name: pgRouting - install make: chdir: /tmp/pgrouting-{{ pgrouting_release }}/build target: install - become: yes \ No newline at end of file + become: yes + +- name: pgRouting - cleanup + 
file: + state: absent + path: /tmp/pgrouting-{{ pgrouting_release }} diff --git a/ansible/tasks/postgres-extensions/03-pgtap.yml b/ansible/tasks/postgres-extensions/03-pgtap.yml index 6dc11f0..9b818b9 100644 --- a/ansible/tasks/postgres-extensions/03-pgtap.yml +++ b/ansible/tasks/postgres-extensions/03-pgtap.yml @@ -4,6 +4,7 @@ url: "https://github.com/theory/pgtap/archive/v{{ pgtap_release }}.tar.gz" dest: /tmp/pgtap-{{ pgtap_release }}.tar.gz checksum: "{{ pgtap_release_checksum }}" + timeout: 60 - name: pgTAP - unpack archive unarchive: @@ -16,4 +17,9 @@ make: chdir: /tmp/pgtap-{{ pgtap_release }} target: install - become: yes \ No newline at end of file + become: yes + +- name: pgTAP - cleanup + file: + state: absent + path: /tmp/pgtap-{{ pgtap_release }} diff --git a/ansible/tasks/postgres-extensions/04-pg_cron.yml b/ansible/tasks/postgres-extensions/04-pg_cron.yml index 5bdf294..787fe1d 100644 --- a/ansible/tasks/postgres-extensions/04-pg_cron.yml +++ b/ansible/tasks/postgres-extensions/04-pg_cron.yml @@ -4,6 +4,7 @@ url: "https://github.com/citusdata/pg_cron/archive/refs/tags/v{{ pg_cron_release }}.tar.gz" dest: /tmp/pg_cron-{{ pg_cron_release }}.tar.gz checksum: "{{ pg_cron_release_checksum }}" + timeout: 60 - name: pg_cron - unpack archive unarchive: @@ -28,4 +29,9 @@ lineinfile: path: /etc/postgresql/postgresql.conf state: present - line: cron.database_name = 'postgres' \ No newline at end of file + line: cron.database_name = 'postgres' + +- name: pg_cron - cleanup + file: + state: absent + path: /tmp/pg_cron-{{ pg_cron_release }} diff --git a/ansible/tasks/postgres-extensions/05-pgaudit.yml b/ansible/tasks/postgres-extensions/05-pgaudit.yml index 6d3b2bc..5f88c84 100644 --- a/ansible/tasks/postgres-extensions/05-pgaudit.yml +++ b/ansible/tasks/postgres-extensions/05-pgaudit.yml @@ -12,6 +12,7 @@ url: "https://github.com/pgaudit/pgaudit/archive/refs/tags/{{ pgaudit_release }}.tar.gz" dest: /tmp/pgaudit-{{ pgaudit_release }}.tar.gz checksum: "{{ 
pgaudit_release_checksum }}" + timeout: 60 - name: pgAudit - unpack archive unarchive: @@ -34,4 +35,9 @@ target: install params: USE_PGXS: 1 - become: yes \ No newline at end of file + become: yes + +- name: pgAudit - cleanup + file: + state: absent + path: /tmp/pgaudit-{{ pgaudit_release }} diff --git a/ansible/tasks/postgres-extensions/06-pgjwt.yml b/ansible/tasks/postgres-extensions/06-pgjwt.yml index b2734e1..4acc13c 100644 --- a/ansible/tasks/postgres-extensions/06-pgjwt.yml +++ b/ansible/tasks/postgres-extensions/06-pgjwt.yml @@ -9,4 +9,9 @@ make: chdir: /tmp/pgjwt target: install - become: yes \ No newline at end of file + become: yes + +- name: pgjwt - cleanup + file: + state: absent + path: /tmp/pgjwt diff --git a/ansible/tasks/postgres-extensions/07-pgsql-http.yml b/ansible/tasks/postgres-extensions/07-pgsql-http.yml index 6fd5cf9..73044d2 100644 --- a/ansible/tasks/postgres-extensions/07-pgsql-http.yml +++ b/ansible/tasks/postgres-extensions/07-pgsql-http.yml @@ -17,6 +17,7 @@ url: "https://github.com/pramsey/pgsql-http/archive/refs/tags/v{{ pgsql_http_release }}.tar.gz" dest: /tmp/pgsql_http-{{ pgsql_http_release }}.tar.gz checksum: "{{ pgsql_http_release_checksum }}" + timeout: 60 - name: pgsql-http - unpack archive unarchive: @@ -34,4 +35,9 @@ make: chdir: /tmp/pgsql-http-{{ pgsql_http_release }} target: install - become: yes \ No newline at end of file + become: yes + +- name: pgsql-http - cleanup + file: + state: absent + path: /tmp/pgsql-http-{{ pgsql_http_release }} diff --git a/ansible/tasks/postgres-extensions/08-plpgsql_check.yml b/ansible/tasks/postgres-extensions/08-plpgsql_check.yml index 16fb5aa..75bb041 100644 --- a/ansible/tasks/postgres-extensions/08-plpgsql_check.yml +++ b/ansible/tasks/postgres-extensions/08-plpgsql_check.yml @@ -11,6 +11,7 @@ url: "https://github.com/okbob/plpgsql_check/archive/refs/tags/v{{ plpgsql_check_release }}.tar.gz" dest: /tmp/plpgsql_check-{{ plpgsql_check_release }}.tar.gz checksum: "{{ 
plpgsql_check_release_checksum }}" + timeout: 60 - name: plpgsql_check - unpack archive unarchive: @@ -29,4 +30,9 @@ make: chdir: /tmp/plpgsql_check-{{ plpgsql_check_release }} target: install - become: yes \ No newline at end of file + become: yes + +- name: plpgsql_check - cleanup + file: + state: absent + path: /tmp/plpgsql_check-{{ plpgsql_check_release }} diff --git a/ansible/tasks/postgres-extensions/09-pg-safeupdate.yml b/ansible/tasks/postgres-extensions/09-pg-safeupdate.yml index e27cfd9..36ae41c 100644 --- a/ansible/tasks/postgres-extensions/09-pg-safeupdate.yml +++ b/ansible/tasks/postgres-extensions/09-pg-safeupdate.yml @@ -4,6 +4,7 @@ url: "https://github.com/eradman/pg-safeupdate/archive/refs/tags/{{ pg_safeupdate_release }}.tar.gz" dest: /tmp/pg_safeupdate-{{ pg_safeupdate_release }}.tar.gz checksum: "{{ pg_safeupdate_release_checksum }}" + timeout: 60 - name: pg-safeupdate - unpack archive unarchive: @@ -21,4 +22,9 @@ make: chdir: /tmp/pg-safeupdate-{{ pg_safeupdate_release }} target: install - become: yes \ No newline at end of file + become: yes + +- name: pg-safeupdate - cleanup + file: + state: absent + path: /tmp/pg-safeupdate-{{ pg_safeupdate_release }} diff --git a/ansible/tasks/postgres-extensions/10-timescaledb.yml b/ansible/tasks/postgres-extensions/10-timescaledb.yml index 4679899..cb4b842 100644 --- a/ansible/tasks/postgres-extensions/10-timescaledb.yml +++ b/ansible/tasks/postgres-extensions/10-timescaledb.yml @@ -15,7 +15,7 @@ - name: timescaledb - bootstrap shell: - cmd: "./bootstrap -DAPACHE_ONLY=1 -DREGRESS_CHECKS=OFF" + cmd: "./bootstrap -DAPACHE_ONLY=1" chdir: /tmp/timescaledb become: yes @@ -28,4 +28,9 @@ make: chdir: /tmp/timescaledb/build target: install - become: yes \ No newline at end of file + become: yes + +- name: timescaledb - cleanup + file: + state: absent + path: /tmp/timescaledb diff --git a/ansible/tasks/postgres-extensions/11-wal2json.yml b/ansible/tasks/postgres-extensions/11-wal2json.yml index 8fabbdd..7d6db24 
100644 --- a/ansible/tasks/postgres-extensions/11-wal2json.yml +++ b/ansible/tasks/postgres-extensions/11-wal2json.yml @@ -1,24 +1,17 @@ # wal2json -- name: wal2json - download latest release - get_url: - url: "https://github.com/eulerto/wal2json/archive/refs/tags/wal2json_{{ wal2json_release }}.tar.gz" - dest: /tmp/wal2json-{{ wal2json_release }}.tar.gz - checksum: "{{ wal2json_release_checksum }}" - -- name: wal2json - unpack archive - unarchive: - remote_src: yes - src: /tmp/wal2json-{{ wal2json_release }}.tar.gz - dest: /tmp - become: yes - -- name: wal2json - build - make: - chdir: /tmp/wal2json-wal2json_{{ wal2json_release }} - become: yes +- name: wal2json - download by commit sha + git: + repo: https://github.com/eulerto/wal2json.git + dest: /tmp/wal2json + version: "{{ wal2json_commit_sha }}" - name: wal2json - install make: - chdir: /tmp/wal2json-wal2json_{{ wal2json_release }} + chdir: /tmp/wal2json target: install - become: yes \ No newline at end of file + become: yes + +- name: wal2json - cleanup + file: + state: absent + path: /tmp/wal2json diff --git a/ansible/tasks/postgres-extensions/12-pljava.yml b/ansible/tasks/postgres-extensions/12-pljava.yml index 3bea59c..309c7a2 100644 --- a/ansible/tasks/postgres-extensions/12-pljava.yml +++ b/ansible/tasks/postgres-extensions/12-pljava.yml @@ -5,32 +5,42 @@ - maven - default-jre - default-jdk + - libssl-dev update_cache: yes install_recommends: no +#TODO: revert to using main repo after PG15 support is merged: https://github.com/tada/pljava/pull/413 +# - name: pljava - download latest release +# get_url: +# url: https://github.com/tada/pljava/archive/V{{ pljava_release }}.tar.gz +# dest: /tmp/pljava-{{ pljava_release }}.tar.gz +# checksum: "{{ pljava_release_checksum }}" +# timeout: 60 + +# - name: pljava - unpack archive +# unarchive: +# remote_src: yes +# src: /tmp/pljava-{{ pljava_release }}.tar.gz +# dest: /tmp +# become: yes + - name: pljava - download latest release - get_url: - url: 
https://github.com/tada/pljava/archive/V{{ pljava_release }}.tar.gz - dest: /tmp/pljava-{{ pljava_release }}.tar.gz - checksum: "{{ pljava_release_checksum }}" - -- name: pljava - unpack archive - unarchive: - remote_src: yes - src: /tmp/pljava-{{ pljava_release }}.tar.gz - dest: /tmp become: yes + git: + repo: https://github.com/tealbase/pljava.git + dest: /tmp/pljava-{{ pljava_release }} + version: "{{ pljava_release }}" - name: pljava - build become: yes shell: - cmd: mvn clean install + cmd: mvn -T 1C clean install -Dmaven.test.skip -DskipTests -Dmaven.javadoc.skip=true chdir: /tmp/pljava-{{ pljava_release }} - name: pljava - install become: yes shell: - cmd: java -jar pljava-packaging/target/pljava-pg13.jar + cmd: java -jar pljava-packaging/target/pljava-pg{{ postgresql_major }}.jar chdir: /tmp/pljava-{{ pljava_release }} - name: pljava - remove build dependencies @@ -48,6 +58,12 @@ update_cache: yes install_recommends: no +- name: Hold jre package + dpkg_selections: + name: default-jre-headless + selection: hold + when: async_mode + - name: pljava - set pljava.libjvm_location become: yes lineinfile: @@ -59,4 +75,10 @@ become: yes file: path: ~/.m2 - state: absent \ No newline at end of file + state: absent + +- name: pljava - cleanup + become: yes + file: + state: absent + path: /tmp/pljava-{{ pljava_release }} diff --git a/ansible/tasks/postgres-extensions/13-plv8.yml b/ansible/tasks/postgres-extensions/13-plv8.yml index 1966649..0a84daf 100644 --- a/ansible/tasks/postgres-extensions/13-plv8.yml +++ b/ansible/tasks/postgres-extensions/13-plv8.yml @@ -26,7 +26,7 @@ git: repo: https://github.com/plv8/plv8.git dest: /tmp/plv8 - version: 3656177d384e3e02b74faa8e2931600f3690ab59 + version: "{{ plv8_commit_version }}" become: yes - name: Create a symbolic link @@ -35,14 +35,32 @@ dest: /lib/aarch64-linux-gnu/libc++.so state: link when: platform == "arm64" + ignore_errors: yes # not needed for docker build - name: plv8 - build make: chdir: /tmp/plv8 become: yes + 
when: not async_mode - name: plv8 - install make: chdir: /tmp/plv8 target: install - become: yes \ No newline at end of file + become: yes + when: not async_mode + +- name: plv8 - cleanup + file: + state: absent + path: /tmp/plv8 + when: not async_mode + +- name: plv8 - build + make: + chdir: /tmp/plv8 + become: yes + async: 2000 + poll: 0 + register: plv8_build + when: async_mode diff --git a/ansible/tasks/postgres-extensions/14-pg_plan_filter.yml b/ansible/tasks/postgres-extensions/14-pg_plan_filter.yml new file mode 100644 index 0000000..a776329 --- /dev/null +++ b/ansible/tasks/postgres-extensions/14-pg_plan_filter.yml @@ -0,0 +1,23 @@ +# pg_plan_filter +- name: pg_plan_filter - download latest release + git: + repo: https://github.com/pgexperts/pg_plan_filter.git + dest: /tmp/pg_plan_filter + version: "{{ pg_plan_filter_commit_version }}" + become: yes + +- name: pg_plan_filter - build + make: + chdir: /tmp/pg_plan_filter + become: yes + +- name: pg_plan_filter - install + make: + chdir: /tmp/pg_plan_filter + target: install + become: yes + +- name: pg_plan_filter - cleanup + file: + state: absent + path: /tmp/pg_plan_filter diff --git a/ansible/tasks/postgres-extensions/15-pg_net.yml b/ansible/tasks/postgres-extensions/15-pg_net.yml new file mode 100644 index 0000000..3f4419e --- /dev/null +++ b/ansible/tasks/postgres-extensions/15-pg_net.yml @@ -0,0 +1,37 @@ +# pg_net +- name: pg_net - download & install dependencies + apt: + pkg: + - libcurl4-gnutls-dev + update_cache: yes + install_recommends: no + +- name: pg_net - download latest release + get_url: + url: "https://github.com/tealbase/pg_net/archive/refs/tags/v{{pg_net_release}}.tar.gz" + dest: /tmp/pg_net-{{ pg_net_release }}.tar.gz + checksum: "{{ pg_net_release_checksum }}" + timeout: 60 + +- name: pg_net - unpack archive + unarchive: + remote_src: yes + src: /tmp/pg_net-{{ pg_net_release }}.tar.gz + dest: /tmp + become: yes + +- name: pg_net - build + make: + chdir: /tmp/pg_net-{{ pg_net_release }} + 
become: yes + +- name: pg_net - install + make: + chdir: /tmp/pg_net-{{ pg_net_release }} + target: install + become: yes + +- name: pg_net - cleanup + file: + state: absent + path: /tmp/pg_net-{{ pg_net_release }} diff --git a/ansible/tasks/postgres-extensions/16-rum.yml b/ansible/tasks/postgres-extensions/16-rum.yml new file mode 100644 index 0000000..f8cca16 --- /dev/null +++ b/ansible/tasks/postgres-extensions/16-rum.yml @@ -0,0 +1,34 @@ +# rum +- name: rum - download latest release + get_url: + url: "https://github.com/postgrespro/rum/archive/refs/tags/{{rum_release}}.tar.gz" + dest: /tmp/rum-{{ rum_release }}.tar.gz + checksum: "{{ rum_release_checksum }}" + timeout: 60 + +- name: rum - unpack archive + unarchive: + remote_src: yes + src: /tmp/rum-{{ rum_release }}.tar.gz + dest: /tmp + become: yes + +- name: rum - build + make: + chdir: /tmp/rum-{{ rum_release }} + params: + USE_PGXS: 1 + become: yes + +- name: rum - install + make: + chdir: /tmp/rum-{{ rum_release }} + target: install + params: + USE_PGXS: 1 + become: yes + +- name: rum - cleanup + file: + state: absent + path: /tmp/rum-{{ rum_release }} diff --git a/ansible/tasks/postgres-extensions/17-pg_hashids.yml b/ansible/tasks/postgres-extensions/17-pg_hashids.yml new file mode 100644 index 0000000..4f751de --- /dev/null +++ b/ansible/tasks/postgres-extensions/17-pg_hashids.yml @@ -0,0 +1,22 @@ +# pg_hashids +- name: pg_hashids - download from master branch + git: + repo: https://github.com/iCyberon/pg_hashids.git + dest: /tmp/pg_hashids + version: master + +- name: pg_hashids - build + make: + chdir: /tmp/pg_hashids + become: yes + +- name: pg_hashids - install + make: + chdir: /tmp/pg_hashids + target: install + become: yes + +- name: pg_hashids - cleanup + file: + state: absent + path: /tmp/pg_hashids diff --git a/ansible/tasks/postgres-extensions/18-pgsodium.yml b/ansible/tasks/postgres-extensions/18-pgsodium.yml new file mode 100644 index 0000000..a808e11 --- /dev/null +++ 
b/ansible/tasks/postgres-extensions/18-pgsodium.yml @@ -0,0 +1,81 @@ +# libsodium and pgsodium +- name: determine postgres bin directory + shell: pg_config --bindir + register: pg_bindir_output +- set_fact: + pg_bindir: "{{ pg_bindir_output.stdout }}" + +- name: libsodium - download libsodium + get_url: + url: "https://download.libsodium.org/libsodium/releases/libsodium-{{ libsodium_release }}.tar.gz" + dest: /tmp/libsodium-{{ libsodium_release }}.tar.gz + checksum: "{{ libsodium_release_checksum }}" + timeout: 60 + +- name: libsodium - unpack archive + unarchive: + remote_src: yes + src: /tmp/libsodium-{{ libsodium_release }}.tar.gz + dest: /tmp + become: yes + +- name: libsodium - configure + shell: + cmd: ./configure + chdir: /tmp/libsodium-{{ libsodium_release }} + become: yes + +- name: libsodium - build + make: + chdir: /tmp/libsodium-{{ libsodium_release }} + become: yes + +- name: libsodium - install + make: + chdir: /tmp/libsodium-{{ libsodium_release }} + target: install + become: yes + +- name: pgsodium - download pgsodium + get_url: + url: "https://github.com/michelp/pgsodium/archive/refs/tags/v{{ pgsodium_release }}.tar.gz" + dest: /tmp/pgsodium-{{ pgsodium_release }}.tar.gz + checksum: "{{ pgsodium_release_checksum }}" + timeout: 60 + +- name: pgsodium - unpack archive + unarchive: + remote_src: yes + src: /tmp/pgsodium-{{ pgsodium_release }}.tar.gz + dest: /tmp + become: yes + +- name: pgsodium - build + make: + chdir: /tmp/pgsodium-{{ pgsodium_release }} + jobs: "{{ parallel_jobs | default(omit) }}" + become: yes + +- name: pgsodium - install + make: + chdir: /tmp/pgsodium-{{ pgsodium_release }} + target: install + become: yes + +- name: pgsodium - set pgsodium.getkey_script + become: yes + lineinfile: + path: /etc/postgresql/postgresql.conf + state: present + # script is expected to be placed by finalization tasks for different target platforms + line: pgsodium.getkey_script= '{{ pg_bindir }}/pgsodium_getkey.sh' + +- name: libsodium - cleanup + 
file: + state: absent + path: /tmp/libsodium-{{ libsodium_release }} + +- name: pgsodium - cleanup + file: + state: absent + path: /tmp/pgsodium-{{ pgsodium_release }} diff --git a/ansible/tasks/postgres-extensions/19-pg_graphql.yml b/ansible/tasks/postgres-extensions/19-pg_graphql.yml new file mode 100644 index 0000000..f72edfe --- /dev/null +++ b/ansible/tasks/postgres-extensions/19-pg_graphql.yml @@ -0,0 +1,3 @@ +- name: install pg_graphql + ansible.builtin.apt: + deb: "https://github.com/tealbase/pg_graphql/releases/download/{{ pg_graphql_release }}/pg_graphql-{{ pg_graphql_release }}-pg{{ postgresql_major }}-{{ platform }}-linux-gnu.deb" diff --git a/ansible/tasks/postgres-extensions/20-pg_stat_monitor.yml b/ansible/tasks/postgres-extensions/20-pg_stat_monitor.yml new file mode 100644 index 0000000..bffddef --- /dev/null +++ b/ansible/tasks/postgres-extensions/20-pg_stat_monitor.yml @@ -0,0 +1,23 @@ +# pg_stat_monitor +- name: pg_stat_monitor - download and install dependencies + git: + repo: https://github.com/percona/pg_stat_monitor.git + dest: /tmp/pg_stat_monitor + version: "{{ pg_stat_monitor_release }}" + become: yes + +- name: pg_stat_monitor build + make: + chdir: /tmp/pg_stat_monitor + params: USE_PGXS=1 + +- name: pg_stat_monitor install + make: + chdir: /tmp/pg_stat_monitor + target: install + params: USE_PGXS=1 + +- name: pg_stat_monitor cleanup + file: + state: absent + path: /tmp/pg_stat_monitor diff --git a/ansible/tasks/postgres-extensions/21-auto_explain.yml b/ansible/tasks/postgres-extensions/21-auto_explain.yml new file mode 100644 index 0000000..b6a16fa --- /dev/null +++ b/ansible/tasks/postgres-extensions/21-auto_explain.yml @@ -0,0 +1,7 @@ + +- name: auto_explain - set auto_explain.log_min_duration + become: yes + lineinfile: + path: /etc/postgresql/postgresql.conf + state: present + line: auto_explain.log_min_duration = 10s diff --git a/ansible/tasks/postgres-extensions/22-pg_jsonschema.yml 
b/ansible/tasks/postgres-extensions/22-pg_jsonschema.yml new file mode 100644 index 0000000..0d78442 --- /dev/null +++ b/ansible/tasks/postgres-extensions/22-pg_jsonschema.yml @@ -0,0 +1,3 @@ +- name: install pg_jsonschema + ansible.builtin.apt: + deb: "https://github.com/tealbase/pg_jsonschema/releases/download/{{ pg_jsonschema_release }}/pg_jsonschema-{{ pg_jsonschema_release }}-pg{{ postgresql_major }}-{{ platform }}-linux-gnu.deb" diff --git a/ansible/tasks/postgres-extensions/23-vault.yml b/ansible/tasks/postgres-extensions/23-vault.yml new file mode 100644 index 0000000..dbb2db4 --- /dev/null +++ b/ansible/tasks/postgres-extensions/23-vault.yml @@ -0,0 +1,31 @@ +# vault + +- name: vault - download vault + get_url: + url: "https://github.com/tealbase/vault/archive/refs/tags/v{{ vault_release }}.tar.gz" + dest: /tmp/vault-{{ vault_release }}.tar.gz + checksum: "{{ vault_release_checksum }}" + timeout: 60 + +- name: vault - unpack archive + unarchive: + remote_src: yes + src: /tmp/vault-{{ vault_release }}.tar.gz + dest: /tmp + become: yes + +- name: vault - build + make: + chdir: /tmp/vault-{{ vault_release }} + become: yes + +- name: vault - install + make: + chdir: /tmp/vault-{{ vault_release }} + target: install + become: yes + +- name: vault - cleanup + file: + state: absent + path: /tmp/vault-{{ vault_release }} diff --git a/ansible/tasks/postgres-extensions/24-pgroonga.yml b/ansible/tasks/postgres-extensions/24-pgroonga.yml new file mode 100644 index 0000000..37c7a28 --- /dev/null +++ b/ansible/tasks/postgres-extensions/24-pgroonga.yml @@ -0,0 +1,83 @@ +# groonga and pgroonga +- name: groonga - download & install dependencies + apt: + pkg: + - zlib1g-dev + - liblzo2-dev + - libmsgpack-dev + - libzmq3-dev + - libevent-dev + - libmecab-dev + - mecab-naist-jdic + update_cache: yes + install_recommends: no + +- name: groonga - download groonga + get_url: + url: "https://packages.groonga.org/source/groonga/groonga-{{ groonga_release }}.tar.gz" + dest: 
/tmp/groonga-{{ groonga_release }}.tar.gz + checksum: "{{ groonga_release_checksum }}" + timeout: 60 + +- name: groonga - unpack archive + unarchive: + remote_src: yes + src: /tmp/groonga-{{ groonga_release }}.tar.gz + dest: /tmp + become: yes + +- name: groonga - configure + shell: + cmd: ./configure + chdir: /tmp/groonga-{{ groonga_release }} + become: yes + +- name: groonga - build + make: + chdir: /tmp/groonga-{{ groonga_release }} + jobs: "{{ parallel_jobs | default(omit) }}" + become: yes + +- name: groonga - install + make: + chdir: /tmp/groonga-{{ groonga_release }} + target: install + become: yes + +- name: pgroonga - download pgroonga + get_url: + url: "https://packages.groonga.org/source/pgroonga/pgroonga-{{ pgroonga_release }}.tar.gz" + dest: /tmp/pgroonga-{{ pgroonga_release }}.tar.gz + checksum: "{{ pgroonga_release_checksum }}" + timeout: 60 + +- name: pgroonga - unpack archive + unarchive: + remote_src: yes + src: /tmp/pgroonga-{{ pgroonga_release }}.tar.gz + dest: /tmp + become: yes + +- name: pgroonga - build + make: + chdir: /tmp/pgroonga-{{ pgroonga_release }} + jobs: "{{ parallel_jobs | default(omit) }}" + become: yes + +- name: pgroonga - install + make: + chdir: /tmp/pgroonga-{{ pgroonga_release }} + target: install + become: yes + +- name: groonga - cleanup + file: + state: absent + path: /tmp/groonga-{{ groonga_release }} + become: yes + +- name: pgroonga - cleanup + file: + state: absent + path: /tmp/pgroonga-{{ pgroonga_release }} + become: yes diff --git a/ansible/tasks/postgres-extensions/25-wrappers.yml b/ansible/tasks/postgres-extensions/25-wrappers.yml new file mode 100644 index 0000000..375c553 --- /dev/null +++ b/ansible/tasks/postgres-extensions/25-wrappers.yml @@ -0,0 +1,3 @@ +- name: install wrappers + ansible.builtin.apt: + deb: "https://github.com/tealbase/wrappers/releases/download/{{ wrappers_release }}/wrappers-{{ wrappers_release }}-pg{{ postgresql_major }}-{{ platform }}-linux-gnu.deb" diff --git 
a/ansible/tasks/postgres-extensions/26-hypopg.yml b/ansible/tasks/postgres-extensions/26-hypopg.yml new file mode 100644 index 0000000..eeb29be --- /dev/null +++ b/ansible/tasks/postgres-extensions/26-hypopg.yml @@ -0,0 +1,17 @@ +# hypopg +- name: hypopg - download by commit sha + git: + repo: https://github.com/HypoPG/hypopg.git + dest: /tmp/hypopg + version: "{{ hypopg_commit_sha }}" + +- name: hypopg - install + make: + chdir: /tmp/hypopg + target: install + become: yes + +- name: hypopg - cleanup + file: + state: absent + path: /tmp/hypopg diff --git a/ansible/tasks/postgres-extensions/27-pg_repack.yml b/ansible/tasks/postgres-extensions/27-pg_repack.yml new file mode 100644 index 0000000..3a2aa96 --- /dev/null +++ b/ansible/tasks/postgres-extensions/27-pg_repack.yml @@ -0,0 +1,37 @@ +# pg_repack +- name: pg_repack - download & install dependencies + apt: + pkg: + - liblz4-dev + - libz-dev + - libzstd-dev + - libreadline-dev + update_cache: yes + install_recommends: no + +- name: pg_repack - download latest release + git: + repo: https://github.com/reorg/pg_repack.git + dest: /tmp/pg_repack + version: "ver_{{ pg_repack_release }}" + become: yes + +- name: pg_repack - build + make: + chdir: /tmp/pg_repack + params: + USE_PGXS: 1 + become: yes + +- name: pg_repack - install + make: + chdir: /tmp/pg_repack + target: install + params: + USE_PGXS: 1 + become: yes + +- name: pg_repack - cleanup + file: + state: absent + path: /tmp/pg_repack diff --git a/ansible/tasks/postgres-extensions/28-pgvector.yml b/ansible/tasks/postgres-extensions/28-pgvector.yml new file mode 100644 index 0000000..05b523d --- /dev/null +++ b/ansible/tasks/postgres-extensions/28-pgvector.yml @@ -0,0 +1,23 @@ +# pgvector +- name: pgvector - download latest release + git: + repo: https://github.com/pgvector/pgvector.git + dest: /tmp/pgvector + version: '{{ pgvector_release }}' + become: yes + +- name: pgvector - build + make: + chdir: /tmp/pgvector + become: yes + +- name: pgvector - install + 
make: + chdir: /tmp/pgvector + target: install + become: yes + +- name: pgvector - cleanup + file: + state: absent + path: /tmp/pgvector diff --git a/ansible/tasks/postgres-extensions/99-finish_async_tasks.yml b/ansible/tasks/postgres-extensions/99-finish_async_tasks.yml new file mode 100644 index 0000000..2e0609b --- /dev/null +++ b/ansible/tasks/postgres-extensions/99-finish_async_tasks.yml @@ -0,0 +1,19 @@ +## Verify plv8 status and complete plv8-install +- name: Check if plv8 is complete + async_status: + jid: "{{ plv8_build.ansible_job_id }}" + register: job_result + until: job_result.finished + delay: 60 + retries: 60 + +- name: plv8 - install + make: + chdir: /tmp/plv8 + target: install + become: yes + +- name: plv8 - cleanup + file: + state: absent + path: /tmp/plv8 diff --git a/ansible/tasks/setup-extensions.yml b/ansible/tasks/setup-extensions.yml index a9f2d3f..b5b0b8c 100644 --- a/ansible/tasks/setup-extensions.yml +++ b/ansible/tasks/setup-extensions.yml @@ -1,3 +1,9 @@ +- name: Install plv8 + import_tasks: tasks/postgres-extensions/13-plv8.yml + +- name: Install pg_jsonschema + import_tasks: tasks/postgres-extensions/22-pg_jsonschema.yml + - name: Install postgis import_tasks: tasks/postgres-extensions/01-postgis.yml @@ -25,8 +31,8 @@ - name: Install pg-safeupdate import_tasks: tasks/postgres-extensions/09-pg-safeupdate.yml -# - name: Install timescaledb -# import_tasks: tasks/postgres-extensions/10-timescaledb.yml +- name: Install timescaledb + import_tasks: tasks/postgres-extensions/10-timescaledb.yml - name: Install wal2json import_tasks: tasks/postgres-extensions/11-wal2json.yml @@ -34,5 +40,48 @@ - name: Install pljava import_tasks: tasks/postgres-extensions/12-pljava.yml -- name: Install plv8 - import_tasks: tasks/postgres-extensions/13-plv8.yml \ No newline at end of file +- name: Install pg_plan_filter + import_tasks: tasks/postgres-extensions/14-pg_plan_filter.yml + +- name: Install pg_net + import_tasks: 
tasks/postgres-extensions/15-pg_net.yml + +- name: Install rum + import_tasks: tasks/postgres-extensions/16-rum.yml + +- name: Install pg_hashids + import_tasks: tasks/postgres-extensions/17-pg_hashids.yml + +- name: Install pgsodium + import_tasks: tasks/postgres-extensions/18-pgsodium.yml + +- name: Install pg_graphql + import_tasks: tasks/postgres-extensions/19-pg_graphql.yml + +- name: Install pg_stat_monitor + import_tasks: tasks/postgres-extensions/20-pg_stat_monitor.yml + +- name: Install auto_explain + import_tasks: tasks/postgres-extensions/21-auto_explain.yml + +# - name: Install vault +# import_tasks: tasks/postgres-extensions/23-vault.yml + +- name: Install PGroonga + import_tasks: tasks/postgres-extensions/24-pgroonga.yml + +- name: Install wrappers + import_tasks: tasks/postgres-extensions/25-wrappers.yml + +- name: Install hypopg + import_tasks: tasks/postgres-extensions/26-hypopg.yml + +- name: Install pg_repack + import_tasks: tasks/postgres-extensions/27-pg_repack.yml + +- name: Install pgvector + import_tasks: tasks/postgres-extensions/28-pgvector.yml + +- name: Verify async task status + import_tasks: tasks/postgres-extensions/99-finish_async_tasks.yml + when: async_mode diff --git a/ansible/tasks/setup-fail2ban.yml b/ansible/tasks/setup-fail2ban.yml index 2d901d0..abd26cf 100644 --- a/ansible/tasks/setup-fail2ban.yml +++ b/ansible/tasks/setup-fail2ban.yml @@ -1,10 +1,20 @@ -# set default bantime to 30 minutes -- name: supautils - add supautils to shared_preload_libraries +# set default bantime to 1 hour +- name: extend bantime become: yes replace: path: /etc/fail2ban/jail.conf regexp: bantime = 10m - replace: bantime = 1800 + replace: bantime = 3600 + +- name: Configure journald + copy: + src: files/fail2ban_config/jail-ssh.conf + dest: /etc/fail2ban/jail.d/sshd.local + +- name: configure fail2ban to use nftables + copy: + src: files/fail2ban_config/jail.local + dest: /etc/fail2ban/jail.local # postgresql - name: import jail.d/postgresql.conf 
@@ -19,6 +29,19 @@ dest: /etc/fail2ban/filter.d/postgresql.conf become: yes +- name: create overrides dir + file: + state: directory + owner: root + group: root + path: /etc/systemd/system/fail2ban.service.d + mode: '0700' + +- name: Custom systemd overrides + copy: + src: files/fail2ban_config/fail2ban.service.conf + dest: /etc/systemd/system/fail2ban.service.d/overrides.conf + - name: add in tealbase specific ignore filters lineinfile: path: /etc/fail2ban/filter.d/postgresql.conf @@ -38,4 +61,10 @@ - name: fail2ban - restart systemd: name: fail2ban - state: restarted \ No newline at end of file + state: restarted + +- name: fail2ban - disable service + systemd: + name: fail2ban + enabled: no + daemon_reload: yes diff --git a/ansible/tasks/setup-gotrue.yml b/ansible/tasks/setup-gotrue.yml new file mode 100644 index 0000000..cb20007 --- /dev/null +++ b/ansible/tasks/setup-gotrue.yml @@ -0,0 +1,54 @@ +- name: UFW - Allow connections to GoTrue metrics exporter + ufw: + rule: allow + port: "9122" + +# use this user for the Gotrue build and for running the service +- name: Gotrue - system user + user: name=gotrue + +- name: Setting arch (x86) + set_fact: + arch: "x86" + when: platform == "amd64" + +- name: Setting arch (arm) + set_fact: + arch: "arm64" + when: platform == "arm64" + +- name: gotrue - download commit archive + get_url: + url: "https://github.com/tealbase/gotrue/releases/download/{{ gotrue_release }}/gotrue-{{ gotrue_release }}-{{ arch }}.tar.gz" + dest: /tmp/gotrue.tar.gz + checksum: "{{ gotrue_release_checksum }}" + +- name: gotrue - create /opt/gotrue + file: + path: /opt/gotrue + state: directory + owner: gotrue + mode: 0775 + +- name: gotrue - unpack archive in /opt/gotrue + unarchive: + remote_src: yes + src: /tmp/gotrue.tar.gz + dest: /opt/gotrue + owner: gotrue + +# libpq is a C library that enables user programs to communicate with +# the PostgreSQL database server. 
+# - name: gotrue - system dependencies +# apt: +# pkg: +# - libpq-dev + +- name: gotrue - create service file + template: + src: files/gotrue.service.j2 + dest: /etc/systemd/system/gotrue.service + +- name: gotrue - reload systemd + systemd: + daemon_reload: yes diff --git a/ansible/tasks/setup-kong.yml b/ansible/tasks/setup-kong.yml new file mode 100644 index 0000000..09d6d2b --- /dev/null +++ b/ansible/tasks/setup-kong.yml @@ -0,0 +1,62 @@ +- name: Kong - system user + user: name=kong + +# Kong installation steps from http://archive.vn/3HRQx +- name: Kong - system dependencies + apt: + pkg: + - openssl + - libpcre3 + - procps + - perl + +- name: Kong - download deb package + get_url: + url: "https://download.konghq.com/gateway-2.x-ubuntu-{{ kong_release_target }}/pool/all/k/kong/{{ kong_deb }}" + dest: /tmp/kong.deb + checksum: "{{ kong_deb_checksum }}" + +- name: Kong - deb installation + apt: deb=file:///tmp/kong.deb + +- name: Kong - ensure it is NOT autoremoved + shell: | + set -e + apt-mark manual kong zlib1g* + +- name: Kong - configuration + template: + src: files/kong_config/kong.conf.j2 + dest: /etc/kong/kong.conf + +- name: Kong - hand over ownership of /usr/local/kong to user kong + file: + path: /usr/local/kong + recurse: yes + owner: kong + +# [warn] ulimit is currently set to "1024". 
For better performance set it to at least +# "4096" using "ulimit -n" +- name: Kong - bump up ulimit + pam_limits: + limit_item: nofile + limit_type: soft + domain: kong + value: "4096" + +- name: Kong - create env file + template: + src: files/kong_config/kong.env.j2 + dest: /etc/kong/kong.env + +- name: Kong - create service file + template: + src: files/kong_config/kong.service.j2 + dest: /etc/systemd/system/kong.service + +- name: Kong - disable service + systemd: + enabled: no + name: kong + state: stopped + daemon_reload: yes diff --git a/ansible/tasks/setup-migrations.yml b/ansible/tasks/setup-migrations.yml new file mode 100644 index 0000000..570f776 --- /dev/null +++ b/ansible/tasks/setup-migrations.yml @@ -0,0 +1,13 @@ +- name: Run migrate.sh script + shell: ./migrate.sh + register: retval + when: ebssurrogate_mode + args: + chdir: /tmp/migrations/db + failed_when: retval.rc != 0 + +- name: Create /root/MIGRATION-AMI file + file: + path: "/root/MIGRATION-AMI" + state: touch + when: ebssurrogate_mode diff --git a/ansible/tasks/setup-nginx.yml b/ansible/tasks/setup-nginx.yml new file mode 100644 index 0000000..22b9486 --- /dev/null +++ b/ansible/tasks/setup-nginx.yml @@ -0,0 +1,81 @@ +- name: nginx - system user + user: name=nginx + +# Kong installation steps from http://archive.vn/3HRQx +- name: nginx - system dependencies + apt: + pkg: + - openssl + - libpcre3-dev + - libssl-dev + - zlib1g-dev + +- name: nginx - download source + get_url: + url: "https://nginx.org/download/nginx-{{ nginx_release }}.tar.gz" + dest: /tmp/nginx-{{ nginx_release }}.tar.gz + checksum: "{{ nginx_release_checksum }}" + +- name: nginx - unpack archive + unarchive: + remote_src: yes + src: /tmp/nginx-{{ nginx_release }}.tar.gz + dest: /tmp + +- name: nginx - configure + shell: + chdir: /tmp/nginx-{{ nginx_release }} + cmd: | + set -e + + ./configure \ + --prefix=/usr/local/nginx \ + --conf-path=/etc/nginx/nginx.conf \ + --with-http_ssl_module \ + --with-http_realip_module \ + 
--with-threads + become: yes + +- name: nginx - build + make: + chdir: /tmp/nginx-{{ nginx_release }} + jobs: "{{ parallel_jobs | default(omit) }}" + become: yes + +- name: nginx - install + make: + chdir: /tmp/nginx-{{ nginx_release }} + target: install + become: yes + +- name: nginx - hand over ownership of /usr/local/nginx to user nginx + file: + path: /usr/local/nginx + recurse: yes + owner: nginx + +- name: nginx - hand over ownership of /etc/nginx to user nginx + file: + path: /etc/nginx + recurse: yes + owner: nginx + +# [warn] ulimit is currently set to "1024". For better performance set it to at least +# "4096" using "ulimit -n" +- name: nginx - bump up ulimit + pam_limits: + limit_item: nofile + limit_type: soft + domain: nginx + value: "4096" + +- name: nginx - create service file + template: + src: files/nginx.service.j2 + dest: /etc/systemd/system/nginx.service + +# Keep it dormant for the timebeing + +# - name: nginx - reload systemd +# systemd: +# daemon_reload: yes diff --git a/ansible/tasks/setup-pgbouncer.yml b/ansible/tasks/setup-pgbouncer.yml index 7400b19..5fa7608 100644 --- a/ansible/tasks/setup-pgbouncer.yml +++ b/ansible/tasks/setup-pgbouncer.yml @@ -2,9 +2,11 @@ - name: PgBouncer - download & install dependencies apt: pkg: + - build-essential - libssl-dev - pkg-config - libevent-dev + - libsystemd-dev update_cache: yes cache_valid_time: 3600 @@ -13,6 +15,7 @@ url: "https://www.pgbouncer.org/downloads/files/{{ pgbouncer_release }}/pgbouncer-{{ pgbouncer_release }}.tar.gz" dest: /tmp/pgbouncer-{{ pgbouncer_release }}.tar.gz checksum: "{{ pgbouncer_release_checksum }}" + timeout: 60 - name: PgBouncer - unpack archive unarchive: @@ -38,24 +41,56 @@ target: install become: yes -# Create /etc/postgresql directory and make sure postgres group owns it +- name: Create pgbouncer user + user: + name: pgbouncer + shell: /bin/false + comment: PgBouncer user + groups: postgres,ssl-cert + - name: PgBouncer - create a directory if it does not exist file: 
path: /etc/pgbouncer state: directory - group: postgres + owner: pgbouncer + group: pgbouncer + mode: '0700' + +- name: PgBouncer - create a directory if it does not exist + file: + state: directory + owner: pgbouncer + group: pgbouncer + path: '{{ item }}' + mode: '0775' + with_items: + - '/etc/pgbouncer-custom' + +- name: create placeholder config files + file: + path: '/etc/pgbouncer-custom/{{ item }}' + state: touch + owner: pgbouncer + group: pgbouncer + mode: 0664 + with_items: + - 'generated-optimizations.ini' + - 'custom-overrides.ini' + - 'ssl-config.ini' - name: PgBouncer - adjust pgbouncer.ini copy: src: files/pgbouncer_config/pgbouncer.ini.j2 dest: /etc/pgbouncer/pgbouncer.ini + owner: pgbouncer + mode: '0700' - name: PgBouncer - create a directory if it does not exist file: path: /etc/pgbouncer/userlist.txt state: touch - group: postgres - owner: postgres + owner: pgbouncer + mode: '0700' - name: import /etc/tmpfiles.d/pgbouncer.conf template: @@ -71,20 +106,21 @@ insertafter: '# Default:' line: "{{ item }}" with_items: - - "host all pgbouncer 127.0.0.1/32 md5" - - "# Allow connection by pgbouncer user" + - "host all pgbouncer 0.0.0.0/0 reject" + - "host all pgbouncer 127.0.0.1/32 scram-sha-256" + - "# Connection configuration for pgbouncer user" -# Run PgBouncer SQL script -- name: Transfer init SQL files +- name: PgBouncer - By default allow ssl connections. 
+ become: yes copy: - src: files/pgbouncer_config/pgbouncer_auth_schema.sql - dest: /tmp/00-schema.sql + dest: /etc/pgbouncer-custom/ssl-config.ini + content: | + client_tls_sslmode = allow -- name: Execute init SQL files - become: yes - become_user: postgres - shell: - cmd: /usr/lib/postgresql/bin/psql -f /tmp/00-schema.sql +- name: Grant pg_hba and pgbouncer grp perm for adminapi updates + shell: | + chmod g+w /etc/postgresql/pg_hba.conf + chmod g+w /etc/pgbouncer-custom/ssl-config.ini # Add fail2ban filter - name: import jail.d/pgbouncer.conf @@ -108,4 +144,4 @@ - name: PgBouncer - reload systemd systemd: - daemon_reload: yes \ No newline at end of file + daemon_reload: yes diff --git a/ansible/tasks/setup-postgres.yml b/ansible/tasks/setup-postgres.yml index 679c3ce..fe70f38 100644 --- a/ansible/tasks/setup-postgres.yml +++ b/ansible/tasks/setup-postgres.yml @@ -1,9 +1,9 @@ # Downloading dependencies - name: Postgres dependencies - become: yes apt: pkg: - build-essential + - pkg-config - libreadline-dev - zlib1g-dev - flex @@ -12,21 +12,20 @@ - libxslt-dev - libssl-dev - libsystemd-dev - - libpq-dev - libxml2-utils - uuid-dev - xsltproc - ssl-cert + - liblz4-dev + - libicu-dev - name: Download LLVM & Clang - become: yes apt: pkg: - llvm-11-dev - clang-11 - name: Download GCC 10 - become: yes apt: pkg: - gcc-10 @@ -35,6 +34,14 @@ - name: Switch to GCC 10 shell: cmd: update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 --slave /usr/bin/g++ g++ /usr/bin/g++-10 --slave /usr/bin/gcov gcov /usr/bin/gcov-10 + +# Setup permissions +- name: Update permissions for /var/tmp directory + file: + path: /var/tmp/ + owner: root + group: root + mode: '1777' become: yes # Building Postgres from source @@ -43,41 +50,44 @@ url: https://ftp.postgresql.org/pub/source/v{{ postgresql_release }}/postgresql-{{ postgresql_release }}.tar.gz dest: /tmp checksum: "{{ postgresql_release_checksum }}" + timeout: 60 - name: Postgres - unpack archive unarchive: remote_src: yes src: 
/tmp/postgresql-{{ postgresql_release }}.tar.gz dest: /tmp - become: yes - name: Setting CFLAGS (arm) set_fact: - cflags: "-moutline-atomics -mtune=neoverse-n1 -fsigned-char" - when: platform == "arm64" + cflags: "-moutline-atomics -mtune=native -march=native -mcpu=native -fsigned-char" + when: platform == "arm64" - name: Setting CFLAGS (x86) set_fact: cflags: "-fsigned-char" - when: platform == "amd64" + when: platform == "amd64" - name: Postgres - configure shell: - cmd: CFLAGS='{{ cflags }}' LLVM_CONFIG=/usr/bin/llvm-config-11 CLANG=/usr/bin/clang-11 ./configure --with-llvm --with-openssl --with-systemd --with-uuid=e2fs --exec-prefix=/usr/lib/postgresql --datarootdir=/var/lib/postgresql + cmd: CFLAGS='{{ cflags }}' LLVM_CONFIG=/usr/bin/llvm-config-11 CLANG=/usr/bin/clang-11 ./configure --with-llvm --with-openssl --with-systemd --with-uuid=e2fs --with-libxml --with-icu --with-lz4 --exec-prefix=/usr/lib/postgresql --datarootdir=/var/lib/postgresql chdir: /tmp/postgresql-{{ postgresql_release }} - become: yes - name: Postgres - build make: - target: world + target: world-bin chdir: /tmp/postgresql-{{ postgresql_release }} - become: yes + jobs: "{{ parallel_jobs | default(omit) }}" - name: Postgres - install make: - target: install-world + target: install-world-bin chdir: /tmp/postgresql-{{ postgresql_release }} - become: yes + +- name: Create postgres group + group: + name: postgres + state: present # Create postgres user - name: Create postgres user @@ -85,23 +95,44 @@ name: postgres shell: /bin/false comment: Postgres user - groups: ssl-cert + group: postgres + groups: postgres,ssl-cert -- name: Recursively change ownership of a directory +- name: Create relevant directories file: - path: /var/lib/postgresql - state: directory + path: '{{ item }}' recurse: yes + state: directory owner: postgres group: postgres + with_items: + - '/etc/postgresql' + - '/etc/postgresql-custom' + - '/var/log/postgresql' + - '/var/lib/postgresql' -# Create /etc/postgresql directory 
and make sure postgres group owns it -- name: Create a directory if it does not exist +- name: Allow adminapi to write custom config file: - path: /etc/postgresql + path: '{{ item }}' + recurse: yes state: directory owner: postgres group: postgres + mode: 0775 + with_items: + - '/etc/postgresql' + - '/etc/postgresql-custom' + +- name: create placeholder config files + file: + path: '/etc/postgresql-custom/{{ item }}' + state: touch + owner: postgres + group: postgres + mode: 0664 + with_items: + - 'generated-optimizations.conf' + - 'custom-overrides.conf' # Move Postgres configuration files into /etc/postgresql # Add postgresql.conf @@ -131,7 +162,7 @@ register: postgresql_bin - name: Create symbolic links for Postgres binaries to /usr/bin/ - become: True + become: yes file: src: "{{ item.path }}" path: "/usr/bin/{{ item.path | basename }}" @@ -140,22 +171,43 @@ with_items: "{{ postgresql_bin.files }}" # init DB +- name: Create directory on data volume + file: + path: '{{ item }}' + recurse: yes + state: directory + owner: postgres + group: postgres + mode: 0750 + with_items: + - "/data/pgdata" + +- name: Link database data_dir to data volume directory + file: + src: "/data/pgdata" + path: "/var/lib/postgresql/data" + state: link + force: yes + - name: Initialize the database become: yes become_user: postgres shell: - cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data initdb + cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data initdb -o "--allow-group-access" vars: ansible_command_timeout: 60 # Circumvents the following error: # "Timeout (12s) waiting for privilege escalation prompt" -# Add systemd file for Postgres -- name: import postgresql.service +- name: copy PG systemd unit template: src: files/postgresql_config/postgresql.service.j2 dest: /etc/systemd/system/postgresql.service - become: yes + +- name: copy optimizations systemd unit + template: + src: files/database-optimizations.service.j2 + dest: 
/etc/systemd/system/database-optimizations.service # Reload - name: System - systemd reload diff --git a/ansible/tasks/setup-postgrest.yml b/ansible/tasks/setup-postgrest.yml index 16a10e8..57b76e1 100644 --- a/ansible/tasks/setup-postgrest.yml +++ b/ansible/tasks/setup-postgrest.yml @@ -7,19 +7,28 @@ apt: pkg: - libpq5 + - libnuma-dev + +- name: postgis - ensure dependencies do not get autoremoved + shell: | + set -e + apt-mark manual libnuma* + apt-mark auto libnuma*-dev - name: PostgREST - download ubuntu binary archive (arm) get_url: - url: "https://github.com/PostgREST/postgrest/releases/download/nightly/postgrest-nightly-{{ postgrest_arm_release }}.tar.xz" + url: "https://github.com/PostgREST/postgrest/releases/download/v{{ postgrest_release }}/postgrest-v{{ postgrest_release }}-ubuntu-aarch64.tar.xz" dest: /tmp/postgrest.tar.xz checksum: "{{ postgrest_arm_release_checksum }}" + timeout: 60 when: platform == "arm64" - name: PostgREST - download ubuntu binary archive (x86) get_url: - url: "https://github.com/PostgREST/postgrest/releases/download/nightly/postgrest-nightly-{{ postgrest_x86_release }}.tar.xz" + url: "https://github.com/PostgREST/postgrest/releases/download/v{{ postgrest_release }}/postgrest-v{{ postgrest_release }}-linux-static-x64.tar.xz" dest: /tmp/postgrest.tar.xz checksum: "{{ postgrest_x86_release_checksum }}" + timeout: 60 when: platform == "amd64" - name: PostgREST - unpack archive in /opt @@ -28,12 +37,48 @@ src: /tmp/postgrest.tar.xz dest: /opt owner: postgrest + mode: '0755' + +- name: create directories + file: + state: directory + owner: postgrest + group: postgrest + mode: '0775' + path: /etc/postgrest + +- name: empty files + file: + state: touch + owner: postgrest + group: postgrest + path: /etc/postgrest/{{ item }} + with_items: + - base.conf + - generated.conf + +- name: create conf merging script + copy: + content: | + #! 
/usr/bin/env bash + set -euo pipefail + set -x + + cd "$(dirname "$0")" + cat $@ > merged.conf + dest: /etc/postgrest/merge.sh + mode: 0750 + owner: postgrest + group: postgrest -- name: PostgREST - create service file +- name: PostgREST - create service files template: - src: files/postgrest.service.j2 - dest: /etc/systemd/system/postgrest.service + src: files/{{ item }}.j2 + dest: /etc/systemd/system/{{ item }} + with_items: + - postgrest.service + - postgrest-optimizations.service - name: PostgREST - reload systemd systemd: - daemon_reload: yes \ No newline at end of file + daemon_reload: yes diff --git a/ansible/tasks/setup-system.yml b/ansible/tasks/setup-system.yml index 3a3db1a..38fc7a9 100644 --- a/ansible/tasks/setup-system.yml +++ b/ansible/tasks/setup-system.yml @@ -1,19 +1,9 @@ -# DigitalOcean's ubuntu droplet isn't up to date with installed packages, and on -# a fresh install I see 71 security upgrades available. -- name: Terminate any ongoing updates - become: yes - shell: killall apt apt-get - ignore_errors: yes - tags: - - update - - update-only - - name: System - apt update and apt upgrade apt: update_cache=yes upgrade=yes + when: not ebssurrogate_mode # SEE http://archive.vn/DKJjs#parameter-upgrade - name: Install required security updates - become: yes apt: pkg: - tzdata @@ -22,21 +12,32 @@ # SEE https://github.com/georchestra/ansible/issues/55#issuecomment-588313638 # Without this, a similar error is faced - name: Install Ansible dependencies - become: yes apt: pkg: - acl - name: Install security tools - become: yes apt: pkg: - - ufw + - nftables - fail2ban - - unattended-upgrades update_cache: yes cache_valid_time: 3600 +- name: Use nftables backend + shell: | + update-alternatives --set iptables /usr/sbin/iptables-nft + update-alternatives --set ip6tables /usr/sbin/ip6tables-nft + update-alternatives --set arptables /usr/sbin/arptables-nft + update-alternatives --set ebtables /usr/sbin/ebtables-nft + systemctl restart ufw + +- name: Install 
other useful tools + apt: + pkg: + - sysstat + update_cache: yes + - name: Adjust APT update intervals copy: src: files/apt_periodic @@ -59,6 +60,45 @@ template: src: files/services.slice.j2 dest: /etc/systemd/system/services.slice + when: not ebssurrogate_mode - name: System - systemd reload - systemd: daemon_reload=yes \ No newline at end of file + systemd: daemon_reload=yes + +- name: Configure journald + copy: + src: files/journald.conf + dest: /etc/systemd/journald.conf + +- name: reload systemd-journald + systemd: + name: systemd-journald + state: restarted + +- name: Configure logind + copy: + src: files/logind.conf + dest: /etc/systemd/logind.conf + +- name: reload systemd-logind + systemd: + name: systemd-logind + state: restarted + +- name: enable timestamps for shell history + copy: + content: | + export HISTTIMEFORMAT='%d/%m/%y %T ' + dest: /etc/profile.d/09-history-timestamps.sh + mode: 0644 + owner: root + group: root + +- name: set hosts file + copy: + content: | + 127.0.0.1 localhost + dest: /etc/hosts + mode: 0644 + owner: root + group: root diff --git a/ansible/tasks/setup-tealbase-internal.yml b/ansible/tasks/setup-tealbase-internal.yml index 52b6958..c9113e2 100644 --- a/ansible/tasks/setup-tealbase-internal.yml +++ b/ansible/tasks/setup-tealbase-internal.yml @@ -9,12 +9,14 @@ get_url: url: "https://awscli.amazonaws.com/awscli-exe-linux-aarch64-{{ aws_cli_release }}.zip" dest: "/tmp/awscliv2.zip" + timeout: 60 when: platform == "arm64" - name: AWS CLI (x86) get_url: url: "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-{{ aws_cli_release }}.zip" dest: "/tmp/awscliv2.zip" + timeout: 60 when: platform == "amd64" - name: AWS CLI - expand @@ -24,17 +26,71 @@ dest: "/tmp" - name: AWS CLI - install - shell: "/tmp/aws/install" + shell: "/tmp/aws/install --update" become: true +- name: install Vector for logging + become: yes + apt: + deb: "{{ vector_x86_deb }}" + when: platform == "amd64" + +- name: install Vector for logging + become: yes + apt: 
+ deb: "{{ vector_arm_deb }}" + when: platform == "arm64" + +- name: add Vector to postgres group + become: yes + shell: + cmd: | + usermod -a -G postgres vector + +- name: create service files for Vector + template: + src: files/vector.service.j2 + dest: /etc/systemd/system/vector.service + +- name: vector - reload systemd + systemd: + daemon_reload: yes + +- name: Create checkpoints dir + become: yes + file: + path: /var/lib/vector + state: directory + owner: vector + +- name: Include file for generated optimizations in postgresql.conf + become: yes + replace: + path: /etc/postgresql/postgresql.conf + regexp: "#include = '/etc/postgresql-custom/generated-optimizations.conf'" + replace: "include = '/etc/postgresql-custom/generated-optimizations.conf'" + +- name: Include file for custom overrides in postgresql.conf + become: yes + replace: + path: /etc/postgresql/postgresql.conf + regexp: "#include = '/etc/postgresql-custom/custom-overrides.conf'" + replace: "include = '/etc/postgresql-custom/custom-overrides.conf'" + - name: Install Postgres exporter import_tasks: internal/postgres-exporter.yml -- name: Install node exporter - import_tasks: internal/node-exporter.yml - - name: Install supautils import_tasks: internal/supautils.yml -- name: Boot time optimizations - import_tasks: internal/optimizations.yml +- name: Install admin-mgr + import_tasks: internal/admin-mgr.yml + +- name: Install adminapi + import_tasks: internal/admin-api.yml + +- name: Init nftabless + import_tasks: internal/setup-nftables.yml + +- name: Install pg_egress_collect + import_tasks: internal/pg_egress_collect.yml diff --git a/ansible/tasks/setup-wal-g.yml b/ansible/tasks/setup-wal-g.yml index 3f2bc5a..439bb28 100644 --- a/ansible/tasks/setup-wal-g.yml +++ b/ansible/tasks/setup-wal-g.yml @@ -1,9 +1,11 @@ # Downloading dependencies -- name: Postgres dependencies +- name: wal-g dependencies become: yes apt: pkg: + - libbrotli-dev - liblzo2-dev + - libsodium-dev - cmake # install go dependency 
for WAL-G @@ -11,6 +13,8 @@ get_url: url: "https://golang.org/dl/go{{ golang_version }}.linux-{{ platform }}.tar.gz" dest: /tmp + checksum: "{{ golang_version_checksum[platform] }}" + timeout: 60 - name: unpack go archive unarchive: @@ -19,27 +23,108 @@ dest: /usr/local # Download WAL-G -- name: download wal-g - shell: - cmd: go get github.com/wal-g/wal-g; - environment: - PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin" +- name: wal-g - download latest version + git: + repo: https://github.com/wal-g/wal-g.git + dest: /tmp/wal-g + version: "{{ wal_g_release }}" + become: yes + +- name: wal-g - pg_clean + make: + chdir: /tmp/wal-g + target: pg_clean + params: + GOBIN: "/usr/local/bin" + PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin" + USE_LIBSODIUM: true + become: yes ignore_errors: yes - # ignore error https://github.com/wal-g/wal-g/issues/343#issuecomment-514544288 -# Install WAL-G -- name: install wal-g +- name: wal-g - deps + make: + chdir: /tmp/wal-g + target: deps + params: + GOBIN: "/usr/local/bin" + PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin" + USE_LIBSODIUM: true + become: yes + ignore_errors: yes + +- name: wal-g - build and install + make: + chdir: /tmp/wal-g + target: pg_install + jobs: "{{ parallel_jobs | default(omit) }}" + params: + GOBIN: "/usr/local/bin" + PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin" + USE_LIBSODIUM: true + become: yes + +- name: Create wal-g group + group: + name: wal-g + state: present + +- name: Create wal-g user + user: + name: wal-g + shell: /bin/false + comment: WAL-G user + group: wal-g + groups: wal-g, postgres + +- name: Create a config directory owned by wal-g + file: + path: /etc/wal-g + state: directory + owner: wal-g + group: wal-g + mode: '0770' + +- name: Create /etc/wal-g/config.json + file: + path: /etc/wal-g/config.json + state: touch + owner: wal-g + group: wal-g + mode: '0664' + +- name: Move custom wal-g.conf file to /etc/postgresql-custom/wal-g.conf + template: + src: 
"files/postgresql_config/custom_walg.conf.j2" + dest: /etc/postgresql-custom/wal-g.conf + mode: 0664 + owner: postgres + group: postgres + +- name: Add script to be run for restore_command + template: + src: "files/walg_helper_scripts/wal_fetch.sh" + dest: /home/postgres/wal_fetch.sh + mode: 0500 + owner: postgres + group: postgres + +- name: Add helper script for wal_fetch.sh + template: + src: "files/walg_helper_scripts/wal_change_ownership.sh" + dest: /root/wal_change_ownership.sh + mode: 0700 + owner: root + +- name: Include /etc/postgresql-custom/wal-g.conf in postgresql.conf become: yes - shell: - cmd: make install && make deps && make pg_install - chdir: "{{ ansible_env.HOME }}/go/src/github.com/wal-g/wal-g" - environment: - GOBIN: "/usr/local/bin" - PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin" + replace: + path: /etc/postgresql/postgresql.conf + regexp: "#include = '/etc/postgresql-custom/wal-g.conf'" + replace: "include = '/etc/postgresql-custom/wal-g.conf'" # Clean up Go - name: Uninstall Go become: yes file: path: /usr/local/go - state: absent \ No newline at end of file + state: absent diff --git a/ansible/tasks/test-image.yml b/ansible/tasks/test-image.yml new file mode 100644 index 0000000..36c0046 --- /dev/null +++ b/ansible/tasks/test-image.yml @@ -0,0 +1,88 @@ +- name: install pg_prove + apt: + pkg: + - libtap-parser-sourcehandler-pgtap-perl + +- name: Temporarily disable PG Sodium references in config + become: yes + become_user: postgres + shell: + cmd: sed -i.bak -e "s/pg_net,\ pgsodium,\ timescaledb/pg_net,\ timescaledb/g" -e "s/pgsodium.getkey_script=/#pgsodium.getkey_script=/g" /etc/postgresql/postgresql.conf + when: ebssurrogate_mode + +- name: Start Postgres Database to load all extensions. 
+ become: yes + become_user: postgres + shell: + cmd: /usr/bin/pg_ctl -D /var/lib/postgresql/data start "-o -c config_file=/etc/postgresql/postgresql.conf" + when: ebssurrogate_mode + +- name: Enable pgTAP extension + shell: /usr/lib/postgresql/bin/psql -U postgres -h localhost -d postgres -c "CREATE extension pgtap"; + when: ebssurrogate_mode + +- name: Create function for testing extensions + shell: /usr/lib/postgresql/bin/psql -U postgres -h localhost -d postgres -f /tmp/unit-tests/test-extensions.sql; + when: ebssurrogate_mode + +- name: Extension Installation tests + shell: /usr/bin/pg_prove -U tealbase_admin -h localhost -d postgres -v /tmp/unit-tests/verify-extensions.sql + register: retval + when: ebssurrogate_mode + +- name: Run Unit tests (with filename unit-test-*) on Postgres Database + shell: /usr/bin/pg_prove -U postgres -h localhost -d postgres -v /tmp/unit-tests/unit-test-*.sql + register: retval + failed_when: retval.rc != 0 + when: ebssurrogate_mode + +- name: Run migrations tests + shell: /usr/bin/pg_prove -U postgres -h localhost -d postgres -v tests/test.sql + register: retval + failed_when: retval.rc != 0 + when: ebssurrogate_mode + args: + chdir: /tmp/migrations + +- name: Re-enable PG Sodium references in config + become: yes + become_user: postgres + shell: + cmd: mv /etc/postgresql/postgresql.conf.bak /etc/postgresql/postgresql.conf + when: ebssurrogate_mode + +- name: Drop pgTap extension + shell: /usr/lib/postgresql/bin/psql -U postgres -h localhost -d postgres -c "DROP extension if exists pgtap"; + when: ebssurrogate_mode + +- name: Drop extension test function + shell: /usr/lib/postgresql/bin/psql -U postgres -h localhost -d postgres -c "DROP FUNCTION IF EXISTS install_available_extensions_and_test"; + when: ebssurrogate_mode + +- name: Reset db stats + shell: /usr/lib/postgresql/bin/psql --no-password --no-psqlrc -d postgres -h localhost -U tealbase_admin -c 'SELECT pg_stat_statements_reset(); SELECT pg_stat_reset();' + when: 
ebssurrogate_mode + +- name: remove pg_prove + apt: + pkg: + - libtap-parser-sourcehandler-pgtap-perl + state: absent + autoremove: yes + +- name: Get pg_config file details + ansible.builtin.stat: + path: /usr/bin/pg_config + register: st + +- name: Ensure pg_config points to our install of PG + ansible.builtin.fail: + msg: "Incorrect symlink for pg_config" + when: st.stat.lnk_target != '/usr/lib/postgresql/bin/pg_config' + +- name: Stop Postgres Database + become: yes + become_user: postgres + shell: + cmd: /usr/bin/pg_ctl -D /var/lib/postgresql/data stop + when: ebssurrogate_mode diff --git a/ansible/vars.yml b/ansible/vars.yml index a6f823a..edb7f2a 100644 --- a/ansible/vars.yml +++ b/ansible/vars.yml @@ -1,24 +1,37 @@ tealbase_internal: true +ebssurrogate_mode: true +async_mode: true -postgresql_major: "13" -postgresql_release: "13.3" -postgresql_release_checksum: sha1:aeb645988b1ec9ffbb2fc0a49d9055d3ab17affa +postgresql_major: "15" +postgresql_release: "15.1" +postgresql_release_checksum: sha256:ea2cf059a85882654b989acd07edc121833164a30340faee0d3615cf7058e66c # Non Postgres Extensions -pgbouncer_release: "1.15.0" -pgbouncer_release_checksum: sha1:ea7e9dbcab178f439a0fa402a78a7f1e4f43e6d4 +pgbouncer_release: "1.17.0" +pgbouncer_release_checksum: sha256:657309b7bc5c7a85cbf70a9a441b535f7824123081eabb7ba86d00349a256e23 -postgrest_arm_release: 2021-03-05-19-03-d3a8b5f-ubuntu-aarch64 -postgrest_arm_release_checksum: sha1:b9e9b06ead7230b75033e8ae17912714bf463a33 +postgrest_release: "10.1.1" +postgrest_arm_release_checksum: sha1:9653c57181839a81a7747f57f4292635e24ff7f0 +postgrest_x86_release_checksum: sha1:fecc55387caee54543dc020061fe1cd7232407e0 -postgrest_x86_release: 2021-03-05-19-03-d3a8b5f-linux-x64-static -postgrest_x86_release_checksum: sha1:4b4adde15f0d41d65a9136d1f8c0d9cd6fe79326 +gotrue_release: v2.40.1 +gotrue_release_checksum: sha1:aa650eae81bca18ccb575a2c28bff90480a91eae aws_cli_release: "2.2.7" -golang_version: "1.15.4" -wal_g_release: "0.2.15" 
-wal_g_release_checksum: sha1:e82d405121e0ccc322a323b9824e60c102b14004 +golang_version: "1.19.3" +golang_version_checksum: + arm64: sha256:99de2fe112a52ab748fb175edea64b313a0c8d51d6157dba683a6be163fd5eab + amd64: sha256:74b9640724fd4e6bb0ed2a1bc44ae813a03f1e72a4c76253e2d5c015494430ba + +kong_release_target: focal # if it works, it works +kong_deb: kong_2.8.1_arm64.deb +kong_deb_checksum: sha1:2086f6ccf8454fe64435252fea4d29d736d7ec61 + +nginx_release: 1.22.0 +nginx_release_checksum: sha1:419efb77b80f165666e2ee406ad8ae9b845aba93 + +wal_g_release: "v2.0.1" sfcgal_release: "1.3.10" sfcgal_release_checksum: sha1:f4add34a00afb0b5f594685fc646565a2bda259b @@ -28,44 +41,83 @@ postgres_exporter_release_checksum: arm64: sha256:d869c16791481dc8475487ad84ae4371a63f9b399898ca1c666eead5cccf7182 amd64: sha256:ff541bd3ee19c0ae003d71424a75edfcc8695e828dd20d5b4555ce433c89d60b -node_exporter_release: 1.1.2 -node_exporter_release_checksum: - arm64: sha256:eb5e7d16f18bb3272d0d832986fc8ac6cb0b6c42d487c94e15dabb10feae8e04 - amd64: sha256:8c1f6a317457a658e0ae68ad710f6b4098db2cad10204649b51e3c043aa3e70d +adminapi_release: 0.34.0 +adminmgr_release: 0.3.0 # Postgres Extensions -postgis_release: "3.1.2" -postgis_release_checksum: sha1:622f52f3bf338c8e51ea6d73d30d6a5d3140c517 +postgis_release: "3.3.2" +postgis_release_checksum: sha256:9a2a219da005a1730a39d1959a1c7cec619b1efb009b65be80ffc25bad299068 + +pgrouting_release: "3.4.1" +pgrouting_release_checksum: sha256:a4e034efee8cf67582b67033d9c3ff714a09d8f5425339624879df50aff3f642 + +pgtap_release: "1.2.0" +pgtap_release_checksum: sha256:9c7c3de67ea41638e14f06da5da57bac6f5bd03fea05c165a0ec862205a5c052 + +pg_cron_release: "1.4.2" +pg_cron_release_checksum: sha256:3652722ea98d94d8e27bf5e708dd7359f55a818a43550d046c5064c98876f1a8 + +pgaudit_release: "1.7.0" +pgaudit_release_checksum: sha256:8f4a73e451c88c567e516e6cba7dc1e23bc91686bb6f1f77f8f3126d428a8bd8 + +pgsql_http_release: "1.5.0" +pgsql_http_release_checksum: 
sha256:43efc9e82afcd110f205b86b8d28d1355d39b6b134161e9661a33a1346818f5d + +plpgsql_check_release: "2.2.5" +plpgsql_check_release_checksum: sha256:6c3a3c5faf3f9689425c6db8a6b20bf4cd5e7144a055e29538eae980c7232573 + +pg_safeupdate_release: "1.4" +pg_safeupdate_release_checksum: sha1:942dacd0ebce6123944212ffb3d6b5a0c09174f9 + +timescaledb_release: "2.9.1" + +wal2json_commit_sha: 770872b890f9e122290f178e7c7bfa19ec7afa94 + +supautils_release: "1.7.0" +supautils_release_checksum: sha256:e2353040262bd7a1720099f4a03ec485b05c74a202956aa1361e422f8765c6b3 + +pljava_release: "a5bfeca83cea2c4b844758a9c76db337392892e3" +pljava_release_checksum: sha1:550bea791c404c9d62050fd9c330e162bab20763 + +plv8_commit_version: bcddd92f71530e117f2f98b92d206dafe824f73a + +pg_plan_filter_commit_version: 5081a7b5cb890876e67d8e7486b6a64c38c9a492 + +pg_net_release: "0.7" +pg_net_release_checksum: sha1:52cffe467858182e12f158870b3339ed785a1320 + +rum_release: "1.3.13" +rum_release_checksum: sha256:6ab370532c965568df6210bd844ac6ba649f53055e48243525b0b7e5c4d69a7d + +vector_x86_deb: "https://packages.timber.io/vector/0.22.3/vector_0.22.3-1_amd64.deb" +vector_arm_deb: "https://packages.timber.io/vector/0.22.3/vector_0.22.3-1_arm64.deb" -pgrouting_release: "3.2.0" -pgrouting_release_checksum: sha1:d902d449ebc96b6cdcb2fac09434d0098467cda5 +libsodium_release: "1.0.18" +libsodium_release_checksum: sha1:795b73e3f92a362fabee238a71735579bf46bb97 -pgtap_release: "1.1.0" -pgtap_release_checksum: sha1:cca57708e723de18735a723b774577dc52f6f31e +pgsodium_release: "3.1.5" +pgsodium_release_checksum: sha256:bec847388a5db2a60ea9d991962ce27954d91b4c41cbcc7bd8e34472c69114d1 -pg_cron_release: "1.3.1" -pg_cron_release_checksum: sha1:679b6ff54e0b1070a5fd713c5d25c3378f371fac +pg_graphql_release: "v1.1.0" -pgaudit_release: "1.5.0" -pgaudit_release_checksum: sha1:8429125e8f70fcaa2c2f5a0e22b910a4afb821a4 +pg_jsonschema_release: "v0.1.4" -pgsql_http_release: "1.3.1" -pgsql_http_release_checksum: 
sha1:816a3fff53e05301b176cf0696799fc5a00f54e8 +pg_stat_monitor_release: "1.1.1" -plpgsql_check_release: "1.16.0" -plpgsql_check_release_checksum: sha1:626553fc2746fe10aa5a776a1229baf2af3365fc +vault_release: "0.2.8" +vault_release_checksum: sha256:842cdee6d5b586b1baacccfaa08b45d56566987af87952a5fe5ee80b24400754 -pg_safeupdate_release: "1.3" -pg_safeupdate_release_checksum: sha1:34a0353611bfd63f7ea760aac2afcb518bf3ba7c +groonga_release: "12.0.8" +groonga_release_checksum: sha1:32aee787efffc2a22760fde946fb6462286074e2 -timescaledb_release: "2.3.0" +pgroonga_release: "2.4.0" +pgroonga_release_checksum: sha1:235d67e8487b318e656d4d3016a49c14fae0512d -wal2json_release: "2_3" -wal2json_release_checksum: sha1:923f9bbcd0505a1f0b6eac1d371e4ff2d266a958 +wrappers_release: "v0.1.7" -supautils_release: "1.1.0" -supautils_release_checksum: sha1:326ac5c1933bd30d4a50da7568b27629a9ec544b +hypopg_commit_sha: 57d711bc4e37164c8edac81580a5f477e2a33d86 -pljava_release: "1_6_2" -pljava_release_checksum: sha1:9610b80cbd13d4d43bcdaa2928365dbfd1bf6e94 +pg_repack_release: "1.4.8" +pg_repack_release_checksum: sha1:74e54f43f6c062644e442224eacc2a2bc95a04ef +pgvector_release: "v0.4.0" diff --git a/common.vars.pkr.hcl b/common.vars.pkr.hcl new file mode 100644 index 0000000..055d46d --- /dev/null +++ b/common.vars.pkr.hcl @@ -0,0 +1 @@ +postgres-version = "15.1.0.33" diff --git a/development-arm.vars.pkr.hcl b/development-arm.vars.pkr.hcl new file mode 100644 index 0000000..6772bf6 --- /dev/null +++ b/development-arm.vars.pkr.hcl @@ -0,0 +1,7 @@ +arch = "arm64" +ami_regions = ["us-east-1"] +environment = "dev" +instance-type = "c6g.4xlarge" +region= "us-east-1" +ubuntu-2004 = "ami-0b49a4a6e8e22fa16" + diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 0000000..c572b9d --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,28 @@ +version: "3.8" + +services: + db: + image: tealbase/postgres:latest + build: . 
+ restart: "no" + healthcheck: + test: pg_isready -U postgres -h localhost + interval: 2s + timeout: 2s + retries: 10 + environment: + POSTGRES_HOST: /var/run/postgresql + POSTGRES_PASSWORD: password + + pg_prove: + image: horrendo/pg_prove + depends_on: + db: + condition: service_healthy + environment: + PGHOST: db + PGUSER: postgres + PGPASSWORD: password + volumes: + - ./migrations/tests:/tests + command: pg_prove /tests/test.sql diff --git a/docs/.DS_Store b/docs/.DS_Store deleted file mode 100644 index 645d589fee626a29f181880604c8e0171669ea1d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHKJ5Iwu5S<|@EYYN(pxgt{=$&)oF6!p{LFP<*i2GWuNGH}YkES3xF|98+o=Ks?ouE+oxI4cHp-mSY8 zJ}G-^^>Nl~3-kf{CCK%37;nWuZ^c+xE581fSL_+_HgOE}a`?R*$R7dYg+d1YgMm*_ Cw/ccache + docker push /ccache + ``` + + For ARM64 builds + + ``` + docker pull arm64v8/ubuntu + docker tag arm64v8/ubuntu:latest /ccache-arm64v8 + docker push /ccache-arm64v8 + ``` + + Now set DOCKER_IMAGE="/ccache" or DOCKER_IMAGE="/ccache-arm64v8" based on your AMI architecture. + + +## EBS-Surrogate File layout + +``` +$ tree ebssurrogate/ +ebssurrogate/ +├── files +│   ├── 70-ec2-nvme-devices.rules +│   ├── cloud.cfg # cloud.cfg for cloud-init +│   ├── ebsnvme-id +│   ├── sources-arm64.cfg # apt/sources.list for arm64 +│   ├── sources.cfg # apt/sources.list for amd64 +│   ├── vector.timer # systemd-timer to delay vectore execution +│   └── zfs-growpart-root.cfg +└── scripts + ├── chroot-bootstrap.sh # Installs grub and other required packages for build. Configures target AMI settings + └── surrogate-bootstrap.sh # Formats disk and setups chroot environment. Runs Ansible tasks within chrooted environment. 
+``` diff --git a/ebssurrogate/files/70-ec2-nvme-devices.rules b/ebssurrogate/files/70-ec2-nvme-devices.rules new file mode 100644 index 0000000..62a5deb --- /dev/null +++ b/ebssurrogate/files/70-ec2-nvme-devices.rules @@ -0,0 +1,25 @@ +# Copyright (C) 2006-2016 Amazon.com, Inc. or its affiliates. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS +# OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the +# License. + +#nvme-ns-* devices +KERNEL=="nvme[0-9]*n[0-9]*", ENV{DEVTYPE}=="disk", ATTRS{serial}=="?*", ATTRS{model}=="?*", SYMLINK+="disk/by-id/nvme-$attr{model}_$attr{serial}-ns-%n", OPTIONS+="string_escape=replace" + +#nvme partitions +KERNEL=="nvme[0-9]*n[0-9]*p[0-9]*", ENV{DEVTYPE}=="partition", ATTRS{serial}=="?*", ATTRS{model}=="?*", IMPORT{program}="ec2nvme-nsid %k" +KERNEL=="nvme[0-9]*n[0-9]*p[0-9]*", ENV{DEVTYPE}=="partition", ATTRS{serial}=="?*", ATTRS{model}=="?*", ENV{_NS_ID}=="?*", SYMLINK+="disk/by-id/nvme-$attr{model}_$attr{serial}-ns-$env{_NS_ID}-part%n", OPTIONS+="string_escape=replace" + +# ebs nvme devices +KERNEL=="nvme[0-9]*n[0-9]*", ENV{DEVTYPE}=="disk", ATTRS{model}=="Amazon Elastic Block Store", PROGRAM="/sbin/ebsnvme-id -u /dev/%k", SYMLINK+="%c" +KERNEL=="nvme[0-9]*n[0-9]*p[0-9]*", ENV{DEVTYPE}=="partition", ATTRS{model}=="Amazon Elastic Block Store", PROGRAM="/sbin/ebsnvme-id -u /dev/%k", SYMLINK+="%c%n" diff --git a/ebssurrogate/files/apparmor_profiles/opt.gotrue.gotrue b/ebssurrogate/files/apparmor_profiles/opt.gotrue.gotrue new file mode 100644 index 0000000..7b9594a --- /dev/null +++ 
b/ebssurrogate/files/apparmor_profiles/opt.gotrue.gotrue @@ -0,0 +1,15 @@ +#include + +/opt/gotrue/gotrue { + #include + #include + #include + + /opt/gotrue/gotrue r, + /opt/gotrue/migrations/ r, + /etc/ssl/certs/java/* r, + /opt/gotrue/migrations/** rw, + /proc/sys/net/core/somaxconn r, + /sys/kernel/mm/transparent_hugepage/hpage_pmd_size r, + owner /etc/gotrue.env r, +} diff --git a/ebssurrogate/files/apparmor_profiles/opt.postgrest b/ebssurrogate/files/apparmor_profiles/opt.postgrest new file mode 100644 index 0000000..c738a65 --- /dev/null +++ b/ebssurrogate/files/apparmor_profiles/opt.postgrest @@ -0,0 +1,12 @@ +#include + +/opt/postgrest { + #include + #include + #include + + /etc/gss/mech.d/ r, + /sys/devices/system/node/ r, + /sys/devices/system/node/node0/meminfo r, + owner /etc/postgrest/merged.conf r, +} diff --git a/ebssurrogate/files/apparmor_profiles/usr.bin.vector b/ebssurrogate/files/apparmor_profiles/usr.bin.vector new file mode 100644 index 0000000..b8a7eb2 --- /dev/null +++ b/ebssurrogate/files/apparmor_profiles/usr.bin.vector @@ -0,0 +1,35 @@ +#include + +/usr/bin/vector flags=(attach_disconnected) { + #include + #include + #include + #include + #include + #include + #include + #include + + deny @{HOME}/** rwx, + /etc/machine-id r, + /etc/vector/** r, + /proc/*/sched r, + /proc/cmdline r, + /proc/sys/kernel/osrelease r, + /run/log/journal/ r, + /var/log/journal/** r, + /run/systemd/notify rw, + /sys/firmware/efi/efivars/SecureBoot-8be4df61-93ca-11d2-aa0d-00e098032b8c r, + /sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us r, + /sys/kernel/mm/transparent_hugepage/enabled r, + /usr/bin/journalctl mrix, + /usr/bin/vector mrix, + /var/lib/vector/** rw, + /var/log/journal/ r, + /var/log/postgresql/ r, + /var/log/postgresql/** rw, + /var/run/systemd/notify rw, + owner /proc/*/cgroup r, + owner /proc/*/mountinfo r, + owner /proc/*/stat r, +} diff --git a/ebssurrogate/files/apparmor_profiles/usr.lib.postgresql.bin.postgres 
b/ebssurrogate/files/apparmor_profiles/usr.lib.postgresql.bin.postgres new file mode 100644 index 0000000..8e2efc3 --- /dev/null +++ b/ebssurrogate/files/apparmor_profiles/usr.lib.postgresql.bin.postgres @@ -0,0 +1,55 @@ +#include + +profile /usr/lib/postgresql/bin/postgres flags=(attach_disconnected) { +#include +#include +#include +#include +#include +#include +#include + +capability dac_override, +capability dac_read_search, + +deny @{HOME}/** rwx, + +/data/pgdata/** r, +/dev/shm rw, +/etc/java-11-openjdk/logging.properties r, +/etc/java-11-openjdk/security/default.policy r, +/etc/java-11-openjdk/security/java.policy r, +/etc/java-11-openjdk/security/java.security r, +/etc/mecabrc r, +/etc/postgresql-custom/** r, +/etc/postgresql/** r, +/etc/timezone r, +/etc/wal-g/config.json r, +/run/systemd/notify rw, +/usr/bin/cat rix, +/usr/bin/dash rix, +/usr/bin/mknod rix, +/usr/bin/admin-mgr Ux, +/usr/lib/postgresql/bin/* mrix, +/usr/local/bin/wal-g rix, +/usr/local/lib/groonga/plugins/tokenizers/mecab.so mr, +/usr/local/lib/libSFCGAL.so.* mr, +/usr/local/lib/libgroonga.so.* mr, +/usr/local/pgsql/etc/pljava.policy r, +/usr/share/postgresql/** r, +/var/lib/mecab/** r, +/var/lib/postgresql/** rwl, +/var/log/postgresql/** rw, +/var/log/wal-g/** w, +/var/run/systemd/notify rw, +/{,var/}run/postgresql/** rw, +owner /data/pgdata/ r, +owner /data/pgdata/** rwl, +owner /data/pgdata/pgroonga.log k, +owner /dev/shm/ rw, +owner /dev/shm/PostgreSQL.* rw, +owner /sys/kernel/mm/transparent_hugepage/hpage_pmd_size r, +owner /var/log/wal-g/** rw, +owner @{PROC}/[0-9]*/oom_adj rw, + +} diff --git a/ebssurrogate/files/apparmor_profiles/usr.local.bin.pgbouncer b/ebssurrogate/files/apparmor_profiles/usr.local.bin.pgbouncer new file mode 100644 index 0000000..7bf6d09 --- /dev/null +++ b/ebssurrogate/files/apparmor_profiles/usr.local.bin.pgbouncer @@ -0,0 +1,20 @@ +#include +profile /usr/local/bin/pgbouncer flags=(attach_disconnected) { + #include + #include + #include + #include + #include + 
#include + #include + + deny @{HOME}/** rwx, + /etc/pgbouncer-custom/** r, + /etc/pgbouncer/** r, + /proc/sys/kernel/random/uuid r, + /run/systemd/notify rw, + /usr/local/bin/pgbouncer mrix, + /var/log/pgbouncer.log rw, + /var/run/systemd/notify rw, + /{,var/}run/pgbouncer/** rw, +} diff --git a/ebssurrogate/files/cloud.cfg b/ebssurrogate/files/cloud.cfg new file mode 100644 index 0000000..678b5b6 --- /dev/null +++ b/ebssurrogate/files/cloud.cfg @@ -0,0 +1,137 @@ +# The top level settings are used as module +# and system configuration. +# A set of users which may be applied and/or used by various modules +# when a 'default' entry is found it will reference the 'default_user' +# from the distro configuration specified below +users: + - default + + +# If this is set, 'root' will not be able to ssh in and they +# will get a message to login instead as the default $user +disable_root: true + +# This will cause the set+update hostname module to not operate (if true) +preserve_hostname: false + +# If you use datasource_list array, keep array items in a single line. +# If you use multi line array, ds-identify script won't read array items. +# Example datasource config +# datasource: +# Ec2: +# metadata_urls: [ 'blah.com' ] +# timeout: 5 # (defaults to 50 seconds) +# max_wait: 10 # (defaults to 120 seconds) + + + +# The modules that run in the 'init' stage +cloud_init_modules: +# - migrator +# - seed_random +# - bootcmd + - write-files +# - growpart +# - resizefs +# - disk_setup +# - mounts + - set_hostname + - update_hostname + - update_etc_hosts +# - ca-certs +# - rsyslog + - users-groups + - ssh + +# The modules that run in the 'config' stage +cloud_config_modules: +# Emit the cloud config ready event +# this can be used by upstart jobs for 'start on cloud-config'. 
+# - emit_upstart +# - snap +# - ssh-import-id +# - locale +# - set-passwords +# - grub-dpkg +# - apt-pipelining +# - apt-configure +# - ubuntu-advantage + - ntp + - timezone + - disable-ec2-metadata + - runcmd +# - byobu + +# The modules that run in the 'final' stage +cloud_final_modules: +# - package-update-upgrade-install +# - fan +# - landscape +# - lxd +# - ubuntu-drivers +# - puppet +# - chef +# - mcollective +# - salt-minion + - reset_rmc + - refresh_rmc_and_interface +# - rightscale_userdata + - scripts-vendor + - scripts-per-once + - scripts-per-boot + - scripts-per-instance + - scripts-user +# - ssh-authkey-fingerprints +# - keys-to-console +# - phone-home + - final-message + - power-state-change + +# System and/or distro specific settings +# (not accessible to handlers/transforms) +system_info: + # This will affect which distro class gets used + distro: ubuntu + # Default user name + that default users groups (if added/used) + default_user: + name: ubuntu + lock_passwd: True + gecos: Ubuntu + groups: [adm, audio, cdrom, dialout, dip, floppy, lxd, netdev, plugdev, sudo, video] + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + shell: /bin/bash + network: + renderers: ['netplan', 'eni', 'sysconfig'] + # Automatically discover the best ntp_client + ntp_client: auto + # Other config here will be given to the distro class and/or path classes + paths: + cloud_dir: /var/lib/cloud/ + templates_dir: /etc/cloud/templates/ + upstart_dir: /etc/init/ + package_mirrors: + - arches: [i386, amd64] + failsafe: + primary: http://archive.ubuntu.com/ubuntu + security: http://security.ubuntu.com/ubuntu + search: + primary: + - http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/ + - http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/ + - http://%(region)s.clouds.archive.ubuntu.com/ubuntu/ + security: [] + - arches: [arm64, armel, armhf] + failsafe: + primary: http://ports.ubuntu.com/ubuntu-ports + security: http://ports.ubuntu.com/ubuntu-ports + search: + primary: + - 
http://%(ec2_region)s.ec2.ports.ubuntu.com/ubuntu-ports/ + - http://%(availability_zone)s.clouds.ports.ubuntu.com/ubuntu-ports/ + - http://%(region)s.clouds.ports.ubuntu.com/ubuntu-ports/ + security: [] + - arches: [default] + failsafe: + primary: http://ports.ubuntu.com/ubuntu-ports + security: http://ports.ubuntu.com/ubuntu-ports + ssh_svcname: ssh diff --git a/ebssurrogate/files/ebsnvme-id b/ebssurrogate/files/ebsnvme-id new file mode 100644 index 0000000..b543c0d --- /dev/null +++ b/ebssurrogate/files/ebsnvme-id @@ -0,0 +1,173 @@ +#!/usr/bin/env python2.7 + +# Copyright (C) 2017 Amazon.com, Inc. or its affiliates. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS +# OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the +# License. + +""" +Usage: +Read EBS device information and provide information about +the volume. 
+""" + +import argparse +from ctypes import * +from fcntl import ioctl +import sys + +NVME_ADMIN_IDENTIFY = 0x06 +NVME_IOCTL_ADMIN_CMD = 0xC0484E41 +AMZN_NVME_VID = 0x1D0F +AMZN_NVME_EBS_MN = "Amazon Elastic Block Store" + +class nvme_admin_command(Structure): + _pack_ = 1 + _fields_ = [("opcode", c_uint8), # op code + ("flags", c_uint8), # fused operation + ("cid", c_uint16), # command id + ("nsid", c_uint32), # namespace id + ("reserved0", c_uint64), + ("mptr", c_uint64), # metadata pointer + ("addr", c_uint64), # data pointer + ("mlen", c_uint32), # metadata length + ("alen", c_uint32), # data length + ("cdw10", c_uint32), + ("cdw11", c_uint32), + ("cdw12", c_uint32), + ("cdw13", c_uint32), + ("cdw14", c_uint32), + ("cdw15", c_uint32), + ("reserved1", c_uint64)] + +class nvme_identify_controller_amzn_vs(Structure): + _pack_ = 1 + _fields_ = [("bdev", c_char * 32), # block device name + ("reserved0", c_char * (1024 - 32))] + +class nvme_identify_controller_psd(Structure): + _pack_ = 1 + _fields_ = [("mp", c_uint16), # maximum power + ("reserved0", c_uint16), + ("enlat", c_uint32), # entry latency + ("exlat", c_uint32), # exit latency + ("rrt", c_uint8), # relative read throughput + ("rrl", c_uint8), # relative read latency + ("rwt", c_uint8), # relative write throughput + ("rwl", c_uint8), # relative write latency + ("reserved1", c_char * 16)] + +class nvme_identify_controller(Structure): + _pack_ = 1 + _fields_ = [("vid", c_uint16), # PCI Vendor ID + ("ssvid", c_uint16), # PCI Subsystem Vendor ID + ("sn", c_char * 20), # Serial Number + ("mn", c_char * 40), # Module Number + ("fr", c_char * 8), # Firmware Revision + ("rab", c_uint8), # Recommend Arbitration Burst + ("ieee", c_uint8 * 3), # IEEE OUI Identifier + ("mic", c_uint8), # Multi-Interface Capabilities + ("mdts", c_uint8), # Maximum Data Transfer Size + ("reserved0", c_uint8 * (256 - 78)), + ("oacs", c_uint16), # Optional Admin Command Support + ("acl", c_uint8), # Abort Command Limit + ("aerl", c_uint8), 
# Asynchronous Event Request Limit + ("frmw", c_uint8), # Firmware Updates + ("lpa", c_uint8), # Log Page Attributes + ("elpe", c_uint8), # Error Log Page Entries + ("npss", c_uint8), # Number of Power States Support + ("avscc", c_uint8), # Admin Vendor Specific Command Configuration + ("reserved1", c_uint8 * (512 - 265)), + ("sqes", c_uint8), # Submission Queue Entry Size + ("cqes", c_uint8), # Completion Queue Entry Size + ("reserved2", c_uint16), + ("nn", c_uint32), # Number of Namespaces + ("oncs", c_uint16), # Optional NVM Command Support + ("fuses", c_uint16), # Fused Operation Support + ("fna", c_uint8), # Format NVM Attributes + ("vwc", c_uint8), # Volatile Write Cache + ("awun", c_uint16), # Atomic Write Unit Normal + ("awupf", c_uint16), # Atomic Write Unit Power Fail + ("nvscc", c_uint8), # NVM Vendor Specific Command Configuration + ("reserved3", c_uint8 * (704 - 531)), + ("reserved4", c_uint8 * (2048 - 704)), + ("psd", nvme_identify_controller_psd * 32), # Power State Descriptor + ("vs", nvme_identify_controller_amzn_vs)] # Vendor Specific + +class ebs_nvme_device: + def __init__(self, device): + self.device = device + self.ctrl_identify() + + def _nvme_ioctl(self, id_response, id_len): + admin_cmd = nvme_admin_command(opcode = NVME_ADMIN_IDENTIFY, + addr = id_response, + alen = id_len, + cdw10 = 1) + + with open(self.device, "rw") as nvme: + ioctl(nvme, NVME_IOCTL_ADMIN_CMD, admin_cmd) + + def ctrl_identify(self): + self.id_ctrl = nvme_identify_controller() + self._nvme_ioctl(addressof(self.id_ctrl), sizeof(self.id_ctrl)) + + if self.id_ctrl.vid != AMZN_NVME_VID or self.id_ctrl.mn.strip() != AMZN_NVME_EBS_MN: + raise TypeError("[ERROR] Not an EBS device: '{0}'".format(self.device)) + + def get_volume_id(self): + vol = self.id_ctrl.sn + + if vol.startswith("vol") and vol[3] != "-": + vol = "vol-" + vol[3:] + + return vol + + def get_block_device(self, stripped=False): + dev = self.id_ctrl.vs.bdev.strip() + + if stripped and dev.startswith("/dev/"): + 
dev = dev[5:] + + return dev + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Reads EBS information from NVMe devices.") + parser.add_argument("device", nargs=1, help="Device to query") + + display = parser.add_argument_group("Display Options") + display.add_argument("-v", "--volume", action="store_true", + help="Return volume-id") + display.add_argument("-b", "--block-dev", action="store_true", + help="Return block device mapping") + display.add_argument("-u", "--udev", action="store_true", + help="Output data in format suitable for udev rules") + + if len(sys.argv) < 2: + parser.print_help() + sys.exit(1) + + args = parser.parse_args() + + get_all = not (args.udev or args.volume or args.block_dev) + + try: + dev = ebs_nvme_device(args.device[0]) + except (IOError, TypeError) as err: + print >> sys.stderr, err + sys.exit(1) + + if get_all or args.volume: + print "Volume ID: {0}".format(dev.get_volume_id()) + if get_all or args.block_dev or args.udev: + print dev.get_block_device(args.udev) diff --git a/ebssurrogate/files/sources-arm64.cfg b/ebssurrogate/files/sources-arm64.cfg new file mode 100644 index 0000000..a236377 --- /dev/null +++ b/ebssurrogate/files/sources-arm64.cfg @@ -0,0 +1,10 @@ +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ focal main restricted +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ focal-updates main restricted +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ focal universe +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ focal-updates universe +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ focal multiverse +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ focal-updates multiverse +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ focal-backports main restricted universe multiverse +deb http://ports.ubuntu.com/ubuntu-ports focal-security main restricted +deb http://ports.ubuntu.com/ubuntu-ports focal-security universe +deb 
http://ports.ubuntu.com/ubuntu-ports focal-security multiverse diff --git a/ebssurrogate/files/sources.cfg b/ebssurrogate/files/sources.cfg new file mode 100644 index 0000000..ec30118 --- /dev/null +++ b/ebssurrogate/files/sources.cfg @@ -0,0 +1,10 @@ +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ focal main restricted +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ focal-updates main restricted +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ focal universe +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ focal-updates universe +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ focal multiverse +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ focal-updates multiverse +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ focal-backports main restricted universe multiverse +deb http://security.ubuntu.com/ubuntu focal-security main restricted +deb http://security.ubuntu.com/ubuntu focal-security universe +deb http://security.ubuntu.com/ubuntu focal-security multiverse diff --git a/ebssurrogate/files/unit-tests/test-extensions.sql b/ebssurrogate/files/unit-tests/test-extensions.sql new file mode 100644 index 0000000..60c6507 --- /dev/null +++ b/ebssurrogate/files/unit-tests/test-extensions.sql @@ -0,0 +1,20 @@ +CREATE OR REPLACE FUNCTION install_available_extensions_and_test() RETURNS boolean AS $$ +DECLARE extension_name TEXT; +allowed_extentions TEXT[] := string_to_array(current_setting('supautils.privileged_extensions'), ','); +BEGIN + FOREACH extension_name IN ARRAY allowed_extentions + LOOP + SELECT trim(extension_name) INTO extension_name; + /* skip below extensions check for now */ + CONTINUE WHEN extension_name = 'pgsodium'; + CONTINUE WHEN extension_name = 'plpgsql' OR extension_name = 'plpgsql_check' OR extension_name = 'pgtap'; + CONTINUE WHEN extension_name = 'tealbase_vault' OR extension_name = 'wrappers'; + RAISE notice 'START TEST FOR: %', extension_name; + EXECUTE format('DROP EXTENSION IF EXISTS %s CASCADE', quote_ident(extension_name)); + EXECUTE 
format('CREATE EXTENSION %s CASCADE', quote_ident(extension_name)); + RAISE notice 'END TEST FOR: %', extension_name; + END LOOP; + RAISE notice 'EXTENSION TESTS COMPLETED..'; + return true; +END; +$$ LANGUAGE plpgsql; diff --git a/ebssurrogate/files/unit-tests/unit-test-01.sql b/ebssurrogate/files/unit-tests/unit-test-01.sql new file mode 100644 index 0000000..3b28abe --- /dev/null +++ b/ebssurrogate/files/unit-tests/unit-test-01.sql @@ -0,0 +1,31 @@ +BEGIN; +SELECT plan(8); + +-- Check installed extensions +SELECT extensions_are( + ARRAY[ + 'plpgsql', + 'pg_stat_statements', + 'pgsodium', + 'pgtap', + 'pg_graphql', + 'pgcrypto', + 'pgjwt', + 'uuid-ossp' + ] +); + + +-- Check schemas exists +SELECT has_schema('pg_toast'); +SELECT has_schema('pg_catalog'); +SELECT has_schema('information_schema'); +SELECT has_schema('public'); + +-- Check that service_role can execute certain pgsodium functions +SELECT function_privs_are('pgsodium', 'crypto_aead_det_decrypt', array['bytea', 'bytea', 'uuid', 'bytea'], 'service_role', array['EXECUTE']); +SELECT function_privs_are('pgsodium', 'crypto_aead_det_encrypt', array['bytea', 'bytea', 'uuid', 'bytea'], 'service_role', array['EXECUTE']); +SELECT function_privs_are('pgsodium', 'crypto_aead_det_keygen', array[]::text[], 'service_role', array['EXECUTE']); + +SELECT * from finish(); +ROLLBACK; diff --git a/ebssurrogate/files/unit-tests/verify-extensions.sql b/ebssurrogate/files/unit-tests/verify-extensions.sql new file mode 100644 index 0000000..81b2c7f --- /dev/null +++ b/ebssurrogate/files/unit-tests/verify-extensions.sql @@ -0,0 +1,12 @@ +BEGIN; +SELECT plan(3); +SELECT has_function( + 'install_available_extensions_and_test' +); +SELECT function_returns( + 'install_available_extensions_and_test', + 'boolean' +); +SELECT ok(install_available_extensions_and_test(),'extension test'); +SELECT * FROM finish(); +ROLLBACK; diff --git a/ebssurrogate/files/vector.timer b/ebssurrogate/files/vector.timer new file mode 100644 index 
0000000..68bb4d6 --- /dev/null +++ b/ebssurrogate/files/vector.timer @@ -0,0 +1,9 @@ +[Unit] +Description= Defer running the vector.service 60secs after boot up + +[Timer] +OnBootSec=60s +Unit=vector.service + +[Install] +WantedBy=multi-user.target diff --git a/ebssurrogate/scripts/chroot-bootstrap.sh b/ebssurrogate/scripts/chroot-bootstrap.sh new file mode 100755 index 0000000..c679714 --- /dev/null +++ b/ebssurrogate/scripts/chroot-bootstrap.sh @@ -0,0 +1,213 @@ +#!/usr/bin/env bash +# +# This script runs inside chrooted environment. It installs grub and its +# Configuration file. +# + +set -o errexit +set -o pipefail +set -o xtrace + +export DEBIAN_FRONTEND=noninteractive + +export APT_OPTIONS="-oAPT::Install-Recommends=false \ + -oAPT::Install-Suggests=false \ + -oAcquire::Languages=none" + +if [ $(dpkg --print-architecture) = "amd64" ]; +then + ARCH="amd64"; +else + ARCH="arm64"; +fi + + + +function update_install_packages { + # Update APT with new sources + cat /etc/apt/sources.list + apt-get $APT_OPTIONS update && apt-get $APT_OPTIONS --yes dist-upgrade + + # Do not configure grub during package install + if [ "${ARCH}" = "amd64" ]; then + echo 'grub-pc grub-pc/install_devices_empty select true' | debconf-set-selections + echo 'grub-pc grub-pc/install_devices select' | debconf-set-selections + # Install various packages needed for a booting system + apt-get install -y \ + linux-aws \ + grub-pc \ + e2fsprogs + else + apt-get install -y e2fsprogs + fi + # Install standard packages + apt-get install -y \ + sudo \ + cloud-init \ + acpid \ + ec2-hibinit-agent \ + ec2-instance-connect \ + hibagent \ + ncurses-term \ + ssh-import-id \ + + # apt upgrade + apt-get upgrade -y + + # Install OpenSSH and other packages + sudo add-apt-repository universe + apt-get update + apt-get install -y --no-install-recommends \ + openssh-server \ + git \ + ufw \ + cron \ + logrotate \ + fail2ban \ + locales \ + at \ + less \ + python3-systemd + + if [ "${ARCH}" = "arm64" ]; then + 
apt-get $APT_OPTIONS --yes install linux-aws initramfs-tools dosfstools + fi +} + +function setup_locale { +cat << EOF > /etc/default/locale +LANG="C.UTF-8" +LC_CTYPE="C.UTF-8" +EOF + localedef -i en_US -f UTF-8 en_US.UTF-8 +} + +# Disable IPV6 for ufw +function disable_ufw_ipv6 { + sed -i 's/IPV6=yes/IPV6=no/g' /etc/default/ufw +} + +function install_packages_for_build { + apt-get install -y --no-install-recommends linux-libc-dev \ + acl \ + magic-wormhole sysstat \ + build-essential libreadline-dev zlib1g-dev flex bison libxml2-dev libxslt-dev libssl-dev libsystemd-dev libpq-dev libxml2-utils uuid-dev xsltproc ssl-cert \ + llvm-11-dev clang-11 \ + gcc-10 g++-10 \ + libgeos-dev libproj-dev libgdal-dev libjson-c-dev libboost-all-dev libcgal-dev libmpfr-dev libgmp-dev cmake \ + libkrb5-dev \ + maven default-jre default-jdk \ + curl gpp apt-transport-https cmake libc++-dev libc++abi-dev libc++1 libglib2.0-dev libtinfo5 libc++abi1 ninja-build python \ + liblzo2-dev + + # Mark llvm as manual to prevent auto removal + apt-mark manual libllvm11:arm64 +} + +function setup_apparmor { + apt-get install -y apparmor apparmor-utils auditd + + # Copy apparmor profiles + cp -rv /tmp/apparmor_profiles/* /etc/apparmor.d/ +} + +function setup_grub_conf_arm64 { +cat << EOF > /etc/default/grub +GRUB_DEFAULT=0 +GRUB_TIMEOUT=0 +GRUB_TIMEOUT_STYLE="hidden" +GRUB_DISTRIBUTOR="tealbase postgresql" +GRUB_CMDLINE_LINUX_DEFAULT="nomodeset console=tty1 console=ttyS0 ipv6.disable=1" +EOF +} + +function setup_grub_conf_amd64 { + mkdir -p /etc/default/grub.d + +cat << EOF > /etc/default/grub.d/50-aws-settings.cfg +GRUB_RECORDFAIL_TIMEOUT=0 +GRUB_TIMEOUT=0 +GRUB_CMDLINE_LINUX_DEFAULT=" root=/dev/nvme0n1p2 rootfstype=ext4 rw noatime,nodiratime,discard console=tty1 console=ttyS0 ip=dhcp tsc=reliable net.ifnames=0 quiet module_blacklist=psmouse,input_leds,autofs4 ipv6.disable=1 nvme_core.io_timeout=4294967295 systemd.hostname=ubuntu ipv6.disable=1" +GRUB_TERMINAL=console 
+GRUB_DISABLE_LINUX_UUID=true +EOF +} + +# Install GRUB +function install_configure_grub { + if [ "${ARCH}" = "arm64" ]; then + apt-get $APT_OPTIONS --yes install cloud-guest-utils fdisk grub-efi-arm64 + setup_grub_conf_arm64 + rm -rf /etc/grub.d/30_os-prober + sleep 1 + fi + grub-install /dev/xvdf && update-grub +} + +# skip fsck for first boot +function disable_fsck { + touch /fastboot +} + +# Don't request hostname during boot but set hostname +function setup_hostname { + sed -i 's/gethostname()/ubuntu /g' /etc/dhcp/dhclient.conf + sed -i 's/host-name,//g' /etc/dhcp/dhclient.conf + echo "ubuntu" > /etc/hostname + chmod 644 /etc/hostname +} + +# Set options for the default interface +function setup_eth0_interface { +cat << EOF > /etc/netplan/eth0.yaml +network: + version: 2 + ethernets: + eth0: + dhcp4: true +EOF +} + +function disable_sshd_passwd_auth { + sed -i -E -e 's/^#?\s*PasswordAuthentication\s+(yes|no)\s*$/PasswordAuthentication no/g' \ + -e 's/^#?\s*ChallengeResponseAuthentication\s+(yes|no)\s*$/ChallengeResponseAuthentication no/g' \ + /etc/ssh/sshd_config +} + +function create_admin_account { + groupadd admin +} + +#Set default target as multi-user +function set_default_target { + rm -f /etc/systemd/system/default.target + ln -s /lib/systemd/system/multi-user.target /etc/systemd/system/default.target +} + +# Setup ccache +function setup_ccache { + apt-get install ccache -y + mkdir -p /tmp/ccache + export PATH=/usr/lib/ccache:$PATH + echo "PATH=$PATH" >> /etc/environment +} + +# Clear apt caches +function cleanup_cache { + apt-get clean +} + +update_install_packages +setup_locale +install_packages_for_build +install_configure_grub +setup_apparmor +setup_hostname +create_admin_account +set_default_target +setup_eth0_interface +disable_ufw_ipv6 +disable_sshd_passwd_auth +disable_fsck +setup_ccache +cleanup_cache diff --git a/ebssurrogate/scripts/surrogate-bootstrap.sh b/ebssurrogate/scripts/surrogate-bootstrap.sh new file mode 100755 index 
0000000..ac826bc --- /dev/null +++ b/ebssurrogate/scripts/surrogate-bootstrap.sh @@ -0,0 +1,312 @@ +#!/usr/bin/env bash +# +# This script creates the filesystem and sets up the chrooted +# environment for further processing. It also runs the +# ansible playbook and finally does system cleanup. +# +# Adapted from: https://github.com/jen20/packer-ubuntu-zfs + +set -o errexit +set -o pipefail +set -o xtrace + +if [ $(dpkg --print-architecture) = "amd64" ]; +then + ARCH="amd64"; +else + ARCH="arm64"; +fi + +function waitfor_boot_finished { + export DEBIAN_FRONTEND=noninteractive + + echo "args: ${ARGS}" + # Wait for cloudinit on the surrogate to complete before making progress + while [[ ! -f /var/lib/cloud/instance/boot-finished ]]; do + echo 'Waiting for cloud-init...' + sleep 1 + done +} + +function install_packages { + # Setup Ansible on host VM + apt-get update && sudo apt-get install software-properties-common -y + add-apt-repository --yes --update ppa:ansible/ansible && sudo apt-get install ansible -y + ansible-galaxy collection install community.general + + # Update apt and install required packages + apt-get update + apt-get install -y \ + gdisk \ + e2fsprogs \ + debootstrap \ + nvme-cli \ + docker.io + +} + +# Partition the new root EBS volume +function create_partition_table { + + if [ "${ARCH}" = "arm64" ]; then + # NOTE: each parted subcommand must stay on the same logical command + # line; the continuation after "100%" is required so that the two + # "set" flags are handed to parted (marking partition 1 as the EFI + # system/boot partition) instead of running the shell "set" builtin. + parted --script /dev/xvdf \ + mklabel gpt \ + mkpart UEFI 1MiB 100MiB \ + mkpart ROOT 100MiB 100% \ + set 1 esp on \ + set 1 boot on + parted --script /dev/xvdf print + else + sgdisk -Zg -n1:0:4095 -t1:EF02 -c1:GRUB -n2:0:0 -t2:8300 -c2:EXT4 /dev/xvdf + fi + + sleep 2 +} + +function device_partition_mappings { + # NVMe EBS launch device mappings (symlinks): /dev/nvme*n* to /dev/xvd* + declare -A blkdev_mappings + for blkdev in $(nvme list | awk '/^\/dev/ { print $1 }'); do # /dev/nvme*n* + # Mapping info from disk headers + header=$(nvme id-ctrl --raw-binary "${blkdev}" | cut -c3073-3104 | tr -s ' ' | sed 's/ $//g' | sed 's!/dev/!!') 
mapping="/dev/${header%%[0-9]}" # normalize sda1 => sda + + # Create /dev/xvd* device symlink + if [[ ! -z "$mapping" ]] && [[ -b "${blkdev}" ]] && [[ ! -L "${mapping}" ]]; then + ln -s "$blkdev" "$mapping" + + blkdev_mappings["$blkdev"]="$mapping" + fi + done + + create_partition_table + + # NVMe EBS launch device partition mappings (symlinks): /dev/nvme*n*p* to /dev/xvd*[0-9]+ + declare -A partdev_mappings + for blkdev in "${!blkdev_mappings[@]}"; do # /dev/nvme*n* + mapping="${blkdev_mappings[$blkdev]}" + + # Create /dev/xvd*[0-9]+ partition device symlink + for partdev in "${blkdev}"p*; do + partnum=${partdev##*p} + if [[ ! -L "${mapping}${partnum}" ]]; then + ln -s "${blkdev}p${partnum}" "${mapping}${partnum}" + + partdev_mappings["${blkdev}p${partnum}"]="${mapping}${partnum}" + fi + done + done +} + + +#Download and install latest e2fsprogs for fast_commit feature,if required. +function format_and_mount_rootfs { + mkfs.ext4 -m0.1 /dev/xvdf2 + + mount -o noatime,nodiratime /dev/xvdf2 /mnt + if [ "${ARCH}" = "arm64" ]; then + mkfs.fat -F32 /dev/xvdf1 + mkdir -p /mnt/boot/efi + sleep 2 + mount /dev/xvdf1 /mnt/boot/efi + fi + + mkfs.ext4 /dev/xvdh + mkdir -p /mnt/data + mount -o defaults,discard /dev/xvdh /mnt/data +} + +function create_swapfile { + fallocate -l 1G /mnt/swapfile + chmod 600 /mnt/swapfile + mkswap /mnt/swapfile +} + +function format_build_partition { + mkfs.ext4 -O ^has_journal /dev/xvdc +} +function pull_docker { + docker run -itd --name ccachedata "${DOCKER_IMAGE}:${DOCKER_IMAGE_TAG}" sh + docker exec -itd ccachedata mkdir -p /build/ccache +} + +# Create fstab +function create_fstab { + FMT="%-42s %-11s %-5s %-17s %-5s %s" +cat > "/mnt/etc/fstab" << EOF +$(printf "${FMT}" "# DEVICE UUID" "MOUNTPOINT" "TYPE" "OPTIONS" "DUMP" "FSCK") +$(findmnt -no SOURCE /mnt | xargs blkid -o export | awk -v FMT="${FMT}" '/^UUID=/ { printf(FMT, $0, "/", "ext4", "defaults,discard", "0", "1" ) }') +$(findmnt -no SOURCE /mnt/boot/efi | xargs blkid -o export | awk -v 
FMT="${FMT}" '/^UUID=/ { printf(FMT, $0, "/boot/efi", "vfat", "umask=0077", "0", "1" ) }') +$(findmnt -no SOURCE /mnt/data | xargs blkid -o export | awk -v FMT="${FMT}" '/^UUID=/ { printf(FMT, $0, "/data", "ext4", "defaults,discard", "0", "2" ) }') +$(printf "$FMT" "/swapfile" "none" "swap" "sw" "0" "0") +EOF + unset FMT +} + +function setup_chroot_environment { + # Bootstrap Ubuntu into /mnt + debootstrap --arch ${ARCH} --variant=minbase focal /mnt + + # Update ec2-region + REGION=$(curl --silent --fail http://169.254.169.254/latest/meta-data/placement/availability-zone | sed -E 's|[a-z]+$||g') + sed -i "s/REGION/${REGION}/g" /tmp/sources.list + cp /tmp/sources.list /mnt/etc/apt/sources.list + + if [ "${ARCH}" = "arm64" ]; then + create_fstab + fi + + # Create mount points and mount the filesystem + mkdir -p /mnt/{dev,proc,sys} + mount --rbind /dev /mnt/dev + mount --rbind /proc /mnt/proc + mount --rbind /sys /mnt/sys + + # Create build mount point and mount + mkdir -p /mnt/tmp + mount /dev/xvdc /mnt/tmp + chmod 777 /mnt/tmp + + # Copy apparmor profiles + chmod 644 /tmp/apparmor_profiles/* + cp -r /tmp/apparmor_profiles /mnt/tmp/ + + # Copy migrations + cp -r /tmp/migrations /mnt/tmp/ + + # Copy unit tests + cp -r /tmp/unit-tests /mnt/tmp/ + + # Copy the bootstrap script into place and execute inside chroot + cp /tmp/chroot-bootstrap.sh /mnt/tmp/chroot-bootstrap.sh + chroot /mnt /tmp/chroot-bootstrap.sh + rm -f /mnt/tmp/chroot-bootstrap.sh + + # Copy the nvme identification script into /sbin inside the chroot + mkdir -p /mnt/sbin + cp /tmp/ebsnvme-id /mnt/sbin/ebsnvme-id + chmod +x /mnt/sbin/ebsnvme-id + + # Copy the udev rules for identifying nvme devices into the chroot + mkdir -p /mnt/etc/udev/rules.d + cp /tmp/70-ec2-nvme-devices.rules \ + /mnt/etc/udev/rules.d/70-ec2-nvme-devices.rules + + #Copy custom cloud-init + rm -f /mnt/etc/cloud/cloud.cfg + cp /tmp/cloud.cfg /mnt/etc/cloud/cloud.cfg + + sleep 2 +} + +function download_ccache { + docker cp 
ccachedata:/build/ccache/. /mnt/tmp/ccache +} + +function execute_playbook { + +tee /etc/ansible/ansible.cfg < [db/init-scripts](db/init-scripts) +2. [init-scripts/migrations](https://github.com/tealbase/infrastructure/tree/develop/init-scripts/migrations) => [db/migrations](db/migrations) + +For compatibility with hosted projects, we include [migrate.sh](migrate.sh) that executes migrations in the same order as ami build: + +1. Run all `db/init-scripts` with `postgres` superuser role. +2. Run all `db/migrations` with `tealbase_admin` superuser role. +3. Finalize role passwords with `/etc/postgres.schema.sql` if present. + +Additionally, [tealbase/postgres](https://github.com/tealbase/postgres/blob/develop/ansible/playbook-docker.yml#L9) image contains several migration scripts to configure default extensions. These are run first by docker entrypoint and included in ami by ansible. + +## Guidelines + +- Migrations are append only. Never edit existing migrations once they are on master. +- Self contained components (gotrue, storage, realtime) may contain their own migrations. +- Self hosted tealbase users should update role passwords separately after running all migrations. +- Prod release is done by publishing a new GitHub release on master branch. 
+ +## Requirements + +- [dbmate](https://github.com/amacneil/dbmate) +- [docker-compose](https://docs.docker.com/compose/) + +## Usage + +### Add a Migration + +```shell +# Start the database server +docker-compose up + +# create a new migration +dbmate new '' +``` + +Then, populate the migration at `./db/migrations/xxxxxxxxx_` and make sure it executes successfully with + +```shell +dbmate up +``` + +### Adding a migration with docker-compose + +dbmate can optionally be run locally using docker: + +```shell +# Start the database server +docker-compose up + +# create a new migration +docker-compose run --rm dbmate new '' +``` + +Then, populate the migration at `./db/migrations/xxxxxxxxx_` and make sure it executes successfully with + +```shell +docker-compose run --rm dbmate up +``` + +## Testing + +Migrations are tested in CI to ensure they do not raise an exception against previously released `tealbase/postgres` docker images. The full version matrix is at [test.yml](./.github/workflows/test.yml) in the `tealbase-version` variable. 
diff --git a/migrations/db/init-scripts/00000000000000-initial-schema.sql b/migrations/db/init-scripts/00000000000000-initial-schema.sql new file mode 100644 index 0000000..35a9069 --- /dev/null +++ b/migrations/db/init-scripts/00000000000000-initial-schema.sql @@ -0,0 +1,54 @@ +-- migrate:up + +-- Set up realtime +-- defaults to empty publication +create publication tealbase_realtime; + +-- tealbase super admin +create user tealbase_admin; +alter user tealbase_admin with superuser createdb createrole replication bypassrls; + +-- tealbase replication user +create user tealbase_replication_admin with login replication; + +-- Extension namespacing +create schema if not exists extensions; +create extension if not exists "uuid-ossp" with schema extensions; +create extension if not exists pgcrypto with schema extensions; +create extension if not exists pgjwt with schema extensions; + +-- Set up auth roles for the developer +create role anon nologin noinherit; +create role authenticated nologin noinherit; -- "logged in" user: web_user, app_user, etc +create role service_role nologin noinherit bypassrls; -- allow developers to create JWT's that bypass their policies + +create user authenticator noinherit; +grant anon to authenticator; +grant authenticated to authenticator; +grant service_role to authenticator; +grant tealbase_admin to authenticator; + +grant usage on schema public to postgres, anon, authenticated, service_role; +alter default privileges in schema public grant all on tables to postgres, anon, authenticated, service_role; +alter default privileges in schema public grant all on functions to postgres, anon, authenticated, service_role; +alter default privileges in schema public grant all on sequences to postgres, anon, authenticated, service_role; + +-- Allow Extensions to be used in the API +grant usage on schema extensions to postgres, anon, authenticated, service_role; + +-- Set up namespacing +alter user tealbase_admin SET search_path TO public, 
extensions; -- don't include the "auth" schema + +-- These are required so that the users receive grants whenever "tealbase_admin" creates tables/function +alter default privileges for user tealbase_admin in schema public grant all + on sequences to postgres, anon, authenticated, service_role; +alter default privileges for user tealbase_admin in schema public grant all + on tables to postgres, anon, authenticated, service_role; +alter default privileges for user tealbase_admin in schema public grant all + on functions to postgres, anon, authenticated, service_role; + +-- Set short statement/query timeouts for API roles +alter role anon set statement_timeout = '3s'; +alter role authenticated set statement_timeout = '8s'; + +-- migrate:down diff --git a/migrations/db/init-scripts/00000000000001-auth-schema.sql b/migrations/db/init-scripts/00000000000001-auth-schema.sql new file mode 100644 index 0000000..10f5d0b --- /dev/null +++ b/migrations/db/init-scripts/00000000000001-auth-schema.sql @@ -0,0 +1,123 @@ +-- migrate:up + +CREATE SCHEMA IF NOT EXISTS auth AUTHORIZATION tealbase_admin; + +-- auth.users definition + +CREATE TABLE auth.users ( + instance_id uuid NULL, + id uuid NOT NULL UNIQUE, + aud varchar(255) NULL, + "role" varchar(255) NULL, + email varchar(255) NULL UNIQUE, + encrypted_password varchar(255) NULL, + confirmed_at timestamptz NULL, + invited_at timestamptz NULL, + confirmation_token varchar(255) NULL, + confirmation_sent_at timestamptz NULL, + recovery_token varchar(255) NULL, + recovery_sent_at timestamptz NULL, + email_change_token varchar(255) NULL, + email_change varchar(255) NULL, + email_change_sent_at timestamptz NULL, + last_sign_in_at timestamptz NULL, + raw_app_meta_data jsonb NULL, + raw_user_meta_data jsonb NULL, + is_super_admin bool NULL, + created_at timestamptz NULL, + updated_at timestamptz NULL, + CONSTRAINT users_pkey PRIMARY KEY (id) +); +CREATE INDEX users_instance_id_email_idx ON auth.users USING btree (instance_id, email); 
+CREATE INDEX users_instance_id_idx ON auth.users USING btree (instance_id); +comment on table auth.users is 'Auth: Stores user login data within a secure schema.'; + +-- auth.refresh_tokens definition + +CREATE TABLE auth.refresh_tokens ( + instance_id uuid NULL, + id bigserial NOT NULL, + "token" varchar(255) NULL, + user_id varchar(255) NULL, + revoked bool NULL, + created_at timestamptz NULL, + updated_at timestamptz NULL, + CONSTRAINT refresh_tokens_pkey PRIMARY KEY (id) +); +CREATE INDEX refresh_tokens_instance_id_idx ON auth.refresh_tokens USING btree (instance_id); +CREATE INDEX refresh_tokens_instance_id_user_id_idx ON auth.refresh_tokens USING btree (instance_id, user_id); +CREATE INDEX refresh_tokens_token_idx ON auth.refresh_tokens USING btree (token); +comment on table auth.refresh_tokens is 'Auth: Store of tokens used to refresh JWT tokens once they expire.'; + +-- auth.instances definition + +CREATE TABLE auth.instances ( + id uuid NOT NULL, + uuid uuid NULL, + raw_base_config text NULL, + created_at timestamptz NULL, + updated_at timestamptz NULL, + CONSTRAINT instances_pkey PRIMARY KEY (id) +); +comment on table auth.instances is 'Auth: Manages users across multiple sites.'; + +-- auth.audit_log_entries definition + +CREATE TABLE auth.audit_log_entries ( + instance_id uuid NULL, + id uuid NOT NULL, + payload json NULL, + created_at timestamptz NULL, + CONSTRAINT audit_log_entries_pkey PRIMARY KEY (id) +); +CREATE INDEX audit_logs_instance_id_idx ON auth.audit_log_entries USING btree (instance_id); +comment on table auth.audit_log_entries is 'Auth: Audit trail for user actions.'; + +-- auth.schema_migrations definition + +CREATE TABLE auth.schema_migrations ( + "version" varchar(255) NOT NULL, + CONSTRAINT schema_migrations_pkey PRIMARY KEY ("version") +); +comment on table auth.schema_migrations is 'Auth: Manages updates to the auth system.'; + +INSERT INTO auth.schema_migrations (version) +VALUES ('20171026211738'), + ('20171026211808'), + 
 ('20171026211834'), + ('20180103212743'), + ('20180108183307'), + ('20180119214651'), + ('20180125194653'); + +-- Gets the User ID from the request cookie +create or replace function auth.uid() returns uuid as $$ + select nullif(current_setting('request.jwt.claim.sub', true), '')::uuid; +$$ language sql stable; + +-- Gets the User role from the request cookie +create or replace function auth.role() returns text as $$ + select nullif(current_setting('request.jwt.claim.role', true), '')::text; +$$ language sql stable; + +-- Gets the User email +create or replace function auth.email() returns text as $$ + select nullif(current_setting('request.jwt.claim.email', true), '')::text; +$$ language sql stable; + +-- usage on auth functions to API roles +GRANT USAGE ON SCHEMA auth TO anon, authenticated, service_role; + +-- tealbase super admin +CREATE USER tealbase_auth_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; +GRANT ALL PRIVILEGES ON SCHEMA auth TO tealbase_auth_admin; +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA auth TO tealbase_auth_admin; +GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA auth TO tealbase_auth_admin; +ALTER USER tealbase_auth_admin SET search_path = "auth"; +ALTER table "auth".users OWNER TO tealbase_auth_admin; +ALTER table "auth".refresh_tokens OWNER TO tealbase_auth_admin; +ALTER table "auth".audit_log_entries OWNER TO tealbase_auth_admin; +ALTER table "auth".instances OWNER TO tealbase_auth_admin; +ALTER table "auth".schema_migrations OWNER TO tealbase_auth_admin; + +-- migrate:down diff --git a/migrations/db/init-scripts/00000000000002-storage-schema.sql b/migrations/db/init-scripts/00000000000002-storage-schema.sql new file mode 100644 index 0000000..99d2eb7 --- /dev/null +++ b/migrations/db/init-scripts/00000000000002-storage-schema.sql @@ -0,0 +1,120 @@ +-- migrate:up + +CREATE SCHEMA IF NOT EXISTS storage AUTHORIZATION tealbase_admin; + +grant usage on schema storage to postgres, anon, authenticated, service_role; +alter default privileges 
in schema storage grant all on tables to postgres, anon, authenticated, service_role; +alter default privileges in schema storage grant all on functions to postgres, anon, authenticated, service_role; +alter default privileges in schema storage grant all on sequences to postgres, anon, authenticated, service_role; + +CREATE TABLE "storage"."buckets" ( + "id" text not NULL, + "name" text NOT NULL, + "owner" uuid, + "created_at" timestamptz DEFAULT now(), + "updated_at" timestamptz DEFAULT now(), + CONSTRAINT "buckets_owner_fkey" FOREIGN KEY ("owner") REFERENCES "auth"."users"("id"), + PRIMARY KEY ("id") +); +CREATE UNIQUE INDEX "bname" ON "storage"."buckets" USING BTREE ("name"); + +CREATE TABLE "storage"."objects" ( + "id" uuid NOT NULL DEFAULT extensions.uuid_generate_v4(), + "bucket_id" text, + "name" text, + "owner" uuid, + "created_at" timestamptz DEFAULT now(), + "updated_at" timestamptz DEFAULT now(), + "last_accessed_at" timestamptz DEFAULT now(), + "metadata" jsonb, + CONSTRAINT "objects_bucketId_fkey" FOREIGN KEY ("bucket_id") REFERENCES "storage"."buckets"("id"), + CONSTRAINT "objects_owner_fkey" FOREIGN KEY ("owner") REFERENCES "auth"."users"("id"), + PRIMARY KEY ("id") +); +CREATE UNIQUE INDEX "bucketid_objname" ON "storage"."objects" USING BTREE ("bucket_id","name"); +CREATE INDEX name_prefix_search ON storage.objects(name text_pattern_ops); + +ALTER TABLE storage.objects ENABLE ROW LEVEL SECURITY; + +CREATE FUNCTION storage.foldername(name text) + RETURNS text[] + LANGUAGE plpgsql +AS $function$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[1:array_length(_parts,1)-1]; +END +$function$; + +CREATE FUNCTION storage.filename(name text) + RETURNS text + LANGUAGE plpgsql +AS $function$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[array_length(_parts,1)]; +END +$function$; + +CREATE FUNCTION storage.extension(name text) + RETURNS text + LANGUAGE plpgsql 
+AS $function$ +DECLARE +_parts text[]; +_filename text; +BEGIN + select string_to_array(name, '/') into _parts; + select _parts[array_length(_parts,1)] into _filename; + -- @todo return the last part instead of 2 + return split_part(_filename, '.', 2); +END +$function$; + +CREATE FUNCTION storage.search(prefix text, bucketname text, limits int DEFAULT 100, levels int DEFAULT 1, offsets int DEFAULT 0) + RETURNS TABLE ( + name text, + id uuid, + updated_at TIMESTAMPTZ, + created_at TIMESTAMPTZ, + last_accessed_at TIMESTAMPTZ, + metadata jsonb + ) + LANGUAGE plpgsql +AS $function$ +DECLARE +_bucketId text; +BEGIN + -- will be replaced by migrations when server starts + -- saving space for cloud-init +END +$function$; + +-- create migrations table +-- https://github.com/ThomWright/postgres-migrations/blob/master/src/migrations/0_create-migrations-table.sql +-- we add this table here and not let it be auto-created so that the permissions are properly applied to it +CREATE TABLE IF NOT EXISTS storage.migrations ( + id integer PRIMARY KEY, + name varchar(100) UNIQUE NOT NULL, + hash varchar(40) NOT NULL, -- sha1 hex encoded hash of the file name and contents, to ensure it hasn't been altered since applying the migration + executed_at timestamp DEFAULT current_timestamp +); + +CREATE USER tealbase_storage_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; +GRANT ALL PRIVILEGES ON SCHEMA storage TO tealbase_storage_admin; +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA storage TO tealbase_storage_admin; +GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA storage TO tealbase_storage_admin; +ALTER USER tealbase_storage_admin SET search_path = "storage"; +ALTER table "storage".objects owner to tealbase_storage_admin; +ALTER table "storage".buckets owner to tealbase_storage_admin; +ALTER table "storage".migrations OWNER TO tealbase_storage_admin; +ALTER function "storage".foldername(text) owner to tealbase_storage_admin; +ALTER function "storage".filename(text) owner to 
tealbase_storage_admin; +ALTER function "storage".extension(text) owner to tealbase_storage_admin; +ALTER function "storage".search(text,text,int,int,int) owner to tealbase_storage_admin; + +-- migrate:down diff --git a/migrations/db/init-scripts/00000000000003-post-setup.sql b/migrations/db/init-scripts/00000000000003-post-setup.sql new file mode 100644 index 0000000..f7c9051 --- /dev/null +++ b/migrations/db/init-scripts/00000000000003-post-setup.sql @@ -0,0 +1,119 @@ +-- migrate:up + +ALTER ROLE tealbase_admin SET search_path TO "\$user",public,auth,extensions; +ALTER ROLE postgres SET search_path TO "\$user",public,extensions; + +-- Trigger for pg_cron +CREATE OR REPLACE FUNCTION extensions.grant_pg_cron_access() +RETURNS event_trigger +LANGUAGE plpgsql +AS $$ +DECLARE + schema_is_cron bool; +BEGIN + schema_is_cron = ( + SELECT n.nspname = 'cron' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_namespace AS n + ON ev.objid = n.oid + ); + + IF schema_is_cron + THEN + grant usage on schema cron to postgres with grant option; + + alter default privileges in schema cron grant all on tables to postgres with grant option; + alter default privileges in schema cron grant all on functions to postgres with grant option; + alter default privileges in schema cron grant all on sequences to postgres with grant option; + + alter default privileges for user tealbase_admin in schema cron grant all + on sequences to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on tables to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on functions to postgres with grant option; + + grant all privileges on all tables in schema cron to postgres with grant option; + + END IF; + +END; +$$; +CREATE EVENT TRIGGER issue_pg_cron_access ON ddl_command_end WHEN TAG in ('CREATE SCHEMA') +EXECUTE PROCEDURE extensions.grant_pg_cron_access(); +COMMENT ON FUNCTION 
extensions.grant_pg_cron_access IS 'Grants access to pg_cron'; + +-- Event trigger for pg_net +CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access() +RETURNS event_trigger +LANGUAGE plpgsql +AS $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_net' + ) + THEN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'tealbase_functions_admin' + ) + THEN + CREATE USER tealbase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + + GRANT USAGE ON SCHEMA net TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + END IF; +END; +$$; +COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net'; + +DO +$$ +BEGIN + IF NOT EXISTS ( + SELECT 1 + FROM pg_event_trigger + WHERE 
evtname = 'issue_pg_net_access' + ) THEN + CREATE EVENT TRIGGER issue_pg_net_access + ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE PROCEDURE extensions.grant_pg_net_access(); + END IF; +END +$$; + +-- tealbase dashboard user +CREATE ROLE dashboard_user NOSUPERUSER CREATEDB CREATEROLE REPLICATION; +GRANT ALL ON DATABASE postgres TO dashboard_user; +GRANT ALL ON SCHEMA auth TO dashboard_user; +GRANT ALL ON SCHEMA extensions TO dashboard_user; +GRANT ALL ON SCHEMA storage TO dashboard_user; +GRANT ALL ON ALL TABLES IN SCHEMA auth TO dashboard_user; +GRANT ALL ON ALL TABLES IN SCHEMA extensions TO dashboard_user; +-- GRANT ALL ON ALL TABLES IN SCHEMA storage TO dashboard_user; +GRANT ALL ON ALL SEQUENCES IN SCHEMA auth TO dashboard_user; +GRANT ALL ON ALL SEQUENCES IN SCHEMA storage TO dashboard_user; +GRANT ALL ON ALL SEQUENCES IN SCHEMA extensions TO dashboard_user; +GRANT ALL ON ALL ROUTINES IN SCHEMA auth TO dashboard_user; +GRANT ALL ON ALL ROUTINES IN SCHEMA storage TO dashboard_user; +GRANT ALL ON ALL ROUTINES IN SCHEMA extensions TO dashboard_user; + +-- migrate:down diff --git a/migrations/db/migrate.sh b/migrations/db/migrate.sh new file mode 100755 index 0000000..dd740ce --- /dev/null +++ b/migrations/db/migrate.sh @@ -0,0 +1,58 @@ +#!/bin/sh +set -eu + +####################################### +# Used by both ami and docker builds to initialise database schema. +# Env vars: +# POSTGRES_DB defaults to postgres +# POSTGRES_HOST defaults to localhost +# POSTGRES_PORT defaults to 5432 +# POSTGRES_PASSWORD defaults to "" +# USE_DBMATE defaults to "" +# Exit code: +# 0 if migration succeeds, non-zero on error. 
+####################################### + +export PGDATABASE="${POSTGRES_DB:-postgres}" +export PGHOST="${POSTGRES_HOST:-localhost}" +export PGPORT="${POSTGRES_PORT:-5432}" +export PGPASSWORD="${POSTGRES_PASSWORD:-}" + +# if args are supplied, simply forward to dbmate +connect="$PGPASSWORD@$PGHOST:$PGPORT/$PGDATABASE?sslmode=disable" +if [ "$#" -ne 0 ]; then + export DATABASE_URL="${DATABASE_URL:-postgres://tealbase_admin:$connect}" + exec dbmate "$@" + exit 0 +fi + +db=$( cd -- "$( dirname -- "$0" )" > /dev/null 2>&1 && pwd ) +if [ -z "${USE_DBMATE:-}" ]; then + # run init scripts as postgres user + for sql in "$db"/init-scripts/*.sql; do + echo "$0: running $sql" + psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -f "$sql" + done + psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -c "ALTER USER tealbase_admin WITH PASSWORD '$PGPASSWORD'" + # run migrations as super user - postgres user demoted in post-setup + for sql in "$db"/migrations/*.sql; do + echo "$0: running $sql" + psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U tealbase_admin -f "$sql" + done +else + # run init scripts as postgres user + DBMATE_MIGRATIONS_DIR="$db/init-scripts" DATABASE_URL="postgres://postgres:$connect" dbmate --no-dump-schema migrate + psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -c "ALTER USER tealbase_admin WITH PASSWORD '$PGPASSWORD'" + # run migrations as super user - postgres user demoted in post-setup + DBMATE_MIGRATIONS_DIR="$db/migrations" DATABASE_URL="postgres://tealbase_admin:$connect" dbmate --no-dump-schema migrate +fi + +# run any post migration script to update role passwords +postinit="/etc/postgresql.schema.sql" +if [ -e "$postinit" ]; then + echo "$0: running $postinit" + psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U tealbase_admin -f "$postinit" +fi + +# once done with everything, reset stats from init +psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U tealbase_admin -c 'SELECT 
extensions.pg_stat_statements_reset(); SELECT pg_stat_reset();' || true diff --git a/migrations/db/migrations/10000000000000_demote-postgres.sql b/migrations/db/migrations/10000000000000_demote-postgres.sql new file mode 100644 index 0000000..1f7e2e2 --- /dev/null +++ b/migrations/db/migrations/10000000000000_demote-postgres.sql @@ -0,0 +1,19 @@ +-- migrate:up + +-- demote postgres user +GRANT ALL ON DATABASE postgres TO postgres; +GRANT ALL ON SCHEMA auth TO postgres; +GRANT ALL ON SCHEMA extensions TO postgres; +GRANT ALL ON SCHEMA storage TO postgres; +GRANT ALL ON ALL TABLES IN SCHEMA auth TO postgres; +GRANT ALL ON ALL TABLES IN SCHEMA storage TO postgres; +GRANT ALL ON ALL TABLES IN SCHEMA extensions TO postgres; +GRANT ALL ON ALL SEQUENCES IN SCHEMA auth TO postgres; +GRANT ALL ON ALL SEQUENCES IN SCHEMA storage TO postgres; +GRANT ALL ON ALL SEQUENCES IN SCHEMA extensions TO postgres; +GRANT ALL ON ALL ROUTINES IN SCHEMA auth TO postgres; +GRANT ALL ON ALL ROUTINES IN SCHEMA storage TO postgres; +GRANT ALL ON ALL ROUTINES IN SCHEMA extensions TO postgres; +ALTER ROLE postgres NOSUPERUSER CREATEDB CREATEROLE LOGIN REPLICATION BYPASSRLS; + +-- migrate:down diff --git a/migrations/db/migrations/20211115181400_update-auth-permissions.sql b/migrations/db/migrations/20211115181400_update-auth-permissions.sql new file mode 100644 index 0000000..363eece --- /dev/null +++ b/migrations/db/migrations/20211115181400_update-auth-permissions.sql @@ -0,0 +1,22 @@ +-- migrate:up + +-- update auth schema permissions +GRANT ALL PRIVILEGES ON SCHEMA auth TO tealbase_auth_admin; +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA auth TO tealbase_auth_admin; +GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA auth TO tealbase_auth_admin; + +ALTER table IF EXISTS "auth".users OWNER TO tealbase_auth_admin; +ALTER table IF EXISTS "auth".refresh_tokens OWNER TO tealbase_auth_admin; +ALTER table IF EXISTS "auth".audit_log_entries OWNER TO tealbase_auth_admin; +ALTER table IF EXISTS 
"auth".instances OWNER TO tealbase_auth_admin; +ALTER table IF EXISTS "auth".schema_migrations OWNER TO tealbase_auth_admin; + +GRANT USAGE ON SCHEMA auth TO postgres; +GRANT ALL ON ALL TABLES IN SCHEMA auth TO postgres, dashboard_user; +GRANT ALL ON ALL SEQUENCES IN SCHEMA auth TO postgres, dashboard_user; +GRANT ALL ON ALL ROUTINES IN SCHEMA auth TO postgres, dashboard_user; +ALTER DEFAULT PRIVILEGES FOR ROLE tealbase_auth_admin IN SCHEMA auth GRANT ALL ON TABLES TO postgres, dashboard_user; +ALTER DEFAULT PRIVILEGES FOR ROLE tealbase_auth_admin IN SCHEMA auth GRANT ALL ON SEQUENCES TO postgres, dashboard_user; +ALTER DEFAULT PRIVILEGES FOR ROLE tealbase_auth_admin IN SCHEMA auth GRANT ALL ON ROUTINES TO postgres, dashboard_user; + +-- migrate:down diff --git a/migrations/db/migrations/20211118015519_create-realtime-schema.sql b/migrations/db/migrations/20211118015519_create-realtime-schema.sql new file mode 100644 index 0000000..e06bfb0 --- /dev/null +++ b/migrations/db/migrations/20211118015519_create-realtime-schema.sql @@ -0,0 +1,6 @@ +-- migrate:up + +-- create realtime schema for Realtime RLS (WALRUS) +CREATE SCHEMA IF NOT EXISTS realtime; + +-- migrate:down diff --git a/migrations/db/migrations/20211122051245_update-realtime-permissions.sql b/migrations/db/migrations/20211122051245_update-realtime-permissions.sql new file mode 100644 index 0000000..0875b83 --- /dev/null +++ b/migrations/db/migrations/20211122051245_update-realtime-permissions.sql @@ -0,0 +1,9 @@ +-- migrate:up + +-- update realtime schema permissions +GRANT USAGE ON SCHEMA realtime TO postgres; +GRANT ALL ON ALL TABLES IN SCHEMA realtime TO postgres, dashboard_user; +GRANT ALL ON ALL SEQUENCES IN SCHEMA realtime TO postgres, dashboard_user; +GRANT ALL ON ALL ROUTINES IN SCHEMA realtime TO postgres, dashboard_user; + +-- migrate:down diff --git a/migrations/db/migrations/20211124212715_update-auth-owner.sql b/migrations/db/migrations/20211124212715_update-auth-owner.sql new file mode 100644 
index 0000000..456b67b --- /dev/null +++ b/migrations/db/migrations/20211124212715_update-auth-owner.sql @@ -0,0 +1,8 @@ +-- migrate:up + +-- update owner for auth.uid, auth.role and auth.email functions +ALTER FUNCTION auth.uid owner to tealbase_auth_admin; +ALTER FUNCTION auth.role owner to tealbase_auth_admin; +ALTER FUNCTION auth.email owner to tealbase_auth_admin; + +-- migrate:down diff --git a/migrations/db/migrations/20211130151719_update-realtime-permissions.sql b/migrations/db/migrations/20211130151719_update-realtime-permissions.sql new file mode 100644 index 0000000..f9f2848 --- /dev/null +++ b/migrations/db/migrations/20211130151719_update-realtime-permissions.sql @@ -0,0 +1,8 @@ +-- migrate:up + +-- Update future objects' permissions +ALTER DEFAULT PRIVILEGES FOR ROLE tealbase_admin IN SCHEMA realtime GRANT ALL ON TABLES TO postgres, dashboard_user; +ALTER DEFAULT PRIVILEGES FOR ROLE tealbase_admin IN SCHEMA realtime GRANT ALL ON SEQUENCES TO postgres, dashboard_user; +ALTER DEFAULT PRIVILEGES FOR ROLE tealbase_admin IN SCHEMA realtime GRANT ALL ON ROUTINES TO postgres, dashboard_user; + +-- migrate:down diff --git a/migrations/db/migrations/20220118070449_enable-safeupdate-postgrest.sql b/migrations/db/migrations/20220118070449_enable-safeupdate-postgrest.sql new file mode 100644 index 0000000..f0d3a85 --- /dev/null +++ b/migrations/db/migrations/20220118070449_enable-safeupdate-postgrest.sql @@ -0,0 +1,4 @@ +-- migrate:up +ALTER ROLE authenticator SET session_preload_libraries = 'safeupdate'; + +-- migrate:down diff --git a/migrations/db/migrations/20220126121436_finer-postgrest-triggers.sql b/migrations/db/migrations/20220126121436_finer-postgrest-triggers.sql new file mode 100644 index 0000000..864bcc3 --- /dev/null +++ b/migrations/db/migrations/20220126121436_finer-postgrest-triggers.sql @@ -0,0 +1,70 @@ +-- migrate:up + +drop event trigger if exists api_restart; +drop function if exists extensions.notify_api_restart(); + +-- 
https://postgrest.org/en/latest/schema_cache.html#finer-grained-event-trigger +-- watch create and alter +CREATE OR REPLACE FUNCTION extensions.pgrst_ddl_watch() RETURNS event_trigger AS $$ +DECLARE + cmd record; +BEGIN + FOR cmd IN SELECT * FROM pg_event_trigger_ddl_commands() + LOOP + IF cmd.command_tag IN ( + 'CREATE SCHEMA', 'ALTER SCHEMA' + , 'CREATE TABLE', 'CREATE TABLE AS', 'SELECT INTO', 'ALTER TABLE' + , 'CREATE FOREIGN TABLE', 'ALTER FOREIGN TABLE' + , 'CREATE VIEW', 'ALTER VIEW' + , 'CREATE MATERIALIZED VIEW', 'ALTER MATERIALIZED VIEW' + , 'CREATE FUNCTION', 'ALTER FUNCTION' + , 'CREATE TRIGGER' + , 'CREATE TYPE' + , 'CREATE RULE' + , 'COMMENT' + ) + -- don't notify in case of CREATE TEMP table or other objects created on pg_temp + AND cmd.schema_name is distinct from 'pg_temp' + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$ LANGUAGE plpgsql; + +-- watch drop +CREATE OR REPLACE FUNCTION extensions.pgrst_drop_watch() RETURNS event_trigger AS $$ +DECLARE + obj record; +BEGIN + FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() + LOOP + IF obj.object_type IN ( + 'schema' + , 'table' + , 'foreign table' + , 'view' + , 'materialized view' + , 'function' + , 'trigger' + , 'type' + , 'rule' + ) + AND obj.is_temporary IS false -- no pg_temp objects + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$ LANGUAGE plpgsql; + +DROP EVENT TRIGGER IF EXISTS pgrst_ddl_watch; +CREATE EVENT TRIGGER pgrst_ddl_watch + ON ddl_command_end + EXECUTE PROCEDURE extensions.pgrst_ddl_watch(); + +DROP EVENT TRIGGER IF EXISTS pgrst_drop_watch; +CREATE EVENT TRIGGER pgrst_drop_watch + ON sql_drop + EXECUTE PROCEDURE extensions.pgrst_drop_watch(); + + +-- migrate:down diff --git a/migrations/db/migrations/20220224211803_fix-postgrest-supautils.sql b/migrations/db/migrations/20220224211803_fix-postgrest-supautils.sql new file mode 100644 index 0000000..b254b8a --- /dev/null +++ 
b/migrations/db/migrations/20220224211803_fix-postgrest-supautils.sql @@ -0,0 +1,21 @@ +-- migrate:up + +-- Note: supautils extension is not installed in docker image. + +DO $$ +DECLARE + supautils_exists boolean; +BEGIN + supautils_exists = ( + select count(*) = 1 + from pg_available_extensions + where name = 'supautils' + ); + + IF supautils_exists + THEN + ALTER ROLE authenticator SET session_preload_libraries = supautils, safeupdate; + END IF; +END $$; + +-- migrate:down diff --git a/migrations/db/migrations/20220317095840_pg_graphql.sql b/migrations/db/migrations/20220317095840_pg_graphql.sql new file mode 100644 index 0000000..19cf250 --- /dev/null +++ b/migrations/db/migrations/20220317095840_pg_graphql.sql @@ -0,0 +1,146 @@ +-- migrate:up +create schema if not exists graphql_public; + +-- GraphQL Placeholder Entrypoint +create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null +) + returns jsonb + language plpgsql +as $$ + DECLARE + server_version float; + BEGIN + server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float); + + IF server_version >= 14 THEN + RETURN jsonb_build_object( + 'data', null::jsonb, + 'errors', array['pg_graphql extension is not enabled.'] + ); + ELSE + RETURN jsonb_build_object( + 'data', null::jsonb, + 'errors', array['pg_graphql is only available on projects running Postgres 14 onwards.'] + ); + END IF; + END; +$$; + +grant usage on schema graphql_public to postgres, anon, authenticated, service_role; +alter default privileges in schema graphql_public grant all on tables to postgres, anon, authenticated, service_role; +alter default privileges in schema graphql_public grant all on functions to postgres, anon, authenticated, service_role; +alter default privileges in schema graphql_public grant all on sequences to postgres, anon, authenticated, service_role; + +alter default privileges for user 
tealbase_admin in schema graphql_public grant all + on sequences to postgres, anon, authenticated, service_role; +alter default privileges for user tealbase_admin in schema graphql_public grant all + on tables to postgres, anon, authenticated, service_role; +alter default privileges for user tealbase_admin in schema graphql_public grant all + on functions to postgres, anon, authenticated, service_role; + +-- Trigger upon enabling pg_graphql +CREATE OR REPLACE FUNCTION extensions.grant_pg_graphql_access() +RETURNS event_trigger +LANGUAGE plpgsql +AS $func$ + DECLARE + func_is_graphql_resolve bool; + BEGIN + func_is_graphql_resolve = ( + SELECT n.proname = 'resolve' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_proc AS n + ON ev.objid = n.oid + ); + + IF func_is_graphql_resolve + THEN + grant usage on schema graphql to postgres, anon, authenticated, service_role; + grant all on function graphql.resolve to postgres, anon, authenticated, service_role; + + alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role; + + DROP FUNCTION IF EXISTS graphql_public.graphql; + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language sql + as $$ + SELECT graphql.resolve(query, coalesce(variables, '{}')); + $$; + + grant execute on function graphql.resolve to postgres, anon, authenticated, service_role; + END IF; + + END; +$func$; + +DROP EVENT TRIGGER IF EXISTS issue_pg_graphql_access; +CREATE EVENT TRIGGER issue_pg_graphql_access ON ddl_command_end WHEN TAG in ('CREATE FUNCTION') +EXECUTE PROCEDURE 
extensions.grant_pg_graphql_access(); +COMMENT ON FUNCTION extensions.grant_pg_graphql_access IS 'Grants access to pg_graphql'; + +-- Trigger upon dropping the pg_graphql extension +CREATE OR REPLACE FUNCTION extensions.set_graphql_placeholder() +RETURNS event_trigger +LANGUAGE plpgsql +AS $func$ + DECLARE + graphql_is_dropped bool; + BEGIN + graphql_is_dropped = ( + SELECT ev.schema_name = 'graphql_public' + FROM pg_event_trigger_dropped_objects() AS ev + WHERE ev.schema_name = 'graphql_public' + ); + + IF graphql_is_dropped + THEN + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language plpgsql + as $$ + DECLARE + server_version float; + BEGIN + server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float); + + IF server_version >= 14 THEN + RETURN jsonb_build_object( + 'data', null::jsonb, + 'errors', array['pg_graphql extension is not enabled.'] + ); + ELSE + RETURN jsonb_build_object( + 'data', null::jsonb, + 'errors', array['pg_graphql is only available on projects running Postgres 14 onwards.'] + ); + END IF; + END; + $$; + END IF; + + END; +$func$; + +DROP EVENT TRIGGER IF EXISTS issue_graphql_placeholder; +CREATE EVENT TRIGGER issue_graphql_placeholder ON sql_drop WHEN TAG in ('DROP EXTENSION') +EXECUTE PROCEDURE extensions.set_graphql_placeholder(); +COMMENT ON FUNCTION extensions.set_graphql_placeholder IS 'Reintroduces placeholder function for graphql_public.graphql'; + +-- migrate:down diff --git a/migrations/db/migrations/20220321174452_fix-postgrest-alter-type-event-trigger.sql b/migrations/db/migrations/20220321174452_fix-postgrest-alter-type-event-trigger.sql new file mode 100644 index 0000000..339def9 --- /dev/null +++ b/migrations/db/migrations/20220321174452_fix-postgrest-alter-type-event-trigger.sql @@ -0,0 +1,70 @@ +-- migrate:up + +drop event trigger if exists api_restart; +drop 
function if exists extensions.notify_api_restart(); + +-- https://postgrest.org/en/latest/schema_cache.html#finer-grained-event-trigger +-- watch create and alter +CREATE OR REPLACE FUNCTION extensions.pgrst_ddl_watch() RETURNS event_trigger AS $$ +DECLARE + cmd record; +BEGIN + FOR cmd IN SELECT * FROM pg_event_trigger_ddl_commands() + LOOP + IF cmd.command_tag IN ( + 'CREATE SCHEMA', 'ALTER SCHEMA' + , 'CREATE TABLE', 'CREATE TABLE AS', 'SELECT INTO', 'ALTER TABLE' + , 'CREATE FOREIGN TABLE', 'ALTER FOREIGN TABLE' + , 'CREATE VIEW', 'ALTER VIEW' + , 'CREATE MATERIALIZED VIEW', 'ALTER MATERIALIZED VIEW' + , 'CREATE FUNCTION', 'ALTER FUNCTION' + , 'CREATE TRIGGER' + , 'CREATE TYPE', 'ALTER TYPE' + , 'CREATE RULE' + , 'COMMENT' + ) + -- don't notify in case of CREATE TEMP table or other objects created on pg_temp + AND cmd.schema_name is distinct from 'pg_temp' + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$ LANGUAGE plpgsql; + +-- watch drop +CREATE OR REPLACE FUNCTION extensions.pgrst_drop_watch() RETURNS event_trigger AS $$ +DECLARE + obj record; +BEGIN + FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() + LOOP + IF obj.object_type IN ( + 'schema' + , 'table' + , 'foreign table' + , 'view' + , 'materialized view' + , 'function' + , 'trigger' + , 'type' + , 'rule' + ) + AND obj.is_temporary IS false -- no pg_temp objects + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$ LANGUAGE plpgsql; + +DROP EVENT TRIGGER IF EXISTS pgrst_ddl_watch; +CREATE EVENT TRIGGER pgrst_ddl_watch + ON ddl_command_end + EXECUTE PROCEDURE extensions.pgrst_ddl_watch(); + +DROP EVENT TRIGGER IF EXISTS pgrst_drop_watch; +CREATE EVENT TRIGGER pgrst_drop_watch + ON sql_drop + EXECUTE PROCEDURE extensions.pgrst_drop_watch(); + + +-- migrate:down diff --git a/migrations/db/migrations/20220322085208_gotrue-session-limit.sql b/migrations/db/migrations/20220322085208_gotrue-session-limit.sql new file mode 100644 index 0000000..2b36901 --- 
/dev/null +++ b/migrations/db/migrations/20220322085208_gotrue-session-limit.sql @@ -0,0 +1,4 @@ +-- migrate:up +ALTER ROLE tealbase_auth_admin SET idle_in_transaction_session_timeout TO 60000; + +-- migrate:down diff --git a/migrations/db/migrations/20220404205710_pg_graphql-on-by-default.sql b/migrations/db/migrations/20220404205710_pg_graphql-on-by-default.sql new file mode 100644 index 0000000..452901e --- /dev/null +++ b/migrations/db/migrations/20220404205710_pg_graphql-on-by-default.sql @@ -0,0 +1,161 @@ +-- migrate:up + +-- Update Trigger upon enabling pg_graphql +create or replace function extensions.grant_pg_graphql_access() + returns event_trigger + language plpgsql +AS $func$ +DECLARE + func_is_graphql_resolve bool; +BEGIN + func_is_graphql_resolve = ( + SELECT n.proname = 'resolve' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_proc AS n + ON ev.objid = n.oid + ); + + IF func_is_graphql_resolve + THEN + grant usage on schema graphql to postgres, anon, authenticated, service_role; + grant all on function graphql.resolve to postgres, anon, authenticated, service_role; + + alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role; + + -- Update public wrapper to pass all arguments through to the pg_graphql resolve func + DROP FUNCTION IF EXISTS graphql_public.graphql; + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language sql + as $$ + -- This changed + select graphql.resolve( + query := query, + variables := coalesce(variables, '{}'), + "operationName" := "operationName", + extensions := extensions 
+ ); + $$; + + grant execute on function graphql.resolve to postgres, anon, authenticated, service_role; + END IF; + +END; +$func$; + +CREATE OR REPLACE FUNCTION extensions.set_graphql_placeholder() +RETURNS event_trigger +LANGUAGE plpgsql +AS $func$ + DECLARE + graphql_is_dropped bool; + BEGIN + graphql_is_dropped = ( + SELECT ev.schema_name = 'graphql_public' + FROM pg_event_trigger_dropped_objects() AS ev + WHERE ev.schema_name = 'graphql_public' + ); + + IF graphql_is_dropped + THEN + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language plpgsql + as $$ + DECLARE + server_version float; + BEGIN + server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float); + + IF server_version >= 14 THEN + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql extension is not enabled.' + ) + ) + ); + ELSE + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql is only available on projects running Postgres 14 onwards.' + ) + ) + ); + END IF; + END; + $$; + END IF; + + END; +$func$; + +-- GraphQL Placeholder Entrypoint +create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null +) + returns jsonb + language plpgsql +as $$ + DECLARE + server_version float; + BEGIN + server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float); + + IF server_version >= 14 THEN + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql extension is not enabled.' + ) + ) + ); + ELSE + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql is only available on projects running Postgres 14 onwards.' 
+ ) + ) + ); + END IF; + END; +$$; + + +drop extension if exists pg_graphql; +-- Avoids limitation of only being able to load the extension via dashboard +-- Only install as well if the extension is actually installed +DO $$ +DECLARE + graphql_exists boolean; +BEGIN + graphql_exists = ( + select count(*) = 1 + from pg_available_extensions + where name = 'pg_graphql' + ); + + IF graphql_exists + THEN + create extension if not exists pg_graphql; + END IF; +END $$; + +-- migrate:down diff --git a/migrations/db/migrations/20220609081115_grant-tealbase-auth-admin-and-tealbase-storage-admin-to-postgres.sql b/migrations/db/migrations/20220609081115_grant-tealbase-auth-admin-and-tealbase-storage-admin-to-postgres.sql new file mode 100644 index 0000000..31e908a --- /dev/null +++ b/migrations/db/migrations/20220609081115_grant-tealbase-auth-admin-and-tealbase-storage-admin-to-postgres.sql @@ -0,0 +1,10 @@ +-- migrate:up + +-- This is done so that the `postgres` role can manage auth tables triggers, +-- storage tables policies, etc. which unblocks the revocation of superuser +-- access. 
+-- +-- More context: https://www.notion.so/tealbase/RFC-Postgres-Permissions-I-40cb4f61bd4145fd9e75ce657c0e31dd#bf5d853436384e6e8e339d0a2e684cbb +grant tealbase_auth_admin, tealbase_storage_admin to postgres; + +-- migrate:down diff --git a/migrations/db/migrations/20220613123923_pg_graphql-pg-dump-perms.sql b/migrations/db/migrations/20220613123923_pg_graphql-pg-dump-perms.sql new file mode 100644 index 0000000..915b1c0 --- /dev/null +++ b/migrations/db/migrations/20220613123923_pg_graphql-pg-dump-perms.sql @@ -0,0 +1,74 @@ +-- migrate:up + +create or replace function extensions.grant_pg_graphql_access() + returns event_trigger + language plpgsql +AS $func$ +DECLARE + func_is_graphql_resolve bool; +BEGIN + func_is_graphql_resolve = ( + SELECT n.proname = 'resolve' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_proc AS n + ON ev.objid = n.oid + ); + + IF func_is_graphql_resolve + THEN + -- Update public wrapper to pass all arguments through to the pg_graphql resolve func + DROP FUNCTION IF EXISTS graphql_public.graphql; + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language sql + as $$ + select graphql.resolve( + query := query, + variables := coalesce(variables, '{}'), + "operationName" := "operationName", + extensions := extensions + ); + $$; + + -- This hook executes when `graphql.resolve` is created. 
That is not necessarily the last + -- function in the extension so we need to grant permissions on existing entities AND + -- update default permissions to any others that are created after `graphql.resolve` + grant usage on schema graphql to postgres, anon, authenticated, service_role; + grant select on all tables in schema graphql to postgres, anon, authenticated, service_role; + grant execute on all functions in schema graphql to postgres, anon, authenticated, service_role; + grant all on all sequences in schema graphql to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role; + END IF; + +END; +$func$; + +-- Cycle the extension off and back on to apply the permissions update. 
+ +drop extension if exists pg_graphql; +-- Avoids limitation of only being able to load the extension via dashboard +-- Only install as well if the extension is actually installed +DO $$ +DECLARE + graphql_exists boolean; +BEGIN + graphql_exists = ( + select count(*) = 1 + from pg_available_extensions + where name = 'pg_graphql' + ); + + IF graphql_exists + THEN + create extension if not exists pg_graphql; + END IF; +END $$; + +-- migrate:down diff --git a/migrations/db/migrations/20220713082019_pg_cron-pg_net-temp-perms-fix.sql b/migrations/db/migrations/20220713082019_pg_cron-pg_net-temp-perms-fix.sql new file mode 100644 index 0000000..e3477da --- /dev/null +++ b/migrations/db/migrations/20220713082019_pg_cron-pg_net-temp-perms-fix.sql @@ -0,0 +1,73 @@ +-- migrate:up +DO $$ +DECLARE + pg_cron_installed boolean; +BEGIN + -- checks if pg_cron is enabled + pg_cron_installed = ( + select count(*) = 1 + from pg_available_extensions + where name = 'pg_cron' + and installed_version is not null + ); + + IF pg_cron_installed + THEN + grant usage on schema cron to postgres with grant option; + + alter default privileges in schema cron grant all on tables to postgres with grant option; + alter default privileges in schema cron grant all on functions to postgres with grant option; + alter default privileges in schema cron grant all on sequences to postgres with grant option; + + alter default privileges for user tealbase_admin in schema cron grant all + on sequences to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on tables to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on functions to postgres with grant option; + + grant all privileges on all tables in schema cron to postgres with grant option; + END IF; +END $$; + +DO $$ +DECLARE + pg_net_installed boolean; +BEGIN + -- checks if pg_net is enabled + pg_net_installed = ( + select count(*) = 1 + from 
pg_available_extensions + where name = 'pg_net' + and installed_version is not null + + ); + + IF pg_net_installed + THEN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'tealbase_functions_admin' + ) + THEN + CREATE USER tealbase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + + GRANT USAGE ON SCHEMA net TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + END IF; +END $$; + +-- migrate:down diff --git a/migrations/db/migrations/20221028101028_set_authenticator_timeout.sql b/migrations/db/migrations/20221028101028_set_authenticator_timeout.sql new file mode 100644 index 0000000..d38c2bf --- /dev/null +++ b/migrations/db/migrations/20221028101028_set_authenticator_timeout.sql @@ -0,0 +1,5 @@ +-- migrate:up +alter role authenticator set statement_timeout = '8s'; + +-- migrate:down + 
diff --git a/migrations/db/migrations/20221103090837_revoke_admin.sql b/migrations/db/migrations/20221103090837_revoke_admin.sql new file mode 100644 index 0000000..5565694 --- /dev/null +++ b/migrations/db/migrations/20221103090837_revoke_admin.sql @@ -0,0 +1,5 @@ +-- migrate:up +revoke tealbase_admin from authenticator; + +-- migrate:down + diff --git a/migrations/db/migrations/20221207154255_create_pgsodium_and_vault.sql b/migrations/db/migrations/20221207154255_create_pgsodium_and_vault.sql new file mode 100644 index 0000000..cd0da1f --- /dev/null +++ b/migrations/db/migrations/20221207154255_create_pgsodium_and_vault.sql @@ -0,0 +1,15 @@ +-- migrate:up + +create extension if not exists pgsodium; + +grant pgsodium_keyiduser to postgres with admin option; +grant pgsodium_keyholder to postgres with admin option; +grant pgsodium_keymaker to postgres with admin option; + +grant execute on function pgsodium.crypto_aead_det_decrypt(bytea, bytea, uuid, bytea) to service_role; +grant execute on function pgsodium.crypto_aead_det_encrypt(bytea, bytea, uuid, bytea) to service_role; +grant execute on function pgsodium.crypto_aead_det_keygen to service_role; + +-- create extension if not exists tealbase_vault; + +-- migrate:down diff --git a/migrations/db/migrations/20230201083204_grant_auth_roles_to_postgres.sql b/migrations/db/migrations/20230201083204_grant_auth_roles_to_postgres.sql new file mode 100644 index 0000000..f975813 --- /dev/null +++ b/migrations/db/migrations/20230201083204_grant_auth_roles_to_postgres.sql @@ -0,0 +1,5 @@ +-- migrate:up +grant anon, authenticated, service_role to postgres; + +-- migrate:down + diff --git a/migrations/docker-compose.yaml b/migrations/docker-compose.yaml new file mode 100644 index 0000000..47895cb --- /dev/null +++ b/migrations/docker-compose.yaml @@ -0,0 +1,49 @@ +# Usage +# Start: docker-compose up +# Stop: docker-compose down -v + +version: "3.8" + +services: + db: + image: tealbase/postgres:15.1.0.11 + restart: "no" + 
ports: + - 5478:5432 + healthcheck: + test: pg_isready -U postgres -h localhost + interval: 2s + timeout: 2s + retries: 10 + environment: + POSTGRES_HOST: /var/run/postgresql + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + volumes: + - ../ansible/files/postgresql_config/pg_hba.conf.j2:/etc/postgresql/pg_hba.conf + - ./db/init-scripts:/docker-entrypoint-initdb.d/init-scripts + - ./db/migrations:/docker-entrypoint-initdb.d/migrations + - ./db/migrate.sh:/docker-entrypoint-initdb.d/migrate.sh + + pg_prove: + image: horrendo/pg_prove + depends_on: + db: + condition: service_healthy + environment: + PGHOST: db + PGUSER: postgres + PGPASSWORD: ${POSTGRES_PASSWORD} + volumes: + - ./tests:/tests + command: pg_prove /tests/test.sql + + dbmate: + image: ghcr.io/amacneil/dbmate:main + depends_on: + db: + condition: service_healthy + volumes: + - ./schema.sql:/db/schema.sql + environment: + DATABASE_URL: postgres://postgres:${POSTGRES_PASSWORD}@db/postgres?sslmode=disable + command: dump diff --git a/migrations/schema.sql b/migrations/schema.sql new file mode 100644 index 0000000..7b8fb43 --- /dev/null +++ b/migrations/schema.sql @@ -0,0 +1,996 @@ +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET xmloption = content; +SET client_min_messages = warning; +SET row_security = off; + +-- +-- Name: auth; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA auth; + + +-- +-- Name: extensions; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA extensions; + + +-- +-- Name: graphql; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA graphql; + + +-- +-- Name: graphql_public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA graphql_public; + + +-- +-- Name: pgbouncer; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA pgbouncer; + + +-- +-- Name: pgsodium; 
Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA pgsodium; + + +-- +-- Name: pgsodium; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pgsodium WITH SCHEMA pgsodium; + + +-- +-- Name: EXTENSION pgsodium; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pgsodium IS 'Pgsodium is a modern cryptography library for Postgres.'; + + +-- +-- Name: realtime; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA realtime; + + +-- +-- Name: storage; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA storage; + + +-- +-- Name: pg_graphql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pg_graphql WITH SCHEMA graphql; + + +-- +-- Name: EXTENSION pg_graphql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pg_graphql IS 'pg_graphql: GraphQL support'; + + +-- +-- Name: pg_stat_statements; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pg_stat_statements WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pg_stat_statements; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pg_stat_statements IS 'track planning and execution statistics of all SQL statements executed'; + + +-- +-- Name: pgcrypto; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pgcrypto WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pgcrypto; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pgcrypto IS 'cryptographic functions'; + + +-- +-- Name: pgjwt; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pgjwt WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pgjwt; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pgjwt IS 'JSON Web Token API for Postgresql'; + + +-- +-- Name: uuid-ossp; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION "uuid-ossp"; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION 
"uuid-ossp" IS 'generate universally unique identifiers (UUIDs)'; + + +-- +-- Name: email(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.email() RETURNS text + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.email', true), '')::text; +$$; + + +-- +-- Name: role(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.role() RETURNS text + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.role', true), '')::text; +$$; + + +-- +-- Name: uid(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.uid() RETURNS uuid + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.sub', true), '')::uuid; +$$; + + +-- +-- Name: grant_pg_cron_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_cron_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +DECLARE + schema_is_cron bool; +BEGIN + schema_is_cron = ( + SELECT n.nspname = 'cron' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_namespace AS n + ON ev.objid = n.oid + ); + + IF schema_is_cron + THEN + grant usage on schema cron to postgres with grant option; + + alter default privileges in schema cron grant all on tables to postgres with grant option; + alter default privileges in schema cron grant all on functions to postgres with grant option; + alter default privileges in schema cron grant all on sequences to postgres with grant option; + + alter default privileges for user tealbase_admin in schema cron grant all + on sequences to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on tables to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on functions to postgres with grant option; + + grant all privileges on all tables in schema cron to postgres with grant option; + + END IF; + +END; +$$; + + +-- +-- 
Name: FUNCTION grant_pg_cron_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_cron_access() IS 'Grants access to pg_cron'; + + +-- +-- Name: grant_pg_graphql_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_graphql_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $_$ +DECLARE + func_is_graphql_resolve bool; +BEGIN + func_is_graphql_resolve = ( + SELECT n.proname = 'resolve' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_proc AS n + ON ev.objid = n.oid + ); + + IF func_is_graphql_resolve + THEN + -- Update public wrapper to pass all arguments through to the pg_graphql resolve func + DROP FUNCTION IF EXISTS graphql_public.graphql; + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language sql + as $$ + select graphql.resolve( + query := query, + variables := coalesce(variables, '{}'), + "operationName" := "operationName", + extensions := extensions + ); + $$; + + -- This hook executes when `graphql.resolve` is created. 
That is not necessarily the last + -- function in the extension so we need to grant permissions on existing entities AND + -- update default permissions to any others that are created after `graphql.resolve` + grant usage on schema graphql to postgres, anon, authenticated, service_role; + grant select on all tables in schema graphql to postgres, anon, authenticated, service_role; + grant execute on all functions in schema graphql to postgres, anon, authenticated, service_role; + grant all on all sequences in schema graphql to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role; + END IF; + +END; +$_$; + + +-- +-- Name: FUNCTION grant_pg_graphql_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_graphql_access() IS 'Grants access to pg_graphql'; + + +-- +-- Name: grant_pg_net_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_net_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_net' + ) + THEN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'tealbase_functions_admin' + ) + THEN + CREATE USER tealbase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + + GRANT USAGE ON SCHEMA net TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, 
timeout_milliseconds integer) SECURITY DEFINER; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + END IF; +END; +$$; + + +-- +-- Name: FUNCTION grant_pg_net_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_net_access() IS 'Grants access to pg_net'; + + +-- +-- Name: pgrst_ddl_watch(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.pgrst_ddl_watch() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +DECLARE + cmd record; +BEGIN + FOR cmd IN SELECT * FROM pg_event_trigger_ddl_commands() + LOOP + IF cmd.command_tag IN ( + 'CREATE SCHEMA', 'ALTER SCHEMA' + , 'CREATE TABLE', 'CREATE TABLE AS', 'SELECT INTO', 'ALTER TABLE' + , 'CREATE FOREIGN TABLE', 'ALTER FOREIGN TABLE' + , 'CREATE VIEW', 'ALTER VIEW' + , 'CREATE MATERIALIZED VIEW', 'ALTER MATERIALIZED VIEW' + , 'CREATE FUNCTION', 'ALTER FUNCTION' + , 'CREATE TRIGGER' + , 'CREATE TYPE', 'ALTER TYPE' + , 'CREATE RULE' + , 'COMMENT' + ) + -- don't notify in case of CREATE TEMP table or other objects created on pg_temp + AND cmd.schema_name is distinct from 'pg_temp' + THEN + NOTIFY pgrst, 'reload schema'; + 
END IF; + END LOOP; +END; $$; + + +-- +-- Name: pgrst_drop_watch(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.pgrst_drop_watch() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +DECLARE + obj record; +BEGIN + FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() + LOOP + IF obj.object_type IN ( + 'schema' + , 'table' + , 'foreign table' + , 'view' + , 'materialized view' + , 'function' + , 'trigger' + , 'type' + , 'rule' + ) + AND obj.is_temporary IS false -- no pg_temp objects + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$; + + +-- +-- Name: set_graphql_placeholder(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.set_graphql_placeholder() RETURNS event_trigger + LANGUAGE plpgsql + AS $_$ + DECLARE + graphql_is_dropped bool; + BEGIN + graphql_is_dropped = ( + SELECT ev.schema_name = 'graphql_public' + FROM pg_event_trigger_dropped_objects() AS ev + WHERE ev.schema_name = 'graphql_public' + ); + + IF graphql_is_dropped + THEN + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language plpgsql + as $$ + DECLARE + server_version float; + BEGIN + server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float); + + IF server_version >= 14 THEN + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql extension is not enabled.' + ) + ) + ); + ELSE + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql is only available on projects running Postgres 14 onwards.' 
+ ) + ) + ); + END IF; + END; + $$; + END IF; + + END; +$_$; + + +-- +-- Name: FUNCTION set_graphql_placeholder(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.set_graphql_placeholder() IS 'Reintroduces placeholder function for graphql_public.graphql'; + + +-- +-- Name: get_auth(text); Type: FUNCTION; Schema: pgbouncer; Owner: - +-- + +CREATE FUNCTION pgbouncer.get_auth(p_usename text) RETURNS TABLE(username text, password text) + LANGUAGE plpgsql SECURITY DEFINER + AS $$ +BEGIN + RAISE WARNING 'PgBouncer auth request: %', p_usename; + + RETURN QUERY + SELECT usename::TEXT, passwd::TEXT FROM pg_catalog.pg_shadow + WHERE usename = p_usename; +END; +$$; + + +-- +-- Name: extension(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.extension(name text) RETURNS text + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +_filename text; +BEGIN + select string_to_array(name, '/') into _parts; + select _parts[array_length(_parts,1)] into _filename; + -- @todo return the last part instead of 2 + return split_part(_filename, '.', 2); +END +$$; + + +-- +-- Name: filename(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.filename(name text) RETURNS text + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[array_length(_parts,1)]; +END +$$; + + +-- +-- Name: foldername(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.foldername(name text) RETURNS text[] + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[1:array_length(_parts,1)-1]; +END +$$; + + +-- +-- Name: search(text, text, integer, integer, integer); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.search(prefix text, bucketname text, limits integer DEFAULT 100, levels integer DEFAULT 1, offsets integer DEFAULT 0) RETURNS TABLE(name 
text, id uuid, updated_at timestamp with time zone, created_at timestamp with time zone, last_accessed_at timestamp with time zone, metadata jsonb) + LANGUAGE plpgsql + AS $$ +DECLARE +_bucketId text; +BEGIN + -- will be replaced by migrations when server starts + -- saving space for cloud-init +END +$$; + + +SET default_tablespace = ''; + +SET default_table_access_method = heap; + +-- +-- Name: audit_log_entries; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.audit_log_entries ( + instance_id uuid, + id uuid NOT NULL, + payload json, + created_at timestamp with time zone +); + + +-- +-- Name: TABLE audit_log_entries; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.audit_log_entries IS 'Auth: Audit trail for user actions.'; + + +-- +-- Name: instances; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.instances ( + id uuid NOT NULL, + uuid uuid, + raw_base_config text, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE instances; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.instances IS 'Auth: Manages users across multiple sites.'; + + +-- +-- Name: refresh_tokens; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.refresh_tokens ( + instance_id uuid, + id bigint NOT NULL, + token character varying(255), + user_id character varying(255), + revoked boolean, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE refresh_tokens; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.refresh_tokens IS 'Auth: Store of tokens used to refresh JWT tokens once they expire.'; + + +-- +-- Name: refresh_tokens_id_seq; Type: SEQUENCE; Schema: auth; Owner: - +-- + +CREATE SEQUENCE auth.refresh_tokens_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: refresh_tokens_id_seq; Type: SEQUENCE OWNED BY; Schema: auth; Owner: - +-- + +ALTER SEQUENCE 
auth.refresh_tokens_id_seq OWNED BY auth.refresh_tokens.id; + + +-- +-- Name: schema_migrations; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.schema_migrations ( + version character varying(255) NOT NULL +); + + +-- +-- Name: TABLE schema_migrations; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.schema_migrations IS 'Auth: Manages updates to the auth system.'; + + +-- +-- Name: users; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.users ( + instance_id uuid, + id uuid NOT NULL, + aud character varying(255), + role character varying(255), + email character varying(255), + encrypted_password character varying(255), + confirmed_at timestamp with time zone, + invited_at timestamp with time zone, + confirmation_token character varying(255), + confirmation_sent_at timestamp with time zone, + recovery_token character varying(255), + recovery_sent_at timestamp with time zone, + email_change_token character varying(255), + email_change character varying(255), + email_change_sent_at timestamp with time zone, + last_sign_in_at timestamp with time zone, + raw_app_meta_data jsonb, + raw_user_meta_data jsonb, + is_super_admin boolean, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE users; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.users IS 'Auth: Stores user login data within a secure schema.'; + + +-- +-- Name: schema_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.schema_migrations ( + version character varying(255) NOT NULL +); + + +-- +-- Name: buckets; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.buckets ( + id text NOT NULL, + name text NOT NULL, + owner uuid, + created_at timestamp with time zone DEFAULT now(), + updated_at timestamp with time zone DEFAULT now() +); + + +-- +-- Name: migrations; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.migrations ( + id integer NOT 
NULL, + name character varying(100) NOT NULL, + hash character varying(40) NOT NULL, + executed_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP +); + + +-- +-- Name: objects; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.objects ( + id uuid DEFAULT extensions.uuid_generate_v4() NOT NULL, + bucket_id text, + name text, + owner uuid, + created_at timestamp with time zone DEFAULT now(), + updated_at timestamp with time zone DEFAULT now(), + last_accessed_at timestamp with time zone DEFAULT now(), + metadata jsonb +); + + +-- +-- Name: refresh_tokens id; Type: DEFAULT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.refresh_tokens ALTER COLUMN id SET DEFAULT nextval('auth.refresh_tokens_id_seq'::regclass); + + +-- +-- Name: audit_log_entries audit_log_entries_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.audit_log_entries + ADD CONSTRAINT audit_log_entries_pkey PRIMARY KEY (id); + + +-- +-- Name: instances instances_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.instances + ADD CONSTRAINT instances_pkey PRIMARY KEY (id); + + +-- +-- Name: refresh_tokens refresh_tokens_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.refresh_tokens + ADD CONSTRAINT refresh_tokens_pkey PRIMARY KEY (id); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: users users_email_key; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.users + ADD CONSTRAINT users_email_key UNIQUE (email); + + +-- +-- Name: users users_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.users + ADD CONSTRAINT users_pkey PRIMARY KEY (id); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY 
public.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: buckets buckets_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.buckets + ADD CONSTRAINT buckets_pkey PRIMARY KEY (id); + + +-- +-- Name: migrations migrations_name_key; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.migrations + ADD CONSTRAINT migrations_name_key UNIQUE (name); + + +-- +-- Name: migrations migrations_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.migrations + ADD CONSTRAINT migrations_pkey PRIMARY KEY (id); + + +-- +-- Name: objects objects_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT objects_pkey PRIMARY KEY (id); + + +-- +-- Name: audit_logs_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX audit_logs_instance_id_idx ON auth.audit_log_entries USING btree (instance_id); + + +-- +-- Name: refresh_tokens_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_instance_id_idx ON auth.refresh_tokens USING btree (instance_id); + + +-- +-- Name: refresh_tokens_instance_id_user_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_instance_id_user_id_idx ON auth.refresh_tokens USING btree (instance_id, user_id); + + +-- +-- Name: refresh_tokens_token_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_token_idx ON auth.refresh_tokens USING btree (token); + + +-- +-- Name: users_instance_id_email_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX users_instance_id_email_idx ON auth.users USING btree (instance_id, email); + + +-- +-- Name: users_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX users_instance_id_idx ON auth.users USING btree (instance_id); + + +-- +-- Name: bname; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE UNIQUE INDEX bname ON 
storage.buckets USING btree (name); + + +-- +-- Name: bucketid_objname; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE UNIQUE INDEX bucketid_objname ON storage.objects USING btree (bucket_id, name); + + +-- +-- Name: name_prefix_search; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE INDEX name_prefix_search ON storage.objects USING btree (name text_pattern_ops); + + +-- +-- Name: buckets buckets_owner_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.buckets + ADD CONSTRAINT buckets_owner_fkey FOREIGN KEY (owner) REFERENCES auth.users(id); + + +-- +-- Name: objects objects_bucketId_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT "objects_bucketId_fkey" FOREIGN KEY (bucket_id) REFERENCES storage.buckets(id); + + +-- +-- Name: objects objects_owner_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT objects_owner_fkey FOREIGN KEY (owner) REFERENCES auth.users(id); + + +-- +-- Name: objects; Type: ROW SECURITY; Schema: storage; Owner: - +-- + +ALTER TABLE storage.objects ENABLE ROW LEVEL SECURITY; + +-- +-- Name: tealbase_realtime; Type: PUBLICATION; Schema: -; Owner: - +-- + +CREATE PUBLICATION tealbase_realtime WITH (publish = 'insert, update, delete, truncate'); + + +-- +-- Name: issue_graphql_placeholder; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_graphql_placeholder ON sql_drop + WHEN TAG IN ('DROP EXTENSION') + EXECUTE FUNCTION extensions.set_graphql_placeholder(); + + +-- +-- Name: issue_pg_cron_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_cron_access ON ddl_command_end + WHEN TAG IN ('CREATE SCHEMA') + EXECUTE FUNCTION extensions.grant_pg_cron_access(); + + +-- +-- Name: issue_pg_graphql_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_graphql_access ON ddl_command_end + WHEN TAG IN 
('CREATE FUNCTION') + EXECUTE FUNCTION extensions.grant_pg_graphql_access(); + + +-- +-- Name: issue_pg_net_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_net_access(); + + +-- +-- Name: pgrst_ddl_watch; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER pgrst_ddl_watch ON ddl_command_end + EXECUTE FUNCTION extensions.pgrst_ddl_watch(); + + +-- +-- Name: pgrst_drop_watch; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER pgrst_drop_watch ON sql_drop + EXECUTE FUNCTION extensions.pgrst_drop_watch(); + + +-- +-- PostgreSQL database dump complete +-- + + +-- +-- Dbmate schema migrations +-- + diff --git a/migrations/tests/database/exists.sql b/migrations/tests/database/exists.sql new file mode 100644 index 0000000..48770e0 --- /dev/null +++ b/migrations/tests/database/exists.sql @@ -0,0 +1,10 @@ + +SELECT schemas_are(ARRAY[ + 'public', + 'auth', + 'extensions', + 'graphql', + 'graphql_public', + 'realtime', + 'storage' + ]); diff --git a/migrations/tests/database/privs.sql b/migrations/tests/database/privs.sql new file mode 100644 index 0000000..cedf41f --- /dev/null +++ b/migrations/tests/database/privs.sql @@ -0,0 +1,8 @@ + +SELECT database_privs_are( + 'postgres', 'postgres', ARRAY['CONNECT', 'TEMPORARY', 'CREATE'] +); + +SELECT function_privs_are('pgsodium', 'crypto_aead_det_decrypt', array['bytea', 'bytea', 'uuid', 'bytea'], 'service_role', array['EXECUTE']); +SELECT function_privs_are('pgsodium', 'crypto_aead_det_encrypt', array['bytea', 'bytea', 'uuid', 'bytea'], 'service_role', array['EXECUTE']); +SELECT function_privs_are('pgsodium', 'crypto_aead_det_keygen', array[]::text[], 'service_role', array['EXECUTE']); diff --git a/migrations/tests/database/test.sql b/migrations/tests/database/test.sql new file mode 100644 index 0000000..465c38a --- /dev/null +++ 
b/migrations/tests/database/test.sql @@ -0,0 +1,3 @@ + +\ir exists.sql +\ir privs.sql diff --git a/migrations/tests/fixtures.sql b/migrations/tests/fixtures.sql new file mode 100644 index 0000000..3699e1a --- /dev/null +++ b/migrations/tests/fixtures.sql @@ -0,0 +1,67 @@ +CREATE ROLE test_user_role; + +CREATE ROLE test_admin_role; + +GRANT authenticated TO test_user_role; + +GRANT postgres TO test_admin_role; + +INSERT INTO auth.users (id, "role", email) + VALUES (gen_random_uuid (), 'test_user_role', 'bob@tealbase.com') +RETURNING + * \gset bob_ + +INSERT INTO auth.users (id, "role", email) + VALUES (gen_random_uuid (), 'test_user_role', 'alice@tealbase.com') +RETURNING + * \gset alice_ + +INSERT INTO auth.users (id, "role", email) + VALUES (gen_random_uuid (), 'test_admin_role', 'admin@tealbase.com') +RETURNING + * \gset admin_ + +CREATE OR REPLACE FUNCTION test_logout () + RETURNS void + LANGUAGE plpgsql + AS $$ +BEGIN + PERFORM + set_config('request.jwt.claim.sub', NULL, TRUE); + PERFORM + set_config('request.jwt.claim.role', NULL, TRUE); + PERFORM + set_config('request.jwt.claim.email', NULL, TRUE); + RESET ROLE; +END; +$$; + +CREATE OR REPLACE FUNCTION test_login (user_email text, logout_first boolean = TRUE) + RETURNS auth.users + LANGUAGE plpgsql + AS $$ +DECLARE + auth_user auth.users; +BEGIN + IF logout_first THEN + PERFORM + test_logout (); + END IF; + SELECT + * INTO auth_user + FROM + auth.users + WHERE + email = user_email; + PERFORM + set_config('request.jwt.claim.sub', (auth_user).id::text, TRUE); + PERFORM + set_config('request.jwt.claim.role', (auth_user).ROLE, TRUE); + PERFORM + set_config('request.jwt.claim.email', (auth_user).email, TRUE); + RAISE NOTICE '%', format( 'SET ROLE %I; -- Logging in as %L (%L)', (auth_user).ROLE, (auth_user).id, (auth_user).email); + EXECUTE format('SET ROLE %I', (auth_user).ROLE); + RETURN auth_user; +END; +$$; + diff --git a/migrations/tests/storage/exists.sql b/migrations/tests/storage/exists.sql new file mode 
100644 index 0000000..fae3d10 --- /dev/null +++ b/migrations/tests/storage/exists.sql @@ -0,0 +1,13 @@ + +-- Sanity test object existence in storage schema + +select has_table('storage'::name, 'buckets'::name); +select has_table('storage'::name, 'objects'::name); +select has_table('storage'::name, 'migrations'::name); +select has_function('storage'::name, 'foldername'::name); +select has_function('storage'::name, 'filename'::name); +select has_function('storage'::name, 'extension'::name); +select has_function('storage'::name, 'search'::name); + +select todo('This test should probably fail.'); select schema_privs_are('storage', 'anon', ARRAY['USAGE']); + diff --git a/migrations/tests/storage/test.sql b/migrations/tests/storage/test.sql new file mode 100644 index 0000000..4fd9862 --- /dev/null +++ b/migrations/tests/storage/test.sql @@ -0,0 +1,2 @@ + +\ir exists.sql diff --git a/migrations/tests/test.sql b/migrations/tests/test.sql new file mode 100644 index 0000000..637fef9 --- /dev/null +++ b/migrations/tests/test.sql @@ -0,0 +1,13 @@ +CREATE EXTENSION IF NOT EXISTS pgtap; + +BEGIN; + +SELECT plan(13); + +\ir fixtures.sql +\ir database/test.sql +\ir storage/test.sql + +SELECT * FROM finish(); + +ROLLBACK; diff --git a/scripts/02-credentials_cleanup.sh b/scripts/02-credentials_cleanup.sh index d1b359a..a7b966f 100644 --- a/scripts/02-credentials_cleanup.sh +++ b/scripts/02-credentials_cleanup.sh @@ -1 +1 @@ -sudo rm /home/ubuntu/.ssh/authorized_keys \ No newline at end of file +sudo rm /home/ubuntu/.ssh/authorized_keys diff --git a/scripts/90-cleanup.sh b/scripts/90-cleanup.sh index 23f6820..2e6a3f1 100644 --- a/scripts/90-cleanup.sh +++ b/scripts/90-cleanup.sh @@ -18,12 +18,31 @@ if [ -n "$(command -v yum)" ]; then yum update -y yum clean all elif [ -n "$(command -v apt-get)" ]; then + # Cleanup more packages + apt-get -y remove --purge \ + automake \ + autoconf \ + autotools-dev \ + cmake-data \ + cpp-8 \ + cpp-9 \ + cpp-10 \ + gcc-8 \ + gcc-9 \ + gcc-10 \ + git \ 
+ git-man \ + ansible \ + libicu-dev \ + libcgal-dev \ + libgcc-9-dev \ + libgcc-8-dev \ + linux-headers-5.11.0-1021-aws apt-get -y update apt-get -y upgrade apt-get -y autoremove apt-get -y autoclean fi - rm -rf /tmp/* /var/tmp/* history -c cat /dev/null > /root/.bash_history @@ -53,4 +72,4 @@ dd if=/dev/zero of=/zerofile & sleep 5 done sync; rm /zerofile; sync -cat /dev/null > /var/log/lastlog; cat /dev/null > /var/log/wtmp \ No newline at end of file +cat /dev/null > /var/log/lastlog; cat /dev/null > /var/log/wtmp diff --git a/scripts/91-log_cleanup.sh b/scripts/91-log_cleanup.sh index 8521164..39e5ff3 100644 --- a/scripts/91-log_cleanup.sh +++ b/scripts/91-log_cleanup.sh @@ -8,4 +8,14 @@ rm -rf /var/log/* touch /var/log/auth.log touch /var/log/pgbouncer.log -chown postgres:postgres /var/log/pgbouncer.log \ No newline at end of file +chown pgbouncer:postgres /var/log/pgbouncer.log + +mkdir /var/log/postgresql +chown postgres:postgres /var/log/postgresql + +mkdir /var/log/wal-g +cd /var/log/wal-g +touch backup-push.log backup-fetch.log wal-push.log wal-fetch.log +chown -R postgres:postgres /var/log/wal-g +chmod -R 0300 /var/log/wal-g + From bb675d3e728423d2ab0b3295e5124606119e5ba5 Mon Sep 17 00:00:00 2001 From: Ekjot Singh Date: Tue, 15 Jul 2025 17:12:00 +0530 Subject: [PATCH 3/9] postgres-15.1.0.34-rc1 --- .github/workflows/ami-release.yml | 16 +++++++++++----- common.vars.pkr.hcl | 2 +- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ami-release.yml b/.github/workflows/ami-release.yml index e078af1..c1394ae 100644 --- a/.github/workflows/ami-release.yml +++ b/.github/workflows/ami-release.yml @@ -4,6 +4,7 @@ on: push: branches: - develop + - da/test-tags workflow_dispatch: jobs: @@ -15,10 +16,10 @@ jobs: - name: Checkout Repo uses: actions/checkout@v2 - - name: Build AMI - run: | - GIT_SHA=$(git rev-parse HEAD) - packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" 
-var-file="development-arm.vars.pkr.hcl" -var-file="common.vars.pkr.hcl" amazon-arm64.pkr.hcl + # - name: Build AMI + # run: | + # GIT_SHA=${{github.sha}} + # packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common.vars.pkr.hcl" amazon-arm64.pkr.hcl - name: Grab release version id: process_release_version @@ -28,12 +29,17 @@ jobs: echo "version=$VERSION" >> "$GITHUB_OUTPUT" echo "git_sha=$GIT_SHA" >> "$GITHUB_OUTPUT" + - name: echo output + run: | + echo "sha was ${{github.sha}} ${{ steps.process_release_vesion.outputs.git_sha }}" + echo "version was ${{ steps.process_release_version.outputs.version }}" + - name: Create release uses: softprops/action-gh-release@v1 with: name: ${{ steps.process_release_version.outputs.version }} tag_name: ${{ steps.process_release_version.outputs.version }} - target_commitish: ${{ steps.process_release_vesion.outputs.git_sha }} + target_commitish: ${{github.sha}} - name: Slack Notification on Failure if: ${{ failure() }} diff --git a/common.vars.pkr.hcl b/common.vars.pkr.hcl index 055d46d..a1eb467 100644 --- a/common.vars.pkr.hcl +++ b/common.vars.pkr.hcl @@ -1 +1 @@ -postgres-version = "15.1.0.33" +postgres-version = "15.1.0.34-rc1" From f593842e274802dab99c8461b367c757ceb73d86 Mon Sep 17 00:00:00 2001 From: Ekjot Singh Date: Tue, 15 Jul 2025 17:18:08 +0530 Subject: [PATCH 4/9] postgres-15.1.0.36 --- .github/workflows/ami-release.yml | 9 +--- .../admin_api_scripts/pg_upgrade_complete.sh | 52 +++++++++++++++---- .../admin_api_scripts/pg_upgrade_initiate.sh | 46 ++++++++++++---- .../admin_api_scripts/pg_upgrade_prepare.sh | 9 ++-- ansible/vars.yml | 6 +-- common.vars.pkr.hcl | 2 +- 6 files changed, 88 insertions(+), 36 deletions(-) diff --git a/.github/workflows/ami-release.yml b/.github/workflows/ami-release.yml index c1394ae..8b40d13 100644 --- a/.github/workflows/ami-release.yml +++ b/.github/workflows/ami-release.yml @@ -4,7 +4,7 @@ on: 
push: branches: - develop - - da/test-tags + - da/fix-release workflow_dispatch: jobs: @@ -25,14 +25,7 @@ jobs: id: process_release_version run: | VERSION=$(sed -e 's/postgres-version = "\(.*\)"/\1/g' common.vars.pkr.hcl) - GIT_SHA=$(git rev-parse HEAD) echo "version=$VERSION" >> "$GITHUB_OUTPUT" - echo "git_sha=$GIT_SHA" >> "$GITHUB_OUTPUT" - - - name: echo output - run: | - echo "sha was ${{github.sha}} ${{ steps.process_release_vesion.outputs.git_sha }}" - echo "version was ${{ steps.process_release_version.outputs.version }}" - name: Create release uses: softprops/action-gh-release@v1 diff --git a/ansible/files/admin_api_scripts/pg_upgrade_complete.sh b/ansible/files/admin_api_scripts/pg_upgrade_complete.sh index 2c90c36..1108c3e 100644 --- a/ansible/files/admin_api_scripts/pg_upgrade_complete.sh +++ b/ansible/files/admin_api_scripts/pg_upgrade_complete.sh @@ -6,40 +6,70 @@ ## extensions, containing regtypes referencing system OIDs. # Extensions to be reenabled after pg_upgrade. -# Running an upgrade with these extensions enabled will result in errors due to +# Running an upgrade with these extensions enabled will result in errors due to # them depending on regtypes referencing system OIDs. Thus they have been disabled # beforehand. EXTENSIONS_TO_REENABLE=( "pg_graphql" ) +set -eEuo pipefail run_sql() { - STATEMENT=$1 - psql -h localhost -U tealbase_admin -d postgres -c "$STATEMENT" + psql -h localhost -U tealbase_admin -d postgres "$@" +} + +cleanup() { + UPGRADE_STATUS=${1:-"failed"} + EXIT_CODE=${?:-0} + + echo "${UPGRADE_STATUS}" > /tmp/pg-upgrade-status + + exit $EXIT_CODE } function complete_pg_upgrade { + if [ -f /tmp/pg-upgrade-status ]; then + echo "Upgrade job already started. Bailing." 
+ exit 0 + fi + + echo "running" > /tmp/pg-upgrade-status + mount -a -v # copying custom configurations cp -R /data/conf/* /etc/postgresql-custom/ + chown -R postgres:postgres /var/lib/postgresql/data + chown -R postgres:postgres /data/pgdata service postgresql start - su -c 'vacuumdb --all --analyze-in-stages' -s $SHELL postgres for EXTENSION in "${EXTENSIONS_TO_REENABLE[@]}"; do - run_sql "CREATE EXTENSION IF NOT EXISTS ${EXTENSION} CASCADE;" + run_sql -c "CREATE EXTENSION IF NOT EXISTS ${EXTENSION} CASCADE;" done - - sleep 5 - service postgresql restart + + if [ -d /data/sql ]; then + for FILE in /data/sql/*.sql; do + if [ -f "$FILE" ]; then + run_sql -f $FILE + fi + done + fi sleep 5 service postgresql restart + + start_vacuum_analyze + + echo "Upgrade job completed" +} + +function start_vacuum_analyze { + su -c 'vacuumdb --all --analyze-in-stages' -s $SHELL postgres + cleanup "complete" } -set -euo pipefail +trap cleanup ERR -complete_pg_upgrade >> /var/log/pg-upgrade-complete.log 2>&1 -echo "Upgrade job completed" +complete_pg_upgrade >>/var/log/pg-upgrade-complete.log 2>&1 & diff --git a/ansible/files/admin_api_scripts/pg_upgrade_initiate.sh b/ansible/files/admin_api_scripts/pg_upgrade_initiate.sh index 6e21919..9750ce3 100644 --- a/ansible/files/admin_api_scripts/pg_upgrade_initiate.sh +++ b/ansible/files/admin_api_scripts/pg_upgrade_initiate.sh @@ -28,6 +28,10 @@ cleanup() { UPGRADE_STATUS=${1:-"failed"} EXIT_CODE=${?:-0} + if [ -d "${MOUNT_POINT}/pgdata/pg_upgrade_output.d/" ]; then + cp -R "${MOUNT_POINT}/pgdata/pg_upgrade_output.d/" /var/log/ + fi + if [ -L /var/lib/postgresql ]; then rm /var/lib/postgresql mv /var/lib/postgresql.bak /var/lib/postgresql @@ -42,9 +46,6 @@ cleanup() { done run_sql "ALTER USER postgres WITH NOSUPERUSER;" - if [ -d "${MOUNT_POINT}/pgdata/pg_upgrade_output.d/" ]; then - cp -R "${MOUNT_POINT}/pgdata/pg_upgrade_output.d/" /var/log/ - fi umount $MOUNT_POINT echo "${UPGRADE_STATUS}" > /tmp/pg-upgrade-status @@ -53,11 +54,19 @@ 
cleanup() { } function initiate_upgrade { - BLOCK_DEVICE=$(lsblk -dpno name | grep -v "/dev/nvme[0-1]") echo "running" > /tmp/pg-upgrade-status + # awk NF==3 prints lines with exactly 3 fields, which are the block devices currently not mounted anywhere + # excluding nvme0 since it is the root disk + BLOCK_DEVICE=$(lsblk -dprno name,size,mountpoint,type | grep "disk" | grep -v "nvme0" | awk 'NF==3 { print $1; }') + + if [ -x "$(command -v blockdev)" ]; then + blockdev --rereadpt "$BLOCK_DEVICE" + fi + mkdir -p "$MOUNT_POINT" mount "$BLOCK_DEVICE" "$MOUNT_POINT" + resize2fs "$BLOCK_DEVICE" SHARED_PRELOAD_LIBRARIES=$(cat /etc/postgresql/postgresql.conf | grep shared_preload_libraries | sed "s/shared_preload_libraries = '\(.*\)'.*/\1/") PGDATAOLD=$(cat /etc/postgresql/postgresql.conf | grep data_directory | sed "s/data_directory = '\(.*\)'.*/\1/") @@ -73,6 +82,12 @@ function initiate_upgrade { cp /root/pg_upgrade_pgsodium_getkey.sh "$PGSHARENEW/extension/pgsodium_getkey" chmod +x "$PGSHARENEW/extension/pgsodium_getkey" + if [ -f "$MOUNT_POINT/pgsodium_root.key" ]; then + cp "$MOUNT_POINT/pgsodium_root.key" /etc/postgresql-custom/pgsodium_root.key + chown postgres:postgres /etc/postgresql-custom/pgsodium_root.key + chmod 600 /etc/postgresql-custom/pgsodium_root.key + fi + chown -R postgres:postgres "/tmp/pg_upgrade_bin/$PGVERSION" for EXTENSION in "${EXTENSIONS_TO_DISABLE[@]}"; do @@ -90,7 +105,9 @@ function initiate_upgrade { WORKERS=$(nproc | awk '{ print ($1 == 1 ? 
1 : $1 - 1) }') # upgrade job outputs a log in the cwd; needs write permissions - cd /tmp + mkdir -p /tmp/pg_upgrade + chown -R postgres:postgres /tmp/pg_upgrade + cd /tmp/pg_upgrade UPGRADE_COMMAND=$(cat <> /var/log/pg-upgrade-initiate.log 2>&1 -echo "Upgrade initiate job completed " +initiate_upgrade >> /var/log/pg-upgrade-initiate.log 2>&1 & +echo "Upgrade initiate job completed" diff --git a/ansible/files/admin_api_scripts/pg_upgrade_prepare.sh b/ansible/files/admin_api_scripts/pg_upgrade_prepare.sh index 9754726..7d7eb98 100644 --- a/ansible/files/admin_api_scripts/pg_upgrade_prepare.sh +++ b/ansible/files/admin_api_scripts/pg_upgrade_prepare.sh @@ -1,14 +1,15 @@ #! /usr/bin/env bash -## This script is runs in advance of the database version upgrade, on the newly -## launched instance which will eventually be promoted to become the primary +## This script is runs in advance of the database version upgrade, on the newly +## launched instance which will eventually be promoted to become the primary ## database instance once the upgrade successfully completes, terminating the ## previous (source) instance. -## The following commands safely stop the Postgres service and unmount +## The following commands safely stop the Postgres service and unmount ## the data disk off the newly launched instance, to be re-attached to the ## source instance and run the upgrade there. 
set -euo pipefail systemctl stop postgresql -umount /data +cp /etc/postgresql-custom/pgsodium_root.key /data/pgsodium_root.key +umount /data diff --git a/ansible/vars.yml b/ansible/vars.yml index edb7f2a..2a0e92d 100644 --- a/ansible/vars.yml +++ b/ansible/vars.yml @@ -10,9 +10,9 @@ postgresql_release_checksum: sha256:ea2cf059a85882654b989acd07edc121833164a30340 pgbouncer_release: "1.17.0" pgbouncer_release_checksum: sha256:657309b7bc5c7a85cbf70a9a441b535f7824123081eabb7ba86d00349a256e23 -postgrest_release: "10.1.1" -postgrest_arm_release_checksum: sha1:9653c57181839a81a7747f57f4292635e24ff7f0 -postgrest_x86_release_checksum: sha1:fecc55387caee54543dc020061fe1cd7232407e0 +postgrest_release: "10.1.2" +postgrest_arm_release_checksum: sha1:bc1a811dc0482853b226c644551f290411573f23 +postgrest_x86_release_checksum: sha1:96844c1c66d16d6bde41b4c34173f92fe4a3385b gotrue_release: v2.40.1 gotrue_release_checksum: sha1:aa650eae81bca18ccb575a2c28bff90480a91eae diff --git a/common.vars.pkr.hcl b/common.vars.pkr.hcl index a1eb467..7257030 100644 --- a/common.vars.pkr.hcl +++ b/common.vars.pkr.hcl @@ -1 +1 @@ -postgres-version = "15.1.0.34-rc1" +postgres-version = "15.1.0.36" From beb1341f2ff59b5d9669f9506f230fa063c9c7cd Mon Sep 17 00:00:00 2001 From: Ekjot Singh Date: Tue, 15 Jul 2025 18:54:04 +0530 Subject: [PATCH 5/9] postgres-15.1.0.37 --- .github/workflows/ami-release.yml | 9 ++++----- ansible/tasks/internal/admin-mgr.yml | 2 +- common.vars.pkr.hcl | 2 +- docker-compose.yaml | 4 ++++ migrations/docker-compose.yaml | 1 - 5 files changed, 10 insertions(+), 8 deletions(-) diff --git a/.github/workflows/ami-release.yml b/.github/workflows/ami-release.yml index 8b40d13..01b9206 100644 --- a/.github/workflows/ami-release.yml +++ b/.github/workflows/ami-release.yml @@ -4,7 +4,6 @@ on: push: branches: - develop - - da/fix-release workflow_dispatch: jobs: @@ -16,10 +15,10 @@ jobs: - name: Checkout Repo uses: actions/checkout@v2 - # - name: Build AMI - # run: | - # 
GIT_SHA=${{github.sha}} - # packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common.vars.pkr.hcl" amazon-arm64.pkr.hcl + - name: Build AMI + run: | + GIT_SHA=${{github.sha}} + packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common.vars.pkr.hcl" amazon-arm64.pkr.hcl - name: Grab release version id: process_release_version diff --git a/ansible/tasks/internal/admin-mgr.yml b/ansible/tasks/internal/admin-mgr.yml index 79b848e..2397616 100644 --- a/ansible/tasks/internal/admin-mgr.yml +++ b/ansible/tasks/internal/admin-mgr.yml @@ -1,6 +1,6 @@ - name: Setting arch (x86) set_fact: - arch: "x86" + arch: "amd64" when: platform == "amd64" - name: Setting arch (arm) diff --git a/common.vars.pkr.hcl b/common.vars.pkr.hcl index 7257030..b3f4a30 100644 --- a/common.vars.pkr.hcl +++ b/common.vars.pkr.hcl @@ -1 +1 @@ -postgres-version = "15.1.0.36" +postgres-version = "15.1.0.37" diff --git a/docker-compose.yaml b/docker-compose.yaml index c572b9d..0b19ae3 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -13,6 +13,10 @@ services: environment: POSTGRES_HOST: /var/run/postgresql POSTGRES_PASSWORD: password + volumes: + - ./migrations/db/init-scripts:/docker-entrypoint-initdb.d/init-scripts + - ./migrations/db/migrations:/docker-entrypoint-initdb.d/migrations + - ./migrations/db/migrate.sh:/docker-entrypoint-initdb.d/migrate.sh pg_prove: image: horrendo/pg_prove diff --git a/migrations/docker-compose.yaml b/migrations/docker-compose.yaml index 47895cb..26fe776 100644 --- a/migrations/docker-compose.yaml +++ b/migrations/docker-compose.yaml @@ -19,7 +19,6 @@ services: POSTGRES_HOST: /var/run/postgresql POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} volumes: - - ../ansible/files/postgresql_config/pg_hba.conf.j2:/etc/postgresql/pg_hba.conf - 
./db/init-scripts:/docker-entrypoint-initdb.d/init-scripts - ./db/migrations:/docker-entrypoint-initdb.d/migrations - ./db/migrate.sh:/docker-entrypoint-initdb.d/migrate.sh From 9aab4105d0286604acf2c39730faccc1b7d69ba0 Mon Sep 17 00:00:00 2001 From: Ekjot Singh Date: Tue, 15 Jul 2025 18:57:02 +0530 Subject: [PATCH 6/9] postgres-15.1.0.38 --- .github/workflows/test.yml | 2 +- ansible/files/admin_api_scripts/pg_egress_collect.pl | 4 ++-- ansible/files/pg_egress_collect.service.j2 | 2 +- ansible/vars.yml | 4 ++-- common.vars.pkr.hcl | 2 +- docker-compose.yaml | 4 ---- 6 files changed, 7 insertions(+), 11 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 13f47d5..af249f2 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -14,7 +14,7 @@ jobs: steps: - uses: actions/checkout@v3 - - run: docker compose up --abort-on-container-exit + - run: docker compose up --abort-on-container-exit --build migrate: runs-on: ubuntu-latest diff --git a/ansible/files/admin_api_scripts/pg_egress_collect.pl b/ansible/files/admin_api_scripts/pg_egress_collect.pl index 02c5650..2acc98a 100644 --- a/ansible/files/admin_api_scripts/pg_egress_collect.pl +++ b/ansible/files/admin_api_scripts/pg_egress_collect.pl @@ -2,13 +2,13 @@ # This script receive tcpdump output through STDIN and does: # -# 1. extract outgoing TCP packet length on all devices port 5432 and 6543 +# 1. extract outgoing TCP packet length on the 1st non-loopback device port 5432 and 6543 # 2. sum the length up to one minute # 3. 
save the total length to file (default is /tmp/pg_egress_collect.txt) per minute # # Usage: # -# tcpdump -s 128 -Q out -i any -nn -tt -vv -p -l 'tcp and (port 5432 or port 6543)' | perl pg_egress_collect.pl -o /tmp/output.txt +# tcpdump -s 128 -Q out -nn -tt -vv -p -l 'tcp and (port 5432 or port 6543)' | perl pg_egress_collect.pl -o /tmp/output.txt # use POSIX; diff --git a/ansible/files/pg_egress_collect.service.j2 b/ansible/files/pg_egress_collect.service.j2 index 377b6e3..7ac04f4 100644 --- a/ansible/files/pg_egress_collect.service.j2 +++ b/ansible/files/pg_egress_collect.service.j2 @@ -3,7 +3,7 @@ Description=Postgres Egress Collector [Service] Type=simple -ExecStart=/bin/bash -c "tcpdump -s 128 -Q out -i any -nn -tt -vv -p -l 'tcp and (port 5432 or port 6543)' | perl /root/pg_egress_collect.pl" +ExecStart=/bin/bash -c "tcpdump -s 128 -Q out -nn -tt -vv -p -l 'tcp and (port 5432 or port 6543)' | perl /root/pg_egress_collect.pl" User=root Slice=services.slice Restart=always diff --git a/ansible/vars.yml b/ansible/vars.yml index 2a0e92d..55eda23 100644 --- a/ansible/vars.yml +++ b/ansible/vars.yml @@ -83,8 +83,8 @@ plv8_commit_version: bcddd92f71530e117f2f98b92d206dafe824f73a pg_plan_filter_commit_version: 5081a7b5cb890876e67d8e7486b6a64c38c9a492 -pg_net_release: "0.7" -pg_net_release_checksum: sha1:52cffe467858182e12f158870b3339ed785a1320 +pg_net_release: "0.7.1" +pg_net_release_checksum: sha1:1df576745a320b6faa78cff5067cad9bdd4cfbcb rum_release: "1.3.13" rum_release_checksum: sha256:6ab370532c965568df6210bd844ac6ba649f53055e48243525b0b7e5c4d69a7d diff --git a/common.vars.pkr.hcl b/common.vars.pkr.hcl index b3f4a30..a1eee1c 100644 --- a/common.vars.pkr.hcl +++ b/common.vars.pkr.hcl @@ -1 +1 @@ -postgres-version = "15.1.0.37" +postgres-version = "15.1.0.38" diff --git a/docker-compose.yaml b/docker-compose.yaml index 0b19ae3..c572b9d 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -13,10 +13,6 @@ services: environment: POSTGRES_HOST: 
/var/run/postgresql POSTGRES_PASSWORD: password - volumes: - - ./migrations/db/init-scripts:/docker-entrypoint-initdb.d/init-scripts - - ./migrations/db/migrations:/docker-entrypoint-initdb.d/migrations - - ./migrations/db/migrate.sh:/docker-entrypoint-initdb.d/migrate.sh pg_prove: image: horrendo/pg_prove From cdcdf382f9c5ba9b7c9720b228decc66d2ea3025 Mon Sep 17 00:00:00 2001 From: Ekjot Singh Date: Mon, 28 Jul 2025 01:17:42 +0530 Subject: [PATCH 7/9] feat: 15.6.146 --- .github/CODEOWNERS | 2 + .github/FUNDING.yml | 2 +- .github/PULL_REQUEST_TEMPLATE/default.md | 15 + .../extension_upgrade.md | 49 + .github/pull_request_template.md | 4 + .github/workflows/ami-release-nix.yml | 141 + .github/workflows/ami-release.yml | 49 - .github/workflows/build-ccache.yml | 86 + .github/workflows/check-shellscripts.yml | 27 + .github/workflows/ci.yml | 21 + .github/workflows/dockerhub-release-15-6.yml | 104 + .github/workflows/dockerhub-release-aio.yml | 157 + .../workflows/dockerhub-release-orioledb.yml | 86 + .github/workflows/dockerhub-release.yml | 177 +- .github/workflows/mirror-postgrest.yml | 33 + .github/workflows/mirror.yml | 13 +- .github/workflows/nix-build.yml | 82 + .github/workflows/package-plv8.yml | 78 + .github/workflows/publish-migrations.yml | 37 +- ...ublish-nix-pgupgrade-bin-flake-version.yml | 101 + .../publish-nix-pgupgrade-scripts.yml | 104 + .github/workflows/test-pg-upgrade.yml | 133 + .github/workflows/test.yml | 136 +- .github/workflows/testinfra-nix.yml | 94 + .gitignore | 21 +- CONTRIBUTING.md | 46 + Dockerfile | 1017 ++- Dockerfile-156 | 223 + README.md | 19 +- amazon-arm64-nix.pkr.hcl | 277 + amazon-arm64.pkr.hcl | 31 +- ansible/files/admin_api_scripts/grow_fs.sh | 11 +- .../admin_api_scripts/manage_readonly_mode.sh | 4 +- .../admin_api_scripts/pg_egress_collect.pl | 10 +- .../admin_api_scripts/pg_upgrade_complete.sh | 75 - .../admin_api_scripts/pg_upgrade_initiate.sh | 152 - .../check.sh} | 2 +- .../pg_upgrade_scripts/common.sh | 551 ++ 
.../pg_upgrade_scripts/complete.sh | 204 + .../pg_upgrade_scripts/initiate.sh | 470 ++ .../pgsodium_getkey.sh} | 0 .../prepare.sh} | 0 ansible/files/adminapi.service.j2 | 1 + ansible/files/adminapi.sudoers.conf | 24 +- ansible/files/commence-backup.service.j2 | 12 + ansible/files/default.sysstat | 9 + ansible/files/envoy.service | 31 + ansible/files/envoy_config/cds.yaml | 86 + ansible/files/envoy_config/envoy.yaml | 23 + ansible/files/envoy_config/lds.yaml | 436 ++ .../fail2ban_config/filter-pgbouncer.conf.j2 | 3 +- .../fail2ban_config/jail-pgbouncer.conf.j2 | 4 +- .../fail2ban_config/jail-postgresql.conf.j2 | 3 +- ansible/files/gotrue-optimizations.service.j2 | 11 + ansible/files/gotrue.service.j2 | 2 + ansible/files/kong_config/kong.conf.j2 | 2 +- ansible/files/kong_config/kong.service.j2 | 10 +- .../logrotate-postgres-csv.conf | 2 +- ansible/files/manifest.json | 1 + ansible/files/permission_check.py | 204 + .../files/pgbouncer_config/pgbouncer.ini.j2 | 4 +- ansible/files/postgres_exporter.service.j2 | 7 +- ansible/files/postgres_prestart.sh.j2 | 9 + .../custom_read_replica.conf.j2 | 5 + .../postgresql_config/custom_walg.conf.j2 | 4 + .../files/postgresql_config/pg_hba.conf.j2 | 3 + .../postgresql_config/postgresql.conf.j2 | 9 +- .../postgresql_config/postgresql.service.j2 | 3 + .../files/postgresql_config/supautils.conf.j2 | 18 +- .../tmpfiles.postgresql.conf | 5 + .../before-create.sql | 84 + .../dblink/after-create.sql | 14 + .../pg_cron/after-create.sql | 13 + .../pg_tle/after-create.sql | 1 + .../pgmq/after-create.sql | 19 + .../postgis_tiger_geocoder/after-create.sql | 10 + .../postgres_fdw/after-create.sql | 21 + ansible/files/start-envoy.sh | 12 + ansible/files/sysstat.sysstat | 36 + .../systemd-networkd-check-and-fix.service | 11 + .../systemd-networkd-check-and-fix.sh | 20 + .../systemd-networkd-check-and-fix.timer | 9 + ansible/files/systemd-resolved.conf | 8 + ansible/manifest-playbook.yml | 75 + ansible/playbook-docker.yml | 19 - 
ansible/playbook.yml | 107 +- ansible/tasks/clean-build-dependencies.yml | 4 + ansible/tasks/docker/cleanup.yml | 18 - ansible/tasks/docker/finalize.yml | 24 - ansible/tasks/docker/setup.yml | 70 - ansible/tasks/finalize-ami.yml | 8 +- ansible/tasks/fix_ipv6_ndisc.yml | 33 + ansible/tasks/internal/admin-api.yml | 33 +- .../tasks/internal/collect-pg-binaries.yml | 49 + ansible/tasks/internal/install-salt.yml | 47 + ansible/tasks/internal/optimizations.yml | 34 +- ansible/tasks/internal/postgres-exporter.yml | 16 +- .../tasks/internal/postgresql-prestart.yml | 7 + .../tasks/postgres-extensions/01-postgis.yml | 8 +- .../postgres-extensions/02-pgrouting.yml | 3 +- .../tasks/postgres-extensions/04-pg_cron.yml | 7 - .../tasks/postgres-extensions/06-pgjwt.yml | 2 +- .../tasks/postgres-extensions/11-wal2json.yml | 2 +- ansible/tasks/postgres-extensions/13-plv8.yml | 9 +- .../postgres-extensions/14-pg_plan_filter.yml | 2 +- .../postgres-extensions/17-pg_hashids.yml | 2 +- .../tasks/postgres-extensions/18-pgsodium.yml | 3 +- .../postgres-extensions/19-pg_graphql.yml | 2 +- .../postgres-extensions/21-auto_explain.yml | 7 - .../postgres-extensions/22-pg_jsonschema.yml | 2 +- .../tasks/postgres-extensions/24-pgroonga.yml | 6 +- .../tasks/postgres-extensions/25-wrappers.yml | 2 +- .../tasks/postgres-extensions/26-hypopg.yml | 2 +- .../postgres-extensions/27-pg_repack.yml | 65 +- .../tasks/postgres-extensions/28-pgvector.yml | 4 +- .../tasks/postgres-extensions/29-pg_tle.yml | 12 + ansible/tasks/setup-docker.yml | 80 + ansible/tasks/setup-envoy.yml | 60 + ansible/tasks/setup-extensions.yml | 20 +- ansible/tasks/setup-fail2ban.yml | 10 + ansible/tasks/setup-gotrue.yml | 7 +- ansible/tasks/setup-kong.yml | 2 +- ansible/tasks/setup-migrations.yml | 4 +- ansible/tasks/setup-nginx.yml | 3 +- ansible/tasks/setup-pgbouncer.yml | 12 - ansible/tasks/setup-postgres.yml | 270 +- ansible/tasks/setup-postgrest.yml | 25 + ansible/tasks/setup-system.yml | 87 +- 
ansible/tasks/setup-tealbase-internal.yml | 26 +- ansible/tasks/setup-wal-g.yml | 4 +- ansible/tasks/stage2-setup-postgres.yml | 234 + ansible/tasks/test-image.yml | 81 +- ansible/vars.yml | 114 +- common-nix.vars.pkr.hcl | 1 + common.vars.pkr.hcl | 2 +- docker-compose.yaml | 28 - docker/Dockerfile | 78 + docker/all-in-one/Dockerfile | 311 + docker/all-in-one/README.md | 59 + docker/all-in-one/configure-shim.sh | 16 + docker/all-in-one/entrypoint.sh | 366 + docker/all-in-one/etc/adminapi/adminapi.yaml | 76 + .../etc/fail2ban/filter.d/pgbouncer.conf | 2 + .../etc/fail2ban/filter.d/postgresql.conf | 8 + .../all-in-one/etc/fail2ban/jail.d/jail.local | 4 + .../etc/fail2ban/jail.d/pgbouncer.conf | 7 + .../etc/fail2ban/jail.d/postgresql.conf | 8 + .../all-in-one/etc/fail2ban/jail.d/sshd.local | 3 + docker/all-in-one/etc/gotrue.env | 9 + docker/all-in-one/etc/kong/kong.conf | 37 + docker/all-in-one/etc/kong/kong.yml | 88 + .../etc/logrotate.d/postgresql.conf | 11 + docker/all-in-one/etc/logrotate.d/walg.conf | 9 + .../etc/pgbouncer-custom/custom-overrides.ini | 0 .../generated-optimizations.ini | 0 .../etc/pgbouncer-custom/ssl-config.ini | 4 + docker/all-in-one/etc/pgbouncer/pgbouncer.ini | 363 + docker/all-in-one/etc/pgbouncer/userlist.txt | 0 .../postgresql-custom/custom-overrides.conf | 0 .../generated-optimizations.conf | 0 .../postgresql-platform-defaults.conf | 9 + docker/all-in-one/etc/postgresql.schema.sql | 16 + docker/all-in-one/etc/postgresql/logging.conf | 33 + docker/all-in-one/etc/postgresql/pg_hba.conf | 94 + docker/all-in-one/etc/postgrest/base.conf | 7 + docker/all-in-one/etc/postgrest/bootstrap.sh | 8 + .../all-in-one/etc/postgrest/generated.conf | 0 docker/all-in-one/etc/salt/minion | 71 + docker/all-in-one/etc/sudoers.d/adminapi | 27 + .../etc/supa-shutdown/shutdown.conf | 1 + .../supervisor/base-services/adminapi.conf | 10 + .../supervisor/base-services/logrotate.conf | 11 + .../base-services/lsn-checkpoint-push.conf | 10 + 
.../base-services/pg_egress_collect.conf | 10 + .../supervisor/base-services/postgresql.conf | 13 + .../base-services/supa-shutdown.conf | 11 + .../etc/supervisor/services/envoy.conf | 10 + .../etc/supervisor/services/exporter.conf | 11 + .../etc/supervisor/services/fail2ban.conf | 9 + .../etc/supervisor/services/gotrue.conf | 10 + .../etc/supervisor/services/group.conf | 3 + .../etc/supervisor/services/kong.conf | 11 + .../etc/supervisor/services/pgbouncer.conf | 10 + .../etc/supervisor/services/postgrest.conf | 10 + .../etc/supervisor/supervisord.conf | 170 + .../all-in-one/etc/tmpfiles.d/pgbouncer.conf | 2 + docker/all-in-one/etc/vector/vector.yaml | 306 + docker/all-in-one/healthcheck.sh | 46 + docker/all-in-one/init/configure-admin-mgr.sh | 8 + docker/all-in-one/init/configure-adminapi.sh | 56 + .../all-in-one/init/configure-autoshutdown.sh | 21 + docker/all-in-one/init/configure-envoy.sh | 53 + docker/all-in-one/init/configure-exporter.sh | 5 + docker/all-in-one/init/configure-fail2ban.sh | 6 + docker/all-in-one/init/configure-gotrue.sh | 40 + docker/all-in-one/init/configure-kong.sh | 48 + .../init/configure-pg_egress_collect.sh | 14 + docker/all-in-one/init/configure-pgbouncer.sh | 46 + docker/all-in-one/init/configure-postgrest.sh | 41 + docker/all-in-one/init/configure-vector.sh | 56 + docker/all-in-one/init/start-kong.sh | 7 + .../pg_egress_collect/pg_egress_collect.pl | 126 + .../opt/postgres_exporter/queries.yml | 345 + docker/all-in-one/postgres-entrypoint.sh | 358 + docker/all-in-one/run-logrotate.sh | 8 + docker/all-in-one/shutdown.sh | 96 + docker/nix/Dockerfile | 16 + docker/nix/build_nix.sh | 17 + docker/orioledb/Dockerfile | 1059 +++ docker/orioledb/entrypoint.sh | 36 + .../files/unit-tests/test-extensions.sql | 20 - .../files/unit-tests/unit-test-01.sql | 4 +- .../files/unit-tests/verify-extensions.sql | 12 - ebssurrogate/scripts/chroot-bootstrap-nix.sh | 219 + ebssurrogate/scripts/chroot-bootstrap.sh | 37 +- 
.../scripts/surrogate-bootstrap-nix.sh | 328 + ebssurrogate/scripts/surrogate-bootstrap.sh | 38 +- flake.lock | 180 + flake.nix | 601 ++ migrations/README.md | 1 + .../00000000000000-initial-schema.sql | 5 +- migrations/db/migrate.sh | 14 + .../20211124212715_update-auth-owner.sql | 22 +- .../migrations/20220317095840_pg_graphql.sql | 2 + ...13082019_pg_cron-pg_net-temp-perms-fix.sql | 1 + ...221207154255_create_pgsodium_and_vault.sql | 40 +- ...46_grant_extensions_perms_for_postgres.sql | 10 + ...306081037_grant_pg_monitor_to_postgres.sql | 5 + ...t_auth_roles_to_tealbase_storage_admin.sql | 4 + ...0529180330_alter_api_roles_for_inherit.sql | 10 + ...uthenticator_to_tealbase_storage_admin.sql | 5 + ...g_graphql_permissions_for_custom_roles.sql | 78 + ...evoke_writes_on_cron_job_from_postgres.sql | 47 + ...set_lock_timeout_to_authenticator_role.sql | 4 + ...080435_alter_lo_export_lo_import_owner.sql | 6 + ...239_grant_predefined_roles_to_postgres.sql | 4 + migrations/docker-compose.yaml | 16 +- migrations/schema.sql | 92 +- migrations/tests/database/exists.sql | 16 +- migrations/tests/database/privs.sql | 27 +- migrations/tests/extensions/01-postgis.sql | 43 + migrations/tests/extensions/02-pgrouting.sql | 3 + migrations/tests/extensions/03-pgtap.sql | 3 + migrations/tests/extensions/04-pg_cron.sql | 25 + migrations/tests/extensions/05-pgaudit.sql | 3 + migrations/tests/extensions/06-pgjwt.sql | 3 + migrations/tests/extensions/07-pgsql-http.sql | 3 + .../tests/extensions/08-plpgsql_check.sql | 3 + .../tests/extensions/09-pg-safeupdate.sql | 3 + .../tests/extensions/10-timescaledb.sql | 3 + migrations/tests/extensions/11-wal2json.sql | 6 + migrations/tests/extensions/12-pljava.sql | 3 + migrations/tests/extensions/13-plv8.sql | 3 + .../tests/extensions/14-pg_plan_filter.sql | 3 + migrations/tests/extensions/15-pg_net.sql | 23 + migrations/tests/extensions/16-rum.sql | 3 + migrations/tests/extensions/17-pg_hashids.sql | 3 + migrations/tests/extensions/18-pgsodium.sql 
| 4 + migrations/tests/extensions/19-pg_graphql.sql | 4 + .../tests/extensions/20-pg_stat_monitor.sql | 3 + .../tests/extensions/21-auto_explain.sql | 0 .../tests/extensions/22-pg_jsonschema.sql | 3 + migrations/tests/extensions/23-vault.sql | 4 + migrations/tests/extensions/24-pgroonga.sql | 3 + migrations/tests/extensions/25-wrappers.sql | 3 + migrations/tests/extensions/26-hypopg.sql | 3 + migrations/tests/extensions/27-pg_repack.sql | 3 + migrations/tests/extensions/28-pgvector.sql | 3 + migrations/tests/extensions/29-pg_tle.sql | 4 + migrations/tests/extensions/test.sql | 30 + migrations/tests/storage/privs.sql | 1 + migrations/tests/storage/test.sql | 1 + migrations/tests/test.sql | 7 +- nix/do-not-use-vendored-libraries.patch | 15 + nix/docker/init.sh.in | 5 + nix/docs/README.md | 8 + nix/docs/adding-new-package.md | 160 + nix/docs/adding-tests.md | 100 + nix/docs/build-postgres.md | 124 + nix/docs/docker.md | 14 + nix/docs/migration-tests.md | 50 + nix/docs/new-major-postgres.md | 34 + nix/docs/nix-overlays.md | 36 + nix/docs/receipt-files.md | 155 + nix/docs/references.md | 31 + nix/docs/start-client-server.md | 93 + nix/docs/start-here.md | 70 + nix/docs/update-extension.md | 17 + nix/docs/use-direnv.md | 102 + ...001-build-Allow-using-V8-from-system.patch | 46 + nix/ext/hypopg.nix | 31 + nix/ext/index_advisor.nix | 30 + nix/ext/mecab-naist-jdic/default.nix | 41 + nix/ext/orioledb.nix | 32 + nix/ext/pg-safeupdate.nix | 29 + nix/ext/pg_backtrace.nix | 33 + nix/ext/pg_cron.nix | 32 + nix/ext/pg_graphql.nix | 39 + nix/ext/pg_hashids.nix | 31 + nix/ext/pg_jsonschema.nix | 66 + nix/ext/pg_net.nix | 33 + nix/ext/pg_plan_filter.nix | 30 + nix/ext/pg_regress.nix | 24 + nix/ext/pg_repack.nix | 66 + nix/ext/pg_stat_monitor.nix | 49 + nix/ext/pg_tle.nix | 36 + nix/ext/pgaudit.nix | 44 + nix/ext/pgjwt.nix | 31 + nix/ext/pgmq.nix | 33 + nix/ext/pgroonga.nix | 61 + nix/ext/pgrouting.nix | 31 + nix/ext/pgsodium.nix | 31 + nix/ext/pgsql-http.nix | 31 + nix/ext/pgtap.nix 
| 33 + nix/ext/pgvector.nix | 31 + nix/ext/pljava.nix | 51 + nix/ext/plpgsql-check.nix | 46 + nix/ext/plv8.nix | 194 + nix/ext/postgis.nix | 87 + nix/ext/rum.nix | 31 + nix/ext/sfcgal/sfcgal.nix | 31 + nix/ext/supautils.nix | 29 + nix/ext/timescaledb-2.9.1.nix | 51 + nix/ext/timescaledb.nix | 43 + nix/ext/use-system-groonga.patch | 21 + nix/ext/vault.nix | 30 + nix/ext/wal2json.nix | 31 + nix/ext/wrappers/default.nix | 121 + nix/fix-cmake-install-path.patch | 21 + nix/init.sh | 20 + nix/overlays/cargo-pgrx-0-11-3.nix | 7 + nix/overlays/gdal-small.nix | 14 + nix/overlays/psql_16-oriole.nix | 21 + nix/postgresql/15.nix | 4 + nix/postgresql/default.nix | 20 + nix/postgresql/generic.nix | 309 + nix/postgresql/patches/less-is-more.patch | 11 + .../patches/locale-binary-path.patch | 11 + .../patches/paths-for-split-outputs.patch | 11 + .../paths-with-postgresql-suffix.patch | 41 + .../patches/relative-to-symlinks-16+.patch | 13 + .../patches/relative-to-symlinks.patch | 13 + .../patches/socketdir-in-run-13+.patch | 11 + nix/postgresql/patches/socketdir-in-run.patch | 11 + .../specify_pkglibdir_at_runtime.patch | 28 + nix/tealbase-groonga.nix | 75 + .../expected/extensions_sql_interface.out | 6312 +++++++++++++++++ nix/tests/expected/hypopg.out | 14 + nix/tests/expected/index_advisor.out | 16 + nix/tests/expected/pg-safeupdate.out | 12 + nix/tests/expected/pg_graphql.out | 259 + nix/tests/expected/pg_hashids.out | 36 + nix/tests/expected/pg_jsonschema.out | 73 + nix/tests/expected/pg_net.out | 11 + nix/tests/expected/pg_plan_filter.out | 16 + nix/tests/expected/pg_stat_monitor.out | 10 + nix/tests/expected/pg_tle.out | 91 + nix/tests/expected/pgaudit.out | 24 + nix/tests/expected/pgjwt.out | 22 + nix/tests/expected/pgmq.out | 141 + nix/tests/expected/pgroonga.out | 76 + nix/tests/expected/pgrouting.out | 31 + nix/tests/expected/pgsodium.out | 9 + nix/tests/expected/pgtap.out | 21 + nix/tests/expected/pgvector.out | 90 + nix/tests/expected/plpgsql-check.out | 35 + 
nix/tests/expected/plv8.out | 17 + nix/tests/expected/postgis.out | 59 + nix/tests/expected/rum.out | 38 + nix/tests/expected/timescale.out | 47 + nix/tests/expected/vault.out | 42 + nix/tests/expected/wal2json.out | 42 + nix/tests/migrations/data.sql | 21 + nix/tests/postgresql.conf.in | 800 +++ nix/tests/prime.sql | 98 + nix/tests/smoke/0000-hello-world.sql | 10 + nix/tests/smoke/0001-pg_graphql.sql | 59 + nix/tests/smoke/0002-supautils.sql | 17 + nix/tests/smoke/0003-pgsodium-vault.sql | 40 + nix/tests/smoke/0004-index_advisor.sql | 19 + nix/tests/smoke/0005-test_pgroonga_mecab.sql | 36 + nix/tests/sql/extensions_sql_interface.sql | 101 + nix/tests/sql/hypopg.sql | 13 + nix/tests/sql/index_advisor.sql | 13 + nix/tests/sql/pg-safeupdate.sql | 15 + nix/tests/sql/pg_graphql.sql | 219 + nix/tests/sql/pg_hashids.sql | 6 + nix/tests/sql/pg_jsonschema.sql | 68 + nix/tests/sql/pg_net.sql | 7 + nix/tests/sql/pg_plan_filter.sql | 22 + nix/tests/sql/pg_stat_monitor.sql | 6 + nix/tests/sql/pg_tle.sql | 70 + nix/tests/sql/pgaudit.sql | 23 + nix/tests/sql/pgjwt.sql | 13 + nix/tests/sql/pgmq.sql | 90 + nix/tests/sql/pgroonga.sql | 48 + nix/tests/sql/pgrouting.sql | 27 + nix/tests/sql/pgsodium.sql | 4 + nix/tests/sql/pgtap.sql | 11 + nix/tests/sql/pgvector.sql | 72 + nix/tests/sql/plpgsql-check.sql | 26 + nix/tests/sql/plv8.sql | 14 + nix/tests/sql/postgis.sql | 52 + nix/tests/sql/rum.sql | 37 + nix/tests/sql/timescale.sql | 33 + nix/tests/sql/vault.sql | 30 + nix/tests/sql/wal2json.sql | 32 + nix/tests/util/pgsodium_getkey.sh | 10 + nix/tests/util/pgsodium_getkey_arb.sh | 1 + nix/tools/README.md | 2 + nix/tools/migrate-tool.sh.in | 123 + nix/tools/postgresql_schema.sql | 11 + nix/tools/run-client.sh.in | 54 + nix/tools/run-replica.sh.in | 43 + nix/tools/run-server.sh.in | 65 + nix/tools/sync-exts-versions.sh.in | 282 + postgresql.gpg.key | 64 + scripts/90-cleanup.sh | 8 +- scripts/91-log_cleanup.sh | 5 +- scripts/nix-provision.sh | 49 + stage2-nix-psql.pkr.hcl | 141 + 
testinfra/README.md | 66 + testinfra/test_all_in_one.py | 135 + testinfra/test_ami.py | 439 ++ testinfra/test_ami_nix.py | 439 ++ tests/pg_upgrade/.env | 6 + tests/pg_upgrade/.gitignore | 4 + tests/pg_upgrade/debug.sh | 80 + tests/pg_upgrade/scripts/entrypoint.sh | 25 + tests/pg_upgrade/tests/01-schema.sql | 26 + tests/pg_upgrade/tests/02-data.sql | 27 + tests/pg_upgrade/tests/03-settings.sql | 17 + .../pg_upgrade/tests/97-enable-extensions.sql | 10 + tests/pg_upgrade/tests/98-data-fixtures.sql | 273 + tests/pg_upgrade/tests/99-fixtures.sql | 12 + 435 files changed, 29014 insertions(+), 1191 deletions(-) create mode 100644 .github/PULL_REQUEST_TEMPLATE/default.md create mode 100644 .github/PULL_REQUEST_TEMPLATE/extension_upgrade.md create mode 100644 .github/pull_request_template.md create mode 100644 .github/workflows/ami-release-nix.yml delete mode 100644 .github/workflows/ami-release.yml create mode 100644 .github/workflows/build-ccache.yml create mode 100644 .github/workflows/check-shellscripts.yml create mode 100644 .github/workflows/ci.yml create mode 100644 .github/workflows/dockerhub-release-15-6.yml create mode 100644 .github/workflows/dockerhub-release-aio.yml create mode 100644 .github/workflows/dockerhub-release-orioledb.yml create mode 100644 .github/workflows/mirror-postgrest.yml create mode 100644 .github/workflows/nix-build.yml create mode 100644 .github/workflows/package-plv8.yml create mode 100644 .github/workflows/publish-nix-pgupgrade-bin-flake-version.yml create mode 100644 .github/workflows/publish-nix-pgupgrade-scripts.yml create mode 100644 .github/workflows/test-pg-upgrade.yml create mode 100644 .github/workflows/testinfra-nix.yml create mode 100644 CONTRIBUTING.md create mode 100644 Dockerfile-156 create mode 100644 amazon-arm64-nix.pkr.hcl delete mode 100644 ansible/files/admin_api_scripts/pg_upgrade_complete.sh delete mode 100644 ansible/files/admin_api_scripts/pg_upgrade_initiate.sh rename 
ansible/files/admin_api_scripts/{pg_upgrade_check.sh => pg_upgrade_scripts/check.sh} (79%) mode change 100644 => 100755 create mode 100755 ansible/files/admin_api_scripts/pg_upgrade_scripts/common.sh create mode 100755 ansible/files/admin_api_scripts/pg_upgrade_scripts/complete.sh create mode 100755 ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh rename ansible/files/admin_api_scripts/{pg_upgrade_pgsodium_getkey.sh => pg_upgrade_scripts/pgsodium_getkey.sh} (100%) mode change 100644 => 100755 rename ansible/files/admin_api_scripts/{pg_upgrade_prepare.sh => pg_upgrade_scripts/prepare.sh} (100%) mode change 100644 => 100755 create mode 100644 ansible/files/commence-backup.service.j2 create mode 100644 ansible/files/default.sysstat create mode 100644 ansible/files/envoy.service create mode 100644 ansible/files/envoy_config/cds.yaml create mode 100644 ansible/files/envoy_config/envoy.yaml create mode 100644 ansible/files/envoy_config/lds.yaml create mode 100644 ansible/files/gotrue-optimizations.service.j2 create mode 100644 ansible/files/manifest.json create mode 100644 ansible/files/permission_check.py create mode 100644 ansible/files/postgres_prestart.sh.j2 create mode 100644 ansible/files/postgresql_config/custom_read_replica.conf.j2 create mode 100644 ansible/files/postgresql_config/tmpfiles.postgresql.conf create mode 100644 ansible/files/postgresql_extension_custom_scripts/before-create.sql create mode 100644 ansible/files/postgresql_extension_custom_scripts/dblink/after-create.sql create mode 100644 ansible/files/postgresql_extension_custom_scripts/pg_cron/after-create.sql create mode 100644 ansible/files/postgresql_extension_custom_scripts/pg_tle/after-create.sql create mode 100644 ansible/files/postgresql_extension_custom_scripts/pgmq/after-create.sql create mode 100644 ansible/files/postgresql_extension_custom_scripts/postgis_tiger_geocoder/after-create.sql create mode 100644 
ansible/files/postgresql_extension_custom_scripts/postgres_fdw/after-create.sql create mode 100644 ansible/files/start-envoy.sh create mode 100644 ansible/files/sysstat.sysstat create mode 100644 ansible/files/systemd-networkd/systemd-networkd-check-and-fix.service create mode 100644 ansible/files/systemd-networkd/systemd-networkd-check-and-fix.sh create mode 100644 ansible/files/systemd-networkd/systemd-networkd-check-and-fix.timer create mode 100644 ansible/files/systemd-resolved.conf create mode 100644 ansible/manifest-playbook.yml delete mode 100644 ansible/playbook-docker.yml delete mode 100644 ansible/tasks/docker/cleanup.yml delete mode 100644 ansible/tasks/docker/finalize.yml delete mode 100644 ansible/tasks/docker/setup.yml create mode 100644 ansible/tasks/fix_ipv6_ndisc.yml create mode 100644 ansible/tasks/internal/collect-pg-binaries.yml create mode 100644 ansible/tasks/internal/install-salt.yml create mode 100644 ansible/tasks/internal/postgresql-prestart.yml delete mode 100644 ansible/tasks/postgres-extensions/21-auto_explain.yml create mode 100644 ansible/tasks/postgres-extensions/29-pg_tle.yml create mode 100644 ansible/tasks/setup-docker.yml create mode 100644 ansible/tasks/setup-envoy.yml create mode 100644 ansible/tasks/stage2-setup-postgres.yml create mode 100644 common-nix.vars.pkr.hcl delete mode 100644 docker-compose.yaml create mode 100644 docker/Dockerfile create mode 100644 docker/all-in-one/Dockerfile create mode 100644 docker/all-in-one/README.md create mode 100755 docker/all-in-one/configure-shim.sh create mode 100755 docker/all-in-one/entrypoint.sh create mode 100644 docker/all-in-one/etc/adminapi/adminapi.yaml create mode 100644 docker/all-in-one/etc/fail2ban/filter.d/pgbouncer.conf create mode 100644 docker/all-in-one/etc/fail2ban/filter.d/postgresql.conf create mode 100644 docker/all-in-one/etc/fail2ban/jail.d/jail.local create mode 100644 docker/all-in-one/etc/fail2ban/jail.d/pgbouncer.conf create mode 100644 
docker/all-in-one/etc/fail2ban/jail.d/postgresql.conf create mode 100644 docker/all-in-one/etc/fail2ban/jail.d/sshd.local create mode 100644 docker/all-in-one/etc/gotrue.env create mode 100644 docker/all-in-one/etc/kong/kong.conf create mode 100644 docker/all-in-one/etc/kong/kong.yml create mode 100644 docker/all-in-one/etc/logrotate.d/postgresql.conf create mode 100644 docker/all-in-one/etc/logrotate.d/walg.conf create mode 100644 docker/all-in-one/etc/pgbouncer-custom/custom-overrides.ini create mode 100644 docker/all-in-one/etc/pgbouncer-custom/generated-optimizations.ini create mode 100644 docker/all-in-one/etc/pgbouncer-custom/ssl-config.ini create mode 100644 docker/all-in-one/etc/pgbouncer/pgbouncer.ini create mode 100644 docker/all-in-one/etc/pgbouncer/userlist.txt create mode 100644 docker/all-in-one/etc/postgresql-custom/custom-overrides.conf create mode 100644 docker/all-in-one/etc/postgresql-custom/generated-optimizations.conf create mode 100644 docker/all-in-one/etc/postgresql-custom/postgresql-platform-defaults.conf create mode 100644 docker/all-in-one/etc/postgresql.schema.sql create mode 100644 docker/all-in-one/etc/postgresql/logging.conf create mode 100755 docker/all-in-one/etc/postgresql/pg_hba.conf create mode 100644 docker/all-in-one/etc/postgrest/base.conf create mode 100755 docker/all-in-one/etc/postgrest/bootstrap.sh create mode 100644 docker/all-in-one/etc/postgrest/generated.conf create mode 100644 docker/all-in-one/etc/salt/minion create mode 100644 docker/all-in-one/etc/sudoers.d/adminapi create mode 100644 docker/all-in-one/etc/supa-shutdown/shutdown.conf create mode 100644 docker/all-in-one/etc/supervisor/base-services/adminapi.conf create mode 100644 docker/all-in-one/etc/supervisor/base-services/logrotate.conf create mode 100644 docker/all-in-one/etc/supervisor/base-services/lsn-checkpoint-push.conf create mode 100644 docker/all-in-one/etc/supervisor/base-services/pg_egress_collect.conf create mode 100644 
docker/all-in-one/etc/supervisor/base-services/postgresql.conf create mode 100644 docker/all-in-one/etc/supervisor/base-services/supa-shutdown.conf create mode 100644 docker/all-in-one/etc/supervisor/services/envoy.conf create mode 100644 docker/all-in-one/etc/supervisor/services/exporter.conf create mode 100644 docker/all-in-one/etc/supervisor/services/fail2ban.conf create mode 100644 docker/all-in-one/etc/supervisor/services/gotrue.conf create mode 100644 docker/all-in-one/etc/supervisor/services/group.conf create mode 100644 docker/all-in-one/etc/supervisor/services/kong.conf create mode 100644 docker/all-in-one/etc/supervisor/services/pgbouncer.conf create mode 100644 docker/all-in-one/etc/supervisor/services/postgrest.conf create mode 100644 docker/all-in-one/etc/supervisor/supervisord.conf create mode 100644 docker/all-in-one/etc/tmpfiles.d/pgbouncer.conf create mode 100644 docker/all-in-one/etc/vector/vector.yaml create mode 100755 docker/all-in-one/healthcheck.sh create mode 100755 docker/all-in-one/init/configure-admin-mgr.sh create mode 100755 docker/all-in-one/init/configure-adminapi.sh create mode 100755 docker/all-in-one/init/configure-autoshutdown.sh create mode 100755 docker/all-in-one/init/configure-envoy.sh create mode 100755 docker/all-in-one/init/configure-exporter.sh create mode 100755 docker/all-in-one/init/configure-fail2ban.sh create mode 100755 docker/all-in-one/init/configure-gotrue.sh create mode 100755 docker/all-in-one/init/configure-kong.sh create mode 100755 docker/all-in-one/init/configure-pg_egress_collect.sh create mode 100755 docker/all-in-one/init/configure-pgbouncer.sh create mode 100755 docker/all-in-one/init/configure-postgrest.sh create mode 100755 docker/all-in-one/init/configure-vector.sh create mode 100755 docker/all-in-one/init/start-kong.sh create mode 100644 docker/all-in-one/opt/pg_egress_collect/pg_egress_collect.pl create mode 100644 docker/all-in-one/opt/postgres_exporter/queries.yml create mode 100755 
docker/all-in-one/postgres-entrypoint.sh create mode 100755 docker/all-in-one/run-logrotate.sh create mode 100755 docker/all-in-one/shutdown.sh create mode 100644 docker/nix/Dockerfile create mode 100644 docker/nix/build_nix.sh create mode 100644 docker/orioledb/Dockerfile create mode 100755 docker/orioledb/entrypoint.sh delete mode 100644 ebssurrogate/files/unit-tests/test-extensions.sql delete mode 100644 ebssurrogate/files/unit-tests/verify-extensions.sql create mode 100755 ebssurrogate/scripts/chroot-bootstrap-nix.sh create mode 100755 ebssurrogate/scripts/surrogate-bootstrap-nix.sh create mode 100644 flake.lock create mode 100644 flake.nix create mode 100644 migrations/db/migrations/20230224042246_grant_extensions_perms_for_postgres.sql create mode 100644 migrations/db/migrations/20230306081037_grant_pg_monitor_to_postgres.sql create mode 100644 migrations/db/migrations/20230327032006_grant_auth_roles_to_tealbase_storage_admin.sql create mode 100644 migrations/db/migrations/20230529180330_alter_api_roles_for_inherit.sql create mode 100644 migrations/db/migrations/20231013070755_grant_authenticator_to_tealbase_storage_admin.sql create mode 100644 migrations/db/migrations/20231017062225_grant_pg_graphql_permissions_for_custom_roles.sql create mode 100644 migrations/db/migrations/20231020085357_revoke_writes_on_cron_job_from_postgres.sql create mode 100644 migrations/db/migrations/20231130133139_set_lock_timeout_to_authenticator_role.sql create mode 100644 migrations/db/migrations/20240124080435_alter_lo_export_lo_import_owner.sql create mode 100644 migrations/db/migrations/20240606060239_grant_predefined_roles_to_postgres.sql create mode 100644 migrations/tests/extensions/01-postgis.sql create mode 100644 migrations/tests/extensions/02-pgrouting.sql create mode 100644 migrations/tests/extensions/03-pgtap.sql create mode 100644 migrations/tests/extensions/04-pg_cron.sql create mode 100644 migrations/tests/extensions/05-pgaudit.sql create mode 100644 
migrations/tests/extensions/06-pgjwt.sql create mode 100644 migrations/tests/extensions/07-pgsql-http.sql create mode 100644 migrations/tests/extensions/08-plpgsql_check.sql create mode 100644 migrations/tests/extensions/09-pg-safeupdate.sql create mode 100644 migrations/tests/extensions/10-timescaledb.sql create mode 100644 migrations/tests/extensions/11-wal2json.sql create mode 100644 migrations/tests/extensions/12-pljava.sql create mode 100644 migrations/tests/extensions/13-plv8.sql create mode 100644 migrations/tests/extensions/14-pg_plan_filter.sql create mode 100644 migrations/tests/extensions/15-pg_net.sql create mode 100644 migrations/tests/extensions/16-rum.sql create mode 100644 migrations/tests/extensions/17-pg_hashids.sql create mode 100644 migrations/tests/extensions/18-pgsodium.sql create mode 100644 migrations/tests/extensions/19-pg_graphql.sql create mode 100644 migrations/tests/extensions/20-pg_stat_monitor.sql create mode 100644 migrations/tests/extensions/21-auto_explain.sql create mode 100644 migrations/tests/extensions/22-pg_jsonschema.sql create mode 100644 migrations/tests/extensions/23-vault.sql create mode 100644 migrations/tests/extensions/24-pgroonga.sql create mode 100644 migrations/tests/extensions/25-wrappers.sql create mode 100644 migrations/tests/extensions/26-hypopg.sql create mode 100644 migrations/tests/extensions/27-pg_repack.sql create mode 100644 migrations/tests/extensions/28-pgvector.sql create mode 100644 migrations/tests/extensions/29-pg_tle.sql create mode 100644 migrations/tests/extensions/test.sql create mode 100644 migrations/tests/storage/privs.sql create mode 100644 nix/do-not-use-vendored-libraries.patch create mode 100644 nix/docker/init.sh.in create mode 100644 nix/docs/README.md create mode 100644 nix/docs/adding-new-package.md create mode 100644 nix/docs/adding-tests.md create mode 100644 nix/docs/build-postgres.md create mode 100644 nix/docs/docker.md create mode 100644 nix/docs/migration-tests.md create mode 
100644 nix/docs/new-major-postgres.md create mode 100644 nix/docs/nix-overlays.md create mode 100644 nix/docs/receipt-files.md create mode 100644 nix/docs/references.md create mode 100644 nix/docs/start-client-server.md create mode 100644 nix/docs/start-here.md create mode 100644 nix/docs/update-extension.md create mode 100644 nix/docs/use-direnv.md create mode 100644 nix/ext/0001-build-Allow-using-V8-from-system.patch create mode 100644 nix/ext/hypopg.nix create mode 100644 nix/ext/index_advisor.nix create mode 100644 nix/ext/mecab-naist-jdic/default.nix create mode 100644 nix/ext/orioledb.nix create mode 100644 nix/ext/pg-safeupdate.nix create mode 100644 nix/ext/pg_backtrace.nix create mode 100644 nix/ext/pg_cron.nix create mode 100644 nix/ext/pg_graphql.nix create mode 100644 nix/ext/pg_hashids.nix create mode 100644 nix/ext/pg_jsonschema.nix create mode 100644 nix/ext/pg_net.nix create mode 100644 nix/ext/pg_plan_filter.nix create mode 100644 nix/ext/pg_regress.nix create mode 100644 nix/ext/pg_repack.nix create mode 100644 nix/ext/pg_stat_monitor.nix create mode 100644 nix/ext/pg_tle.nix create mode 100644 nix/ext/pgaudit.nix create mode 100644 nix/ext/pgjwt.nix create mode 100644 nix/ext/pgmq.nix create mode 100644 nix/ext/pgroonga.nix create mode 100644 nix/ext/pgrouting.nix create mode 100644 nix/ext/pgsodium.nix create mode 100644 nix/ext/pgsql-http.nix create mode 100644 nix/ext/pgtap.nix create mode 100644 nix/ext/pgvector.nix create mode 100644 nix/ext/pljava.nix create mode 100644 nix/ext/plpgsql-check.nix create mode 100644 nix/ext/plv8.nix create mode 100644 nix/ext/postgis.nix create mode 100644 nix/ext/rum.nix create mode 100644 nix/ext/sfcgal/sfcgal.nix create mode 100644 nix/ext/supautils.nix create mode 100644 nix/ext/timescaledb-2.9.1.nix create mode 100644 nix/ext/timescaledb.nix create mode 100644 nix/ext/use-system-groonga.patch create mode 100644 nix/ext/vault.nix create mode 100644 nix/ext/wal2json.nix create mode 100644 
nix/ext/wrappers/default.nix create mode 100644 nix/fix-cmake-install-path.patch create mode 100755 nix/init.sh create mode 100644 nix/overlays/cargo-pgrx-0-11-3.nix create mode 100644 nix/overlays/gdal-small.nix create mode 100644 nix/overlays/psql_16-oriole.nix create mode 100644 nix/postgresql/15.nix create mode 100644 nix/postgresql/default.nix create mode 100644 nix/postgresql/generic.nix create mode 100644 nix/postgresql/patches/less-is-more.patch create mode 100644 nix/postgresql/patches/locale-binary-path.patch create mode 100644 nix/postgresql/patches/paths-for-split-outputs.patch create mode 100644 nix/postgresql/patches/paths-with-postgresql-suffix.patch create mode 100644 nix/postgresql/patches/relative-to-symlinks-16+.patch create mode 100644 nix/postgresql/patches/relative-to-symlinks.patch create mode 100644 nix/postgresql/patches/socketdir-in-run-13+.patch create mode 100644 nix/postgresql/patches/socketdir-in-run.patch create mode 100644 nix/postgresql/patches/specify_pkglibdir_at_runtime.patch create mode 100644 nix/tealbase-groonga.nix create mode 100644 nix/tests/expected/extensions_sql_interface.out create mode 100644 nix/tests/expected/hypopg.out create mode 100644 nix/tests/expected/index_advisor.out create mode 100644 nix/tests/expected/pg-safeupdate.out create mode 100644 nix/tests/expected/pg_graphql.out create mode 100644 nix/tests/expected/pg_hashids.out create mode 100644 nix/tests/expected/pg_jsonschema.out create mode 100644 nix/tests/expected/pg_net.out create mode 100644 nix/tests/expected/pg_plan_filter.out create mode 100644 nix/tests/expected/pg_stat_monitor.out create mode 100644 nix/tests/expected/pg_tle.out create mode 100644 nix/tests/expected/pgaudit.out create mode 100644 nix/tests/expected/pgjwt.out create mode 100644 nix/tests/expected/pgmq.out create mode 100644 nix/tests/expected/pgroonga.out create mode 100644 nix/tests/expected/pgrouting.out create mode 100644 nix/tests/expected/pgsodium.out create mode 100644 
nix/tests/expected/pgtap.out create mode 100644 nix/tests/expected/pgvector.out create mode 100644 nix/tests/expected/plpgsql-check.out create mode 100644 nix/tests/expected/plv8.out create mode 100644 nix/tests/expected/postgis.out create mode 100644 nix/tests/expected/rum.out create mode 100644 nix/tests/expected/timescale.out create mode 100644 nix/tests/expected/vault.out create mode 100644 nix/tests/expected/wal2json.out create mode 100644 nix/tests/migrations/data.sql create mode 100644 nix/tests/postgresql.conf.in create mode 100644 nix/tests/prime.sql create mode 100644 nix/tests/smoke/0000-hello-world.sql create mode 100644 nix/tests/smoke/0001-pg_graphql.sql create mode 100644 nix/tests/smoke/0002-supautils.sql create mode 100644 nix/tests/smoke/0003-pgsodium-vault.sql create mode 100644 nix/tests/smoke/0004-index_advisor.sql create mode 100644 nix/tests/smoke/0005-test_pgroonga_mecab.sql create mode 100644 nix/tests/sql/extensions_sql_interface.sql create mode 100644 nix/tests/sql/hypopg.sql create mode 100644 nix/tests/sql/index_advisor.sql create mode 100644 nix/tests/sql/pg-safeupdate.sql create mode 100644 nix/tests/sql/pg_graphql.sql create mode 100644 nix/tests/sql/pg_hashids.sql create mode 100644 nix/tests/sql/pg_jsonschema.sql create mode 100644 nix/tests/sql/pg_net.sql create mode 100644 nix/tests/sql/pg_plan_filter.sql create mode 100644 nix/tests/sql/pg_stat_monitor.sql create mode 100644 nix/tests/sql/pg_tle.sql create mode 100644 nix/tests/sql/pgaudit.sql create mode 100644 nix/tests/sql/pgjwt.sql create mode 100644 nix/tests/sql/pgmq.sql create mode 100644 nix/tests/sql/pgroonga.sql create mode 100644 nix/tests/sql/pgrouting.sql create mode 100644 nix/tests/sql/pgsodium.sql create mode 100644 nix/tests/sql/pgtap.sql create mode 100644 nix/tests/sql/pgvector.sql create mode 100644 nix/tests/sql/plpgsql-check.sql create mode 100644 nix/tests/sql/plv8.sql create mode 100644 nix/tests/sql/postgis.sql create mode 100644 nix/tests/sql/rum.sql 
create mode 100644 nix/tests/sql/timescale.sql create mode 100644 nix/tests/sql/vault.sql create mode 100644 nix/tests/sql/wal2json.sql create mode 100755 nix/tests/util/pgsodium_getkey.sh create mode 100755 nix/tests/util/pgsodium_getkey_arb.sh create mode 100644 nix/tools/README.md create mode 100644 nix/tools/migrate-tool.sh.in create mode 100644 nix/tools/postgresql_schema.sql create mode 100644 nix/tools/run-client.sh.in create mode 100644 nix/tools/run-replica.sh.in create mode 100644 nix/tools/run-server.sh.in create mode 100644 nix/tools/sync-exts-versions.sh.in create mode 100644 postgresql.gpg.key create mode 100644 scripts/nix-provision.sh create mode 100644 stage2-nix-psql.pkr.hcl create mode 100644 testinfra/README.md create mode 100644 testinfra/test_all_in_one.py create mode 100644 testinfra/test_ami.py create mode 100644 testinfra/test_ami_nix.py create mode 100644 tests/pg_upgrade/.env create mode 100644 tests/pg_upgrade/.gitignore create mode 100755 tests/pg_upgrade/debug.sh create mode 100755 tests/pg_upgrade/scripts/entrypoint.sh create mode 100644 tests/pg_upgrade/tests/01-schema.sql create mode 100644 tests/pg_upgrade/tests/02-data.sql create mode 100644 tests/pg_upgrade/tests/03-settings.sql create mode 100644 tests/pg_upgrade/tests/97-enable-extensions.sql create mode 100644 tests/pg_upgrade/tests/98-data-fixtures.sql create mode 100644 tests/pg_upgrade/tests/99-fixtures.sql diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 2f1c9a1..0576d01 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,2 +1,4 @@ * @supabase/backend migrations/ @supabase/cli @supabase/backend +docker/orioledb @supabase/postgres @supabase/backend +common.vars.pkr.hcl @supabase/postgres @supabase/backend diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index 5c8088a..4ead358 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1,6 +1,6 @@ # These are supported funding model platforms -github: [ supabase ] # Replace with up to 4 GitHub 
Sponsors-enabled usernames e.g., [user1, user2] +github: [ tealbase ] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] patreon: # Replace with a single Patreon username open_collective: # Replace with a single Open Collective username ko_fi: # Replace with a single Ko-fi username diff --git a/.github/PULL_REQUEST_TEMPLATE/default.md b/.github/PULL_REQUEST_TEMPLATE/default.md new file mode 100644 index 0000000..15bf13f --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/default.md @@ -0,0 +1,15 @@ +## What kind of change does this PR introduce? + +Bug fix, feature, docs update, ... + +## What is the current behavior? + +Please link any relevant issues here. + +## What is the new behavior? + +Feel free to include screenshots if it includes visual changes. + +## Additional context + +Add any other context or screenshots. \ No newline at end of file diff --git a/.github/PULL_REQUEST_TEMPLATE/extension_upgrade.md b/.github/PULL_REQUEST_TEMPLATE/extension_upgrade.md new file mode 100644 index 0000000..9db2a06 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/extension_upgrade.md @@ -0,0 +1,49 @@ +## What kind of change does this PR introduce? + +- upgrade _extension_ from _v0.0.0_ to _v0.0.0_ + +## Additional context + +Add any other context or screenshots. + +## Action Items + +- [ ] **New extension releases** were Checked for any breaking changes +- [ ] **Extensions compatibility** Checked + * Proceed to [extensions compatibility testing](#extensions-compatibility-testing), mark as done after everything is completed +- [ ] **Backup and Restore** Checked + * Proceed to [backup testing](#backup-testing) while extensions are enabled + - After every restore, re-run the tests specified at point [3.1](#extensions-compatibility-testing) + +### Extensions compatibility testing + +1. Enable every extension + 1. Check Postgres’ log output for any error messages while doing so + 1. 
This might unearth incompatibilities due to unsupported internal functions, missing libraries, or missing permissions +2. Disable every extension + 1. Check Postgres’ log output for any cleanup-related error messages +3. Re-enable each extension + 1. Run basic tests against the features they offer, e.g.: + 1. `pg_net` - execute HTTP requests + 2. `pg_graphql` - execute queries and mutations + 3. …to be filled in + +### Backup Testing + +Follow the testing steps steps for all the following cases: + +- Pause on new Postgres version, restore on new Postgres version +- Pause on older Postgres version, restore on new Postgres version +- Run a single-file backup backup, restore the backup + +#### Testing steps + +1. Generate dummy data + * the ‘Countries’ or ‘Slack clone’ SQL editor snippets are decent datasets to work with, albeit limited +2. Save a db stats snapshot file + * Do this by running `supa db-stats gather -p ` +3. Backup the database, through pausing the project, or otherwise +4. Restore the backup, through unpausing the project or cli +5. Check the data has been recovered successfully + 1. Visual checks/navigating through the tables works + 2. 
Run `supa db-stats verify` against the project and the previously saved file \ No newline at end of file diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000..0d13d96 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,4 @@ +Please go the the `Preview` tab and select the appropriate sub-template: + +* [Default](?expand=1&template=default.md) +* [Extension Upgrade](?expand=1&template=extension_upgrade.md) \ No newline at end of file diff --git a/.github/workflows/ami-release-nix.yml b/.github/workflows/ami-release-nix.yml new file mode 100644 index 0000000..194c8de --- /dev/null +++ b/.github/workflows/ami-release-nix.yml @@ -0,0 +1,141 @@ +name: Release AMI Nix + +on: + push: + branches: + - develop + - release/* + paths: + - '.github/workflows/ami-release-nix.yml' + - 'common-nix.vars.pkr.hcl' + workflow_dispatch: + +jobs: + build: + strategy: + matrix: + include: + - runner: arm-runner + arch: arm64 + ubuntu_release: focal + ubuntu_version: 20.04 + mcpu: neoverse-n1 + runs-on: ${{ matrix.runner }} + timeout-minutes: 150 + permissions: + contents: write + packages: write + id-token: write + + steps: + - name: Checkout Repo + uses: actions/checkout@v3 + + - name: Run checks if triggered manually + if: ${{ github.event_name == 'workflow_dispatch' }} + # Update `ci.yaml` too if changing constraints. + run: | + SUFFIX=$(sed -E 's/postgres-version = "[0-9\.]+(.*)"/\1/g' common-nix.vars.pkr.hcl) + if [[ -z $SUFFIX ]] ; then + echo "Version must include non-numeric characters if built manually." 
+ exit 1 + fi + + # extensions are build in nix prior to this step + # so we can just use the binaries from the nix store + # for postgres, extensions and wrappers + + - name: Build AMI stage 1 + run: | + packer init amazon-arm64-nix.pkr.hcl + GIT_SHA=${{github.sha}} + packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "ansible_arguments=" amazon-arm64-nix.pkr.hcl + + - name: Build AMI stage 2 + run: | + packer init stage2-nix-psql.pkr.hcl + GIT_SHA=${{github.sha}} + packer build -var "git_sha=${GIT_SHA}" -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" stage2-nix-psql.pkr.hcl + + - name: Grab release version + id: process_release_version + run: | + VERSION=$(sed -e 's/postgres-version = "\(.*\)"/\1/g' common-nix.vars.pkr.hcl) + echo "version=$VERSION" >> "$GITHUB_OUTPUT" + + - name: Create nix flake revision tarball + run: | + GIT_SHA=${{github.sha}} + MAJOR_VERSION=$(echo "${{ steps.process_release_version.outputs.version }}" | cut -d. -f1) + + mkdir -p "/tmp/pg_upgrade_bin/${MAJOR_VERSION}" + echo "$GIT_SHA" >> "/tmp/pg_upgrade_bin/${MAJOR_VERSION}/nix_flake_version" + tar -czf "/tmp/pg_binaries.tar.gz" -C "/tmp/pg_upgrade_bin" . 
+ + - name: configure aws credentials - staging + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload software manifest to s3 staging + run: | + cd ansible + ansible-playbook -i localhost \ + -e "ami_release_version=${{ steps.process_release_version.outputs.version }}" \ + -e "internal_artifacts_bucket=${{ secrets.ARTIFACTS_BUCKET }}" \ + manifest-playbook.yml + + - name: Upload nix flake revision to s3 staging + run: | + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz + + #Our self hosted github runner already has permissions to publish images + #but they're limited to only that; + #so if we want s3 access we'll need to config credentials with the below steps + # (which overwrites existing perms) after the ami build + + - name: configure aws credentials - prod + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.PROD_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload software manifest to s3 prod + run: | + cd ansible + ansible-playbook -i localhost \ + -e "ami_release_version=${{ steps.process_release_version.outputs.version }}" \ + -e "internal_artifacts_bucket=${{ secrets.PROD_ARTIFACTS_BUCKET }}" \ + manifest-playbook.yml + + - name: Upload nix flake revision to s3 prod + run: | + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz + + - name: Create release + uses: softprops/action-gh-release@v1 + with: + name: ${{ steps.process_release_version.outputs.version }} + tag_name: ${{ steps.process_release_version.outputs.version }} + target_commitish: ${{github.sha}} + + - name: Slack Notification on Failure + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + 
SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: 'gha-failures-notifier' + SLACK_COLOR: 'danger' + SLACK_MESSAGE: 'Building Postgres AMI failed' + SLACK_FOOTER: '' + + - name: Cleanup resources on build cancellation + if: ${{ always() }} + run: | + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -n 1 -I {} aws ec2 terminate-instances --instance-ids {} + + - name: Cleanup resources on build cancellation + if: ${{ cancelled() }} + run: | + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -n 1 -I {} aws ec2 terminate-instances --instance-ids {} diff --git a/.github/workflows/ami-release.yml b/.github/workflows/ami-release.yml deleted file mode 100644 index 01b9206..0000000 --- a/.github/workflows/ami-release.yml +++ /dev/null @@ -1,49 +0,0 @@ -name: Release AMI - -on: - push: - branches: - - develop - workflow_dispatch: - -jobs: - build: - runs-on: [self-hosted, linux] - timeout-minutes: 150 - - steps: - - name: Checkout Repo - uses: actions/checkout@v2 - - - name: Build AMI - run: | - GIT_SHA=${{github.sha}} - packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common.vars.pkr.hcl" amazon-arm64.pkr.hcl - - - name: Grab release version - id: process_release_version - run: | - VERSION=$(sed -e 's/postgres-version = "\(.*\)"/\1/g' common.vars.pkr.hcl) - echo "version=$VERSION" >> "$GITHUB_OUTPUT" - - - name: Create release - uses: softprops/action-gh-release@v1 - with: - name: ${{ steps.process_release_version.outputs.version }} - tag_name: ${{ steps.process_release_version.outputs.version }} - target_commitish: ${{github.sha}} - - - name: Slack Notification on Failure - if: ${{ failure() }} - uses: rtCamp/action-slack-notify@v2 - 
env: - SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} - SLACK_USERNAME: 'gha-failures-notifier' - SLACK_COLOR: 'danger' - SLACK_MESSAGE: 'Building Postgres AMI failed' - SLACK_FOOTER: '' - - - name: Cleanup resources on build cancellation - if: ${{ cancelled() }} - run: | - aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -I {} aws ec2 terminate-instances --instance-ids {} diff --git a/.github/workflows/build-ccache.yml b/.github/workflows/build-ccache.yml new file mode 100644 index 0000000..f296dd6 --- /dev/null +++ b/.github/workflows/build-ccache.yml @@ -0,0 +1,86 @@ +name: Update ccache + +on: + push: + branches: + - develop + paths: + - ".github/workflows/build-ccache.yml" + - "ansible/vars.yml" + - "Dockerfile" + workflow_dispatch: + +env: + image_tag: public.ecr.aws/supabase/postgres:ccache +permissions: + contents: read + packages: write + id-token: write + +jobs: + settings: + runs-on: ubuntu-latest + outputs: + build_args: ${{ steps.args.outputs.result }} + steps: + - uses: actions/checkout@v3 + - id: args + uses: mikefarah/yq@master + with: + cmd: yq 'to_entries | map(select(.value|type == "!!str")) | map(.key + "=" + .value) | join("\n")' 'ansible/vars.yml' + + build_image: + needs: settings + strategy: + matrix: + include: + - runner: [self-hosted, X64] + arch: amd64 + - runner: arm-runner + arch: arm64 + runs-on: ${{ matrix.runner }} + timeout-minutes: 180 + outputs: + image_digest: ${{ steps.build.outputs.digest }} + steps: + - run: docker context create builders + - uses: docker/setup-buildx-action@v3 + with: + endpoint: builders + - name: Configure AWS credentials - prod + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ secrets.PROD_AWS_ROLE }} + aws-region: "us-east-1" + - uses: docker/login-action@v2 + with: + registry: public.ecr.aws + - id: build + uses: docker/build-push-action@v5 + with: + 
push: true + target: buildcache + build-args: | + CACHE_EPOCH=${{ github.event.repository.updated_at }} + ${{ needs.settings.outputs.build_args }} + tags: ${{ env.image_tag }}_${{ matrix.arch }} + platforms: linux/${{ matrix.arch }} + + merge_manifest: + needs: build_image + runs-on: ubuntu-latest + steps: + - uses: docker/setup-buildx-action@v3 + - name: Configure AWS credentials - prod + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ secrets.PROD_AWS_ROLE }} + aws-region: "us-east-1" + - uses: docker/login-action@v2 + with: + registry: public.ecr.aws + - name: Merge multi-arch manifests + run: | + docker buildx imagetools create -t ${{ env.image_tag }} \ + ${{ env.image_tag }}_amd64 \ + ${{ env.image_tag }}_arm64 diff --git a/.github/workflows/check-shellscripts.yml b/.github/workflows/check-shellscripts.yml new file mode 100644 index 0000000..b796bdb --- /dev/null +++ b/.github/workflows/check-shellscripts.yml @@ -0,0 +1,27 @@ +name: Check shell scripts + +on: + push: + branches: + - develop + pull_request: + workflow_dispatch: + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Run ShellCheck + uses: ludeeus/action-shellcheck@master + env: + SHELLCHECK_OPTS: -e SC2001 -e SC2002 -e SC2143 + with: + scandir: './ansible/files/admin_api_scripts' + + - name: Run ShellCheck on pg_upgrade scripts + uses: ludeeus/action-shellcheck@master + env: + SHELLCHECK_OPTS: -e SC2001 -e SC2002 -e SC2143 + with: + scandir: './ansible/files/admin_api_scripts/pg_upgrade_scripts' diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..5999341 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,21 @@ +name: Check merge requirements + +on: + pull_request: + +jobs: + check-release-version: + timeout-minutes: 5 + runs-on: ubuntu-latest + steps: + - name: Checkout Repo + uses: actions/checkout@v3 + + - name: Run checks + # Update `ami-release.yaml` too if changing 
constraints. + run: | + SUFFIX=$(sed -E 's/postgres-version = "[0-9\.]+(.*)"/\1/g' common.vars.pkr.hcl) + if [[ -n $SUFFIX ]] ; then + echo "We no longer allow merging RC versions to develop." + exit 1 + fi diff --git a/.github/workflows/dockerhub-release-15-6.yml b/.github/workflows/dockerhub-release-15-6.yml new file mode 100644 index 0000000..4c7b5b9 --- /dev/null +++ b/.github/workflows/dockerhub-release-15-6.yml @@ -0,0 +1,104 @@ +name: Release 15.6 on Dockerhub + +on: + push: + branches: + - develop + - release/* + paths: + - ".github/workflows/dockerhub-release-15-6.yml" + - "common-nix.vars*" + workflow_dispatch: + +jobs: + settings: + runs-on: ubuntu-latest + outputs: + docker_version: ${{ steps.settings.outputs.postgres-version }} + image_tag: supabase/postgres:${{ steps.settings.outputs.postgres-version }} + build_args: ${{ steps.args.outputs.result }} + steps: + - uses: actions/checkout@v3 + - id: settings + # Remove spaces and quotes to get the raw version string + run: sed -r 's/(\s|\")+//g' common-nix.vars.pkr.hcl >> $GITHUB_OUTPUT + - id: args + uses: mikefarah/yq@master + with: + cmd: yq 'to_entries | map(select(.value|type == "!!str")) | map(.key + "=" + .value) | join("\n")' 'ansible/vars.yml' + + build_release_image: + needs: [settings] + strategy: + matrix: + include: + - runner: [self-hosted, X64] + arch: amd64 + - runner: arm-runner + arch: arm64 + runs-on: ${{ matrix.runner }} + timeout-minutes: 180 + outputs: + image_digest: ${{ steps.build.outputs.digest }} + steps: + - run: docker context create builders + - uses: docker/setup-buildx-action@v3 + with: + endpoint: builders + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - id: build + uses: docker/build-push-action@v5 + with: + push: true + build-args: | + ${{ needs.settings.outputs.build_args }} + target: production + tags: ${{ needs.settings.outputs.image_tag }}_${{ matrix.arch }} + platforms: linux/${{ 
matrix.arch }} + cache-from: type=gha,scope=${{ github.ref_name }}-latest-${{ matrix.arch }} + cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-latest-${{ matrix.arch }} + file: "Dockerfile-156" + - name: Slack Notification + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: "gha-failures-notifier" + SLACK_COLOR: "danger" + SLACK_MESSAGE: "Building Postgres ${{ matrix.arch }} image failed" + SLACK_FOOTER: "" + + merge_manifest: + needs: [settings, build_release_image] + runs-on: ubuntu-latest + steps: + - uses: docker/setup-buildx-action@v3 + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - name: Merge multi-arch manifests + run: | + docker buildx imagetools create -t ${{ needs.settings.outputs.image_tag }} \ + ${{ needs.settings.outputs.image_tag }}_amd64 \ + ${{ needs.settings.outputs.image_tag }}_arm64 + - name: Slack Notification + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: "gha-failures-notifier" + SLACK_COLOR: "danger" + SLACK_MESSAGE: "Building Postgres image failed" + SLACK_FOOTER: "" + + publish: + needs: [settings, merge_manifest] + # Call workflow explicitly because events from actions cannot trigger more actions + uses: ./.github/workflows/mirror.yml + with: + version: ${{ needs.settings.outputs.docker_version }} + secrets: inherit diff --git a/.github/workflows/dockerhub-release-aio.yml b/.github/workflows/dockerhub-release-aio.yml new file mode 100644 index 0000000..e5a5a82 --- /dev/null +++ b/.github/workflows/dockerhub-release-aio.yml @@ -0,0 +1,157 @@ +name: Release AIO image + +on: + push: + branches: + - develop + paths: + - ".github/workflows/dockerhub-release-aio.yml" + - "docker/all-in-one/*" + workflow_run: + workflows: [Release on Dockerhub] + branches: + - 
develop + types: + - completed + workflow_dispatch: + inputs: + baseDockerVersion: + description: 'Base Docker Version. E.g., 15.1.1.27' + required: false + +jobs: + settings: + runs-on: ubuntu-latest + outputs: + base_docker_version: ${{ steps.base_docker.outputs.base-docker-version }} + docker_version: ${{ steps.settings.outputs.postgres-version }} + image_tag: supabase/postgres:aio-${{ steps.settings.outputs.postgres-version }} + fly_image_tag: supabase-postgres-image:aio-${{ steps.settings.outputs.postgres-version }} + build_args: ${{ steps.args.outputs.result }} + steps: + - uses: actions/checkout@v3 + - id: settings + # Remove spaces and quotes to get the raw version string + run: sed -r 's/(\s|\")+//g' common.vars.pkr.hcl >> $GITHUB_OUTPUT + - id: base_docker + run: | + if [[ "${{ inputs.baseDockerVersion }}" != "" ]]; then + echo "base-docker-version=${{ inputs.baseDockerVersion }}" >> $GITHUB_OUTPUT + else + echo "base-docker-version=${{ steps.settings.outputs.postgres-version }}" >> $GITHUB_OUTPUT + fi + - id: args + uses: mikefarah/yq@master + with: + cmd: yq 'to_entries | map(select(.value|type == "!!str")) | map(.key + "=" + .value) | join("\n")' 'ansible/vars.yml' + + build_image: + needs: settings + strategy: + matrix: + include: + - runner: [self-hosted, X64] + arch: amd64 + - runner: arm-runner + arch: arm64 + runs-on: ${{ matrix.runner }} + timeout-minutes: 180 + outputs: + image_digest: ${{ steps.build.outputs.digest }} + steps: + - run: docker context create builders + - uses: docker/setup-buildx-action@v3 + with: + endpoint: builders + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - id: build + uses: docker/build-push-action@v5 + with: + file: docker/all-in-one/Dockerfile + push: true + build-args: | + postgres_version=${{ needs.settings.outputs.base_docker_version }} + ${{ needs.settings.outputs.build_args }} + target: production + tags: ${{ 
needs.settings.outputs.image_tag }}_${{ matrix.arch }} + platforms: linux/${{ matrix.arch }} + cache-from: type=gha,scope=${{ github.ref_name }}-aio-${{ matrix.arch }} + cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-aio-${{ matrix.arch }} + - name: Slack Notification + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: "gha-failures-notifier" + SLACK_COLOR: "danger" + SLACK_MESSAGE: "Building Postgres AIO ${{ matrix.arch }} image failed" + SLACK_FOOTER: "" + + merge_manifest: + needs: [settings, build_image] + runs-on: ubuntu-latest + steps: + - uses: docker/setup-buildx-action@v3 + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - name: Merge multi-arch manifests + run: | + docker buildx imagetools create -t ${{ needs.settings.outputs.image_tag }} \ + ${{ needs.settings.outputs.image_tag }}_amd64 \ + ${{ needs.settings.outputs.image_tag }}_arm64 + - name: Slack Notification + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: "gha-failures-notifier" + SLACK_COLOR: "danger" + SLACK_MESSAGE: "Building Postgres image failed" + SLACK_FOOTER: "" + + publish: + needs: [settings, merge_manifest] + # Call workflow explicitly because events from actions cannot trigger more actions + uses: ./.github/workflows/mirror.yml + with: + version: aio-${{ needs.settings.outputs.docker_version }} + secrets: inherit + + publish_to_fly: + needs: [settings, build_image] + runs-on: ubuntu-latest + steps: + - uses: docker/setup-buildx-action@v3 + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - name: Push to Fly + uses: superfly/flyctl-actions/setup-flyctl@dfdfedc86b296f5e5384f755a18bf400409a15d0 + with: + version: 0.1.64 + - run: | + 
docker pull ${{ needs.settings.outputs.image_tag }}_amd64 + docker tag ${{ needs.settings.outputs.image_tag }}_amd64 "registry.fly.io/staging-${{ needs.settings.outputs.fly_image_tag }}" + docker tag ${{ needs.settings.outputs.image_tag }}_amd64 "registry.fly.io/prod-${{ needs.settings.outputs.fly_image_tag }}" + + flyctl auth docker + docker push "registry.fly.io/staging-${{ needs.settings.outputs.fly_image_tag }}" + docker push "registry.fly.io/prod-${{ needs.settings.outputs.fly_image_tag }}" + env: + FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }} + + - name: Slack Notification + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: "gha-failures-notifier" + SLACK_COLOR: "danger" + SLACK_MESSAGE: "Failed pushing AIO image to Fly.io" + SLACK_FOOTER: "" diff --git a/.github/workflows/dockerhub-release-orioledb.yml b/.github/workflows/dockerhub-release-orioledb.yml new file mode 100644 index 0000000..2c2f051 --- /dev/null +++ b/.github/workflows/dockerhub-release-orioledb.yml @@ -0,0 +1,86 @@ +name: Release OrioleDB on Dockerhub + +on: + push: + branches: + - develop + paths: + - ".github/workflows/dockerhub-release-orioledb.yml" + - "docker/orioledb/*" + - "common.vars*" + +jobs: + settings: + runs-on: ubuntu-latest + outputs: + docker_version: orioledb-${{ steps.settings.outputs.postgres-version }} + image_tag: supabase/postgres:orioledb-${{ steps.settings.outputs.postgres-version }} + build_args: ${{ steps.args.outputs.result }} + steps: + - uses: actions/checkout@v3 + - id: settings + # Remove spaces and quotes to get the raw version string + run: sed -r 's/(\s|\")+//g' common.vars.pkr.hcl >> $GITHUB_OUTPUT + - id: args + uses: mikefarah/yq@master + with: + cmd: yq 'to_entries | map(select(.value|type == "!!str")) | map(.key + "=" + .value) | join("\n")' 'ansible/vars.yml' + + build_image: + needs: settings + strategy: + fail-fast: false + matrix: + include: + - runner: 
[self-hosted, X64] + arch: amd64 + - runner: arm-runner + arch: arm64 + runs-on: ${{ matrix.runner }} + timeout-minutes: 180 + outputs: + image_digest: ${{ steps.build.outputs.digest }} + steps: + - run: docker context create builders + - uses: docker/setup-buildx-action@v3 + with: + endpoint: builders + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - id: build + uses: docker/build-push-action@v5 + with: + file: docker/orioledb/Dockerfile + push: true + build-args: | + ${{ needs.settings.outputs.build_args }} + target: production + tags: ${{ needs.settings.outputs.image_tag }}_${{ matrix.arch }} + platforms: linux/${{ matrix.arch }} + cache-from: type=gha,scope=${{ github.ref_name }}-orioledb-${{ matrix.arch }} + cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-orioledb-${{ matrix.arch }} + + merge_manifest: + needs: [settings, build_image] + runs-on: ubuntu-latest + steps: + - uses: docker/setup-buildx-action@v3 + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - name: Merge multi-arch manifests + run: | + docker buildx imagetools create -t ${{ needs.settings.outputs.image_tag }} \ + ${{ needs.settings.outputs.image_tag }}_amd64 \ + ${{ needs.settings.outputs.image_tag }}_arm64 + + publish: + needs: [settings, merge_manifest] + # Call workflow explicitly because events from actions cannot trigger more actions + uses: ./.github/workflows/mirror.yml + with: + version: ${{ needs.settings.outputs.docker_version }} + secrets: inherit diff --git a/.github/workflows/dockerhub-release.yml b/.github/workflows/dockerhub-release.yml index eee4ae1..7f4be55 100644 --- a/.github/workflows/dockerhub-release.yml +++ b/.github/workflows/dockerhub-release.yml @@ -7,126 +7,58 @@ on: paths: - ".github/workflows/dockerhub-release.yml" - "common.vars*" - + jobs: settings: - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest 
outputs: docker_version: ${{ steps.settings.outputs.postgres-version }} + image_tag: supabase/postgres:${{ steps.settings.outputs.postgres-version }} + build_args: ${{ steps.args.outputs.result }} steps: - uses: actions/checkout@v3 - - id: settings # Remove spaces and quotes to get the raw version string run: sed -r 's/(\s|\")+//g' common.vars.pkr.hcl >> $GITHUB_OUTPUT - - docker_x86_release: - needs: settings - runs-on: [self-hosted, X64] - timeout-minutes: 120 - env: - arch: amd64 - outputs: - image_digest: ${{ steps.build.outputs.digest }} - steps: - - uses: actions/checkout@v3 - - - id: meta - uses: docker/metadata-action@v4 - with: - images: | - supabase/postgres - tags: | - type=raw,value=${{ needs.settings.outputs.docker_version }}_${{ env.arch }} - - - id: buildx-context - run: | - docker context create builders - - - uses: docker/setup-buildx-action@v2 + - id: args + uses: mikefarah/yq@master with: - endpoint: builders - - - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - - id: copy-cache - name: Copy Buildcache - run: | - docker rm -f buildcache - docker create --name buildcache public.ecr.aws/t3w2s2c9/postgres-buildcache:latest ls - docker cp buildcache:/ccache/. ./docker/cache - docker rm -f buildcache + cmd: yq 'to_entries | map(select(.value|type == "!!str")) | map(.key + "=" + .value) | join("\n")' 'ansible/vars.yml' - - id: build - uses: docker/build-push-action@v3 - with: - context: . 
- push: true - tags: ${{ steps.meta.outputs.tags }} - platforms: linux/${{ env.arch }} - no-cache: true - - - name: Slack Notification - if: ${{ failure() }} - uses: rtCamp/action-slack-notify@v2 - env: - SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} - SLACK_USERNAME: "gha-failures-notifier" - SLACK_COLOR: "danger" - SLACK_MESSAGE: "Building Postgres x86 image failed" - SLACK_FOOTER: "" - docker_arm_release: + build_image: needs: settings - runs-on: [arm-runner] - timeout-minutes: 120 - env: - arch: arm64 + strategy: + matrix: + include: + - runner: [self-hosted, X64] + arch: amd64 + - runner: arm-runner + arch: arm64 + runs-on: ${{ matrix.runner }} + timeout-minutes: 180 outputs: image_digest: ${{ steps.build.outputs.digest }} steps: - - uses: actions/checkout@v3 - - - id: meta - uses: docker/metadata-action@v4 + - run: docker context create builders + - uses: docker/setup-buildx-action@v3 with: - images: | - supabase/postgres - tags: | - type=raw,value=${{ needs.settings.outputs.docker_version }}_${{ env.arch }} - + endpoint: builders - uses: docker/login-action@v2 with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - - - id: copy-cache - name: Copy Buildcache - run: | - docker rm -f buildcache - docker create --name buildcache public.ecr.aws/t3w2s2c9/postgres-buildcache:latest ls - docker cp buildcache:/ccache/. ./docker/cache/ - docker rm -f buildcache - - - uses: docker/setup-buildx-action@v2 - with: - driver: docker - driver-opts: | - image=moby/buildkit:master - network=host - - id: build - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v5 with: - context: . 
push: true - tags: ${{ steps.meta.outputs.tags }} - platforms: linux/${{ env.arch }} - no-cache: true - + build-args: | + ${{ needs.settings.outputs.build_args }} + target: production + tags: ${{ needs.settings.outputs.image_tag }}_${{ matrix.arch }} + platforms: linux/${{ matrix.arch }} + cache-from: type=gha,scope=${{ github.ref_name }}-latest-${{ matrix.arch }} + cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-latest-${{ matrix.arch }} - name: Slack Notification if: ${{ failure() }} uses: rtCamp/action-slack-notify@v2 @@ -134,56 +66,23 @@ jobs: SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} SLACK_USERNAME: "gha-failures-notifier" SLACK_COLOR: "danger" - SLACK_MESSAGE: "Building Postgres arm image failed" + SLACK_MESSAGE: "Building Postgres ${{ matrix.arch }} image failed" SLACK_FOOTER: "" merge_manifest: - needs: [settings, docker_x86_release, docker_arm_release] + needs: [settings, build_image] runs-on: ubuntu-latest - permissions: - contents: read - packages: write - id-token: write steps: - - uses: docker/setup-buildx-action@v2 - + - uses: docker/setup-buildx-action@v3 - uses: docker/login-action@v2 with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - - name: Merge multi-arch manifests run: | - docker buildx imagetools create -t supabase/postgres:${{ needs.settings.outputs.docker_version }} \ - supabase/postgres@${{ needs.docker_x86_release.outputs.image_digest }} \ - supabase/postgres@${{ needs.docker_arm_release.outputs.image_digest }} - - - name: configure aws credentials - uses: aws-actions/configure-aws-credentials@v1 - with: - role-to-assume: ${{ secrets.PROD_AWS_ROLE }} - aws-region: us-east-1 - - - name: Login to ECR - uses: docker/login-action@v2 - with: - registry: public.ecr.aws - - - name: Login to GHCR - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Mirror Images - uses: 
akhilerm/tag-push-action@v2.1.0 - with: - src: docker.io/supabase/postgres:${{ needs.settings.outputs.docker_version }} - dst: | - public.ecr.aws/supabase/postgres:${{ needs.settings.outputs.docker_version }} - ghcr.io/supabase/postgres:${{ needs.settings.outputs.docker_version }} - + docker buildx imagetools create -t ${{ needs.settings.outputs.image_tag }} \ + ${{ needs.settings.outputs.image_tag }}_amd64 \ + ${{ needs.settings.outputs.image_tag }}_arm64 - name: Slack Notification if: ${{ failure() }} uses: rtCamp/action-slack-notify@v2 @@ -193,3 +92,11 @@ jobs: SLACK_COLOR: "danger" SLACK_MESSAGE: "Building Postgres image failed" SLACK_FOOTER: "" + + publish: + needs: [settings, merge_manifest] + # Call workflow explicitly because events from actions cannot trigger more actions + uses: ./.github/workflows/mirror.yml + with: + version: ${{ needs.settings.outputs.docker_version }} + secrets: inherit diff --git a/.github/workflows/mirror-postgrest.yml b/.github/workflows/mirror-postgrest.yml new file mode 100644 index 0000000..c84647c --- /dev/null +++ b/.github/workflows/mirror-postgrest.yml @@ -0,0 +1,33 @@ +name: Mirror PostgREST + +on: + push: + branches: + - develop + paths: + - ".github/workflows/mirror-postgrest.yml" + - "common.vars*" + +jobs: + version: + runs-on: ubuntu-latest + outputs: + postgrest_release: ${{ steps.args.outputs.result }} + steps: + - uses: actions/checkout@v4 + - id: args + uses: mikefarah/yq@master + with: + cmd: yq '.postgrest_release' 'ansible/vars.yml' + + mirror: + needs: + - version + permissions: + contents: read + packages: write + id-token: write + uses: supabase/cli/.github/workflows/mirror-image.yml@main + with: + image: postgrest/postgrest:v${{ needs.version.outputs.postgrest_release }} + secrets: inherit diff --git a/.github/workflows/mirror.yml b/.github/workflows/mirror.yml index b4c2c9b..268a397 100644 --- a/.github/workflows/mirror.yml +++ b/.github/workflows/mirror.yml @@ -1,6 +1,11 @@ name: Mirror Image on: + 
workflow_call: + inputs: + version: + required: true + type: string workflow_dispatch: inputs: version: @@ -17,7 +22,7 @@ jobs: id-token: write steps: - name: configure aws credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v2.2.0 with: role-to-assume: ${{ secrets.PROD_AWS_ROLE }} aws-region: us-east-1 @@ -31,7 +36,7 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - uses: akhilerm/tag-push-action@v2.1.0 with: - src: docker.io/supabase/postgres:${{ inputs.version }} + src: docker.io/tealbase/postgres:${{ inputs.version }} dst: | - public.ecr.aws/supabase/postgres:${{ inputs.version }} - ghcr.io/supabase/postgres:${{ inputs.version }} + public.ecr.aws/tealbase/postgres:${{ inputs.version }} + ghcr.io/tealbase/postgres:${{ inputs.version }} diff --git a/.github/workflows/nix-build.yml b/.github/workflows/nix-build.yml new file mode 100644 index 0000000..08c316b --- /dev/null +++ b/.github/workflows/nix-build.yml @@ -0,0 +1,82 @@ +name: Nix CI + +on: + push: + branches: + - develop + - release/* + pull_request: + workflow_dispatch: + +permissions: + contents: read + id-token: write + +jobs: + build-run-image: + strategy: + fail-fast: false + matrix: + include: + - runner: larger-runner-4cpu + arch: amd64 + - runner: arm-runner + arch: arm64 + - runner: macos-latest + arch: arm64 + runs-on: ${{ matrix.runner }} + + steps: + + - name: Check out code + uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.ref || github.ref }} + fetch-depth: 0 + fetch-tags: true + - name: aws-creds + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "us-east-1" + output-credentials: true + - name: write secret key + # use python so we don't interpolate the secret into the workflow logs, in case of bugs + run: | + python -c "import os; file = open('nix-secret-key', 'w'); file.write(os.environ['NIX_SIGN_SECRET_KEY']); file.close()" + env: + 
NIX_SIGN_SECRET_KEY: ${{ secrets.NIX_SIGN_SECRET_KEY }} + - name: Log in to Docker Hub + if: matrix.runner != 'macos-latest' + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - name: Build psql bundle with nix + if: matrix.runner != 'macos-latest' + run: docker build -t base_nix -f docker/nix/Dockerfile . + - name: Run build psql bundle + if: matrix.runner != 'macos-latest' + run: | + docker run -e AWS_ACCESS_KEY_ID=${{ env.AWS_ACCESS_KEY_ID }} \ + -e AWS_SECRET_ACCESS_KEY=${{ env.AWS_SECRET_ACCESS_KEY }} \ + -e AWS_SESSION_TOKEN=${{ env.AWS_SESSION_TOKEN }} \ + base_nix bash -c "./workspace/docker/nix/build_nix.sh" + - name: Build psql bundle on macos + if: matrix.runner == 'macos-latest' + run: | + curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install --no-confirm \ + --extra-conf "substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com" \ + --extra-conf "trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" + . 
/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh + cp ./docker/nix/build_nix.sh ./build_nix.sh + sed -i '' '1s|^#!/bin/env bash|#!/usr/bin/env bash|' ./build_nix.sh + chmod +x ./build_nix.sh + ./build_nix.sh + env: + AWS_ACCESS_KEY_ID: ${{ env.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ env.AWS_SECRET_ACCESS_KEY }} + AWS_SESSION_TOKEN: ${{ env.AWS_SESSION_TOKEN }} + + name: build psql bundle on ${{ matrix.arch }} + diff --git a/.github/workflows/package-plv8.yml b/.github/workflows/package-plv8.yml new file mode 100644 index 0000000..09b2c4e --- /dev/null +++ b/.github/workflows/package-plv8.yml @@ -0,0 +1,78 @@ +name: Package plv8 + +on: + push: + branches: + - develop + paths: + - ".github/workflows/package-plv8.yml" + - "Dockerfile" + workflow_dispatch: + +env: + image: ghcr.io/supabase/plv8 +permissions: + contents: read + packages: write + id-token: write + +jobs: + settings: + runs-on: ubuntu-latest + outputs: + image_tag: ${{ env.image }}:${{ steps.meta.outputs.image_tag }} + steps: + - uses: actions/checkout@v3 + - id: meta + run: | + plv8_release=$(grep -o 'plv8_release=.*' Dockerfile | head -1 | cut -d "=" -f 2) + postgresql_major=$(grep -o 'postgresql_major=.*' Dockerfile | head -1 | cut -d "=" -f 2) + echo "image_tag=${plv8_release}-pg${postgresql_major}" >> $GITHUB_OUTPUT + + build_image: + needs: settings + strategy: + matrix: + include: + - runner: [self-hosted, X64] + arch: amd64 + - runner: arm-runner + arch: arm64 + runs-on: ${{ matrix.runner }} + timeout-minutes: 180 + outputs: + image_digest: ${{ steps.build.outputs.digest }} + steps: + - run: docker context create builders + - uses: docker/setup-buildx-action@v3 + with: + endpoint: builders + - uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - id: build + uses: docker/build-push-action@v5 + with: + push: true + target: plv8-deb + tags: ${{ needs.settings.outputs.image_tag }}_${{ matrix.arch }} + 
platforms: linux/${{ matrix.arch }} + no-cache: true + + merge_manifest: + needs: [settings, build_image] + runs-on: ubuntu-latest + steps: + - uses: docker/setup-buildx-action@v3 + - uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Merge multi-arch manifests + run: | + docker buildx imagetools create -t ${{ needs.settings.outputs.image_tag }} \ + ${{ needs.settings.outputs.image_tag }}_amd64 \ + ${{ needs.settings.outputs.image_tag }}_arm64 diff --git a/.github/workflows/publish-migrations.yml b/.github/workflows/publish-migrations.yml index fea3b9b..1abc9f2 100644 --- a/.github/workflows/publish-migrations.yml +++ b/.github/workflows/publish-migrations.yml @@ -9,6 +9,9 @@ jobs: build: runs-on: [self-hosted, linux] timeout-minutes: 15 + permissions: + id-token: write + contents: read steps: - name: Checkout Repo @@ -18,26 +21,26 @@ jobs: run: cat $(ls -1) > ../migration-output.sql working-directory: ${{ github.workspace }}/migrations/db/migrations - - name: Push migration files to S3 - uses: jakejarvis/s3-sync-action@master + - name: configure aws credentials - staging + uses: aws-actions/configure-aws-credentials@v1 with: - args: --delete + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "ap-southeast-1" + + - name: Deploy to S3 staging + shell: bash + run: aws s3 sync migrations/db s3://$AWS_S3_BUCKET/migrations/db --delete env: AWS_S3_BUCKET: ${{ secrets.PG_INIT_SCRIPT_S3_BUCKET_STAGING }} - AWS_ACCESS_KEY_ID: ${{ secrets.PG_INIT_SCRIPT_ACCESS_KEY_ID_STAGING }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.PG_INIT_SCRIPT_SECRET_ACCESS_KEY_STAGING }} - AWS_REGION: ap-southeast-1 - SOURCE_DIR: migrations/db - DEST_DIR: migrations/db - - - name: Push migration files to S3 - uses: jakejarvis/s3-sync-action@master + + - name: configure aws credentials - prod + uses: aws-actions/configure-aws-credentials@v1 with: - args: --delete + role-to-assume: ${{ secrets.PROD_AWS_ROLE }} + 
aws-region: "ap-southeast-1" + + - name: Deploy to S3 prod + shell: bash + run: aws s3 sync migrations/db s3://$AWS_S3_BUCKET/migrations/db --delete env: AWS_S3_BUCKET: ${{ secrets.PG_INIT_SCRIPT_S3_BUCKET_PROD }} - AWS_ACCESS_KEY_ID: ${{ secrets.PG_INIT_SCRIPT_ACCESS_KEY_ID_PROD }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.PG_INIT_SCRIPT_SECRET_ACCESS_KEY_PROD }} - AWS_REGION: ap-southeast-1 - SOURCE_DIR: migrations/db - DEST_DIR: migrations/db diff --git a/.github/workflows/publish-nix-pgupgrade-bin-flake-version.yml b/.github/workflows/publish-nix-pgupgrade-bin-flake-version.yml new file mode 100644 index 0000000..5b985f4 --- /dev/null +++ b/.github/workflows/publish-nix-pgupgrade-bin-flake-version.yml @@ -0,0 +1,101 @@ +name: Publish nix pg_upgrade_bin flake version + +on: + workflow_dispatch: + inputs: + postgresVersion: + description: 'Optional. Postgres version to publish against, i.e. 15.1.1.78' + required: false + +permissions: + id-token: write + +jobs: + publish-staging: + runs-on: ubuntu-latest + + steps: + - name: Checkout Repo + uses: actions/checkout@v3 + + - name: Grab release version + id: process_release_version + run: | + VERSION=$(grep 'postgres-version' common-nix.vars.pkr.hcl | sed -e 's/postgres-version = "\(.*\)"/\1/g') + if [[ "${{ inputs.postgresVersion }}" != "" ]]; then + VERSION=${{ inputs.postgresVersion }} + fi + echo "version=$VERSION" >> "$GITHUB_OUTPUT" + echo "major_version=$(echo $VERSION | cut -d'.' 
-f1)" >> "$GITHUB_OUTPUT" + + - name: Create a tarball containing the latest nix flake version + working-directory: /tmp/ + run: | + mkdir -p ${{ steps.process_release_version.outputs.major_version }} + echo $GITHUB_SHA > ${{ steps.process_release_version.outputs.major_version }}/nix_flake_version + tar -czvf pg_upgrade_bin.tar.gz ${{ steps.process_release_version.outputs.major_version }} + + - name: configure aws credentials - staging + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload pg_upgrade scripts to s3 staging + run: | + aws s3 cp /tmp/pg_upgrade_bin.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz + + - name: Slack Notification on Failure + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: 'gha-failures-notifier' + SLACK_COLOR: 'danger' + SLACK_MESSAGE: 'Publishing pg_upgrade binaries flake version failed' + SLACK_FOOTER: '' + + publish-prod: + runs-on: ubuntu-latest + if: github.ref_name == 'develop' || contains( github.ref, 'release' ) + + steps: + - name: Checkout Repo + uses: actions/checkout@v3 + + - name: Grab release version + id: process_release_version + run: | + VERSION=$(grep 'postgres-version' common-nix.vars.pkr.hcl | sed -e 's/postgres-version = "\(.*\)"/\1/g') + if [[ "${{ inputs.postgresVersion }}" != "" ]]; then + VERSION=${{ inputs.postgresVersion }} + fi + echo "version=$VERSION" >> "$GITHUB_OUTPUT" + echo "major_version=$(echo $VERSION | cut -d'.' 
-f1)" >> "$GITHUB_OUTPUT" + + - name: Create a tarball containing the latest nix flake version + working-directory: /tmp/ + run: | + mkdir -p ${{ steps.process_release_version.outputs.major_version }} + echo $GITHUB_SHA > ${{ steps.process_release_version.outputs.major_version }}/nix_flake_version + tar -czvf pg_upgrade_bin.tar.gz ${{ steps.process_release_version.outputs.major_version }} + + - name: configure aws credentials - prod + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ secrets.PROD_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload pg_upgrade scripts to s3 prod + run: | + aws s3 cp /tmp/pg_upgrade_bin.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz + + - name: Slack Notification on Failure + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: 'gha-failures-notifier' + SLACK_COLOR: 'danger' + SLACK_MESSAGE: 'Publishing pg_upgrade binaries flake version failed' + SLACK_FOOTER: '' diff --git a/.github/workflows/publish-nix-pgupgrade-scripts.yml b/.github/workflows/publish-nix-pgupgrade-scripts.yml new file mode 100644 index 0000000..eb5f7a7 --- /dev/null +++ b/.github/workflows/publish-nix-pgupgrade-scripts.yml @@ -0,0 +1,104 @@ +name: Publish nix pg_upgrade_scripts + +on: + push: + branches: + - develop + - release/* + paths: + - '.github/workflows/publish-nix-pgupgrade-scripts.yml' + - 'common-nix.vars.pkr.hcl' + workflow_dispatch: + inputs: + postgresVersion: + description: 'Optional. Postgres version to publish against, i.e. 
15.1.1.78' + required: false + +permissions: + id-token: write + +jobs: + publish-staging: + runs-on: ubuntu-latest + + steps: + - name: Checkout Repo + uses: actions/checkout@v3 + + - name: Grab release version + id: process_release_version + run: | + VERSION=$(grep 'postgres-version' common-nix.vars.pkr.hcl | sed -e 's/postgres-version = "\(.*\)"/\1/g') + if [[ "${{ inputs.postgresVersion }}" != "" ]]; then + VERSION=${{ inputs.postgresVersion }} + fi + echo "version=$VERSION" >> "$GITHUB_OUTPUT" + + - name: Create a tarball containing pg_upgrade scripts + run: | + mkdir -p /tmp/pg_upgrade_scripts + cp -r ansible/files/admin_api_scripts/pg_upgrade_scripts/* /tmp/pg_upgrade_scripts + tar -czvf /tmp/pg_upgrade_scripts.tar.gz -C /tmp/ pg_upgrade_scripts + + - name: configure aws credentials - staging + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload pg_upgrade scripts to s3 staging + run: | + aws s3 cp /tmp/pg_upgrade_scripts.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/pg_upgrade_scripts.tar.gz + + - name: Slack Notification on Failure + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: 'gha-failures-notifier' + SLACK_COLOR: 'danger' + SLACK_MESSAGE: 'Publishing pg_upgrade scripts failed' + SLACK_FOOTER: '' + + publish-prod: + runs-on: ubuntu-latest + if: github.ref_name == 'develop' || contains( github.ref, 'release' ) + + steps: + - name: Checkout Repo + uses: actions/checkout@v3 + + - name: Grab release version + id: process_release_version + run: | + VERSION=$(grep 'postgres-version' common-nix.vars.pkr.hcl | sed -e 's/postgres-version = "\(.*\)"/\1/g') + if [[ "${{ inputs.postgresVersion }}" != "" ]]; then + VERSION=${{ inputs.postgresVersion }} + fi + echo "version=$VERSION" >> 
"$GITHUB_OUTPUT" + + - name: Create a tarball containing pg_upgrade scripts + run: | + mkdir -p /tmp/pg_upgrade_scripts + cp -r ansible/files/admin_api_scripts/pg_upgrade_scripts/* /tmp/pg_upgrade_scripts + tar -czvf /tmp/pg_upgrade_scripts.tar.gz -C /tmp/ pg_upgrade_scripts + + - name: configure aws credentials - prod + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ secrets.PROD_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload pg_upgrade scripts to s3 prod + run: | + aws s3 cp /tmp/pg_upgrade_scripts.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/pg_upgrade_scripts.tar.gz + + - name: Slack Notification on Failure + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: 'gha-failures-notifier' + SLACK_COLOR: 'danger' + SLACK_MESSAGE: 'Publishing pg_upgrade scripts failed' + SLACK_FOOTER: '' diff --git a/.github/workflows/test-pg-upgrade.yml b/.github/workflows/test-pg-upgrade.yml new file mode 100644 index 0000000..b90791b --- /dev/null +++ b/.github/workflows/test-pg-upgrade.yml @@ -0,0 +1,133 @@ +name: Test pg_upgrade + +on: + push: + branches: + - develop + - pcnc/pg_upgrade-test-extensions + workflow_dispatch: + +permissions: + id-token: write + +jobs: + test: + strategy: + matrix: + base_pg_version: + - 15.1.1.60 + - 15.1.1.70 + runs-on: arm-runner + timeout-minutes: 30 + defaults: + run: + working-directory: ./tests/pg_upgrade + env: + PGPORT: 5478 + PGPASSWORD: postgres + PGDATABASE: postgres + PGUSER: supabase_admin + PGHOST: localhost + PG_MAJOR_VERSION: 15 + IS_CI: true + container: pg_upgrade_test + steps: + - uses: actions/checkout@v3 + + - name: Grab release version + id: process_release_version + working-directory: ./ + run: | + VERSION=$(sed -e 's/postgres-version = "\(.*\)"/\1/g' common.vars.pkr.hcl) + echo "version=$VERSION" >> 
"$GITHUB_OUTPUT" + + - name: configure aws credentials - staging + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Download pg_upgrade_scripts and binaries + run: | + aws s3 cp s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/pg_upgrade_scripts.tar.gz scripts/pg_upgrade_scripts.tar.gz + aws s3 cp s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz scripts/pg_upgrade_bin.tar.gz + + - run: docker context create builders + - uses: docker/setup-buildx-action@v2 + with: + endpoint: builders + driver-opts: image=moby/buildkit:v0.11.6 + buildkitd-flags: --debug + + - name: Start Postgres + run: | + docker rm -f "$container" || true + docker run --name "$container" --env-file .env \ + -v "$(pwd)/scripts:/tmp/upgrade" \ + --entrypoint "/tmp/upgrade/entrypoint.sh" -d \ + -p ${PGPORT}:5432 \ + "supabase/postgres:${{ matrix.base_pg_version }}" + + - name: Install psql + run: | + sudo apt update + sudo apt install -y --no-install-recommends postgresql-client + + - name: Install pg_prove + run: | + sudo apt-get update + sudo apt-get install -y --no-install-recommends perl cpanminus + sudo cpanm -n App::cpanminus + sudo cpanm -n TAP::Parser::SourceHandler::pgTAP + env: + SHELL: /bin/bash + PERL_MM_USE_DEFAULT: 1 + PERL_MM_NONINTERACTIVE: 1 + + - name: Wait for healthy database + run: | + count=0 + while ! docker exec "$container" bash -c "pg_isready"; do + count=$((count + 1)) + if [ $count -ge "$retries" ]; then + echo "Retry $count/$retries exited $exit, no more retries left." 
+ docker logs "$container" + docker rm -f "$container" + exit 1 + fi + done + env: + retries: 20 + + - name: Run migrations + run: | + docker cp ../../migrations/db/migrations "$container:/docker-entrypoint-initdb.d/" + docker exec "$container" bash -c "/docker-entrypoint-initdb.d/migrate.sh > /tmp/migrate.log 2>&1" + + - name: Run initial tests + run: pg_prove "../../migrations/tests/test.sql" + env: + PERL5LIB: /usr/local/lib/perl5 + + - name: Apply pre-upgrade fixtures + run: | + psql -f "./tests/97-enable-extensions.sql" + psql -f "./tests/98-data-fixtures.sql" + psql -f "./tests/99-fixtures.sql" + + - name: Initiate upgrade + run: docker exec "$container" bash -c '/tmp/upgrade/pg_upgrade_scripts/initiate.sh "$PG_MAJOR_VERSION"; exit $?' + + - name: Complete pg_upgrade + run: docker exec pg_upgrade_test bash -c '/tmp/upgrade/pg_upgrade_scripts/complete.sh; exit $?' + + - name: Run post-upgrade tests + run: | + pg_prove tests/01-schema.sql + pg_prove tests/02-data.sql + pg_prove tests/03-settings.sql + + - name: Clean up container + if: ${{ always() }} + continue-on-error: true + run: docker rm -f "$container" || true diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index af249f2..27ae328 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -1,4 +1,4 @@ -name: test +name: Test Database on: push: @@ -9,70 +9,92 @@ on: jobs: build: - if: ${{ github.event_name != 'pull_request' }} - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - - run: docker compose up --abort-on-container-exit --build - - migrate: - runs-on: ubuntu-latest - env: - POSTGRES_PASSWORD: password - strategy: matrix: - supabase-version: ["15.1.0.11"] - timeout-minutes: 10 + include: + - runner: [self-hosted, X64] + arch: amd64 + - runner: arm-runner + arch: arm64 + runs-on: ${{ matrix.runner }} + timeout-minutes: 180 + env: + POSTGRES_PORT: 5478 + POSTGRES_PASSWORD: password + steps: + - uses: actions/checkout@v3 + - id: settings + # Remove 
spaces and quotes to get the raw version string + run: sed -r 's/(\s|\")+//g' common-nix.vars.pkr.hcl >> $GITHUB_OUTPUT - services: - postgres: - image: supabase/postgres:${{ matrix.supabase-version }} - ports: - - 5478:5432 - # Set health checks to wait until postgres has started - options: >- - --health-cmd "pg_isready -U postgres -h localhost" - --health-interval 5s - --health-timeout 5s - --health-retries 10 - env: - POSTGRES_PASSWORD: ${{ env.POSTGRES_PASSWORD }} - volumes: - # Disable migration by removing from entrypoint - - /dev/null:/docker-entrypoint-initdb.d/migrate.sh + - id: args + uses: mikefarah/yq@master + with: + cmd: yq 'to_entries | map(select(.value|type == "!!str")) | map(.key + "=" + .value) | join("\n")' 'ansible/vars.yml' - steps: - - name: checkout - uses: actions/checkout@v3 + - run: docker context create builders + - uses: docker/setup-buildx-action@v3 + with: + endpoint: builders + - uses: docker/build-push-action@v5 + with: + load: true + context: . + file: "Dockerfile-156" + target: production + build-args: | + ${{ steps.args.outputs.result }} + tags: supabase/postgres:${{ steps.settings.outputs.postgres-version }},supabase_postgres + cache-from: | + type=gha,scope=${{ github.ref_name }}-${{ steps.settings.outputs.postgres-version }}-${{ matrix.arch }} + type=gha,scope=${{ github.base_ref }}-${{ steps.settings.outputs.postgres-version }}-${{ matrix.arch }} + cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ steps.settings.outputs.postgres-version }}-${{ matrix.arch }} - - name: install dbmate + - name: Start Postgres + run: | + docker run --rm --pull=never \ + -e POSTGRES_PASSWORD=${{ env.POSTGRES_PASSWORD }} \ + -p ${{ env.POSTGRES_PORT }}:5432 \ + --name supabase_postgres \ + -d supabase/postgres:${{ steps.settings.outputs.postgres-version }} + - name: Install psql run: | - curl -fsSL -o /usr/local/bin/dbmate https://github.com/amacneil/dbmate/releases/latest/download/dbmate-linux-amd64 - sudo chmod +x 
/usr/local/bin/dbmate + sudo apt update + sudo apt install -y --no-install-recommends postgresql-client - - name: migrate schema - run: ./migrations/db/migrate.sh + - name: Install pg_prove + run: sudo cpan -T TAP::Parser::SourceHandler::pgTAP env: - USE_DBMATE: 1 - POSTGRES_PORT: 5478 - POSTGRES_PASSWORD: ${{ env.POSTGRES_PASSWORD }} + SHELL: /bin/bash - - name: install pg_prove - run: sudo cpan TAP::Parser::SourceHandler::pgTAP + - name: Wait for healthy database + run: | + count=0 + until [ "$(docker inspect -f '{{.State.Health.Status}}' "$container")" == "healthy" ]; do + exit=$? + count=$((count + 1)) + if [ $count -ge "$retries" ]; then + echo "Retry $count/$retries exited $exit, no more retries left." + docker stop -t 2 "$container" + return $exit + fi + sleep 1; + done; + echo "$container container is healthy" env: - SHELL: /bin/bash + retries: 20 + container: supabase_postgres - - name: run tests + - name: Run tests run: pg_prove migrations/tests/test.sql env: PGHOST: localhost - PGPORT: 5478 - PGUSER: postgres + PGPORT: ${{ env.POSTGRES_PORT }} + PGDATABASE: postgres + PGUSER: supabase_admin PGPASSWORD: ${{ env.POSTGRES_PASSWORD }} - - name: migrations should be idempotent + - name: Check migrations are idempotent run: | for sql in ./migrations/db/migrations/*.sql; do echo "$0: running $sql" @@ -80,23 +102,11 @@ jobs: done env: PGHOST: localhost - PGPORT: 5478 + PGPORT: ${{ env.POSTGRES_PORT }} PGDATABASE: postgres - PGUSER: supabase_admin + PGUSER: tealbase_admin PGPASSWORD: ${{ env.POSTGRES_PASSWORD }} - - name: run tests - run: pg_prove migrations/tests/test.sql - env: - PGHOST: localhost - PGPORT: 5478 - PGUSER: postgres - PGPASSWORD: ${{ env.POSTGRES_PASSWORD }} - - schema: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - name: verify schema.sql is committed run: | docker compose -f migrations/docker-compose.yaml up db dbmate --abort-on-container-exit diff --git a/.github/workflows/testinfra-nix.yml 
b/.github/workflows/testinfra-nix.yml new file mode 100644 index 0000000..3835a9a --- /dev/null +++ b/.github/workflows/testinfra-nix.yml @@ -0,0 +1,94 @@ +name: Testinfra Integration Tests Nix + +on: + pull_request: + workflow_dispatch: + +jobs: + test-ami-nix: + strategy: + fail-fast: false + matrix: + include: + - runner: arm-runner + arch: arm64 + ubuntu_release: focal + ubuntu_version: 20.04 + mcpu: neoverse-n1 + runs-on: ${{ matrix.runner }} + timeout-minutes: 150 + permissions: + contents: write + packages: write + id-token: write + + steps: + - name: Checkout Repo + uses: actions/checkout@v4 + + - id: args + uses: mikefarah/yq@master + with: + cmd: yq 'to_entries | map(select(.value|type == "!!str")) | map(.key + "=" + .value) | join("\n")' 'ansible/vars.yml' + + - run: docker context create builders + + - uses: docker/setup-buildx-action@v3 + with: + endpoint: builders + + - name: Generate random string + id: random + run: echo "random_string=$(openssl rand -hex 8)" >> $GITHUB_OUTPUT + + - name: Build AMI stage 1 + run: | + packer init amazon-arm64-nix.pkr.hcl + GIT_SHA=${{github.sha}} + packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "ansible_arguments=" -var "postgres-version=${{ steps.random.outputs.random_string }}" -var "region=ap-southeast-1" -var 'ami_regions=["ap-southeast-1"]' -var "force-deregister=true" amazon-arm64-nix.pkr.hcl + + - name: Build AMI stage 2 + run: | + packer init stage2-nix-psql.pkr.hcl + GIT_SHA=${{github.sha}} + packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "postgres-version=${{ steps.random.outputs.random_string }}" -var "region=ap-southeast-1" -var 'ami_regions=["ap-southeast-1"]' -var "force-deregister=true" -var "git_sha=${GITHUB_SHA}" stage2-nix-psql.pkr.hcl + + - name: 
Run tests + timeout-minutes: 10 + env: + AMI_NAME: "supabase-postgres-${{ steps.random.outputs.random_string }}" + run: | + # TODO: use poetry for pkg mgmt + pip3 install boto3 boto3-stubs[essential] docker ec2instanceconnectcli pytest pytest-testinfra[paramiko,docker] requests + pytest -vv -s testinfra/test_ami_nix.py + + - name: Cleanup resources on build cancellation + if: ${{ cancelled() }} + run: | + aws ec2 --region ap-southeast-1 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -n 1 -I {} aws ec2 terminate-instances --region ap-southeast-1 --instance-ids {} + + - name: Cleanup resources on build cancellation + if: ${{ always() }} + run: | + aws ec2 --region ap-southeast-1 describe-instances --filters "Name=tag:testinfra-run-id,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -n 1 -I {} aws ec2 terminate-instances --region ap-southeast-1 --instance-ids {} || true + + - name: Cleanup AMIs + if: always() + run: | + # Define AMI name patterns + STAGE1_AMI_NAME="supabase-postgres-ci-ami-test-stage-1" + STAGE2_AMI_NAME="${{ steps.random.outputs.random_string }}" + + # Function to deregister AMIs by name pattern + deregister_ami_by_name() { + local ami_name_pattern=$1 + local ami_ids=$(aws ec2 describe-images --region ap-southeast-1 --owners self --filters "Name=name,Values=${ami_name_pattern}" --query 'Images[*].ImageId' --output text) + for ami_id in $ami_ids; do + echo "Deregistering AMI: $ami_id" + aws ec2 deregister-image --region ap-southeast-1 --image-id $ami_id + done + } + + # Deregister AMIs + deregister_ami_by_name "$STAGE1_AMI_NAME" + deregister_ami_by_name "$STAGE2_AMI_NAME" \ No newline at end of file diff --git a/.gitignore b/.gitignore index 4dafd3b..a6950d2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,24 @@ .DS_Store .python-version +.mise.toml venv/ *.swp -docker/cache/ \ No newline at end of 
file +docker/cache/ + +ansible/image-manifest*.json +testinfra-aio-container-logs.log + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +#nix related +result* +.env-local +.history + + +#IDE +.idea/ +.vscode/ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..7c25e5a --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,46 @@ +# Welcome to tealbase Postgres contributing guide + +## Adding a new extension + +Extensions can either be built from source or installed through a debian package. In general, you want to add the installation commands for your extension to the [Dockerfile](Dockerfile) following the steps below. + +1. Create a [build stage](Dockerfile#L777) named after your extension. +2. Add build args that specify the extension's [release version](Dockerfile#L37). +3. If your extension is published as a package, download it to `/tmp/.deb` using the [ADD command](Dockerfile#L705). +4. If you need to build the extensions from source, use [checkinstall](Dockerfile#L791) to create a `/tmp/.deb` package. +5. Copy your extension's package from build stage to [extensions stage](Dockerfile#L851). + +Here's a minimal example: + +```dockerfile +ARG pg_graphql_release=1.1.0 + +#################### +# 19-pg_graphql.yml +#################### +FROM base as pg_graphql +# Download package archive +ARG pg_graphql_release +ADD "https://github.com/tealbase/pg_graphql/releases/download/v${pg_graphql_release}/pg_graphql-v${pg_graphql_release}-pg${postgresql_major}-${TARGETARCH}-linux-gnu.deb" \ + /tmp/pg_graphql.deb + +#################### +# Collect extension packages +#################### +FROM scratch as extensions +COPY --from=pg_graphql /tmp/*.deb /tmp/ +``` + +Using this process maximises the effectiveness of Docker layer caching, which significantly speeds up our CI builds. + +## Testing an extension + +Extensions can be tested automatically using pgTAP. 
Start by creating a new file in [migrations/tests/extensions](migrations/tests/extensions). For example: + +```sql +BEGIN; +create extension if not exists wrappers with schema "extensions"; +ROLLBACK; +``` + +This test will be run as part of CI to check that your extension can be enabled successfully from the final Docker image. diff --git a/Dockerfile b/Dockerfile index 4ebb436..fdd7c18 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,39 +1,998 @@ -ARG VERSION=15.1 +# syntax=docker/dockerfile:1.6 +ARG postgresql_major=15 +ARG postgresql_release=${postgresql_major}.1 -FROM postgres:$VERSION +# Bump default build arg to build a package from source +# Bump vars.yml to specify runtime package version +ARG sfcgal_release=1.3.10 +ARG postgis_release=3.3.2 +ARG pgrouting_release=3.4.1 +ARG pgtap_release=1.2.0 +ARG pg_cron_release=1.6.2 +ARG pgaudit_release=1.7.0 +ARG pgjwt_release=9742dab1b2f297ad3811120db7b21451bca2d3c9 +ARG pgsql_http_release=1.5.0 +ARG plpgsql_check_release=2.2.5 +ARG pg_safeupdate_release=1.4 +ARG timescaledb_release=2.9.1 +ARG wal2json_release=2_5 +ARG pljava_release=1.6.4 +ARG plv8_release=3.1.5 +ARG pg_plan_filter_release=5081a7b5cb890876e67d8e7486b6a64c38c9a492 +ARG pg_net_release=0.9.2 +ARG rum_release=1.3.13 +ARG pg_hashids_release=cd0e1b31d52b394a0df64079406a14a4f7387cd6 +ARG libsodium_release=1.0.18 +ARG pgsodium_release=3.1.6 +ARG pg_graphql_release=1.5.1 +ARG pg_stat_monitor_release=1.1.1 +ARG pg_jsonschema_release=0.1.4 +ARG pg_repack_release=1.4.8 +ARG vault_release=0.2.8 +ARG groonga_release=12.0.8 +ARG pgroonga_release=2.4.0 +ARG wrappers_release=0.4.1 +ARG hypopg_release=1.3.1 +ARG pgvector_release=0.4.0 +ARG pg_tle_release=1.3.2 +ARG index_advisor_release=0.2.0 +ARG supautils_release=2.5.0 +ARG wal_g_release=2.0.1 -COPY ansible/ /tmp/ansible/ +#################### +# Setup Postgres PPA +#################### +FROM ubuntu:focal as ppa +# Redeclare args for use in subsequent stages +ARG postgresql_major +RUN apt-get update && apt-get 
install -y --no-install-recommends \ + gnupg \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* +# Add Postgres PPA +# In the off-chance that the key in the repository expires, it can be replaced by running the following in the repository's root: +# gpg --keyserver hkps://keyserver.ubuntu.com --recv-keys $NEW_POSTGRESQL_GPG_KEY +# gpg --export --armor $NEW_POSTGRESQL_GPG_KEY > postgresql.gpg.key +COPY postgresql.gpg.key /tmp/postgresql.gpg.key +RUN apt-key add /tmp/postgresql.gpg.key && \ + echo "deb https://apt-archive.postgresql.org/pub/repos/apt focal-pgdg-archive main" > /etc/apt/sources.list.d/pgdg.list -# needed for plv8 Makefile selection -ENV DOCKER true +#################### +# Download pre-built postgres +#################### +FROM ppa as pg +ARG postgresql_release +# Download .deb packages +RUN apt-get update && apt-get install -y --no-install-recommends --download-only \ + postgresql-${postgresql_major}=${postgresql_release}-1.pgdg20.04+1 \ + && rm -rf /var/lib/apt/lists/* +RUN mv /var/cache/apt/archives/*.deb /tmp/ + +FROM ppa as pg-dev +ARG postgresql_release +# Download .deb packages +RUN apt-get update && apt-get install -y --no-install-recommends --download-only \ + postgresql-server-dev-${postgresql_major}=${postgresql_release}-1.pgdg20.04+1 \ + && rm -rf /var/lib/apt/lists/* +RUN mv /var/cache/apt/archives/*.deb /tmp/ + +#################### +# Install postgres +#################### +FROM ubuntu:focal as base +# Redeclare args for use in subsequent stages +ARG TARGETARCH +ARG postgresql_major + +# Install postgres +COPY --from=pg /tmp /tmp +# Ref: https://github.com/docker-library/postgres/blob/master/15/bullseye/Dockerfile#L91 +ENV DEBIAN_FRONTEND=noninteractive +RUN set -ex; \ + export PYTHONDONTWRITEBYTECODE=1; \ + apt-get update; \ + apt-get install -y --no-install-recommends /tmp/postgresql-common_*.deb /tmp/postgresql-client-common_*.deb; \ + sed -ri 's/#(create_main_cluster) .*$/\1 = false/' /etc/postgresql-common/createcluster.conf; \ 
+ apt-get install -y --no-install-recommends /tmp/*.deb; \ + rm -rf /var/lib/apt/lists/* /tmp/*; \ + find /usr -name '*.pyc' -type f -exec bash -c 'for pyc; do dpkg -S "$pyc" &> /dev/null || rm -vf "$pyc"; done' -- '{}' + + +ENV PATH=$PATH:/usr/lib/postgresql/${postgresql_major}/bin +ENV PGDATA=/var/lib/postgresql/data + +# Make the "en_US.UTF-8" locale so postgres will be utf-8 enabled by default +RUN localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 +ENV LANG=en_US.UTF-8 +ENV LC_CTYPE=C.UTF-8 +ENV LC_COLLATE=C.UTF-8 + +FROM base as builder +# Install build dependencies +COPY --from=pg-dev /tmp /tmp +RUN apt-get update && \ + rm -f /tmp/libssl-dev* && \ + apt-get install -y --no-install-recommends \ + /tmp/*.deb \ + build-essential \ + checkinstall \ + cmake \ + && rm -rf /var/lib/apt/lists/* /tmp/* + +FROM builder as ccache +# Cache large build artifacts +RUN apt-get update && apt-get install -y --no-install-recommends \ + clang \ + ccache \ + && rm -rf /var/lib/apt/lists/* ENV CCACHE_DIR=/ccache ENV PATH=/usr/lib/ccache:$PATH -ENV DEBIAN_FRONTEND noninteractive +# Used to update ccache +ARG CACHE_EPOCH -RUN apt update && \ - apt install -y ansible sudo git ccache && \ - apt -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" upgrade +#################### +# 01-postgis.yml +#################### +FROM ccache as sfcgal +# Download and extract +ARG sfcgal_release +ARG sfcgal_release_checksum +ADD --checksum=${sfcgal_release_checksum} \ + "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/sfcgal/SFCGAL-v${sfcgal_release}.tar.gz" \ + /tmp/sfcgal.tar.gz +RUN tar -xvf /tmp/sfcgal.tar.gz -C /tmp --one-top-level --strip-components 1 && \ + rm -rf /tmp/sfcgal.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + libcgal-dev \ + libboost-serialization1.71-dev \ + libmpfr-dev \ + libgmp-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR 
/tmp/sfcgal/build +RUN cmake .. +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=yes --fstrans=no --backup=no --pakdir=/tmp --pkgname=sfcgal --pkgversion=${sfcgal_release} --requires=libgmpxx4ldbl,libboost-serialization1.71.0,libmpfr6 --nodoc -RUN --mount=type=bind,source=docker/cache,target=/ccache,rw \ - ccache -s && \ - ansible-galaxy collection install community.general && \ - cd /tmp/ansible && \ - ansible-playbook -e '{"async_mode": false}' playbook-docker.yml && \ - apt -y autoremove && \ - apt -y autoclean && \ - ccache -s && \ - apt install -y default-jdk-headless locales && \ - sed -i '/en_US.UTF-8/s/^# //g' /etc/locale.gen && \ - locale-gen && \ - rm -rf /tmp/* /var/lib/apt/lists/* /var/tmp/* /usr/lib/python3/dist-packages/ansible_collections/* +FROM sfcgal as postgis-source +# Download and extract +ARG postgis_release +ARG postgis_release_checksum +ADD --checksum=${postgis_release_checksum} \ + "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/postgis-${postgis_release}.tar.gz" \ + /tmp/postgis.tar.gz +RUN tar -xvf /tmp/postgis.tar.gz -C /tmp && \ + rm -rf /tmp/postgis.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + protobuf-c-compiler \ + libgeos-dev \ + libproj-dev \ + libgdal-dev \ + libjson-c-dev \ + libxml2-dev \ + libprotobuf-c-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/postgis-${postgis_release} +RUN ./configure --with-sfcgal +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --requires=libgeos-c1v5,libproj15,libjson-c4,libprotobuf-c1,libgdal26 --nodoc + +FROM ppa as postgis +# Latest available is 3.3.2 +ARG postgis_release +# Download pre-built packages +RUN apt-get update && apt-get install 
-y --no-install-recommends --download-only \ + postgresql-${postgresql_major}-postgis-3=${postgis_release}+dfsg-1.pgdg20.04+1 \ + && rm -rf /var/lib/apt/lists/* +RUN mv /var/cache/apt/archives/*.deb /tmp/ + +#################### +# 02-pgrouting.yml +#################### +FROM ccache as pgrouting-source +# Download and extract +ARG pgrouting_release +ARG pgrouting_release_checksum +ADD --checksum=${pgrouting_release_checksum} \ + "https://github.com/pgRouting/pgrouting/releases/download/v${pgrouting_release}/pgrouting-${pgrouting_release}.tar.gz" \ + /tmp/pgrouting.tar.gz +RUN tar -xvf /tmp/pgrouting.tar.gz -C /tmp && \ + rm -rf /tmp/pgrouting.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + libboost-all-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/pgrouting-${pgrouting_release}/build +RUN cmake -DBUILD_HTML=OFF -DBUILD_DOXY=OFF .. +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgname=pgrouting --pkgversion=${pgrouting_release} --nodoc + +FROM ppa as pgrouting +ARG pgrouting_release +# Download pre-built packages +RUN apt-get update && apt-get install -y --no-install-recommends --download-only \ + postgresql-${postgresql_major}-pgrouting=${pgrouting_release}-1.pgdg20.04+1 \ + && rm -rf /var/lib/apt/lists/* +RUN mv /var/cache/apt/archives/*.deb /tmp/ + +#################### +# 03-pgtap.yml +#################### +FROM builder as pgtap-source +# Download and extract +ARG pgtap_release +ARG pgtap_release_checksum +ADD --checksum=${pgtap_release_checksum} \ + "https://github.com/theory/pgtap/archive/v${pgtap_release}.tar.gz" \ + /tmp/pgtap.tar.gz +RUN tar -xvf /tmp/pgtap.tar.gz -C /tmp && \ + rm -rf /tmp/pgtap.tar.gz +# Build from source +WORKDIR /tmp/pgtap-${pgtap_release} +RUN make -j$(nproc) +# Create debian package +RUN 
checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### +# 04-pg_cron.yml +#################### +FROM ccache as pg_cron-source +# Download and extract +ARG pg_cron_release +ARG pg_cron_release_checksum +ADD --checksum=${pg_cron_release_checksum} \ + "https://github.com/citusdata/pg_cron/archive/refs/tags/v${pg_cron_release}.tar.gz" \ + /tmp/pg_cron.tar.gz +RUN tar -xvf /tmp/pg_cron.tar.gz -C /tmp && \ + rm -rf /tmp/pg_cron.tar.gz +# Build from source +WORKDIR /tmp/pg_cron-${pg_cron_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### +# 05-pgaudit.yml +#################### +FROM ccache as pgaudit-source +# Download and extract +ARG pgaudit_release +ARG pgaudit_release_checksum +ADD --checksum=${pgaudit_release_checksum} \ + "https://github.com/pgaudit/pgaudit/archive/refs/tags/${pgaudit_release}.tar.gz" \ + /tmp/pgaudit.tar.gz +RUN tar -xvf /tmp/pgaudit.tar.gz -C /tmp && \ + rm -rf /tmp/pgaudit.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + libssl-dev \ + libkrb5-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/pgaudit-${pgaudit_release} +ENV USE_PGXS=1 +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### +# 06-pgjwt.yml +#################### +FROM builder as pgjwt-source +# Download and extract +ARG pgjwt_release +ADD "https://github.com/michelp/pgjwt.git#${pgjwt_release}" \ + /tmp/pgjwt-${pgjwt_release} +# Build from source +WORKDIR /tmp/pgjwt-${pgjwt_release} +RUN make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no 
--pakdir=/tmp --pkgversion=1 --nodoc + +#################### +# 07-pgsql-http.yml +#################### +FROM ccache as pgsql-http-source +# Download and extract +ARG pgsql_http_release +ARG pgsql_http_release_checksum +ADD --checksum=${pgsql_http_release_checksum} \ + "https://github.com/pramsey/pgsql-http/archive/refs/tags/v${pgsql_http_release}.tar.gz" \ + /tmp/pgsql-http.tar.gz +RUN tar -xvf /tmp/pgsql-http.tar.gz -C /tmp && \ + rm -rf /tmp/pgsql-http.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + libcurl4-gnutls-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/pgsql-http-${pgsql_http_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --requires=libcurl3-gnutls --nodoc + +#################### +# 08-plpgsql_check.yml +#################### +FROM ccache as plpgsql_check-source +# Download and extract +ARG plpgsql_check_release +ARG plpgsql_check_release_checksum +ADD --checksum=${plpgsql_check_release_checksum} \ + "https://github.com/okbob/plpgsql_check/archive/refs/tags/v${plpgsql_check_release}.tar.gz" \ + /tmp/plpgsql_check.tar.gz +RUN tar -xvf /tmp/plpgsql_check.tar.gz -C /tmp && \ + rm -rf /tmp/plpgsql_check.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + libicu-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/plpgsql_check-${plpgsql_check_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### +# 09-pg-safeupdate.yml +#################### +FROM ccache as pg-safeupdate-source +# Download and extract +ARG pg_safeupdate_release +ARG 
pg_safeupdate_release_checksum +ADD --checksum=${pg_safeupdate_release_checksum} \ + "https://github.com/eradman/pg-safeupdate/archive/refs/tags/${pg_safeupdate_release}.tar.gz" \ + /tmp/pg-safeupdate.tar.gz +RUN tar -xvf /tmp/pg-safeupdate.tar.gz -C /tmp && \ + rm -rf /tmp/pg-safeupdate.tar.gz +# Build from source +WORKDIR /tmp/pg-safeupdate-${pg_safeupdate_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### +# 10-timescaledb.yml +#################### +FROM ccache as timescaledb-source +# Download and extract +ARG timescaledb_release +ARG timescaledb_release_checksum +ADD --checksum=${timescaledb_release_checksum} \ + "https://github.com/timescale/timescaledb/archive/refs/tags/${timescaledb_release}.tar.gz" \ + /tmp/timescaledb.tar.gz +RUN tar -xvf /tmp/timescaledb.tar.gz -C /tmp && \ + rm -rf /tmp/timescaledb.tar.gz +# Build from source +WORKDIR /tmp/timescaledb-${timescaledb_release}/build +RUN cmake -DAPACHE_ONLY=1 .. 
+RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgname=timescaledb --pkgversion=${timescaledb_release} --nodoc + +#################### +# 11-wal2json.yml +#################### +FROM ccache as wal2json-source +# Download and extract +ARG wal2json_release +ARG wal2json_release_checksum +ADD --checksum=${wal2json_release_checksum} \ + "https://github.com/eulerto/wal2json/archive/refs/tags/wal2json_${wal2json_release}.tar.gz" \ + /tmp/wal2json.tar.gz +RUN tar -xvf /tmp/wal2json.tar.gz -C /tmp --one-top-level --strip-components 1 && \ + rm -rf /tmp/wal2json.tar.gz +# Build from source +WORKDIR /tmp/wal2json +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +ENV version=${wal2json_release} +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgversion="\${version/_/.}" --nodoc + +#################### +# 12-pljava.yml +#################### +FROM builder as pljava-source +# Download and extract +# TODO: revert to using main repo after PG15 support is merged: https://github.com/tada/pljava/pull/413 +ARG pljava_release=master +ARG pljava_release_checksum=sha256:e99b1c52f7b57f64c8986fe6ea4a6cc09d78e779c1643db060d0ac66c93be8b6 +ADD --checksum=${pljava_release_checksum} \ + "https://github.com/tealbase/pljava/archive/refs/heads/${pljava_release}.tar.gz" \ + /tmp/pljava.tar.gz +RUN tar -xvf /tmp/pljava.tar.gz -C /tmp && \ + rm -rf /tmp/pljava.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + maven \ + default-jdk \ + libssl-dev \ + libkrb5-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/pljava-${pljava_release} +RUN mvn -T 1C clean install -Dmaven.test.skip -DskipTests -Dmaven.javadoc.skip=true +# Create debian package +RUN cp 
pljava-packaging/target/pljava-pg${postgresql_major}.jar /tmp/ + +FROM base as pljava +# Download pre-built packages +RUN apt-get update && apt-get install -y --no-install-recommends --download-only \ + default-jdk-headless \ + postgresql-${postgresql_major}-pljava \ + && rm -rf /var/lib/apt/lists/* +RUN mv /var/cache/apt/archives/*.deb /tmp/ + +#################### +# 13-plv8.yml +#################### +FROM ccache as plv8-source +# Download and extract +ARG plv8_release +ARG plv8_release_checksum +ADD --checksum=${plv8_release_checksum} \ + "https://github.com/tealbase/plv8/archive/refs/tags/v${plv8_release}.tar.gz" \ + /tmp/plv8.tar.gz +RUN tar -xvf /tmp/plv8.tar.gz -C /tmp && \ + rm -rf /tmp/plv8.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + ca-certificates \ + pkg-config \ + ninja-build \ + git \ + libtinfo5 \ + libstdc++-10-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/plv8-${plv8_release} +ENV DOCKER=1 +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +FROM scratch as plv8-deb +COPY --from=plv8-source /tmp/*.deb /tmp/ + +FROM ghcr.io/tealbase/plv8:${plv8_release}-pg${postgresql_major} as plv8 + +#################### +# 14-pg_plan_filter.yml +#################### +FROM ccache as pg_plan_filter-source +# Download and extract +ARG pg_plan_filter_release +ADD "https://github.com/pgexperts/pg_plan_filter.git#${pg_plan_filter_release}" \ + /tmp/pg_plan_filter-${pg_plan_filter_release} +# Build from source +WORKDIR /tmp/pg_plan_filter-${pg_plan_filter_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgversion=1 --nodoc + +#################### +# 15-pg_net.yml 
+#################### +FROM ccache as pg_net-source +# Download and extract +ARG pg_net_release +ARG pg_net_release_checksum +ADD --checksum=${pg_net_release_checksum} \ + "https://github.com/tealbase/pg_net/archive/refs/tags/v${pg_net_release}.tar.gz" \ + /tmp/pg_net.tar.gz +RUN tar -xvf /tmp/pg_net.tar.gz -C /tmp && \ + rm -rf /tmp/pg_net.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + libcurl4-gnutls-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/pg_net-${pg_net_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --requires=libcurl3-gnutls --nodoc + +#################### +# 16-rum.yml +#################### +FROM ccache as rum-source +# Download and extract +ARG rum_release +ARG rum_release_checksum +ADD --checksum=${rum_release_checksum} \ + "https://github.com/postgrespro/rum/archive/refs/tags/${rum_release}.tar.gz" \ + /tmp/rum.tar.gz +RUN tar -xvf /tmp/rum.tar.gz -C /tmp && \ + rm -rf /tmp/rum.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + systemtap-sdt-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/rum-${rum_release} +ENV USE_PGXS=1 +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### +# 17-pg_hashids.yml +#################### +FROM ccache as pg_hashids-source +# Download and extract +ARG pg_hashids_release +ADD "https://github.com/iCyberon/pg_hashids.git#${pg_hashids_release}" \ + /tmp/pg_hashids-${pg_hashids_release} +# Build from source +WORKDIR /tmp/pg_hashids-${pg_hashids_release} +RUN make -j$(nproc) +# Create debian package +RUN checkinstall -D 
--install=no --fstrans=no --backup=no --pakdir=/tmp --pkgversion=1 --nodoc + +#################### +# 18-pgsodium.yml +#################### +FROM ccache as libsodium +# Download and extract +ARG libsodium_release +ARG libsodium_release_checksum +ADD --checksum=${libsodium_release_checksum} \ + "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/libsodium/libsodium-${libsodium_release}.tar.gz" \ + /tmp/libsodium.tar.gz +RUN tar -xvf /tmp/libsodium.tar.gz -C /tmp && \ + rm -rf /tmp/libsodium.tar.gz +# Build from source +WORKDIR /tmp/libsodium-${libsodium_release} +RUN ./configure +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +RUN make install -ENV LANGUAGE en_US.UTF-8 -ENV LANG en_US.UTF-8 -ENV LC_ALL en_US.UTF-8 +FROM libsodium as pgsodium-source +# Download and extract +ARG pgsodium_release +ARG pgsodium_release_checksum +ADD --checksum=${pgsodium_release_checksum} \ + "https://github.com/michelp/pgsodium/archive/refs/tags/v${pgsodium_release}.tar.gz" \ + /tmp/pgsodium.tar.gz +RUN tar -xvf /tmp/pgsodium.tar.gz -C /tmp && \ + rm -rf /tmp/pgsodium.tar.gz +# Build from source +WORKDIR /tmp/pgsodium-${pgsodium_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --requires=libsodium23 --nodoc -COPY ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql /docker-entrypoint-initdb.d/00-schema.sql -COPY ansible/files/stat_extension.sql /docker-entrypoint-initdb.d/01-extension.sql -# COPY ansible/files/sodium_extension.sql /docker-entrypoint-initdb.d/02-sodium-extension.sql -COPY migrations/db/ /docker-entrypoint-initdb.d/ +#################### +# 19-pg_graphql.yml +#################### +FROM base as pg_graphql +# Download package archive +ARG pg_graphql_release +ADD 
"https://github.com/tealbase/pg_graphql/releases/download/v${pg_graphql_release}/pg_graphql-v${pg_graphql_release}-pg${postgresql_major}-${TARGETARCH}-linux-gnu.deb" \ + /tmp/pg_graphql.deb -CMD ["postgres", "-c", "config_file=/etc/postgresql/postgresql.conf"] +#################### +# 20-pg_stat_monitor.yml +#################### +FROM ccache as pg_stat_monitor-source +# Download and extract +ARG pg_stat_monitor_release +ARG pg_stat_monitor_release_checksum +ADD --checksum=${pg_stat_monitor_release_checksum} \ + "https://github.com/percona/pg_stat_monitor/archive/refs/tags/${pg_stat_monitor_release}.tar.gz" \ + /tmp/pg_stat_monitor.tar.gz +RUN tar -xvf /tmp/pg_stat_monitor.tar.gz -C /tmp && \ + rm -rf /tmp/pg_stat_monitor.tar.gz +# Build from source +WORKDIR /tmp/pg_stat_monitor-${pg_stat_monitor_release} +ENV USE_PGXS=1 +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### +# 22-pg_jsonschema.yml +#################### +FROM base as pg_jsonschema +# Download package archive +ARG pg_jsonschema_release +ADD "https://github.com/tealbase/pg_jsonschema/releases/download/v${pg_jsonschema_release}/pg_jsonschema-v${pg_jsonschema_release}-pg${postgresql_major}-${TARGETARCH}-linux-gnu.deb" \ + /tmp/pg_jsonschema.deb + +#################### +# 23-vault.yml +#################### +FROM builder as vault-source +# Download and extract +ARG vault_release +ARG vault_release_checksum +ADD --checksum=${vault_release_checksum} \ + "https://github.com/tealbase/vault/archive/refs/tags/v${vault_release}.tar.gz" \ + /tmp/vault.tar.gz +RUN tar -xvf /tmp/vault.tar.gz -C /tmp && \ + rm -rf /tmp/vault.tar.gz +# Build from source +WORKDIR /tmp/vault-${vault_release} +RUN make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### +# 
24-pgroonga.yml +#################### +FROM ccache as groonga +# Download and extract +ARG groonga_release +ARG groonga_release_checksum +ADD --checksum=${groonga_release_checksum} \ + "https://packages.groonga.org/source/groonga/groonga-${groonga_release}.tar.gz" \ + /tmp/groonga.tar.gz +RUN tar -xvf /tmp/groonga.tar.gz -C /tmp && \ + rm -rf /tmp/groonga.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + zlib1g-dev \ + liblz4-dev \ + libzstd-dev \ + libmsgpack-dev \ + libzmq3-dev \ + libevent-dev \ + libmecab-dev \ + rapidjson-dev \ + pkg-config \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/groonga-${groonga_release} +RUN ./configure +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=yes --fstrans=no --backup=no --pakdir=/tmp --requires=zlib1g,liblz4-1,libzstd1,libmsgpackc2,libzmq5,libevent-2.1-7,libmecab2 --nodoc + +FROM groonga as pgroonga-source +# Download and extract +ARG pgroonga_release +ARG pgroonga_release_checksum +ADD --checksum=${pgroonga_release_checksum} \ + "https://packages.groonga.org/source/pgroonga/pgroonga-${pgroonga_release}.tar.gz" \ + /tmp/pgroonga.tar.gz +RUN tar -xvf /tmp/pgroonga.tar.gz -C /tmp && \ + rm -rf /tmp/pgroonga.tar.gz +# Build from source +WORKDIR /tmp/pgroonga-${pgroonga_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --requires=mecab-naist-jdic --nodoc + +FROM scratch as pgroonga-deb +COPY --from=pgroonga-source /tmp/*.deb /tmp/ + +FROM base as pgroonga +# Latest available is 3.0.3 +ARG pgroonga_release +# Download pre-built packages +ADD "https://packages.groonga.org/ubuntu/groonga-apt-source-latest-focal.deb" /tmp/source.deb +RUN apt-get update && apt-get install -y 
--no-install-recommends \ + ca-certificates \ + /tmp/source.deb \ + && rm -rf /var/lib/apt/lists/* +RUN rm /tmp/source.deb +RUN apt-get update && apt-get install -y --no-install-recommends --download-only \ + postgresql-${postgresql_major}-pgdg-pgroonga=${pgroonga_release}-1 \ + && rm -rf /var/lib/apt/lists/* +RUN mv /var/cache/apt/archives/*.deb /tmp/ + +#################### +# 25-wrappers.yml +#################### +FROM base as wrappers +# Download package archive +ARG wrappers_release +ADD "https://github.com/tealbase/wrappers/releases/download/v${wrappers_release}/wrappers-v${wrappers_release}-pg${postgresql_major}-${TARGETARCH}-linux-gnu.deb" \ + /tmp/wrappers.deb + +#################### +# 26-hypopg.yml +#################### +FROM ccache as hypopg-source +# Download and extract +ARG hypopg_release +ARG hypopg_release_checksum +ADD --checksum=${hypopg_release_checksum} \ + "https://github.com/HypoPG/hypopg/archive/refs/tags/${hypopg_release}.tar.gz" \ + /tmp/hypopg.tar.gz +RUN tar -xvf /tmp/hypopg.tar.gz -C /tmp && \ + rm -rf /tmp/hypopg.tar.gz +# Build from source +WORKDIR /tmp/hypopg-${hypopg_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### + # 27-pg_repack.yml + #################### + FROM ccache as pg_repack-source + ARG pg_repack_release + ARG pg_repack_release_checksum + ADD --checksum=${pg_repack_release_checksum} \ + "https://github.com/reorg/pg_repack/archive/refs/tags/ver_${pg_repack_release}.tar.gz" \ + /tmp/pg_repack.tar.gz + RUN tar -xvf /tmp/pg_repack.tar.gz -C /tmp && \ + rm -rf /tmp/pg_repack.tar.gz + # Install build dependencies + RUN apt-get update && apt-get install -y --no-install-recommends \ + liblz4-dev \ + libz-dev \ + libzstd-dev \ + libreadline-dev \ + && rm -rf /var/lib/apt/lists/* + # Build from source + WORKDIR 
/tmp/pg_repack-ver_${pg_repack_release} + ENV USE_PGXS=1 + RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) + # Create debian package + RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgversion=${pg_repack_release} --nodoc + +#################### +# 28-pgvector.yml +#################### +FROM ccache as pgvector-source +ARG pgvector_release +ARG pgvector_release_checksum +ADD --checksum=${pgvector_release_checksum} \ + "https://github.com/pgvector/pgvector/archive/refs/tags/v${pgvector_release}.tar.gz" \ + /tmp/pgvector.tar.gz +RUN tar -xvf /tmp/pgvector.tar.gz -C /tmp && \ + rm -rf /tmp/pgvector.tar.gz +# Build from source +WORKDIR /tmp/pgvector-${pgvector_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### +# 29-pg_tle.yml +#################### +FROM ccache as pg_tle-source +ARG pg_tle_release +ARG pg_tle_release_checksum +ADD --checksum=${pg_tle_release_checksum} \ + "https://github.com/aws/pg_tle/archive/refs/tags/v${pg_tle_release}.tar.gz" \ + /tmp/pg_tle.tar.gz +RUN tar -xvf /tmp/pg_tle.tar.gz -C /tmp && \ + rm -rf /tmp/pg_tle.tar.gz +RUN apt-get update && apt-get install -y --no-install-recommends \ + flex \ + libkrb5-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/pg_tle-${pg_tle_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +###################### +# 30-index_advisor.yml +###################### +FROM ccache as index_advisor +ARG index_advisor_release +ARG index_advisor_release_checksum +ADD --checksum=${index_advisor_release_checksum} \ + 
"https://github.com/olirice/index_advisor/archive/refs/tags/v${index_advisor_release}.tar.gz" \ + /tmp/index_advisor.tar.gz +RUN tar -xvf /tmp/index_advisor.tar.gz -C /tmp && \ + rm -rf /tmp/index_advisor.tar.gz +# Build from source +WORKDIR /tmp/index_advisor-${index_advisor_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### +# internal/supautils.yml +#################### +FROM base as supautils +# Download package archive +ARG supautils_release +# Define checksums for different architectures +ARG supautils_release_arm64_deb_checksum +ARG supautils_release_amd64_deb_checksum + +RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/* + +# Set up a script to download the correct package +RUN echo '#!/bin/sh' > /tmp/download_supautils.sh && \ + echo 'set -e' >> /tmp/download_supautils.sh && \ + echo 'if [ "$TARGETARCH" = "amd64" ]; then' >> /tmp/download_supautils.sh && \ + echo ' CHECKSUM="${supautils_release_amd64_deb_checksum}"' >> /tmp/download_supautils.sh && \ + echo ' ARCH="amd64"' >> /tmp/download_supautils.sh && \ + echo 'elif [ "$TARGETARCH" = "arm64" ]; then' >> /tmp/download_supautils.sh && \ + echo ' CHECKSUM="${supautils_release_arm64_deb_checksum}"' >> /tmp/download_supautils.sh && \ + echo ' ARCH="arm64"' >> /tmp/download_supautils.sh && \ + echo 'else' >> /tmp/download_supautils.sh && \ + echo ' echo "Unsupported architecture: $TARGETARCH" >&2' >> /tmp/download_supautils.sh && \ + echo ' exit 1' >> /tmp/download_supautils.sh && \ + echo 'fi' >> /tmp/download_supautils.sh && \ + echo 'CHECKSUM=$(echo $CHECKSUM | sed "s/^sha256://")' >> /tmp/download_supautils.sh && \ + echo 'curl -fsSL -o /tmp/supautils.deb \\' >> /tmp/download_supautils.sh && \ + echo ' 
"https://github.com/tealbase/supautils/releases/download/v${supautils_release}/supautils-v${supautils_release}-pg${postgresql_major}-$ARCH-linux-gnu.deb"' >> /tmp/download_supautils.sh && \ + echo 'echo "$CHECKSUM /tmp/supautils.deb" | sha256sum -c -' >> /tmp/download_supautils.sh && \ + chmod +x /tmp/download_supautils.sh + +# Run the script to download and verify the package +RUN /tmp/download_supautils.sh && rm /tmp/download_supautils.sh + +#################### +# setup-wal-g.yml +#################### +FROM base as walg +ARG wal_g_release +# ADD "https://github.com/wal-g/wal-g/releases/download/v${wal_g_release}/wal-g-pg-ubuntu-20.04-${TARGETARCH}.tar.gz" /tmp/wal-g.tar.gz +RUN arch=$([ "$TARGETARCH" = "arm64" ] && echo "aarch64" || echo "$TARGETARCH") && \ + apt-get update && apt-get install -y --no-install-recommends curl && \ + curl -kL "https://github.com/wal-g/wal-g/releases/download/v${wal_g_release}/wal-g-pg-ubuntu-20.04-${arch}.tar.gz" -o /tmp/wal-g.tar.gz && \ + tar -xvf /tmp/wal-g.tar.gz -C /tmp && \ + rm -rf /tmp/wal-g.tar.gz && \ + mv /tmp/wal-g-pg-ubuntu*20.04-$arch /tmp/wal-g + +#################### +# Collect extension packages +#################### +FROM scratch as extensions +COPY --from=postgis-source /tmp/*.deb /tmp/ +COPY --from=pgrouting-source /tmp/*.deb /tmp/ +COPY --from=pgtap-source /tmp/*.deb /tmp/ +COPY --from=pg_cron-source /tmp/*.deb /tmp/ +COPY --from=pgaudit-source /tmp/*.deb /tmp/ +COPY --from=pgjwt-source /tmp/*.deb /tmp/ +COPY --from=pgsql-http-source /tmp/*.deb /tmp/ +COPY --from=plpgsql_check-source /tmp/*.deb /tmp/ +COPY --from=pg-safeupdate-source /tmp/*.deb /tmp/ +COPY --from=timescaledb-source /tmp/*.deb /tmp/ +COPY --from=wal2json-source /tmp/*.deb /tmp/ +# COPY --from=pljava /tmp/*.deb /tmp/ +COPY --from=plv8 /tmp/*.deb /tmp/ +COPY --from=pg_plan_filter-source /tmp/*.deb /tmp/ +COPY --from=pg_net-source /tmp/*.deb /tmp/ +COPY --from=rum-source /tmp/*.deb /tmp/ +COPY --from=pgsodium-source /tmp/*.deb /tmp/ +COPY 
--from=pg_hashids-source /tmp/*.deb /tmp/ +COPY --from=pg_graphql /tmp/*.deb /tmp/ +COPY --from=pg_stat_monitor-source /tmp/*.deb /tmp/ +COPY --from=pg_jsonschema /tmp/*.deb /tmp/ +COPY --from=vault-source /tmp/*.deb /tmp/ +COPY --from=pgroonga-source /tmp/*.deb /tmp/ +COPY --from=wrappers /tmp/*.deb /tmp/ +COPY --from=hypopg-source /tmp/*.deb /tmp/ +COPY --from=pg_repack-source /tmp/*.deb /tmp/ +COPY --from=pgvector-source /tmp/*.deb /tmp/ +COPY --from=pg_tle-source /tmp/*.deb /tmp/ +COPY --from=index_advisor /tmp/*.deb /tmp/ +COPY --from=supautils /tmp/*.deb /tmp/ + +#################### +# Download gosu for easy step-down from root +#################### +FROM ubuntu:focal as gosu +ARG TARGETARCH +# Install dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + gnupg \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* +# Download binary +ARG GOSU_VERSION=1.16 +ARG GOSU_GPG_KEY=B42F6819007F00F88E364FD4036A9C25BF357DD4 +ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH \ + /usr/local/bin/gosu +ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH.asc \ + /usr/local/bin/gosu.asc +# Verify checksum +RUN gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys $GOSU_GPG_KEY && \ + gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu && \ + gpgconf --kill all && \ + chmod +x /usr/local/bin/gosu + +#################### +# Build final image +#################### +FROM base as production + +# Setup extensions +COPY --from=extensions /tmp /tmp +COPY --from=walg /tmp/wal-g /usr/local/bin/ + +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get update && apt-get install -y --no-install-recommends \ + /tmp/*.deb \ + # Needed for anything using libcurl + # https://github.com/tealbase/postgres/issues/573 + ca-certificates \ + && rm -rf /var/lib/apt/lists/* /tmp/* + +# Initialise configs +COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql.conf.j2 
/etc/postgresql/postgresql.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_hba.conf.j2 /etc/postgresql/pg_hba.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_ident.conf.j2 /etc/postgresql/pg_ident.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql-stdout-log.conf /etc/postgresql/logging.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/supautils.conf.j2 /etc/postgresql-custom/supautils.conf +COPY --chown=postgres:postgres ansible/files/postgresql_extension_custom_scripts /etc/postgresql-custom/extension-custom-scripts +COPY --chown=postgres:postgres ansible/files/pgsodium_getkey_urandom.sh.j2 /usr/lib/postgresql/${postgresql_major}/bin/pgsodium_getkey.sh +COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_read_replica.conf.j2 /etc/postgresql-custom/read-replica.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_walg.conf.j2 /etc/postgresql-custom/wal-g.conf +COPY --chown=postgres:postgres ansible/files/walg_helper_scripts/wal_fetch.sh /home/postgres/wal_fetch.sh +COPY ansible/files/walg_helper_scripts/wal_change_ownership.sh /root/wal_change_ownership.sh + +RUN sed -i \ + -e "s|#unix_socket_directories = '/tmp'|unix_socket_directories = '/var/run/postgresql'|g" \ + -e "s|#session_preload_libraries = ''|session_preload_libraries = 'supautils'|g" \ + -e "s|#include = '/etc/postgresql-custom/supautils.conf'|include = '/etc/postgresql-custom/supautils.conf'|g" \ + -e "s|#include = '/etc/postgresql-custom/wal-g.conf'|include = '/etc/postgresql-custom/wal-g.conf'|g" /etc/postgresql/postgresql.conf && \ + echo "pljava.libjvm_location = '/usr/lib/jvm/java-11-openjdk-${TARGETARCH}/lib/server/libjvm.so'" >> /etc/postgresql/postgresql.conf && \ + echo "pgsodium.getkey_script= '/usr/lib/postgresql/${postgresql_major}/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ + useradd --create-home --shell /bin/bash wal-g -G postgres 
&& \ + mkdir -p /etc/postgresql-custom && \ + chown postgres:postgres /etc/postgresql-custom + +# Include schema migrations +COPY migrations/db /docker-entrypoint-initdb.d/ +COPY ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql /docker-entrypoint-initdb.d/init-scripts/00-schema.sql +COPY ansible/files/stat_extension.sql /docker-entrypoint-initdb.d/migrations/00-extension.sql + +# Add upstream entrypoint script +COPY --from=gosu /usr/local/bin/gosu /usr/local/bin/gosu +ADD --chmod=0755 \ + https://github.com/docker-library/postgres/raw/master/15/bullseye/docker-entrypoint.sh \ + /usr/local/bin/ +ENTRYPOINT ["docker-entrypoint.sh"] + +HEALTHCHECK --interval=2s --timeout=2s --retries=10 CMD pg_isready -U postgres -h localhost +STOPSIGNAL SIGINT +EXPOSE 5432 + +ENV POSTGRES_HOST=/var/run/postgresql +CMD ["postgres", "-D", "/etc/postgresql"] + +#################### +# Update build cache +#################### +FROM ccache as stats +COPY --from=extensions /tmp/*.deb /dev/null +# Additional packages that are separately built from source +# COPY --from=plv8-deb /tmp/*.deb /dev/null +# Cache mount is only populated by docker build --no-cache +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + ccache -s && \ + cp -r /ccache/* /tmp +FROM scratch as buildcache +COPY --from=stats /tmp / diff --git a/Dockerfile-156 b/Dockerfile-156 new file mode 100644 index 0000000..473e157 --- /dev/null +++ b/Dockerfile-156 @@ -0,0 +1,223 @@ +# syntax=docker/dockerfile:1.6 +ARG postgresql_major=15 +ARG postgresql_release=${postgresql_major}.1 + +# Bump default build arg to build a package from source +# Bump vars.yml to specify runtime package version +ARG sfcgal_release=1.3.10 +ARG postgis_release=3.3.2 +ARG pgrouting_release=3.4.1 +ARG pgtap_release=1.2.0 +ARG pg_cron_release=1.6.2 +ARG pgaudit_release=1.7.0 +ARG pgjwt_release=9742dab1b2f297ad3811120db7b21451bca2d3c9 +ARG pgsql_http_release=1.5.0 +ARG plpgsql_check_release=2.2.5 +ARG 
pg_safeupdate_release=1.4 +ARG timescaledb_release=2.9.1 +ARG wal2json_release=2_5 +ARG pljava_release=1.6.4 +ARG plv8_release=3.1.5 +ARG pg_plan_filter_release=5081a7b5cb890876e67d8e7486b6a64c38c9a492 +ARG pg_net_release=0.7.1 +ARG rum_release=1.3.13 +ARG pg_hashids_release=cd0e1b31d52b394a0df64079406a14a4f7387cd6 +ARG libsodium_release=1.0.18 +ARG pgsodium_release=3.1.6 +ARG pg_graphql_release=1.5.1 +ARG pg_stat_monitor_release=1.1.1 +ARG pg_jsonschema_release=0.1.4 +ARG pg_repack_release=1.4.8 +ARG vault_release=0.2.8 +ARG groonga_release=12.0.8 +ARG pgroonga_release=2.4.0 +ARG wrappers_release=0.3.0 +ARG hypopg_release=1.3.1 +ARG pgvector_release=0.4.0 +ARG pg_tle_release=1.3.2 +ARG index_advisor_release=0.2.0 +ARG supautils_release=2.2.0 +ARG wal_g_release=2.0.1 + +FROM ubuntu:focal as base + +RUN apt update -y && apt install -y \ + curl \ + gnupg \ + lsb-release \ + software-properties-common \ + wget \ + sudo \ + && apt clean + + +RUN adduser --system --home /var/lib/postgresql --no-create-home --shell /bin/bash --group --gecos "PostgreSQL administrator" postgres +RUN adduser --system --no-create-home --shell /bin/bash --group wal-g +RUN curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install linux \ +--init none \ +--no-confirm \ +--extra-conf "substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com" \ +--extra-conf "trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" + +ENV PATH="${PATH}:/nix/var/nix/profiles/default/bin" + +COPY . 
/nixpg + +WORKDIR /nixpg + +RUN nix profile install .#psql_15/bin + + + +WORKDIR / + + +RUN mkdir -p /usr/lib/postgresql/bin \ + /usr/lib/postgresql/share/postgresql \ + /usr/share/postgresql \ + # /usr/lib/postgresql/share/postgresql/contrib \ + #/usr/lib/postgresql/share/postgresql/timezonesets \ + #/usr/lib/postgresql/share/postgresql/tsearch_data \ + # /usr/lib/postgresql/share/postgresql/extension \ + && chown -R postgres:postgres /usr/lib/postgresql \ + && chown -R postgres:postgres /usr/share/postgresql + +# Create symbolic links +RUN ln -s /nix/var/nix/profiles/default/bin/* /usr/lib/postgresql/bin/ \ + && ln -s /nix/var/nix/profiles/default/bin/* /usr/bin/ \ + && chown -R postgres:postgres /usr/bin + +# Create symbolic links for PostgreSQL shares +RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/lib/postgresql/share/postgresql/ +RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/share/postgresql/ +RUN chown -R postgres:postgres /usr/lib/postgresql/share/postgresql/ +RUN chown -R postgres:postgres /usr/share/postgresql/ +# Create symbolic links for contrib directory +RUN mkdir -p /usr/lib/postgresql/share/postgresql/contrib \ + && find /nix/var/nix/profiles/default/share/postgresql/contrib/ -mindepth 1 -type d -exec sh -c 'for dir do ln -s "$dir" "/usr/lib/postgresql/share/postgresql/contrib/$(basename "$dir")"; done' sh {} + \ + && chown -R postgres:postgres /usr/lib/postgresql/share/postgresql/contrib/ + +RUN chown -R postgres:postgres /usr/lib/postgresql + +RUN ln -sf /usr/lib/postgresql/share/postgresql/timezonesets /usr/share/postgresql/timezonesets + + +RUN apt-get update && \ + apt-get install -y --no-install-recommends tzdata + +RUN ln -fs /usr/share/zoneinfo/Etc/UTC /etc/localtime && \ + dpkg-reconfigure --frontend noninteractive tzdata + +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + build-essential \ + checkinstall \ + cmake + +ENV PGDATA=/var/lib/postgresql/data + +#################### +# 
setup-wal-g.yml +#################### +FROM base as walg +ARG wal_g_release +# ADD "https://github.com/wal-g/wal-g/releases/download/v${wal_g_release}/wal-g-pg-ubuntu-20.04-${TARGETARCH}.tar.gz" /tmp/wal-g.tar.gz +RUN arch=$([ "$TARGETARCH" = "arm64" ] && echo "aarch64" || echo "$TARGETARCH") && \ + apt-get update && apt-get install -y --no-install-recommends curl && \ + curl -kL "https://github.com/wal-g/wal-g/releases/download/v${wal_g_release}/wal-g-pg-ubuntu-20.04-aarch64.tar.gz" -o /tmp/wal-g.tar.gz && \ + tar -xvf /tmp/wal-g.tar.gz -C /tmp && \ + rm -rf /tmp/wal-g.tar.gz && \ + mv /tmp/wal-g-pg-ubuntu*20.04-aarch64 /tmp/wal-g + +# #################### +# # Download gosu for easy step-down from root +# #################### +FROM base as gosu +ARG TARGETARCH +# Install dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + gnupg \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* +# Download binary +ARG GOSU_VERSION=1.16 +ARG GOSU_GPG_KEY=B42F6819007F00F88E364FD4036A9C25BF357DD4 +ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH \ + /usr/local/bin/gosu +ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH.asc \ + /usr/local/bin/gosu.asc +# Verify checksum +RUN gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys $GOSU_GPG_KEY && \ + gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu && \ + gpgconf --kill all && \ + chmod +x /usr/local/bin/gosu + +# #################### +# # Build final image +# #################### +FROM gosu as production +RUN id postgres || (echo "postgres user does not exist" && exit 1) +# # Setup extensions +COPY --from=walg /tmp/wal-g /usr/local/bin/ + +# # Initialise configs +COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql.conf.j2 /etc/postgresql/postgresql.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_hba.conf.j2 /etc/postgresql/pg_hba.conf +COPY --chown=postgres:postgres 
ansible/files/postgresql_config/pg_ident.conf.j2 /etc/postgresql/pg_ident.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql-stdout-log.conf /etc/postgresql/logging.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/supautils.conf.j2 /etc/postgresql-custom/supautils.conf +COPY --chown=postgres:postgres ansible/files/postgresql_extension_custom_scripts /etc/postgresql-custom/extension-custom-scripts +COPY --chown=postgres:postgres ansible/files/pgsodium_getkey_urandom.sh.j2 /usr/lib/postgresql/bin/pgsodium_getkey.sh +COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_read_replica.conf.j2 /etc/postgresql-custom/read-replica.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_walg.conf.j2 /etc/postgresql-custom/wal-g.conf +COPY --chown=postgres:postgres ansible/files/walg_helper_scripts/wal_fetch.sh /home/postgres/wal_fetch.sh +COPY ansible/files/walg_helper_scripts/wal_change_ownership.sh /root/wal_change_ownership.sh + +RUN sed -i \ + -e "s|#unix_socket_directories = '/tmp'|unix_socket_directories = '/var/run/postgresql'|g" \ + -e "s|#session_preload_libraries = ''|session_preload_libraries = 'supautils'|g" \ + -e "s|#include = '/etc/postgresql-custom/supautils.conf'|include = '/etc/postgresql-custom/supautils.conf'|g" \ + -e "s|#include = '/etc/postgresql-custom/wal-g.conf'|include = '/etc/postgresql-custom/wal-g.conf'|g" /etc/postgresql/postgresql.conf && \ + echo "cron.database_name = 'postgres'" >> /etc/postgresql/postgresql.conf && \ + #echo "pljava.libjvm_location = '/usr/lib/jvm/java-11-openjdk-${TARGETARCH}/lib/server/libjvm.so'" >> /etc/postgresql/postgresql.conf && \ + echo "pgsodium.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ + echo 'auto_explain.log_min_duration = 10s' >> /etc/postgresql/postgresql.conf && \ + usermod -aG postgres wal-g && \ + mkdir -p /etc/postgresql-custom && \ + chown postgres:postgres 
/etc/postgresql-custom + +# # Include schema migrations +COPY migrations/db /docker-entrypoint-initdb.d/ +COPY ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql /docker-entrypoint-initdb.d/init-scripts/00-schema.sql +COPY ansible/files/stat_extension.sql /docker-entrypoint-initdb.d/migrations/00-extension.sql + +# # Add upstream entrypoint script +COPY --from=gosu /usr/local/bin/gosu /usr/local/bin/gosu +ADD --chmod=0755 \ + https://github.com/docker-library/postgres/raw/master/15/bullseye/docker-entrypoint.sh \ + /usr/local/bin/ + +RUN mkdir -p /var/run/postgresql && chown postgres:postgres /var/run/postgresql + +ENTRYPOINT ["docker-entrypoint.sh"] + +HEALTHCHECK --interval=2s --timeout=2s --retries=10 CMD pg_isready -U postgres -h localhost +STOPSIGNAL SIGINT +EXPOSE 5432 + +ENV POSTGRES_HOST=/var/run/postgresql +ENV POSTGRES_USER=tealbase_admin +ENV POSTGRES_DB=postgres +RUN apt-get update && apt-get install -y --no-install-recommends \ + locales \ + && rm -rf /var/lib/apt/lists/* && \ + localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 \ + && localedef -i C -c -f UTF-8 -A /usr/share/locale/locale.alias C.UTF-8 +RUN echo "C.UTF-8 UTF-8" > /etc/locale.gen && echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen && locale-gen +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US:en +ENV LC_ALL en_US.UTF-8 +ENV LC_CTYPE=C.UTF-8 +ENV LC_COLLATE=C.UTF-8 +ENV LOCALE_ARCHIVE /usr/lib/locale/locale-archive +CMD ["postgres", "-D", "/etc/postgresql"] diff --git a/README.md b/README.md index cf657d5..95fa829 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ Unmodified Postgres with some useful plugins. Our goal with this repo is not to | [PostGIS](https://postgis.net/) | [3.3.2](https://git.osgeo.org/gitea/postgis/postgis/raw/tag/3.3.2/NEWS) | Postgres' most popular extension - support for geographic objects. 
| | [pgRouting](https://pgrouting.org/) | [v3.4.1](https://github.com/pgRouting/pgrouting/releases/tag/v3.4.1) | Extension of PostGIS - provides geospatial routing functionalities. | | [pgTAP](https://pgtap.org/) | [v1.2.0](https://github.com/theory/pgtap/releases/tag/v1.2.0) | Unit Testing for Postgres. | -| [pg_cron](https://github.com/citusdata/pg_cron) | [v1.4.2](https://github.com/citusdata/pg_cron/releases/tag/v1.4.2) | Run CRON jobs inside Postgres. | +| [pg_cron](https://github.com/citusdata/pg_cron) | [v1.6.2](https://github.com/citusdata/pg_cron/releases/tag/v1.6.2) | Run CRON jobs inside Postgres. | | [pgAudit](https://www.pgaudit.org/) | [1.7.0](https://github.com/pgaudit/pgaudit/releases/tag/1.7.0) | Generate highly compliant audit logs. | | [pgjwt](https://github.com/michelp/pgjwt) | [commit](https://github.com/michelp/pgjwt/commit/9742dab1b2f297ad3811120db7b21451bca2d3c9) | Generate JSON Web Tokens (JWT) in Postgres. | | [pgsql-http](https://github.com/pramsey/pgsql-http) | [1.5.0](https://github.com/pramsey/pgsql-http/releases/tag/v1.5.0) | HTTP client for Postgres. | @@ -26,12 +26,12 @@ Unmodified Postgres with some useful plugins. Our goal with this repo is not to | [plv8](https://github.com/plv8/plv8) | [commit](https://github.com/plv8/plv8/commit/bcddd92f71530e117f2f98b92d206dafe824f73a) | Write in Javascript functions in Postgres. | | [pg_plan_filter](https://github.com/pgexperts/pg_plan_filter) | [commit](https://github.com/pgexperts/pg_plan_filter/commit/5081a7b5cb890876e67d8e7486b6a64c38c9a492) | Only allow statements that fulfill set criteria to be executed. | | [pg_net](https://github.com/tealbase/pg_net) | [v0.6.1](https://github.com/tealbase/pg_net/releases/tag/v0.6.1) | Expose the SQL interface for async networking. 
| +| [pg_repack](https://github.com/reorg/pg_repack) | [ver_1.5.0](https://github.com/reorg/pg_repack/releases/tag/ver_1.5.0) | Tool to remove bloat from tables and indexes | [rum](https://github.com/postgrespro/rum) | [1.3.13](https://github.com/postgrespro/rum/releases/tag/1.3.13) | An alternative to the GIN index. | | [pg_hashids](https://github.com/iCyberon/pg_hashids) | [commit](https://github.com/iCyberon/pg_hashids/commit/83398bcbb616aac2970f5e77d93a3200f0f28e74) | Generate unique identifiers from numbers. | | [pgsodium](https://github.com/michelp/pgsodium) | [3.1.0](https://github.com/michelp/pgsodium/releases/tag/2.0.0) | Modern encryption API using libsodium. | | [pg_stat_monitor](https://github.com/percona/pg_stat_monitor) | [1.0.1](https://github.com/percona/pg_stat_monitor/releases/tag/1.0.1) | Query Performance Monitoring Tool for PostgreSQL | [pgvector](https://github.com/pgvector/pgvector) | [v0.4.0](https://github.com/pgvector/pgvector/releases/tag/v0.4.0) | Open-source vector similarity search for Postgres -| [pg_repack](https://github.com/reorg/pg_repack) | [ver_1.4.8](https://github.com/reorg/pg_repack/releases/tag/ver_1.4.8) | Tool to remove bloat from tables and indexes Can't find your favorite extension? Suggest for it to be added into future releases [here](https://github.com/tealbase/tealbase/discussions/679)! @@ -58,9 +58,9 @@ Aside from having [ufw](https://help.ubuntu.com/community/UFW),[fail2ban](https: See all installation instructions in the [repo wiki](https://github.com/tealbase/postgres/wiki). 
-[![Docker](https://github.com/tealbase/postgres/blob/master/docs/img/docker.png)](https://github.com/tealbase/postgres/wiki/Docker) -[![Digital Ocean](https://github.com/tealbase/postgres/blob/master/docs/img/digital-ocean.png)](https://github.com/tealbase/postgres/wiki/Digital-Ocean) -[![AWS](https://github.com/tealbase/postgres/blob/master/docs/img/aws.png)](https://github.com/tealbase/postgres/wiki/AWS-EC2) +[![Docker](https://github.com/tealbase/postgres/blob/develop/docs/img/docker.png)](https://github.com/tealbase/postgres/wiki/Docker) +[![Digital Ocean](https://github.com/tealbase/postgres/blob/develop/docs/img/digital-ocean.png)](https://github.com/tealbase/postgres/wiki/Digital-Ocean) +[![AWS](https://github.com/tealbase/postgres/blob/develop/docs/img/aws.png)](https://github.com/tealbase/postgres/wiki/AWS-EC2) ### Marketplace Images | | Postgres & Extensions | PgBouncer | PostgREST | WAL-G | @@ -109,3 +109,12 @@ $ time packer build -timestamp-ui \ We are building the features of Firebase using enterprise-grade, open source products. We support existing communities wherever possible, and if the products don’t exist we build them and open source them ourselves. [![New Sponsor](https://user-images.githubusercontent.com/10214025/90518111-e74bbb00-e198-11ea-8f88-c9e3c1aa4b5b.png)](https://github.com/sponsors/tealbase) + + +## Experimental Nix Packaging of resources + +There is a `/nix` folder in this repo, plus a `flake.nix` and `flake.lock` that facilitate using the Nix package management system to package tealbase/postgres, and all of our extensions and wrappers. A user will need nix installed on their machine. As of 4/1/2024 the package set only builds on target machines (`x86_64-linux` and `aarch64-linux`), however work is under way to also support building and using directly on `aarch64-darwin` (macOs). 
As of 4/1/2024, versions of packages and extensions are synced from `/ansible/vars.yml` via a utility that can be run by executing `nix run .#sync-exts-versions` (you must have nix installed and be on the supported `x86_64-linux` and `aarch64-linux` for this command to work). The short term goal is to sync these versions as they are updated by our infrastructure and postgres teams, then to see the nix packaged versions build successfully in parallel over time, along with tests of the nix packaged versions passing. + +The tealbase/postgres repo will continue to source it's dependencies from ansible for the short term, while we stabilize this nix build. + +Forthcoming PR's will include: integrating the nix work into our ansible/packer builds, building natively on aarch64-darwin (macOs), more testing diff --git a/amazon-arm64-nix.pkr.hcl b/amazon-arm64-nix.pkr.hcl new file mode 100644 index 0000000..72ba54c --- /dev/null +++ b/amazon-arm64-nix.pkr.hcl @@ -0,0 +1,277 @@ +variable "ami" { + type = string + default = "ubuntu/images/hvm-ssd/ubuntu-focal-20.04-arm64-server-*" +} + +variable "profile" { + type = string + default = "${env("AWS_PROFILE")}" +} + +variable "ami_name" { + type = string + default = "tealbase-postgres" +} + +variable "ami_regions" { + type = list(string) + default = ["ap-southeast-2"] +} + +variable "ansible_arguments" { + type = string + default = "--skip-tags install-postgrest,install-pgbouncer,install-tealbase-internal" +} + +variable "aws_access_key" { + type = string + default = "" +} + +variable "aws_secret_key" { + type = string + default = "" +} + +variable "environment" { + type = string + default = "prod" +} + +variable "region" { + type = string +} + +variable "build-vol" { + type = string + default = "xvdc" +} + +# ccache docker image details +variable "docker_user" { + type = string + default = "" +} + +variable "docker_passwd" { + type = string + default = "" +} + +variable "docker_image" { + type = string + default = "" +} + 
+variable "docker_image_tag" { + type = string + default = "latest" +} + +locals { + creator = "packer" +} + +variable "postgres-version" { + type = string + default = "" +} + +variable "git-head-version" { + type = string + default = "unknown" +} + +variable "packer-execution-id" { + type = string + default = "unknown" +} + +variable "force-deregister" { + type = bool + default = false +} + +packer { + required_plugins { + amazon = { + source = "github.com/hashicorp/amazon" + version = "~> 1" + } + } +} + +# source block +source "amazon-ebssurrogate" "source" { + profile = "${var.profile}" + #access_key = "${var.aws_access_key}" + #ami_name = "${var.ami_name}-arm64-${formatdate("YYYY-MM-DD-hhmm", timestamp())}" + ami_name = "${var.ami_name}-${var.postgres-version}-stage-1" + ami_virtualization_type = "hvm" + ami_architecture = "arm64" + ami_regions = "${var.ami_regions}" + instance_type = "c6g.4xlarge" + region = "${var.region}" + #secret_key = "${var.aws_secret_key}" + force_deregister = var.force-deregister + + # Use latest official ubuntu focal ami owned by Canonical. 
+ source_ami_filter { + filters = { + virtualization-type = "hvm" + name = "${var.ami}" + root-device-type = "ebs" + } + owners = [ "099720109477" ] + most_recent = true + } + ena_support = true + launch_block_device_mappings { + device_name = "/dev/xvdf" + delete_on_termination = true + volume_size = 10 + volume_type = "gp3" + } + + launch_block_device_mappings { + device_name = "/dev/xvdh" + delete_on_termination = true + volume_size = 8 + volume_type = "gp3" + } + + launch_block_device_mappings { + device_name = "/dev/${var.build-vol}" + delete_on_termination = true + volume_size = 16 + volume_type = "gp2" + omit_from_artifact = true + } + + run_tags = { + creator = "packer" + appType = "postgres" + packerExecutionId = "${var.packer-execution-id}" + } + run_volume_tags = { + creator = "packer" + appType = "postgres" + } + snapshot_tags = { + creator = "packer" + appType = "postgres" + } + tags = { + creator = "packer" + appType = "postgres" + postgresVersion = "${var.postgres-version}-stage1" + sourceSha = "${var.git-head-version}" + } + + communicator = "ssh" + ssh_pty = true + ssh_username = "ubuntu" + ssh_timeout = "5m" + + ami_root_device { + source_device_name = "/dev/xvdf" + device_name = "/dev/xvda" + delete_on_termination = true + volume_size = 10 + volume_type = "gp2" + } + + associate_public_ip_address = true +} + +# a build block invokes sources and runs provisioning steps on them. 
+build { + sources = ["source.amazon-ebssurrogate.source"] + + provisioner "file" { + source = "ebssurrogate/files/sources-arm64.cfg" + destination = "/tmp/sources.list" + } + + provisioner "file" { + source = "ebssurrogate/files/ebsnvme-id" + destination = "/tmp/ebsnvme-id" + } + + provisioner "file" { + source = "ebssurrogate/files/70-ec2-nvme-devices.rules" + destination = "/tmp/70-ec2-nvme-devices.rules" + } + + provisioner "file" { + source = "ebssurrogate/scripts/chroot-bootstrap-nix.sh" + destination = "/tmp/chroot-bootstrap-nix.sh" + } + + provisioner "file" { + source = "ebssurrogate/files/cloud.cfg" + destination = "/tmp/cloud.cfg" + } + + provisioner "file" { + source = "ebssurrogate/files/vector.timer" + destination = "/tmp/vector.timer" + } + + provisioner "file" { + source = "ebssurrogate/files/apparmor_profiles" + destination = "/tmp" + } + + provisioner "file" { + source = "migrations" + destination = "/tmp" + } + + provisioner "file" { + source = "ebssurrogate/files/unit-tests" + destination = "/tmp" + } + + # Copy ansible playbook + provisioner "shell" { + inline = ["mkdir /tmp/ansible-playbook"] + } + + provisioner "file" { + source = "ansible" + destination = "/tmp/ansible-playbook" + } + + provisioner "file" { + source = "scripts" + destination = "/tmp/ansible-playbook" + } + + provisioner "file" { + source = "ansible/vars.yml" + destination = "/tmp/ansible-playbook/vars.yml" + } + + provisioner "shell" { + environment_vars = [ + "ARGS=${var.ansible_arguments}", + "DOCKER_USER=${var.docker_user}", + "DOCKER_PASSWD=${var.docker_passwd}", + "DOCKER_IMAGE=${var.docker_image}", + "DOCKER_IMAGE_TAG=${var.docker_image_tag}", + "POSTGRES_tealbase_VERSION=${var.postgres-version}" + ] + use_env_var_file = true + script = "ebssurrogate/scripts/surrogate-bootstrap-nix.sh" + execute_command = "sudo -S sh -c '. 
{{.EnvVarFile}} && {{.Path}}'" + start_retry_timeout = "5m" + skip_clean = true + } + + provisioner "file" { + source = "/tmp/ansible.log" + destination = "/tmp/ansible.log" + direction = "download" + } +} diff --git a/amazon-arm64.pkr.hcl b/amazon-arm64.pkr.hcl index e306d97..eb1be6e 100644 --- a/amazon-arm64.pkr.hcl +++ b/amazon-arm64.pkr.hcl @@ -20,7 +20,7 @@ variable "ami_regions" { variable "ansible_arguments" { type = string - default = "--skip-tags,install-postgrest,--skip-tags,install-pgbouncer,--skip-tags,install-tealbase-internal,ebssurrogate_mode='true'" + default = "--skip-tags install-postgrest,install-pgbouncer,install-tealbase-internal" } variable "aws_access_key" { @@ -87,6 +87,20 @@ variable "packer-execution-id" { default = "unknown" } +variable "force-deregister" { + type = bool + default = false +} + +packer { + required_plugins { + amazon = { + source = "github.com/hashicorp/amazon" + version = "~> 1" + } + } +} + # source block source "amazon-ebssurrogate" "source" { profile = "${var.profile}" @@ -99,6 +113,7 @@ source "amazon-ebssurrogate" "source" { instance_type = "c6g.4xlarge" region = "${var.region}" #secret_key = "${var.aws_secret_key}" + force_deregister = var.force-deregister # Use latest official ubuntu focal ami owned by Canonical. source_ami_filter { @@ -165,6 +180,8 @@ source "amazon-ebssurrogate" "source" { volume_size = 10 volume_type = "gp2" } + + associate_public_ip_address = true } # a build block invokes sources and runs provisioning steps on them. @@ -237,10 +254,12 @@ build { "DOCKER_USER=${var.docker_user}", "DOCKER_PASSWD=${var.docker_passwd}", "DOCKER_IMAGE=${var.docker_image}", - "DOCKER_IMAGE_TAG=${var.docker_image_tag}" + "DOCKER_IMAGE_TAG=${var.docker_image_tag}", + "POSTGRES_tealbase_VERSION=${var.postgres-version}" ] + use_env_var_file = true script = "ebssurrogate/scripts/surrogate-bootstrap.sh" - execute_command = "sudo -S sh -c '{{ .Vars }} {{ .Path }}'" + execute_command = "sudo -S sh -c '. 
{{.EnvVarFile}} && {{.Path}}'" start_retry_timeout = "5m" skip_clean = true } @@ -250,4 +269,10 @@ build { destination = "/tmp/ansible.log" direction = "download" } + + provisioner "file" { + source = "/tmp/pg_binaries.tar.gz" + destination = "/tmp/pg_binaries.tar.gz" + direction = "download" + } } diff --git a/ansible/files/admin_api_scripts/grow_fs.sh b/ansible/files/admin_api_scripts/grow_fs.sh index 6d2a4e5..1bca017 100644 --- a/ansible/files/admin_api_scripts/grow_fs.sh +++ b/ansible/files/admin_api_scripts/grow_fs.sh @@ -4,14 +4,23 @@ set -euo pipefail VOLUME_TYPE=${1:-data} +if pgrep resizefs; then + echo "resize2fs is already running" + exit 1 +fi + if [ -b /dev/nvme1n1 ] ; then if [[ "${VOLUME_TYPE}" == "data" ]]; then resize2fs /dev/nvme1n1 elif [[ "${VOLUME_TYPE}" == "root" ]] ; then + PLACEHOLDER_FL=/home/ubuntu/50M_PLACEHOLDER + rm -f "${PLACEHOLDER_FL}" || true growpart /dev/nvme0n1 2 resize2fs /dev/nvme0n1p2 - + if [[ ! -f "${PLACEHOLDER_FL}" ]] ; then + fallocate -l50M "${PLACEHOLDER_FL}" + fi else echo "Invalid disk specified: ${VOLUME_TYPE}" exit 1 diff --git a/ansible/files/admin_api_scripts/manage_readonly_mode.sh b/ansible/files/admin_api_scripts/manage_readonly_mode.sh index 1e258bd..aba5456 100644 --- a/ansible/files/admin_api_scripts/manage_readonly_mode.sh +++ b/ansible/files/admin_api_scripts/manage_readonly_mode.sh @@ -27,7 +27,7 @@ FROM role_comment; EOF ) RESULT=$(psql -h localhost -U tealbase_admin -d postgres -At -c "$COMMAND") - echo -n $RESULT + echo -n "$RESULT" } case $SUBCOMMAND in @@ -36,7 +36,7 @@ case $SUBCOMMAND in ;; "set") shift - set_mode $@ + set_mode "$@" ;; *) echo "Error: '$SUBCOMMAND' is not a known subcommand." 
diff --git a/ansible/files/admin_api_scripts/pg_egress_collect.pl b/ansible/files/admin_api_scripts/pg_egress_collect.pl index 2acc98a..46934e5 100644 --- a/ansible/files/admin_api_scripts/pg_egress_collect.pl +++ b/ansible/files/admin_api_scripts/pg_egress_collect.pl @@ -26,18 +26,24 @@ # extract tcp packet length captured by tcpdump # -# Sample input lines: +# Sample IPv4 input lines: # # 1674013833.940253 IP (tos 0x0, ttl 64, id 0, offset 0, flags [DF], proto TCP (6), length 60) # 10.112.101.122.5432 > 220.235.16.223.62599: Flags [S.], cksum 0x5de3 (incorrect -> 0x63da), seq 2314200657, ack 2071735457, win 62643, options [mss 8961,sackOK,TS val 3358598837 ecr 1277499190,nop,wscale 7], length 0 # 1674013833.989257 IP (tos 0x0, ttl 64, id 24975, offset 0, flags [DF], proto TCP (6), length 52) # 10.112.101.122.5432 > 220.235.16.223.62599: Flags [.], cksum 0x5ddb (incorrect -> 0xa25b), seq 1, ack 9, win 490, options [nop,nop,TS val 3358598885 ecr 1277499232], length 0 +# +# Sample IPv6 input lines: +# +# 1706483718.836526 IP6 (flowlabel 0x0bf27, hlim 64, next-header TCP (6) payload length: 125) 2406:da18:4fd:9b00:959:c52:ce68:10c8.5432 > 2406:da12:d78:f501:1273:296c:2482:c7a7.50530: Flags [P.], seq 25:118, ack 125, win 488, options [nop,nop,TS val 1026340732 ecr 1935666426], length 93 +# 1706483718.912083 IP6 (flowlabel 0x0bf27, hlim 64, next-header TCP (6) payload length: 501) 2406:da18:4fd:9b00:959:c52:ce68:10c8.5432 > 2406:da12:d78:f501:1273:296c:2482:c7a7.50530: Flags [P.], seq 118:587, ack 234, win 488, options [nop,nop,TS val 1026340807 ecr 1935666497], length 469 +# 1706483718.984001 IP6 (flowlabel 0x0bf27, hlim 64, next-header TCP (6) payload length: 151) 2406:da18:4fd:9b00:959:c52:ce68:10c8.5432 > 2406:da12:d78:f501:1273:296c:2482:c7a7.50530: Flags [P.], seq 587:706, ack 448, win 487, options [nop,nop,TS val 1026340879 ecr 1935666569], length 119 sub extract_packet_length { my ($line) = @_; #print("debug: >> " . 
$line); - if ($line =~ /^\s+\d+\.\d+\.\d+\.\d+\..*, length (\d+)$/) { + if ($line =~ /^.*, length (\d+)$/) { # extract tcp packet length and add it up my $len = $1; $captured_len += $len; diff --git a/ansible/files/admin_api_scripts/pg_upgrade_complete.sh b/ansible/files/admin_api_scripts/pg_upgrade_complete.sh deleted file mode 100644 index 1108c3e..0000000 --- a/ansible/files/admin_api_scripts/pg_upgrade_complete.sh +++ /dev/null @@ -1,75 +0,0 @@ -#! /usr/bin/env bash - -## This script is run on the newly launched instance which is to be promoted to -## become the primary database instance once the upgrade successfully completes. -## The following commands copy custom PG configs and enable previously disabled -## extensions, containing regtypes referencing system OIDs. - -# Extensions to be reenabled after pg_upgrade. -# Running an upgrade with these extensions enabled will result in errors due to -# them depending on regtypes referencing system OIDs. Thus they have been disabled -# beforehand. -EXTENSIONS_TO_REENABLE=( - "pg_graphql" -) - -set -eEuo pipefail - -run_sql() { - psql -h localhost -U tealbase_admin -d postgres "$@" -} - -cleanup() { - UPGRADE_STATUS=${1:-"failed"} - EXIT_CODE=${?:-0} - - echo "${UPGRADE_STATUS}" > /tmp/pg-upgrade-status - - exit $EXIT_CODE -} - -function complete_pg_upgrade { - if [ -f /tmp/pg-upgrade-status ]; then - echo "Upgrade job already started. Bailing." 
- exit 0 - fi - - echo "running" > /tmp/pg-upgrade-status - - mount -a -v - - # copying custom configurations - cp -R /data/conf/* /etc/postgresql-custom/ - chown -R postgres:postgres /var/lib/postgresql/data - chown -R postgres:postgres /data/pgdata - - service postgresql start - - for EXTENSION in "${EXTENSIONS_TO_REENABLE[@]}"; do - run_sql -c "CREATE EXTENSION IF NOT EXISTS ${EXTENSION} CASCADE;" - done - - if [ -d /data/sql ]; then - for FILE in /data/sql/*.sql; do - if [ -f "$FILE" ]; then - run_sql -f $FILE - fi - done - fi - - sleep 5 - service postgresql restart - - start_vacuum_analyze - - echo "Upgrade job completed" -} - -function start_vacuum_analyze { - su -c 'vacuumdb --all --analyze-in-stages' -s $SHELL postgres - cleanup "complete" -} - -trap cleanup ERR - -complete_pg_upgrade >>/var/log/pg-upgrade-complete.log 2>&1 & diff --git a/ansible/files/admin_api_scripts/pg_upgrade_initiate.sh b/ansible/files/admin_api_scripts/pg_upgrade_initiate.sh deleted file mode 100644 index 9750ce3..0000000 --- a/ansible/files/admin_api_scripts/pg_upgrade_initiate.sh +++ /dev/null @@ -1,152 +0,0 @@ -#! /usr/bin/env bash - -## This script is run on the old (source) instance, mounting the data disk -## of the newly launched instance, disabling extensions containing regtypes, -## and running pg_upgrade. -## It reports the current status of the upgrade process to /tmp/pg-upgrade-status, -## which can then be subsequently checked through pg_upgrade_check.sh. - -# Extensions to disable before running pg_upgrade. -# Running an upgrade with these extensions enabled will result in errors due to -# them depending on regtypes referencing system OIDs. 
-EXTENSIONS_TO_DISABLE=( - "pg_graphql" -) - -set -eEuo pipefail - -PGVERSION=$1 - -MOUNT_POINT="/data_migration" - -run_sql() { - STATEMENT=$1 - psql -h localhost -U tealbase_admin -d postgres -c "$STATEMENT" -} - -cleanup() { - UPGRADE_STATUS=${1:-"failed"} - EXIT_CODE=${?:-0} - - if [ -d "${MOUNT_POINT}/pgdata/pg_upgrade_output.d/" ]; then - cp -R "${MOUNT_POINT}/pgdata/pg_upgrade_output.d/" /var/log/ - fi - - if [ -L /var/lib/postgresql ]; then - rm /var/lib/postgresql - mv /var/lib/postgresql.bak /var/lib/postgresql - fi - - systemctl restart postgresql - sleep 10 - systemctl restart postgresql - - for EXTENSION in "${EXTENSIONS_TO_DISABLE[@]}"; do - run_sql "CREATE EXTENSION IF NOT EXISTS ${EXTENSION} CASCADE;" - done - - run_sql "ALTER USER postgres WITH NOSUPERUSER;" - - umount $MOUNT_POINT - echo "${UPGRADE_STATUS}" > /tmp/pg-upgrade-status - - exit $EXIT_CODE -} - -function initiate_upgrade { - echo "running" > /tmp/pg-upgrade-status - - # awk NF==3 prints lines with exactly 3 fields, which are the block devices currently not mounted anywhere - # excluding nvme0 since it is the root disk - BLOCK_DEVICE=$(lsblk -dprno name,size,mountpoint,type | grep "disk" | grep -v "nvme0" | awk 'NF==3 { print $1; }') - - if [ -x "$(command -v blockdev)" ]; then - blockdev --rereadpt "$BLOCK_DEVICE" - fi - - mkdir -p "$MOUNT_POINT" - mount "$BLOCK_DEVICE" "$MOUNT_POINT" - resize2fs "$BLOCK_DEVICE" - - SHARED_PRELOAD_LIBRARIES=$(cat /etc/postgresql/postgresql.conf | grep shared_preload_libraries | sed "s/shared_preload_libraries = '\(.*\)'.*/\1/") - PGDATAOLD=$(cat /etc/postgresql/postgresql.conf | grep data_directory | sed "s/data_directory = '\(.*\)'.*/\1/") - - PGDATANEW="$MOUNT_POINT/pgdata" - PGBINNEW="/tmp/pg_upgrade_bin/$PGVERSION/bin" - PGSHARENEW="/tmp/pg_upgrade_bin/$PGVERSION/share" - - mkdir -p "/tmp/pg_upgrade_bin" - tar zxvf "/tmp/persistent/pg_upgrade_bin.tar.gz" -C "/tmp/pg_upgrade_bin" - - # copy upgrade-specific pgsodium_getkey script into the share dir 
- cp /root/pg_upgrade_pgsodium_getkey.sh "$PGSHARENEW/extension/pgsodium_getkey" - chmod +x "$PGSHARENEW/extension/pgsodium_getkey" - - if [ -f "$MOUNT_POINT/pgsodium_root.key" ]; then - cp "$MOUNT_POINT/pgsodium_root.key" /etc/postgresql-custom/pgsodium_root.key - chown postgres:postgres /etc/postgresql-custom/pgsodium_root.key - chmod 600 /etc/postgresql-custom/pgsodium_root.key - fi - - chown -R postgres:postgres "/tmp/pg_upgrade_bin/$PGVERSION" - - for EXTENSION in "${EXTENSIONS_TO_DISABLE[@]}"; do - run_sql "DROP EXTENSION IF EXISTS ${EXTENSION} CASCADE;" - done - - run_sql "ALTER USER postgres WITH SUPERUSER;" - - - chown -R postgres:postgres "$MOUNT_POINT/" - rm -rf "$PGDATANEW/" - su -c "$PGBINNEW/initdb -L $PGSHARENEW -D $PGDATANEW/" -s $SHELL postgres - - # running upgrade using at least 1 cpu core - WORKERS=$(nproc | awk '{ print ($1 == 1 ? 1 : $1 - 1) }') - - # upgrade job outputs a log in the cwd; needs write permissions - mkdir -p /tmp/pg_upgrade - chown -R postgres:postgres /tmp/pg_upgrade - cd /tmp/pg_upgrade - - UPGRADE_COMMAND=$(cat <> /var/log/pg-upgrade-initiate.log 2>&1 & -echo "Upgrade initiate job completed" diff --git a/ansible/files/admin_api_scripts/pg_upgrade_check.sh b/ansible/files/admin_api_scripts/pg_upgrade_scripts/check.sh old mode 100644 new mode 100755 similarity index 79% rename from ansible/files/admin_api_scripts/pg_upgrade_check.sh rename to ansible/files/admin_api_scripts/pg_upgrade_scripts/check.sh index 1e5dd14..f85e957 --- a/ansible/files/admin_api_scripts/pg_upgrade_check.sh +++ b/ansible/files/admin_api_scripts/pg_upgrade_scripts/check.sh @@ -1,6 +1,6 @@ #! /usr/bin/env bash ## This script provides a method to check the status of the database upgrade -## process, which is updated in /tmp/pg-upgrade-status by pg_upgrade_initiate.sh +## process, which is updated in /tmp/pg-upgrade-status by initiate.sh ## This runs on the old (source) instance. 
set -euo pipefail diff --git a/ansible/files/admin_api_scripts/pg_upgrade_scripts/common.sh b/ansible/files/admin_api_scripts/pg_upgrade_scripts/common.sh new file mode 100755 index 0000000..f14d1a8 --- /dev/null +++ b/ansible/files/admin_api_scripts/pg_upgrade_scripts/common.sh @@ -0,0 +1,551 @@ +#! /usr/bin/env bash + +# Common functions and variables used by initiate.sh and complete.sh + +REPORTING_PROJECT_REF="ihmaxnjpcccasmrbkpvo" +REPORTING_CREDENTIALS_FILE="/root/upgrade-reporting-credentials" + +REPORTING_ANON_KEY="" +if [ -f "$REPORTING_CREDENTIALS_FILE" ]; then + REPORTING_ANON_KEY=$(cat "$REPORTING_CREDENTIALS_FILE") +fi + +# shellcheck disable=SC2120 +# Arguments are passed in other files +function run_sql { + psql -h localhost -U tealbase_admin -d postgres "$@" +} + +function ship_logs { + LOG_FILE=$1 + + if [ -z "$REPORTING_ANON_KEY" ]; then + echo "No reporting key found. Skipping log upload." + return 0 + fi + + if [ ! -f "$LOG_FILE" ]; then + echo "No log file found. Skipping log upload." + return 0 + fi + + if [ ! -s "$LOG_FILE" ]; then + echo "Log file is empty. Skipping log upload." + return 0 + fi + + HOSTNAME=$(hostname) + DERIVED_REF="${HOSTNAME##*-}" + + printf -v BODY '{ "ref": "%s", "step": "%s", "content": %s }' "$DERIVED_REF" "completion" "$(cat "$LOG_FILE" | jq -Rs '.')" + curl -sf -X POST "https://$REPORTING_PROJECT_REF.tealbase.co/rest/v1/error_logs" \ + -H "apikey: ${REPORTING_ANON_KEY}" \ + -H 'Content-type: application/json' \ + -d "$BODY" +} + +function retry { + local retries=$1 + shift + + local count=0 + until "$@"; do + exit=$? + wait=$((2 ** (count + 1))) + count=$((count + 1)) + if [ $count -lt "$retries" ]; then + echo "Command $* exited with code $exit, retrying..." + sleep $wait + else + echo "Command $* exited with code $exit, no more retries left." 
+ return $exit + fi + done + return 0 +} + +CI_stop_postgres() { + BINDIR=$(pg_config --bindir) + ARG=${1:-""} + + if [ "$ARG" = "--new-bin" ]; then + BINDIR="/tmp/pg_upgrade_bin/$PG_MAJOR_VERSION/bin" + fi + + su postgres -c "$BINDIR/pg_ctl stop -o '-c config_file=/etc/postgresql/postgresql.conf' -l /tmp/postgres.log" +} + +CI_start_postgres() { + BINDIR=$(pg_config --bindir) + ARG=${1:-""} + + if [ "$ARG" = "--new-bin" ]; then + BINDIR="/tmp/pg_upgrade_bin/$PG_MAJOR_VERSION/bin" + fi + + su postgres -c "$BINDIR/pg_ctl start -o '-c config_file=/etc/postgresql/postgresql.conf' -l /tmp/postgres.log" +} + +swap_postgres_and_tealbase_admin() { + run_sql <<'EOSQL' +alter database postgres connection limit 0; +select pg_terminate_backend(pid) from pg_stat_activity where backend_type = 'client backend' and pid != pg_backend_pid(); +EOSQL + + if [ -z "$IS_CI" ]; then + retry 5 systemctl restart postgresql + else + CI_start_postgres "" + fi + + retry 8 pg_isready -h localhost -U tealbase_admin + + run_sql <<'EOSQL' +set statement_timeout = '600s'; +begin; +create role tealbase_tmp superuser; +set session authorization tealbase_tmp; + +-- to handle snowflakes that happened in the past +revoke tealbase_admin from authenticator; + +do $$ +begin + if exists (select from pg_extension where extname = 'timescaledb') then + execute(format('select %s.timescaledb_pre_restore()', (select pronamespace::regnamespace from pg_proc where proname = 'timescaledb_pre_restore'))); + end if; +end +$$; + +do $$ +declare + postgres_rolpassword text := (select rolpassword from pg_authid where rolname = 'postgres'); + tealbase_admin_rolpassword text := (select rolpassword from pg_authid where rolname = 'tealbase_admin'); + role_settings jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('database', d.datname, 'role', a.rolname, 'configs', s.setconfig)), '{}') + from pg_db_role_setting s + left join pg_database d on d.oid = s.setdatabase + join pg_authid a on a.oid = s.setrole + where 
a.rolname in ('postgres', 'tealbase_admin') + ); + event_triggers jsonb[] := (select coalesce(array_agg(jsonb_build_object('name', evtname)), '{}') from pg_event_trigger where evtowner = 'postgres'::regrole); + user_mappings jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('oid', um.oid, 'role', a.rolname, 'server', s.srvname, 'options', um.umoptions)), '{}') + from pg_user_mapping um + join pg_authid a on a.oid = um.umuser + join pg_foreign_server s on s.oid = um.umserver + where a.rolname in ('postgres', 'tealbase_admin') + ); + -- Objects can have initial privileges either by having those privileges set + -- when the system is initialized (by initdb) or when the object is created + -- during a CREATE EXTENSION and the extension script sets initial + -- privileges using the GRANT system. (https://www.postgresql.org/docs/current/catalog-pg-init-privs.html) + -- We only care about swapping init_privs for extensions. + init_privs jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('objoid', objoid, 'classoid', classoid, 'initprivs', initprivs::text)), '{}') + from pg_init_privs + where privtype = 'e' + ); + default_acls jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('oid', d.oid, 'role', a.rolname, 'schema', n.nspname, 'objtype', d.defaclobjtype, 'acl', defaclacl::text)), '{}') + from pg_default_acl d + join pg_authid a on a.oid = d.defaclrole + left join pg_namespace n on n.oid = d.defaclnamespace + ); + schemas jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('oid', n.oid, 'owner', a.rolname, 'acl', nspacl::text)), '{}') + from pg_namespace n + join pg_authid a on a.oid = n.nspowner + where true + and n.nspname != 'information_schema' + and not starts_with(n.nspname, 'pg_') + ); + types jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('oid', t.oid, 'owner', a.rolname, 'acl', t.typacl::text)), '{}') + from pg_type t + join pg_namespace n on n.oid = t.typnamespace + join pg_authid a on a.oid = t.typowner + where 
true + and n.nspname != 'information_schema' + and not starts_with(n.nspname, 'pg_') + and ( + t.typrelid = 0 + or ( + select + c.relkind = 'c' + from + pg_class c + where + c.oid = t.typrelid + ) + ) + and not exists ( + select + from + pg_type el + where + el.oid = t.typelem + and el.typarray = t.oid + ) + ); + functions jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('oid', p.oid, 'owner', a.rolname, 'kind', p.prokind, 'acl', p.proacl::text)), '{}') + from pg_proc p + join pg_namespace n on n.oid = p.pronamespace + join pg_authid a on a.oid = p.proowner + where true + and n.nspname != 'information_schema' + and not starts_with(n.nspname, 'pg_') + ); + relations jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('oid', c.oid, 'owner', a.rolname, 'acl', c.relacl::text)), '{}') + from ( + -- Sequences must appear after tables, so we order by relkind + select * from pg_class order by relkind desc + ) c + join pg_namespace n on n.oid = c.relnamespace + join pg_authid a on a.oid = c.relowner + where true + and n.nspname != 'information_schema' + and not starts_with(n.nspname, 'pg_') + and c.relkind not in ('c', 'i', 'I') + ); + rec record; + obj jsonb; +begin + set local search_path = ''; + + if exists (select from pg_event_trigger where evtname = 'pgsodium_trg_mask_update') then + alter event trigger pgsodium_trg_mask_update disable; + end if; + + alter role postgres rename to tealbase_admin_; + alter role tealbase_admin rename to postgres; + alter role tealbase_admin_ rename to tealbase_admin; + + -- role grants + for rec in + select * from pg_auth_members + loop + execute(format('revoke %s from %s;', rec.roleid::regrole, rec.member::regrole)); + execute(format( + 'grant %s to %s %s granted by %s;', + case + when rec.roleid = 'postgres'::regrole then 'tealbase_admin' + when rec.roleid = 'tealbase_admin'::regrole then 'postgres' + else rec.roleid::regrole + end, + case + when rec.member = 'postgres'::regrole then 'tealbase_admin' + when 
rec.member = 'tealbase_admin'::regrole then 'postgres' + else rec.member::regrole + end, + case + when rec.admin_option then 'with admin option' + else '' + end, + case + when rec.grantor = 'postgres'::regrole then 'tealbase_admin' + when rec.grantor = 'tealbase_admin'::regrole then 'postgres' + else rec.grantor::regrole + end + )); + end loop; + + -- role passwords + execute(format('alter role postgres password %L;', postgres_rolpassword)); + execute(format('alter role tealbase_admin password %L;', tealbase_admin_rolpassword)); + + -- role settings + foreach obj in array role_settings + loop + execute(format('alter role %I %s reset all', + case when obj->>'role' = 'postgres' then 'tealbase_admin' else 'postgres' end, + case when obj->>'database' is null then '' else format('in database %I', obj->>'database') end + )); + end loop; + foreach obj in array role_settings + loop + for rec in + select split_part(value, '=', 1) as key, substr(value, strpos(value, '=') + 1) as value + from jsonb_array_elements_text(obj->'configs') + loop + execute(format('alter role %I %s set %I to %s', + obj->>'role', + case when obj->>'database' is null then '' else format('in database %I', obj->>'database') end, + rec.key, + -- https://github.com/postgres/postgres/blob/70d1c664f4376fd3499e3b0c6888cf39b65d722b/src/bin/pg_dump/dumputils.c#L861 + case + when rec.key in ('local_preload_libraries', 'search_path', 'session_preload_libraries', 'shared_preload_libraries', 'temp_tablespaces', 'unix_socket_directories') + then rec.value + else quote_literal(rec.value) + end + )); + end loop; + end loop; + + reassign owned by postgres to tealbase_admin; + + -- databases + for rec in + select * from pg_database where datname not in ('template0') + loop + execute(format('alter database %I owner to postgres;', rec.datname)); + end loop; + + -- event triggers + foreach obj in array event_triggers + loop + execute(format('alter event trigger %I owner to postgres;', obj->>'name')); + end loop; + + -- 
publications + for rec in + select * from pg_publication + loop + execute(format('alter publication %I owner to postgres;', rec.pubname)); + end loop; + + -- FDWs + for rec in + select * from pg_foreign_data_wrapper + loop + execute(format('alter foreign data wrapper %I owner to postgres;', rec.fdwname)); + end loop; + + -- foreign servers + for rec in + select * from pg_foreign_server + loop + execute(format('alter server %I owner to postgres;', rec.srvname)); + end loop; + + -- user mappings + foreach obj in array user_mappings + loop + execute(format('drop user mapping for %I server %I', case when obj->>'role' = 'postgres' then 'tealbase_admin' else 'postgres' end, obj->>'server')); + end loop; + foreach obj in array user_mappings + loop + execute(format('create user mapping for %I server %I', obj->>'role', obj->>'server')); + for rec in + select split_part(value, '=', 1) as key, substr(value, strpos(value, '=') + 1) as value + from jsonb_array_elements_text(obj->'options') + loop + execute(format('alter user mapping for %I server %I options (%I %L)', obj->>'role', obj->>'server', rec.key, rec.value)); + end loop; + end loop; + + -- init privs + foreach obj in array init_privs + loop + -- We need to modify system catalog directly here because there's no ALTER INIT PRIVILEGES. 
+ update pg_init_privs set initprivs = (obj->>'initprivs')::aclitem[] where objoid = (obj->>'objoid')::oid and classoid = (obj->>'classoid')::oid; + end loop; + + -- default acls + foreach obj in array default_acls + loop + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + loop + if obj->>'role' in ('postgres', 'tealbase_admin') or rec.grantee::regrole in ('postgres', 'tealbase_admin') then + execute(format('alter default privileges for role %I %s revoke %s on %s from %s' + , case when obj->>'role' = 'postgres' then 'tealbase_admin' + when obj->>'role' = 'tealbase_admin' then 'postgres' + else obj->>'role' + end + , case when obj->>'schema' is null then '' + else format('in schema %I', obj->>'schema') + end + , rec.privilege_type + , case when obj->>'objtype' = 'r' then 'tables' + when obj->>'objtype' = 'S' then 'sequences' + when obj->>'objtype' = 'f' then 'functions' + when obj->>'objtype' = 'T' then 'types' + when obj->>'objtype' = 'n' then 'schemas' + end + , case when rec.grantee = 'postgres'::regrole then 'tealbase_admin' + when rec.grantee = 'tealbase_admin'::regrole then 'postgres' + when rec.grantee = 0 then 'public' + else rec.grantee::regrole::text + end + )); + end if; + end loop; + end loop; + + foreach obj in array default_acls + loop + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + loop + if obj->>'role' in ('postgres', 'tealbase_admin') or rec.grantee::regrole in ('postgres', 'tealbase_admin') then + execute(format('alter default privileges for role %I %s grant %s on %s to %s %s' + , obj->>'role' + , case when obj->>'schema' is null then '' + else format('in schema %I', obj->>'schema') + end + , rec.privilege_type + , case when obj->>'objtype' = 'r' then 'tables' + when obj->>'objtype' = 'S' then 'sequences' + when obj->>'objtype' = 'f' then 'functions' + when obj->>'objtype' = 'T' then 'types' + when obj->>'objtype' = 'n' 
then 'schemas' + end + , case when rec.grantee = 0 then 'public' else rec.grantee::regrole::text end + , case when rec.is_grantable then 'with grant option' else '' end + )); + end if; + end loop; + end loop; + + -- schemas + foreach obj in array schemas + loop + if obj->>'owner' = 'postgres' then + execute(format('alter schema %s owner to postgres;', (obj->>'oid')::regnamespace)); + end if; + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'tealbase_admin') + loop + execute(format('revoke %s on schema %s from %I', rec.privilege_type, (obj->>'oid')::regnamespace, case when rec.grantee = 'postgres'::regrole then 'tealbase_admin' else 'postgres' end)); + end loop; + end loop; + foreach obj in array schemas + loop + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'tealbase_admin') + loop + execute(format('grant %s on schema %s to %s %s', rec.privilege_type, (obj->>'oid')::regnamespace, rec.grantee::regrole, case when rec.is_grantable then 'with grant option' else '' end)); + end loop; + end loop; + + -- types + foreach obj in array types + loop + if obj->>'owner' = 'postgres' then + execute(format('alter type %s owner to postgres;', (obj->>'oid')::regtype)); + end if; + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'tealbase_admin') + loop + execute(format('revoke %s on type %s from %I', rec.privilege_type, (obj->>'oid')::regtype, case when rec.grantee = 'postgres'::regrole then 'tealbase_admin' else 'postgres' end)); + end loop; + end loop; + foreach obj in array types + loop + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'tealbase_admin') + loop + 
execute(format('grant %s on type %s to %s %s', rec.privilege_type, (obj->>'oid')::regtype, rec.grantee::regrole, case when rec.is_grantable then 'with grant option' else '' end)); + end loop; + end loop; + + -- functions + foreach obj in array functions + loop + if obj->>'owner' = 'postgres' then + execute(format('alter %s %s(%s) owner to postgres;' + , case when obj->>'kind' = 'p' then 'procedure' else 'function' end + , (obj->>'oid')::regproc + , pg_get_function_identity_arguments((obj->>'oid')::regproc))); + end if; + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'tealbase_admin') + loop + execute(format('revoke %s on %s %s(%s) from %I' + , rec.privilege_type + , case + when obj->>'kind' = 'p' then 'procedure' + else 'function' + end + , (obj->>'oid')::regproc + , pg_get_function_identity_arguments((obj->>'oid')::regproc) + , case when rec.grantee = 'postgres'::regrole then 'tealbase_admin' else 'postgres' end + )); + end loop; + end loop; + foreach obj in array functions + loop + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'tealbase_admin') + loop + execute(format('grant %s on %s %s(%s) to %s %s' + , rec.privilege_type + , case + when obj->>'kind' = 'p' then 'procedure' + else 'function' + end + , (obj->>'oid')::regproc + , pg_get_function_identity_arguments((obj->>'oid')::regproc) + , rec.grantee::regrole + , case when rec.is_grantable then 'with grant option' else '' end + )); + end loop; + end loop; + + -- relations + foreach obj in array relations + loop + -- obj->>'oid' (text) needs to be casted to oid first for some reason + + if obj->>'owner' = 'postgres' then + execute(format('alter table %s owner to postgres;', (obj->>'oid')::oid::regclass)); + end if; + for rec in + select grantor, grantee, privilege_type, is_grantable + from 
aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'tealbase_admin') + loop + execute(format('revoke %s on table %s from %I', rec.privilege_type, (obj->>'oid')::oid::regclass, case when rec.grantee = 'postgres'::regrole then 'tealbase_admin' else 'postgres' end)); + end loop; + end loop; + foreach obj in array relations + loop + -- obj->>'oid' (text) needs to be casted to oid first for some reason + + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'tealbase_admin') + loop + execute(format('grant %s on table %s to %s %s', rec.privilege_type, (obj->>'oid')::oid::regclass, rec.grantee::regrole, case when rec.is_grantable then 'with grant option' else '' end)); + end loop; + end loop; + + if exists (select from pg_event_trigger where evtname = 'pgsodium_trg_mask_update') then + alter event trigger pgsodium_trg_mask_update enable; + end if; +end +$$; + +do $$ +begin + if exists (select from pg_extension where extname = 'timescaledb') then + execute(format('select %s.timescaledb_post_restore()', (select pronamespace::regnamespace from pg_proc where proname = 'timescaledb_post_restore'))); + end if; +end +$$; + +alter database postgres connection limit -1; + +set session authorization tealbase_admin; +drop role tealbase_tmp; +commit; +EOSQL +} diff --git a/ansible/files/admin_api_scripts/pg_upgrade_scripts/complete.sh b/ansible/files/admin_api_scripts/pg_upgrade_scripts/complete.sh new file mode 100755 index 0000000..55bb707 --- /dev/null +++ b/ansible/files/admin_api_scripts/pg_upgrade_scripts/complete.sh @@ -0,0 +1,204 @@ +#! /usr/bin/env bash + +## This script is run on the newly launched instance which is to be promoted to +## become the primary database instance once the upgrade successfully completes. 
+## The following commands copy custom PG configs and enable previously disabled +## extensions, containing regtypes referencing system OIDs. + +set -eEuo pipefail + +SCRIPT_DIR=$(dirname -- "$0";) +# shellcheck disable=SC1091 +source "$SCRIPT_DIR/common.sh" + +IS_CI=${IS_CI:-} +LOG_FILE="/var/log/pg-upgrade-complete.log" + +function cleanup { + UPGRADE_STATUS=${1:-"failed"} + EXIT_CODE=${?:-0} + + echo "$UPGRADE_STATUS" > /tmp/pg-upgrade-status + + ship_logs "$LOG_FILE" || true + + exit "$EXIT_CODE" +} + +function execute_extension_upgrade_patches { + if [ -f "/var/lib/postgresql/extension/wrappers--0.3.1--0.4.1.sql" ] && [ ! -f "/usr/share/postgresql/15/extension/wrappers--0.3.0--0.4.1.sql" ]; then + cp /var/lib/postgresql/extension/wrappers--0.3.1--0.4.1.sql /var/lib/postgresql/extension/wrappers--0.3.0--0.4.1.sql + ln -s /var/lib/postgresql/extension/wrappers--0.3.0--0.4.1.sql /usr/share/postgresql/15/extension/wrappers--0.3.0--0.4.1.sql + fi +} + +function execute_patches { + # Patch pg_net grants + PG_NET_ENABLED=$(run_sql -A -t -c "select count(*) > 0 from pg_extension where extname = 'pg_net';") + + if [ "$PG_NET_ENABLED" = "t" ]; then + PG_NET_GRANT_QUERY=$(cat < 0 from pg_extension where extname = 'pg_cron' and extowner::regrole::text = 'postgres';") + + if [ "$HAS_PG_CRON_OWNED_BY_POSTGRES" = "t" ]; then + RECREATE_PG_CRON_QUERY=$(cat < /tmp/pg-upgrade-status + + echo "1. Mounting data disk" + if [ -z "$IS_CI" ]; then + retry 8 mount -a -v + else + echo "Skipping mount -a -v" + fi + + # copying custom configurations + echo "2. Copying custom configurations" + retry 3 copy_configs + + echo "3. Starting postgresql" + if [ -z "$IS_CI" ]; then + retry 3 service postgresql start + else + CI_start_postgres --new-bin + fi + + execute_extension_upgrade_patches || true + + echo "4. Running generated SQL files" + retry 3 run_generated_sql + + echo "4.1. 
Applying patches" + execute_patches || true + + run_sql -c "ALTER USER postgres WITH NOSUPERUSER;" + + echo "4.2. Applying authentication scheme updates" + retry 3 apply_auth_scheme_updates + + sleep 5 + + echo "5. Restarting postgresql" + if [ -z "$IS_CI" ]; then + retry 3 service postgresql restart + + echo "5.1. Restarting gotrue and postgrest" + retry 3 service gotrue restart + retry 3 service postgrest restart + else + retry 3 CI_stop_postgres || true + retry 3 CI_start_postgres + fi + + echo "6. Starting vacuum analyze" + retry 3 start_vacuum_analyze +} + +function copy_configs { + cp -R /data/conf/* /etc/postgresql-custom/ + chown -R postgres:postgres /var/lib/postgresql/data + chown -R postgres:postgres /data/pgdata + chmod -R 0750 /data/pgdata +} + +function run_generated_sql { + if [ -d /data/sql ]; then + for FILE in /data/sql/*.sql; do + if [ -f "$FILE" ]; then + run_sql -f "$FILE" || true + fi + done + fi +} + +# Projects which had their passwords hashed using md5 need to have their passwords reset +# Passwords for managed roles are already present in /etc/postgresql.schema.sql +function apply_auth_scheme_updates { + PASSWORD_ENCRYPTION_SETTING=$(run_sql -A -t -c "SHOW password_encryption;") + if [ "$PASSWORD_ENCRYPTION_SETTING" = "md5" ]; then + run_sql -c "ALTER SYSTEM SET password_encryption TO 'scram-sha-256';" + run_sql -c "SELECT pg_reload_conf();" + + if [ -z "$IS_CI" ]; then + run_sql -f /etc/postgresql.schema.sql + fi + fi +} + +function start_vacuum_analyze { + echo "complete" > /tmp/pg-upgrade-status + + # shellcheck disable=SC1091 + if [ -f "/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" ]; then + # shellcheck disable=SC1091 + source "/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" + fi + vacuumdb --all --analyze-in-stages -U tealbase_admin -h localhost -p 5432 + echo "Upgrade job completed" +} + +trap cleanup ERR + +echo "C.UTF-8 UTF-8" > /etc/locale.gen +echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen +locale-gen + +if 
[ -z "$IS_CI" ]; then + complete_pg_upgrade >> $LOG_FILE 2>&1 & +else + CI_stop_postgres || true + + rm -f /tmp/pg-upgrade-status + mv /data_migration /data + + rm -rf /var/lib/postgresql/data + ln -s /data/pgdata /var/lib/postgresql/data + + complete_pg_upgrade +fi diff --git a/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh b/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh new file mode 100755 index 0000000..46003d6 --- /dev/null +++ b/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh @@ -0,0 +1,470 @@ +#! /usr/bin/env bash + +## This script is run on the old (source) instance, mounting the data disk +## of the newly launched instance, disabling extensions containing regtypes, +## and running pg_upgrade. +## It reports the current status of the upgrade process to /tmp/pg-upgrade-status, +## which can then be subsequently checked through check.sh. + +# Extensions to disable before running pg_upgrade. +# Running an upgrade with these extensions enabled will result in errors due to +# them depending on regtypes referencing system OIDs or outdated library files. 
+EXTENSIONS_TO_DISABLE=( + "pg_graphql" + "pg_stat_monitor" +) + +PG14_EXTENSIONS_TO_DISABLE=( + "wrappers" + "pgrouting" +) + +PG13_EXTENSIONS_TO_DISABLE=( + "pgrouting" +) + +set -eEuo pipefail + +SCRIPT_DIR=$(dirname -- "$0";) +# shellcheck disable=SC1091 +source "$SCRIPT_DIR/common.sh" + +IS_CI=${IS_CI:-} +IS_LOCAL_UPGRADE=${IS_LOCAL_UPGRADE:-} +IS_NIX_UPGRADE=${IS_NIX_UPGRADE:-} +IS_NIX_BASED_SYSTEM="false" + +PGVERSION=$1 +MOUNT_POINT="/data_migration" +LOG_FILE="/var/log/pg-upgrade-initiate.log" + +POST_UPGRADE_EXTENSION_SCRIPT="/tmp/pg_upgrade/pg_upgrade_extensions.sql" +POST_UPGRADE_POSTGRES_PERMS_SCRIPT="/tmp/pg_upgrade/pg_upgrade_postgres_perms.sql" +OLD_PGVERSION=$(run_sql -A -t -c "SHOW server_version;") + +SERVER_LC_COLLATE=$(run_sql -A -t -c "SHOW lc_collate;") +SERVER_LC_CTYPE=$(run_sql -A -t -c "SHOW lc_ctype;") +SERVER_ENCODING=$(run_sql -A -t -c "SHOW server_encoding;") + +POSTGRES_CONFIG_PATH="/etc/postgresql/postgresql.conf" +PGBINOLD="/usr/lib/postgresql/bin" + +PG_UPGRADE_BIN_DIR="/tmp/pg_upgrade_bin/$PGVERSION" +NIX_INSTALLER_PATH="/tmp/persistent/nix-installer" +NIX_INSTALLER_PACKAGE_PATH="$NIX_INSTALLER_PATH.tar.gz" + +if [ -L "$PGBINOLD/pg_upgrade" ]; then + BINARY_PATH=$(readlink -f "$PGBINOLD/pg_upgrade") + if [[ "$BINARY_PATH" == *"nix"* ]]; then + IS_NIX_BASED_SYSTEM="true" + fi +fi + +# If upgrading from older major PG versions, disable specific extensions +if [[ "$OLD_PGVERSION" =~ ^14.* ]]; then + EXTENSIONS_TO_DISABLE+=("${PG14_EXTENSIONS_TO_DISABLE[@]}") +elif [[ "$OLD_PGVERSION" =~ ^13.* ]]; then + EXTENSIONS_TO_DISABLE+=("${PG13_EXTENSIONS_TO_DISABLE[@]}") +elif [[ "$OLD_PGVERSION" =~ ^12.* ]]; then + POSTGRES_CONFIG_PATH="/etc/postgresql/12/main/postgresql.conf" + PGBINOLD="/usr/lib/postgresql/12/bin" +fi + +if [ -n "$IS_CI" ]; then + PGBINOLD="$(pg_config --bindir)" + echo "Running in CI mode; using pg_config bindir: $PGBINOLD" + echo "PGVERSION: $PGVERSION" +fi + +OLD_BOOTSTRAP_USER=$(run_sql -A -t -c "select rolname from 
pg_authid where oid = 10;") + +cleanup() { + UPGRADE_STATUS=${1:-"failed"} + EXIT_CODE=${?:-0} + + if [ "$UPGRADE_STATUS" = "failed" ]; then + EXIT_CODE=1 + fi + + if [ "$UPGRADE_STATUS" = "failed" ]; then + echo "Upgrade job failed. Cleaning up and exiting." + fi + + if [ -d "${MOUNT_POINT}/pgdata/pg_upgrade_output.d/" ]; then + echo "Copying pg_upgrade output to /var/log" + cp -R "${MOUNT_POINT}/pgdata/pg_upgrade_output.d/" /var/log/ || true + chown -R postgres:postgres /var/log/pg_upgrade_output.d/ + chmod -R 0750 /var/log/pg_upgrade_output.d/ + ship_logs "$LOG_FILE" || true + tail -n +1 /var/log/pg_upgrade_output.d/*/* > /var/log/pg_upgrade_output.d/pg_upgrade.log || true + ship_logs "/var/log/pg_upgrade_output.d/pg_upgrade.log" || true + fi + + if [ -L "/usr/share/postgresql/${PGVERSION}" ]; then + rm "/usr/share/postgresql/${PGVERSION}" + + if [ -f "/usr/share/postgresql/${PGVERSION}.bak" ]; then + mv "/usr/share/postgresql/${PGVERSION}.bak" "/usr/share/postgresql/${PGVERSION}" + fi + + if [ -d "/usr/share/postgresql/${PGVERSION}.bak" ]; then + mv "/usr/share/postgresql/${PGVERSION}.bak" "/usr/share/postgresql/${PGVERSION}" + fi + fi + + echo "Restarting postgresql" + if [ -z "$IS_CI" ]; then + systemctl enable postgresql + retry 5 systemctl restart postgresql + else + CI_start_postgres + fi + + retry 8 pg_isready -h localhost -U tealbase_admin + + echo "Re-enabling extensions" + if [ -f $POST_UPGRADE_EXTENSION_SCRIPT ]; then + retry 5 run_sql -f $POST_UPGRADE_EXTENSION_SCRIPT + fi + + echo "Removing SUPERUSER grant from postgres" + retry 5 run_sql -c "ALTER USER postgres WITH NOSUPERUSER;" + + echo "Resetting postgres database connection limit" + retry 5 run_sql -c "ALTER DATABASE postgres CONNECTION LIMIT -1;" + + echo "Making sure postgres still has access to pg_shadow" + cat << EOF >> $POST_UPGRADE_POSTGRES_PERMS_SCRIPT +DO \$\$ +begin + if exists (select from pg_authid where rolname = 'pg_read_all_data') then + execute('grant pg_read_all_data to 
postgres'); + end if; +end +\$\$; +grant pg_signal_backend to postgres; +EOF + + if [ -f $POST_UPGRADE_POSTGRES_PERMS_SCRIPT ]; then + retry 5 run_sql -f $POST_UPGRADE_POSTGRES_PERMS_SCRIPT + fi + + if [ -z "$IS_CI" ] && [ -z "$IS_LOCAL_UPGRADE" ]; then + echo "Unmounting data disk from ${MOUNT_POINT}" + retry 3 umount $MOUNT_POINT + fi + echo "$UPGRADE_STATUS" > /tmp/pg-upgrade-status + + if [ -z "$IS_CI" ]; then + exit "$EXIT_CODE" + else + echo "CI run complete with code ${EXIT_CODE}. Exiting." + exit "$EXIT_CODE" + fi +} + +function handle_extensions { + if [ -z "$IS_CI" ]; then + retry 5 systemctl restart postgresql + else + CI_start_postgres + fi + + retry 8 pg_isready -h localhost -U tealbase_admin + + rm -f $POST_UPGRADE_EXTENSION_SCRIPT + touch $POST_UPGRADE_EXTENSION_SCRIPT + + PASSWORD_ENCRYPTION_SETTING=$(run_sql -A -t -c "SHOW password_encryption;") + if [ "$PASSWORD_ENCRYPTION_SETTING" = "md5" ]; then + echo "ALTER SYSTEM SET password_encryption = 'md5';" >> $POST_UPGRADE_EXTENSION_SCRIPT + fi + + cat << EOF >> $POST_UPGRADE_EXTENSION_SCRIPT +ALTER SYSTEM SET jit = off; +SELECT pg_reload_conf(); +EOF + + # Disable extensions if they're enabled + # Generate SQL script to re-enable them after upgrade + for EXTENSION in "${EXTENSIONS_TO_DISABLE[@]}"; do + EXTENSION_ENABLED=$(run_sql -A -t -c "SELECT EXISTS(SELECT 1 FROM pg_extension WHERE extname = '${EXTENSION}');") + if [ "$EXTENSION_ENABLED" = "t" ]; then + echo "Disabling extension ${EXTENSION}" + run_sql -c "DROP EXTENSION IF EXISTS ${EXTENSION} CASCADE;" + cat << EOF >> $POST_UPGRADE_EXTENSION_SCRIPT +DO \$\$ +BEGIN + IF EXISTS (SELECT 1 FROM pg_available_extensions WHERE name = '${EXTENSION}') THEN + CREATE EXTENSION IF NOT EXISTS ${EXTENSION} CASCADE; + END IF; +END; +\$\$; +EOF + fi + done +} + +function initiate_upgrade { + mkdir -p "$MOUNT_POINT" + SHARED_PRELOAD_LIBRARIES=$(cat "$POSTGRES_CONFIG_PATH" | grep shared_preload_libraries | sed "s/shared_preload_libraries =\s\{0,1\}'\(.*\)'.*/\1/") 
+ + # Wrappers officially launched in PG15; PG14 version is incompatible + if [[ "$OLD_PGVERSION" =~ 14* ]]; then + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/wrappers//" | xargs) + fi + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/pg_cron//" | xargs) + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/pg_net//" | xargs) + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/check_role_membership//" | xargs) + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/safeupdate//" | xargs) + + # Exclude empty-string entries, as well as leading/trailing commas and spaces resulting from the above lib exclusions + # i.e. " , pg_stat_statements, , pgsodium, " -> "pg_stat_statements, pgsodium" + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | tr ',' ' ' | tr -s ' ' | tr ' ' ', ') + + # Account for trailing comma + # eg. "...,auto_explain,pg_tle,plan_filter," -> "...,auto_explain,pg_tle,plan_filter" + if [[ "${SHARED_PRELOAD_LIBRARIES: -1}" = "," ]]; then + # clean up trailing comma + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/.$//" | xargs) + fi + + PGDATAOLD=$(cat "$POSTGRES_CONFIG_PATH" | grep data_directory | sed "s/data_directory = '\(.*\)'.*/\1/") + + PGDATANEW="$MOUNT_POINT/pgdata" + + # running upgrade using at least 1 cpu core + WORKERS=$(nproc | awk '{ print ($1 == 1 ? 1 : $1 - 1) }') + + # To make nix-based upgrades work for testing, create a pg binaries tarball with the following contents: + # - nix_flake_version - a7189a68ed4ea78c1e73991b5f271043636cf074 + # Where the value is the commit hash of the nix flake that contains the binaries + + if [ -n "$IS_LOCAL_UPGRADE" ]; then + mkdir -p "$PG_UPGRADE_BIN_DIR" + mkdir -p /tmp/persistent/ + echo "a7189a68ed4ea78c1e73991b5f271043636cf074" > "$PG_UPGRADE_BIN_DIR/nix_flake_version" + tar -czf "/tmp/persistent/pg_upgrade_bin.tar.gz" -C "/tmp/pg_upgrade_bin" . 
+ rm -rf /tmp/pg_upgrade_bin/ + fi + + echo "1. Extracting pg_upgrade binaries" + mkdir -p "/tmp/pg_upgrade_bin" + tar zxf "/tmp/persistent/pg_upgrade_bin.tar.gz" -C "/tmp/pg_upgrade_bin" + + PGSHARENEW="$PG_UPGRADE_BIN_DIR/share" + + if [ -f "$PG_UPGRADE_BIN_DIR/nix_flake_version" ]; then + IS_NIX_UPGRADE="true" + NIX_FLAKE_VERSION=$(cat "$PG_UPGRADE_BIN_DIR/nix_flake_version") + + if [ "$IS_NIX_BASED_SYSTEM" = "false" ]; then + if [ ! -f /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh ]; then + if ! command -v nix > /dev/null; then + echo "1.1. Nix is not installed; installing." + + if [ -f "$NIX_INSTALLER_PACKAGE_PATH" ]; then + echo "1.1.1. Installing Nix using the provided installer" + tar -xzf "$NIX_INSTALLER_PACKAGE_PATH" -C /tmp/persistent/ + chmod +x "$NIX_INSTALLER_PATH" + "$NIX_INSTALLER_PATH" install --no-confirm \ + --extra-conf "substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com" \ + --extra-conf "trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" + else + echo "1.1.1. Installing Nix using the official installer" + + curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install --no-confirm \ + --extra-conf "substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com" \ + --extra-conf "trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" + fi + else + echo "1.1. Nix is installed; moving on." + fi + fi + fi + + echo "1.2. 
Installing flake revision: $NIX_FLAKE_VERSION" + # shellcheck disable=SC1091 + source /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh + nix-collect-garbage -d > /tmp/pg_upgrade-nix-gc.log 2>&1 || true + PG_UPGRADE_BIN_DIR=$(nix build "github:tealbase/postgres/${NIX_FLAKE_VERSION}#psql_15/bin" --no-link --print-out-paths --extra-experimental-features nix-command --extra-experimental-features flakes) + PGSHARENEW="$PG_UPGRADE_BIN_DIR/share/postgresql" + fi + + PGBINNEW="$PG_UPGRADE_BIN_DIR/bin" + PGLIBNEW="$PG_UPGRADE_BIN_DIR/lib" + + # copy upgrade-specific pgsodium_getkey script into the share dir + chmod +x "$SCRIPT_DIR/pgsodium_getkey.sh" + mkdir -p "$PGSHARENEW/extension" + cp "$SCRIPT_DIR/pgsodium_getkey.sh" "$PGSHARENEW/extension/pgsodium_getkey" + if [ -d "/var/lib/postgresql/extension/" ]; then + cp "$SCRIPT_DIR/pgsodium_getkey.sh" "/var/lib/postgresql/extension/pgsodium_getkey" + chown postgres:postgres "/var/lib/postgresql/extension/pgsodium_getkey" + fi + + chown -R postgres:postgres "/tmp/pg_upgrade_bin/$PGVERSION" + + # upgrade job outputs a log in the cwd; needs write permissions + mkdir -p /tmp/pg_upgrade/ + chown -R postgres:postgres /tmp/pg_upgrade/ + cd /tmp/pg_upgrade/ + + # Fixing erros generated by previous dpkg executions (package upgrades et co) + echo "2. Fixing potential errors generated by dpkg" + DEBIAN_FRONTEND=noninteractive dpkg --configure -a --force-confold || true # handle errors generated by dpkg + + # Needed for PostGIS, since it's compiled with Protobuf-C support now + echo "3. Installing libprotobuf-c1 and libicu66 if missing" + if [[ ! "$(apt list --installed libprotobuf-c1 | grep "installed")" ]]; then + apt-get update -y + apt --fix-broken install -y libprotobuf-c1 libicu66 || true + fi + + echo "4. Setup locale if required" + if ! grep -q "^en_US.UTF-8" /etc/locale.gen ; then + echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen + fi + if ! 
grep -q "^C.UTF-8" /etc/locale.gen ; then + echo "C.UTF-8 UTF-8" >> /etc/locale.gen + fi + locale-gen + + if [ -z "$IS_CI" ] && [ -z "$IS_LOCAL_UPGRADE" ]; then + # awk NF==3 prints lines with exactly 3 fields, which are the block devices currently not mounted anywhere + # excluding nvme0 since it is the root disk + echo "5. Determining block device to mount" + BLOCK_DEVICE=$(lsblk -dprno name,size,mountpoint,type | grep "disk" | grep -v "nvme0" | awk 'NF==3 { print $1; }') + echo "Block device found: $BLOCK_DEVICE" + + mkdir -p "$MOUNT_POINT" + echo "6. Mounting block device" + + sleep 5 + e2fsck -pf "$BLOCK_DEVICE" + + sleep 1 + mount "$BLOCK_DEVICE" "$MOUNT_POINT" + + sleep 1 + resize2fs "$BLOCK_DEVICE" + else + mkdir -p "$MOUNT_POINT" + fi + + if [ -f "$MOUNT_POINT/pgsodium_root.key" ]; then + cp "$MOUNT_POINT/pgsodium_root.key" /etc/postgresql-custom/pgsodium_root.key + chown postgres:postgres /etc/postgresql-custom/pgsodium_root.key + chmod 600 /etc/postgresql-custom/pgsodium_root.key + fi + + echo "7. Disabling extensions and generating post-upgrade script" + handle_extensions + + echo "8.1. Granting SUPERUSER to postgres user" + run_sql -c "ALTER USER postgres WITH SUPERUSER;" + + if [ "$OLD_BOOTSTRAP_USER" = "postgres" ]; then + echo "8.2. Swap postgres & tealbase_admin roles as we're upgrading a project with postgres as bootstrap user" + swap_postgres_and_tealbase_admin + fi + + if [ -z "$IS_NIX_UPGRADE" ]; then + if [ -d "/usr/share/postgresql/${PGVERSION}" ]; then + mv "/usr/share/postgresql/${PGVERSION}" "/usr/share/postgresql/${PGVERSION}.bak" + fi + + ln -s "$PGSHARENEW" "/usr/share/postgresql/${PGVERSION}" + cp --remove-destination "$PGLIBNEW"/*.control "$PGSHARENEW/extension/" + cp --remove-destination "$PGLIBNEW"/*.sql "$PGSHARENEW/extension/" + + export LD_LIBRARY_PATH="${PGLIBNEW}" + fi + + echo "9. 
Creating new data directory, initializing database" + chown -R postgres:postgres "$MOUNT_POINT/" + rm -rf "${PGDATANEW:?}/" + + if [ "$IS_NIX_UPGRADE" = "true" ]; then + LC_ALL=en_US.UTF-8 LC_CTYPE=$SERVER_LC_CTYPE LC_COLLATE=$SERVER_LC_COLLATE LANGUAGE=en_US.UTF-8 LANG=en_US.UTF-8 LOCALE_ARCHIVE=/usr/lib/locale/locale-archive su -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && $PGBINNEW/initdb --encoding=$SERVER_ENCODING --lc-collate=$SERVER_LC_COLLATE --lc-ctype=$SERVER_LC_CTYPE -L $PGSHARENEW -D $PGDATANEW/ --username=tealbase_admin" -s "$SHELL" postgres + else + su -c "$PGBINNEW/initdb -L $PGSHARENEW -D $PGDATANEW/ --username=tealbase_admin" -s "$SHELL" postgres + fi + + # This line avoids the need to supply the tealbase_admin password on the old + # instance, since pg_upgrade connects to the db as tealbase_admin using unix + # sockets, which is gated behind scram-sha-256 per pg_hba.conf.j2. The new + # instance is unaffected. + if ! grep -q "local all tealbase_admin trust" /etc/postgresql/pg_hba.conf; then + echo "local all tealbase_admin trust +$(cat /etc/postgresql/pg_hba.conf)" > /etc/postgresql/pg_hba.conf + run_sql -c "select pg_reload_conf();" + fi + + UPGRADE_COMMAND=$(cat < /tmp/pg-upgrade-status +if [ -z "$IS_CI" ] && [ -z "$IS_LOCAL_UPGRADE" ]; then + initiate_upgrade >> "$LOG_FILE" 2>&1 & + echo "Upgrade initiate job completed" +else + rm -f /tmp/pg-upgrade-status + initiate_upgrade +fi diff --git a/ansible/files/admin_api_scripts/pg_upgrade_pgsodium_getkey.sh b/ansible/files/admin_api_scripts/pg_upgrade_scripts/pgsodium_getkey.sh old mode 100644 new mode 100755 similarity index 100% rename from ansible/files/admin_api_scripts/pg_upgrade_pgsodium_getkey.sh rename to ansible/files/admin_api_scripts/pg_upgrade_scripts/pgsodium_getkey.sh diff --git a/ansible/files/admin_api_scripts/pg_upgrade_prepare.sh b/ansible/files/admin_api_scripts/pg_upgrade_scripts/prepare.sh old mode 100644 new mode 100755 similarity index 100% rename from 
ansible/files/admin_api_scripts/pg_upgrade_prepare.sh rename to ansible/files/admin_api_scripts/pg_upgrade_scripts/prepare.sh diff --git a/ansible/files/adminapi.service.j2 b/ansible/files/adminapi.service.j2 index 7db04ea..c76b2a3 100644 --- a/ansible/files/adminapi.service.j2 +++ b/ansible/files/adminapi.service.j2 @@ -7,6 +7,7 @@ ExecStart=/opt/tealbase-admin-api User=adminapi Restart=always RestartSec=3 +Environment="AWS_USE_DUALSTACK_ENDPOINT=true" [Install] WantedBy=multi-user.target diff --git a/ansible/files/adminapi.sudoers.conf b/ansible/files/adminapi.sudoers.conf index 97c4571..7a08985 100644 --- a/ansible/files/adminapi.sudoers.conf +++ b/ansible/files/adminapi.sudoers.conf @@ -1,23 +1,29 @@ -Cmnd_Alias KONG = /bin/systemctl start kong.service, /bin/systemctl stop kong.service, /bin/systemctl restart kong.service, /bin/systemctl disable kong.service, /bin/systemctl enable kong.service, /bin/systemctl reload kong.service -Cmnd_Alias POSTGREST = /bin/systemctl start postgrest.service, /bin/systemctl stop postgrest.service, /bin/systemctl restart postgrest.service, /bin/systemctl disable postgrest.service, /bin/systemctl enable postgrest.service -Cmnd_Alias GOTRUE = /bin/systemctl start gotrue.service, /bin/systemctl stop gotrue.service, /bin/systemctl restart gotrue.service, /bin/systemctl disable gotrue.service, /bin/systemctl enable gotrue.service -Cmnd_Alias PGBOUNCER = /bin/systemctl start pgbouncer.service, /bin/systemctl stop pgbouncer.service, /bin/systemctl restart pgbouncer.service, /bin/systemctl disable pgbouncer.service, /bin/systemctl enable pgbouncer.service, /bin/systemctl reload pgbouncer.service +Cmnd_Alias ENVOY = /bin/systemctl start envoy.service, /bin/systemctl stop envoy.service, /bin/systemctl restart envoy.service, /bin/systemctl disable envoy.service, /bin/systemctl enable envoy.service, /bin/systemctl reload envoy.service, /bin/systemctl try-restart envoy.service +Cmnd_Alias KONG = /bin/systemctl start kong.service, 
/bin/systemctl stop kong.service, /bin/systemctl restart kong.service, /bin/systemctl disable kong.service, /bin/systemctl enable kong.service, /bin/systemctl reload kong.service, /bin/systemctl try-restart kong.service +Cmnd_Alias POSTGREST = /bin/systemctl start postgrest.service, /bin/systemctl stop postgrest.service, /bin/systemctl restart postgrest.service, /bin/systemctl disable postgrest.service, /bin/systemctl enable postgrest.service, /bin/systemctl try-restart postgrest.service +Cmnd_Alias GOTRUE = /bin/systemctl start gotrue.service, /bin/systemctl stop gotrue.service, /bin/systemctl restart gotrue.service, /bin/systemctl disable gotrue.service, /bin/systemctl enable gotrue.service, /bin/systemctl try-restart gotrue.service +Cmnd_Alias PGBOUNCER = /bin/systemctl start pgbouncer.service, /bin/systemctl stop pgbouncer.service, /bin/systemctl restart pgbouncer.service, /bin/systemctl disable pgbouncer.service, /bin/systemctl enable pgbouncer.service, /bin/systemctl reload pgbouncer.service, /bin/systemctl try-restart pgbouncer.service %adminapi ALL= NOPASSWD: /root/grow_fs.sh %adminapi ALL= NOPASSWD: /root/manage_readonly_mode.sh -%adminapi ALL= NOPASSWD: /root/pg_upgrade_prepare.sh -%adminapi ALL= NOPASSWD: /root/pg_upgrade_initiate.sh -%adminapi ALL= NOPASSWD: /root/pg_upgrade_complete.sh -%adminapi ALL= NOPASSWD: /root/pg_upgrade_check.sh -%adminapi ALL= NOPASSWD: /root/pg_upgrade_pgsodium_getkey.sh +%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/prepare.sh +%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/initiate.sh +%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/complete.sh +%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/check.sh +%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/common.sh +%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/pgsodium_getkey.sh %adminapi ALL= NOPASSWD: /usr/bin/systemctl daemon-reload %adminapi ALL= NOPASSWD: /usr/bin/systemctl reload postgresql.service %adminapi 
ALL= NOPASSWD: /usr/bin/systemctl restart postgresql.service +%adminapi ALL= NOPASSWD: /usr/bin/systemctl show -p NRestarts postgresql.service %adminapi ALL= NOPASSWD: /usr/bin/systemctl restart adminapi.service +%adminapi ALL= NOPASSWD: /usr/bin/systemctl is-active commence-backup.service +%adminapi ALL= NOPASSWD: /usr/bin/systemctl start commence-backup.service %adminapi ALL= NOPASSWD: /bin/systemctl daemon-reload %adminapi ALL= NOPASSWD: /bin/systemctl restart services.slice %adminapi ALL= NOPASSWD: /usr/sbin/nft -f /etc/nftables/tealbase_managed.conf %adminapi ALL= NOPASSWD: /usr/bin/admin-mgr +%adminapi ALL= NOPASSWD: ENVOY %adminapi ALL= NOPASSWD: KONG %adminapi ALL= NOPASSWD: POSTGREST %adminapi ALL= NOPASSWD: GOTRUE diff --git a/ansible/files/commence-backup.service.j2 b/ansible/files/commence-backup.service.j2 new file mode 100644 index 0000000..9d4ad0c --- /dev/null +++ b/ansible/files/commence-backup.service.j2 @@ -0,0 +1,12 @@ +[Unit] +Description=Async commence physical backup + +[Service] +Type=simple +User=adminapi +ExecStart=/usr/bin/admin-mgr commence-backup --run-as-service true +Restart=no +OOMScoreAdjust=-1000 + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/default.sysstat b/ansible/files/default.sysstat new file mode 100644 index 0000000..1b029ba --- /dev/null +++ b/ansible/files/default.sysstat @@ -0,0 +1,9 @@ +# +# Default settings for /etc/init.d/sysstat, /etc/cron.d/sysstat +# and /etc/cron.daily/sysstat files +# + +# Should sadc collect system activity informations? Valid values +# are "true" and "false". Please do not put other values, they +# will be overwritten by debconf! 
+ENABLED="true" diff --git a/ansible/files/envoy.service b/ansible/files/envoy.service new file mode 100644 index 0000000..d739ffd --- /dev/null +++ b/ansible/files/envoy.service @@ -0,0 +1,31 @@ +[Unit] +Description=Envoy +After=postgrest.service gotrue.service adminapi.service +Wants=postgrest.service gotrue.service adminapi.service +Conflicts=kong.service + +[Service] +Type=simple + +ExecStartPre=sh -c 'if ss -lnt | grep -Eq ":(80|443) "; then echo "Port 80 or 443 already in use"; exit 1; fi' + +# Need to run via a restarter script to support hot restart when using a process +# manager, see: +# https://www.envoyproxy.io/docs/envoy/latest/operations/hot_restarter +ExecStart=/opt/envoy-hot-restarter.py /opt/start-envoy.sh + +ExecReload=/bin/kill -HUP $MAINPID +ExecStop=/bin/kill -TERM $MAINPID +User=envoy +Slice=services.slice +Restart=always +RestartSec=3 +LimitNOFILE=100000 + +# The envoy user is unprivileged and thus not permitted to bind on ports < 1024 +# Via systemd we grant the process a set of privileges to bind to 80/443 +# See http://archive.vn/36zJU +AmbientCapabilities=CAP_NET_BIND_SERVICE + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/envoy_config/cds.yaml b/ansible/files/envoy_config/cds.yaml new file mode 100644 index 0000000..48fd1b9 --- /dev/null +++ b/ansible/files/envoy_config/cds.yaml @@ -0,0 +1,86 @@ +resources: + - '@type': type.googleapis.com/envoy.config.cluster.v3.Cluster + name: admin_api + load_assignment: + cluster_name: admin_api + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8085 + circuit_breakers: + thresholds: + - priority: DEFAULT + max_connections: 10000 + max_pending_requests: 10000 + max_requests: 10000 + retry_budget: + budget_percent: + value: 100 + min_retry_concurrency: 100 + - '@type': type.googleapis.com/envoy.config.cluster.v3.Cluster + name: gotrue + load_assignment: + cluster_name: gotrue + endpoints: + - lb_endpoints: + - endpoint: + 
address: + socket_address: + address: 127.0.0.1 + port_value: 9999 + circuit_breakers: + thresholds: + - priority: DEFAULT + max_connections: 10000 + max_pending_requests: 10000 + max_requests: 10000 + retry_budget: + budget_percent: + value: 100 + min_retry_concurrency: 100 + - '@type': type.googleapis.com/envoy.config.cluster.v3.Cluster + name: postgrest + load_assignment: + cluster_name: postgrest + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 3000 + circuit_breakers: + thresholds: + - priority: DEFAULT + max_connections: 10000 + max_pending_requests: 10000 + max_requests: 10000 + retry_budget: + budget_percent: + value: 100 + min_retry_concurrency: 100 + - '@type': type.googleapis.com/envoy.config.cluster.v3.Cluster + name: postgrest_admin + load_assignment: + cluster_name: postgrest_admin + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 3001 + circuit_breakers: + thresholds: + - priority: DEFAULT + max_connections: 10000 + max_pending_requests: 10000 + max_requests: 10000 + retry_budget: + budget_percent: + value: 100 + min_retry_concurrency: 100 + diff --git a/ansible/files/envoy_config/envoy.yaml b/ansible/files/envoy_config/envoy.yaml new file mode 100644 index 0000000..3d25c13 --- /dev/null +++ b/ansible/files/envoy_config/envoy.yaml @@ -0,0 +1,23 @@ +dynamic_resources: + cds_config: + path_config_source: + path: /etc/envoy/cds.yaml + resource_api_version: V3 + lds_config: + path_config_source: + path: /etc/envoy/lds.yaml + resource_api_version: V3 +node: + cluster: cluster_0 + id: node_0 +overload_manager: + resource_monitors: + - name: envoy.resource_monitors.global_downstream_max_connections + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.resource_monitors.downstream_connections.v3.DownstreamConnectionsConfig + max_active_downstream_connections: 30000 +stats_config: + stats_matcher: + reject_all: true + diff 
--git a/ansible/files/envoy_config/lds.yaml b/ansible/files/envoy_config/lds.yaml new file mode 100644 index 0000000..05798f6 --- /dev/null +++ b/ansible/files/envoy_config/lds.yaml @@ -0,0 +1,436 @@ +resources: + - '@type': type.googleapis.com/envoy.config.listener.v3.Listener + name: http_listener + address: + socket_address: + address: '::' + port_value: 80 + ipv4_compat: true + filter_chains: + - filters: &ref_1 + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + - name: envoy.access_loggers.stdout + filter: + status_code_filter: + comparison: + op: GE + value: + default_value: 400 + runtime_key: unused + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.access_loggers.stream.v3.StdoutAccessLog + generate_request_id: false + http_filters: + - name: envoy.filters.http.cors + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.cors.v3.Cors + - name: envoy.filters.http.rbac + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC + rules: + action: DENY + policies: + api_key_missing: + permissions: + - any: true + principals: + - not_id: + or_ids: + ids: + - header: + name: apikey + present_match: true + - header: + name: ':path' + string_match: + contains: apikey= + api_key_not_valid: + permissions: + - any: true + principals: + - not_id: + or_ids: + ids: + - header: + name: apikey + string_match: + exact: anon_key + - header: + name: apikey + string_match: + exact: service_key + - header: + name: apikey + string_match: + exact: tealbase_admin_key + - header: + name: ':path' + string_match: + contains: apikey=anon_key + - header: + name: ':path' + string_match: + contains: apikey=service_key + - header: + name: ':path' + string_match: + contains: apikey=tealbase_admin_key + - name: envoy.filters.http.lua + typed_config: + '@type': 
>- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + source_codes: + remove_apikey_and_empty_key_query_parameters: + inline_string: |- + function envoy_on_request(request_handle) + local path = request_handle:headers():get(":path") + request_handle + :headers() + :replace(":path", path:gsub("&=[^&]*", ""):gsub("?=[^&]*$", ""):gsub("?=[^&]*&", "?"):gsub("&apikey=[^&]*", ""):gsub("?apikey=[^&]*$", ""):gsub("?apikey=[^&]*&", "?")) + end + remove_empty_key_query_parameters: + inline_string: |- + function envoy_on_request(request_handle) + local path = request_handle:headers():get(":path") + request_handle + :headers() + :replace(":path", path:gsub("&=[^&]*", ""):gsub("?=[^&]*$", ""):gsub("?=[^&]*&", "?")) + end + - name: envoy.filters.http.compressor.brotli + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + response_direction_config: + common_config: + min_content_length: 100 + content_type: + - application/vnd.pgrst.object+json + - application/vnd.pgrst.array+json + - application/openapi+json + - application/geo+json + - text/csv + - application/vnd.pgrst.plan + - application/vnd.pgrst.object + - application/vnd.pgrst.array + - application/javascript + - application/json + - application/xhtml+xml + - image/svg+xml + - text/css + - text/html + - text/plain + - text/xml + disable_on_etag_header: true + request_direction_config: + common_config: + enabled: + default_value: false + runtime_key: request_compressor_enabled + compressor_library: + name: text_optimized + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.compression.brotli.compressor.v3.Brotli + - name: envoy.filters.http.compressor.gzip + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + response_direction_config: + common_config: + min_content_length: 100 + content_type: + - application/vnd.pgrst.object+json + - application/vnd.pgrst.array+json + - 
application/openapi+json + - application/geo+json + - text/csv + - application/vnd.pgrst.plan + - application/vnd.pgrst.object + - application/vnd.pgrst.array + - application/javascript + - application/json + - application/xhtml+xml + - image/svg+xml + - text/css + - text/html + - text/plain + - text/xml + disable_on_etag_header: true + request_direction_config: + common_config: + enabled: + default_value: false + runtime_key: request_compressor_enabled + compressor_library: + name: text_optimized + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip + - name: envoy.filters.http.router + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + dynamic_stats: false + local_reply_config: + mappers: + - filter: + and_filter: + filters: + - status_code_filter: + comparison: + value: + default_value: 403 + runtime_key: unused + - header_filter: + header: + name: ':path' + string_match: + prefix: /customer/v1/privileged/ + status_code: 401 + body: + inline_string: Unauthorized + headers_to_add: + - header: + key: WWW-Authenticate + value: Basic realm="Unknown" + - filter: + and_filter: + filters: + - status_code_filter: + comparison: + value: + default_value: 403 + runtime_key: unused + - header_filter: + header: + name: ':path' + string_match: + prefix: /metrics/aggregated + invert_match: true + status_code: 401 + body_format_override: + json_format: + message: >- + `apikey` request header or query parameter is either + missing or invalid. Double check your tealbase `anon` + or `service_role` API key. 
+ hint: '%RESPONSE_CODE_DETAILS%' + json_format_options: + sort_properties: false + merge_slashes: true + route_config: + name: route_config_0 + virtual_hosts: + - name: virtual_host_0 + domains: + - '*' + typed_per_filter_config: + envoy.filters.http.cors: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.cors.v3.CorsPolicy + allow_origin_string_match: + - safe_regex: + regex: \* + allow_methods: GET,HEAD,PUT,PATCH,POST,DELETE,OPTIONS,TRACE,CONNECT + allow_headers: apikey,authorization,x-client-info + max_age: '3600' + routes: + - match: + path: /health + direct_response: + status: 200 + body: + inline_string: Healthy + typed_per_filter_config: &ref_0 + envoy.filters.http.rbac: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute + - match: + safe_regex: + google_re2: + max_program_size: 150 + regex: >- + /auth/v1/(verify|callback|authorize|sso/saml/(acs|metadata|slo)|\.well-known/(openid-configuration|jwks\.json)) + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: gotrue + regex_rewrite: + pattern: + regex: ^/auth/v1 + substitution: '' + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 35s + typed_per_filter_config: *ref_0 + - match: + prefix: /auth/v1/ + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: gotrue + prefix_rewrite: / + timeout: 35s + - match: + prefix: /rest/v1/ + query_parameters: + - name: apikey + present_match: true + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest + prefix_rewrite: / + timeout: 125s + typed_per_filter_config: + envoy.filters.http.lua: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: remove_apikey_and_empty_key_query_parameters + - match: + prefix: /rest/v1/ + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest + prefix_rewrite: / + timeout: 125s + typed_per_filter_config: + envoy.filters.http.lua: + '@type': >- + 
type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: remove_empty_key_query_parameters + - match: + prefix: /rest-admin/v1/ + query_parameters: + - name: apikey + present_match: true + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest_admin + prefix_rewrite: / + typed_per_filter_config: + envoy.filters.http.lua: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: remove_apikey_and_empty_key_query_parameters + - match: + prefix: /rest-admin/v1/ + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest_admin + prefix_rewrite: / + - match: + path: /graphql/v1 + request_headers_to_add: + - header: + key: Content-Profile + value: graphql_public + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest + prefix_rewrite: /rpc/graphql + timeout: 125s + - match: + prefix: /admin/v1/ + request_headers_to_remove: + - sb-opk + route: + cluster: admin_api + prefix_rewrite: / + timeout: 600s + - match: + prefix: /customer/v1/privileged/ + request_headers_to_remove: + - sb-opk + route: + cluster: admin_api + prefix_rewrite: /privileged/ + typed_per_filter_config: + envoy.filters.http.rbac: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute + rbac: + rules: + action: DENY + policies: + basic_auth: + permissions: + - any: true + principals: + - header: + name: authorization + invert_match: true + string_match: + exact: Basic c2VydmljZV9yb2xlOnNlcnZpY2Vfa2V5 + treat_missing_header_as_empty: true + - match: + prefix: /metrics/aggregated + request_headers_to_remove: + - sb-opk + route: + cluster: admin_api + prefix_rewrite: /tealbase-internal/metrics + typed_per_filter_config: + envoy.filters.http.rbac: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute + rbac: + rules: + action: DENY + policies: + not_private_ip: + permissions: + - any: true + principals: + - not_id: + 
direct_remote_ip: + address_prefix: 10.0.0.0 + prefix_len: 8 + include_attempt_count_in_response: true + retry_policy: + num_retries: 5 + retry_back_off: + base_interval: 0.1s + max_interval: 1s + retry_on: gateway-error + stat_prefix: ingress_http + - '@type': type.googleapis.com/envoy.config.listener.v3.Listener + name: https_listener + address: + socket_address: + address: '::' + port_value: 443 + ipv4_compat: true + filter_chains: + - filters: *ref_1 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: + filename: /etc/envoy/fullChain.pem + private_key: + filename: /etc/envoy/privKey.pem + diff --git a/ansible/files/fail2ban_config/filter-pgbouncer.conf.j2 b/ansible/files/fail2ban_config/filter-pgbouncer.conf.j2 index 50326da..3a3a52e 100644 --- a/ansible/files/fail2ban_config/filter-pgbouncer.conf.j2 +++ b/ansible/files/fail2ban_config/filter-pgbouncer.conf.j2 @@ -1,2 +1,3 @@ [Definition] -failregex = ^.+@:.+error: password authentication failed$ \ No newline at end of file +failregex = ^.+@:.+password authentication failed$ +journalmatch = _SYSTEMD_UNIT=pgbouncer.service diff --git a/ansible/files/fail2ban_config/jail-pgbouncer.conf.j2 b/ansible/files/fail2ban_config/jail-pgbouncer.conf.j2 index 77c5530..60a9eb3 100644 --- a/ansible/files/fail2ban_config/jail-pgbouncer.conf.j2 +++ b/ansible/files/fail2ban_config/jail-pgbouncer.conf.j2 @@ -3,5 +3,5 @@ enabled = true port = 6543 protocol = tcp filter = pgbouncer -logpath = /var/log/pgbouncer.log -maxretry = 3 \ No newline at end of file +backend = systemd[journalflags=1] +maxretry = 3 diff --git a/ansible/files/fail2ban_config/jail-postgresql.conf.j2 b/ansible/files/fail2ban_config/jail-postgresql.conf.j2 index 9822d4f..a021035 100644 --- a/ansible/files/fail2ban_config/jail-postgresql.conf.j2 +++ 
b/ansible/files/fail2ban_config/jail-postgresql.conf.j2 @@ -3,5 +3,6 @@ enabled = true port = 5432 protocol = tcp filter = postgresql -logpath = /var/log/postgresql/postgresql.csv +logpath = /var/log/postgresql/auth-failures.csv maxretry = 3 +ignoreip = 192.168.0.0/16 172.17.1.0/20 diff --git a/ansible/files/gotrue-optimizations.service.j2 b/ansible/files/gotrue-optimizations.service.j2 new file mode 100644 index 0000000..b483c58 --- /dev/null +++ b/ansible/files/gotrue-optimizations.service.j2 @@ -0,0 +1,11 @@ +[Unit] +Description=GoTrue (Auth) optimizations + +[Service] +Type=oneshot +# we don't want failures from this command to cause PG startup to fail +ExecStart=/bin/bash -c "/opt/tealbase-admin-api optimize auth --destination-config-file-path /etc/gotrue/gotrue.generated.env ; exit 0" +User=postgrest + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/gotrue.service.j2 b/ansible/files/gotrue.service.j2 index c37a236..c1f7f58 100644 --- a/ansible/files/gotrue.service.j2 +++ b/ansible/files/gotrue.service.j2 @@ -12,7 +12,9 @@ RestartSec=3 MemoryAccounting=true MemoryMax=50% +EnvironmentFile=-/etc/gotrue.generated.env EnvironmentFile=/etc/gotrue.env +EnvironmentFile=-/etc/gotrue.overrides.env Slice=services.slice diff --git a/ansible/files/kong_config/kong.conf.j2 b/ansible/files/kong_config/kong.conf.j2 index 54ce718..3906757 100644 --- a/ansible/files/kong_config/kong.conf.j2 +++ b/ansible/files/kong_config/kong.conf.j2 @@ -4,4 +4,4 @@ declarative_config = /etc/kong/kong.yml # plugins defined in the dockerfile plugins = request-transformer,cors,key-auth,http-log -proxy_listen = 0.0.0.0:80 reuseport backlog=16384, 0.0.0.0:443 http2 ssl reuseport backlog=16834 +proxy_listen = 0.0.0.0:80 reuseport backlog=16384, 0.0.0.0:443 http2 ssl reuseport backlog=16384, [::]:80 reuseport backlog=16384, [::]:443 http2 ssl reuseport backlog=16384 diff --git a/ansible/files/kong_config/kong.service.j2 b/ansible/files/kong_config/kong.service.j2 index 
6df4b55..6a36520 100644 --- a/ansible/files/kong_config/kong.service.j2 +++ b/ansible/files/kong_config/kong.service.j2 @@ -2,12 +2,16 @@ Description=Kong server After=postgrest.service gotrue.service adminapi.service Wants=postgrest.service gotrue.service adminapi.service +Conflicts=envoy.service + +# Ensures that Kong service is stopped before Envoy service is started +Before=envoy.service [Service] Type=forking ExecStart=/usr/local/bin/kong start -c /etc/kong/kong.conf ExecReload=/usr/local/bin/kong reload -c /etc/kong/kong.conf -ExecStop=/usr/local/bin/kong stop +ExecStop=/usr/local/bin/kong quit User=kong EnvironmentFile=/etc/kong/kong.env Slice=services.slice @@ -15,8 +19,8 @@ Restart=always RestartSec=3 LimitNOFILE=100000 -# The kong user is unpriviledged and thus not permited to bind on ports < 1024 -# Via systemd we grant the process a set of priviledges to bind to 80/443 +# The kong user is unprivileged and thus not permitted to bind on ports < 1024 +# Via systemd we grant the process a set of privileges to bind to 80/443 # See http://archive.vn/36zJU AmbientCapabilities=CAP_NET_BIND_SERVICE diff --git a/ansible/files/logrotate_config/logrotate-postgres-csv.conf b/ansible/files/logrotate_config/logrotate-postgres-csv.conf index d77a98d..e5418e8 100644 --- a/ansible/files/logrotate_config/logrotate-postgres-csv.conf +++ b/ansible/files/logrotate_config/logrotate-postgres-csv.conf @@ -6,6 +6,6 @@ notifempty missingok postrotate - sudo -u postgres pg_ctl -D /var/lib/postgresql/data logrotate + sudo -u postgres /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data logrotate endscript } diff --git a/ansible/files/manifest.json b/ansible/files/manifest.json new file mode 100644 index 0000000..3a20e76 --- /dev/null +++ b/ansible/files/manifest.json @@ -0,0 +1 @@ +{{ vars | to_json }} diff --git a/ansible/files/permission_check.py b/ansible/files/permission_check.py new file mode 100644 index 0000000..724acb1 --- /dev/null +++ 
b/ansible/files/permission_check.py @@ -0,0 +1,204 @@ +import subprocess +import json +import sys + +# Expected groups for each user +expected_results = { + "postgres": [ + {"groupname": "postgres", "username": "postgres"}, + {"groupname": "ssl-cert", "username": "postgres"} + ], + "ubuntu": [ + {"groupname":"ubuntu","username":"ubuntu"}, + {"groupname":"adm","username":"ubuntu"}, + {"groupname":"dialout","username":"ubuntu"}, + {"groupname":"cdrom","username":"ubuntu"}, + {"groupname":"floppy","username":"ubuntu"}, + {"groupname":"sudo","username":"ubuntu"}, + {"groupname":"audio","username":"ubuntu"}, + {"groupname":"dip","username":"ubuntu"}, + {"groupname":"video","username":"ubuntu"}, + {"groupname":"plugdev","username":"ubuntu"}, + {"groupname":"lxd","username":"ubuntu"}, + {"groupname":"netdev","username":"ubuntu"} + ], + "root": [ + {"groupname":"root","username":"root"} + ], + "daemon": [ + {"groupname":"daemon","username":"daemon"} + ], + "bin": [ + {"groupname":"bin","username":"bin"} + ], + "sys": [ + {"groupname":"sys","username":"sys"} + ], + "sync": [ + {"groupname":"nogroup","username":"sync"} + ], + "games": [ + {"groupname":"games","username":"games"} + ], + "man": [ + {"groupname":"man","username":"man"} + ], + "lp": [ + {"groupname":"lp","username":"lp"} + ], + "mail": [ + {"groupname":"mail","username":"mail"} + ], + "news": [ + {"groupname":"news","username":"news"} + ], + "uucp": [ + {"groupname":"uucp","username":"uucp"} + ], + "proxy": [ + {"groupname":"proxy","username":"proxy"} + ], + "www-data": [ + {"groupname":"www-data","username":"www-data"} + ], + "backup": [ + {"groupname":"backup","username":"backup"} + ], + "list": [ + {"groupname":"list","username":"list"} + ], + "irc": [ + {"groupname":"irc","username":"irc"} + ], + "gnats": [ + {"groupname":"gnats","username":"gnats"} + ], + "nobody": [ + {"groupname":"nogroup","username":"nobody"} + ], + "systemd-network": [ + {"groupname":"systemd-network","username":"systemd-network"} + ], 
+ "systemd-resolve": [ + {"groupname":"systemd-resolve","username":"systemd-resolve"} + ], + "systemd-timesync": [ + {"groupname":"systemd-timesync","username":"systemd-timesync"} + ], + "messagebus": [ + {"groupname":"messagebus","username":"messagebus"} + ], + "ec2-instance-connect": [ + {"groupname":"nogroup","username":"ec2-instance-connect"} + ], + "sshd": [ + {"groupname":"nogroup","username":"sshd"} + ], + "wal-g": [ + {"groupname":"wal-g","username":"wal-g"}, + {"groupname":"postgres","username":"wal-g"} + ], + "pgbouncer": [ + {"groupname":"pgbouncer","username":"pgbouncer"}, + {"groupname":"ssl-cert","username":"pgbouncer"}, + {"groupname":"postgres","username":"pgbouncer"} + ], + "gotrue": [ + {"groupname":"gotrue","username":"gotrue"} + ], + "envoy": [ + {"groupname":"envoy","username":"envoy"} + ], + "kong": [ + {"groupname":"kong","username":"kong"} + ], + "nginx": [ + {"groupname":"nginx","username":"nginx"} + ], + "vector": [ + {"groupname":"vector","username":"vector"}, + {"groupname":"adm","username":"vector"}, + {"groupname":"systemd-journal","username":"vector"}, + {"groupname":"postgres","username":"vector"} + ], + "adminapi": [ + {"groupname":"adminapi","username":"adminapi"}, + {"groupname":"root","username":"adminapi"}, + {"groupname":"systemd-journal","username":"adminapi"}, + {"groupname":"admin","username":"adminapi"}, + {"groupname":"postgres","username":"adminapi"}, + {"groupname":"pgbouncer","username":"adminapi"}, + {"groupname":"wal-g","username":"adminapi"}, + {"groupname":"postgrest","username":"adminapi"}, + {"groupname":"envoy","username":"adminapi"}, + {"groupname":"kong","username":"adminapi"}, + {"groupname":"vector","username":"adminapi"} + ], + "postgrest": [ + {"groupname":"postgrest","username":"postgrest"} + ], + "tcpdump": [ + {"groupname":"tcpdump","username":"tcpdump"} + ], + "systemd-coredump": [ + {"groupname":"systemd-coredump","username":"systemd-coredump"} + ] +} +# This program depends on osquery being installed 
on the system +# Function to run osquery +def run_osquery(query): + process = subprocess.Popen(['osqueryi', '--json', query], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output, error = process.communicate() + return output.decode('utf-8') + +def parse_json(json_str): + try: + return json.loads(json_str) + except json.JSONDecodeError as e: + print("Error decoding JSON:", e) + sys.exit(1) + +def compare_results(username, query_result): + expected_result = expected_results.get(username) + if expected_result is None: + print(f"No expected result defined for user '{username}'") + sys.exit(1) + + if query_result == expected_result: + print(f"The query result for user '{username}' matches the expected result.") + else: + print(f"The query result for user '{username}' does not match the expected result.") + print("Expected:", expected_result) + print("Got:", query_result) + sys.exit(1) + +def check_nixbld_users(): + query = """ + SELECT u.username, g.groupname + FROM users u + JOIN user_groups ug ON u.uid = ug.uid + JOIN groups g ON ug.gid = g.gid + WHERE u.username LIKE 'nixbld%'; + """ + query_result = run_osquery(query) + parsed_result = parse_json(query_result) + + for user in parsed_result: + if user['groupname'] != 'nixbld': + print(f"User '{user['username']}' is in group '{user['groupname']}' instead of 'nixbld'.") + sys.exit(1) + + print("All nixbld users are in the 'nixbld' group.") + +# Define usernames for which you want to compare results +usernames = ["postgres", "ubuntu", "root", "daemon", "bin", "sys", "sync", "games","man","lp","mail","news","uucp","proxy","www-data","backup","list","irc","gnats","nobody","systemd-network","systemd-resolve","systemd-timesync","messagebus","ec2-instance-connect","sshd","wal-g","pgbouncer","gotrue","envoy","kong","nginx","vector","adminapi","postgrest","tcpdump","systemd-coredump"] + +# Iterate over usernames, run the query, and compare results +for username in usernames: + query = f"SELECT u.username, g.groupname FROM 
users u JOIN user_groups ug ON u.uid = ug.uid JOIN groups g ON ug.gid = g.gid WHERE u.username = '{username}';" + query_result = run_osquery(query) + parsed_result = parse_json(query_result) + compare_results(username, parsed_result) + +# Check if all nixbld users are in the nixbld group +check_nixbld_users() diff --git a/ansible/files/pgbouncer_config/pgbouncer.ini.j2 b/ansible/files/pgbouncer_config/pgbouncer.ini.j2 index 1d83b25..e4518c0 100644 --- a/ansible/files/pgbouncer_config/pgbouncer.ini.j2 +++ b/ansible/files/pgbouncer_config/pgbouncer.ini.j2 @@ -43,7 +43,7 @@ ;;; Administrative settings ;;; -logfile = /var/log/pgbouncer.log +;logfile = /var/log/pgbouncer.log pidfile = /var/run/pgbouncer/pgbouncer.pid ;;; @@ -51,7 +51,7 @@ pidfile = /var/run/pgbouncer/pgbouncer.pid ;;; ;; IP address or * which means all IPs -listen_addr = 0.0.0.0 +listen_addr = * listen_port = 6543 ;; Unix socket is also used for -R. diff --git a/ansible/files/postgres_exporter.service.j2 b/ansible/files/postgres_exporter.service.j2 index 7ddb5be..649ea75 100644 --- a/ansible/files/postgres_exporter.service.j2 +++ b/ansible/files/postgres_exporter.service.j2 @@ -3,10 +3,9 @@ Description=Postgres Exporter [Service] Type=simple -ExecStart=/opt/postgres_exporter/postgres_exporter --disable-settings-metrics --extend.query-path="/opt/postgres_exporter/queries.yml" --disable-default-metrics -User=root -StandardOutput=append:/var/log/postgres_exporter.stdout -StandardError=append:/var/log/postgres_exporter.error +ExecStart=/opt/postgres_exporter/postgres_exporter --disable-settings-metrics --extend.query-path="/opt/postgres_exporter/queries.yml" --disable-default-metrics --no-collector.locks --no-collector.replication --no-collector.replication_slot --no-collector.stat_bgwriter --no-collector.stat_database --no-collector.stat_user_tables --no-collector.statio_user_tables --no-collector.wal +User=postgres +Group=postgres Restart=always RestartSec=3 Environment="DATA_SOURCE_NAME=host=localhost 
dbname=postgres sslmode=disable user=tealbase_admin pg_stat_statements.track=none application_name=postgres_exporter" diff --git a/ansible/files/postgres_prestart.sh.j2 b/ansible/files/postgres_prestart.sh.j2 new file mode 100644 index 0000000..ae5aa1a --- /dev/null +++ b/ansible/files/postgres_prestart.sh.j2 @@ -0,0 +1,9 @@ +#!/bin/bash + +if [ $(cat /etc/locale.gen | grep -c en_US.UTF-8) -eq 0 ]; then + echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen +fi + +if [ $(locale -a | grep -c en_US.utf8) -eq 0 ]; then + locale-gen +fi diff --git a/ansible/files/postgresql_config/custom_read_replica.conf.j2 b/ansible/files/postgresql_config/custom_read_replica.conf.j2 new file mode 100644 index 0000000..7d52f92 --- /dev/null +++ b/ansible/files/postgresql_config/custom_read_replica.conf.j2 @@ -0,0 +1,5 @@ +# hot_standby = on +# restore_command = '/usr/bin/admin-mgr wal-fetch %f %p >> /var/log/wal-g/wal-fetch.log 2>&1' +# recovery_target_timeline = 'latest' + +# primary_conninfo = 'host=localhost port=6543 user=replication' diff --git a/ansible/files/postgresql_config/custom_walg.conf.j2 b/ansible/files/postgresql_config/custom_walg.conf.j2 index 7c9c1bb..7ef7256 100644 --- a/ansible/files/postgresql_config/custom_walg.conf.j2 +++ b/ansible/files/postgresql_config/custom_walg.conf.j2 @@ -11,7 +11,11 @@ # - Recovery Target - +#recovery_target_lsn = '' #recovery_target_time = '' #recovery_target_action = 'promote' #recovery_target_timeline = 'current' #recovery_target_inclusive = off + +# - Hot Standby - +hot_standby = off diff --git a/ansible/files/postgresql_config/pg_hba.conf.j2 b/ansible/files/postgresql_config/pg_hba.conf.j2 index ebb1767..76bd2f0 100755 --- a/ansible/files/postgresql_config/pg_hba.conf.j2 +++ b/ansible/files/postgresql_config/pg_hba.conf.j2 @@ -89,3 +89,6 @@ host all all 10.0.0.0/8 scram-sha-256 host all all 172.16.0.0/12 scram-sha-256 host all all 192.168.0.0/16 scram-sha-256 host all all 0.0.0.0/0 scram-sha-256 + +# IPv6 external connections +host all all 
::0/0 scram-sha-256 diff --git a/ansible/files/postgresql_config/postgresql.conf.j2 b/ansible/files/postgresql_config/postgresql.conf.j2 index b46068a..1604d94 100644 --- a/ansible/files/postgresql_config/postgresql.conf.j2 +++ b/ansible/files/postgresql_config/postgresql.conf.j2 @@ -300,7 +300,7 @@ max_wal_senders = 10 # max number of walsender processes max_replication_slots = 5 # max number of replication slots # (change requires restart) #wal_keep_size = 0 # in megabytes; 0 disables -max_slot_wal_keep_size = 1024 # in megabytes; -1 disables +max_slot_wal_keep_size = 4096 # in megabytes; -1 disables #wal_sender_timeout = 60s # in milliseconds; 0 disables #track_commit_timestamp = off # collect timestamp of transaction commit # (change requires restart) @@ -688,7 +688,7 @@ default_text_search_config = 'pg_catalog.english' #local_preload_libraries = '' #session_preload_libraries = '' -shared_preload_libraries = 'pg_stat_statements, pg_stat_monitor, pgaudit, plpgsql, plpgsql_check, pg_cron, pg_net, pgsodium, timescaledb, auto_explain' # (change requires restart) +shared_preload_libraries = 'pg_stat_statements, pgaudit, plpgsql, plpgsql_check, pg_cron, pg_net, pgsodium, timescaledb, auto_explain, pg_tle, plan_filter' # (change requires restart) jit_provider = 'llvmjit' # JIT library to use # - Other Defaults - @@ -763,6 +763,9 @@ jit_provider = 'llvmjit' # JIT library to use # WAL-G specific configurations #include = '/etc/postgresql-custom/wal-g.conf' +# read replica specific configurations +include = '/etc/postgresql-custom/read-replica.conf' + # supautils specific configurations #include = '/etc/postgresql-custom/supautils.conf' @@ -771,3 +774,5 @@ jit_provider = 'llvmjit' # JIT library to use #------------------------------------------------------------------------------ # Add settings for extensions here +auto_explain.log_min_duration = 10s +cron.database_name = 'postgres' diff --git a/ansible/files/postgresql_config/postgresql.service.j2 
b/ansible/files/postgresql_config/postgresql.service.j2 index 0be175b..be219f5 100644 --- a/ansible/files/postgresql_config/postgresql.service.j2 +++ b/ansible/files/postgresql_config/postgresql.service.j2 @@ -10,13 +10,16 @@ After=database-optimizations.service Type=notify User=postgres ExecStart=/usr/lib/postgresql/bin/postgres -D /etc/postgresql +ExecStartPre=+/usr/local/bin/postgres_prestart.sh ExecReload=/bin/kill -HUP $MAINPID KillMode=mixed KillSignal=SIGINT TimeoutStopSec=90 +TimeoutStartSec=86400 Restart=always RestartSec=5 OOMScoreAdjust=-1000 +EnvironmentFile=-/etc/environment.d/postgresql.env [Install] WantedBy=multi-user.target diff --git a/ansible/files/postgresql_config/supautils.conf.j2 b/ansible/files/postgresql_config/supautils.conf.j2 index c6ef185..9ef30ab 100644 --- a/ansible/files/postgresql_config/supautils.conf.j2 +++ b/ansible/files/postgresql_config/supautils.conf.j2 @@ -1,12 +1,14 @@ -supautils.placeholders = 'response.headers' -supautils.placeholders_disallowed_values = '"content-type"' -# full list: address_standardizer, address_standardizer_data_us, adminpack, amcheck, autoinc, bloom, btree_gin, btree_gist, citext, cube, dblink, dict_int, dict_xsyn, earthdistance, file_fdw, fuzzystrmatch, hstore, http, insert_username, intagg, intarray, isn, lo, ltree, moddatetime, old_snapshot, pageinspect, pg_buffercache, pg_cron, pg_freespacemap, pg_graphql, pg_hashids, pg_jsonschema, pg_net, pg_prewarm, pg_stat_monitor, pg_stat_statements, pg_surgery, pg_trgm, pg_visibility, pg_walinspect, pgaudit, pgcrypto, pgjwt, pgroonga, pgroonga_database, pgrouting, pgrowlocks, pgsodium, pgstattuple, pgtap, vector, plcoffee, pljava, plls, plpgsql, plpgsql_check, plv8, postgis, postgis_raster, postgis_sfcgal, postgis_tiger_geocoder, postgis_topology, postgres_fdw, refint, rum, seg, sslinfo, tealbase_vault, supautils, tablefunc, tcn, timescaledb, tsm_system_rows, tsm_system_time, unaccent, uuid-ossp, wrappers, xml2 -# omitted because may be unsafe: adminpack, 
amcheck, file_fdw, lo, old_snapshot, pageinspect, pg_buffercache, pg_freespacemap, pg_prewarm, pg_surgery, pg_visibility, pgstattuple +supautils.extensions_parameter_overrides = '{"pg_cron":{"schema":"pg_catalog"}}' +supautils.policy_grants = '{"postgres":["auth.audit_log_entries","auth.identities","auth.refresh_tokens","auth.sessions","auth.users","realtime.messages","storage.buckets","storage.migrations","storage.objects","storage.s3_multipart_uploads","storage.s3_multipart_uploads_parts"]}' +supautils.drop_trigger_grants = '{"postgres":["auth.audit_log_entries","auth.identities","auth.refresh_tokens","auth.sessions","auth.users","realtime.messages","storage.buckets","storage.migrations","storage.objects","storage.s3_multipart_uploads","storage.s3_multipart_uploads_parts"]}' +# full list: address_standardizer, address_standardizer_data_us, adminpack, amcheck, autoinc, bloom, btree_gin, btree_gist, citext, cube, dblink, dict_int, dict_xsyn, earthdistance, file_fdw, fuzzystrmatch, hstore, http, hypopg, index_advisor, insert_username, intagg, intarray, isn, lo, ltree, moddatetime, old_snapshot, orioledb, pageinspect, pg_buffercache, pg_cron, pg_freespacemap, pg_graphql, pg_hashids, pg_jsonschema, pg_net, pg_prewarm, pg_repack, pg_stat_monitor, pg_stat_statements, pg_surgery, pg_tle, pg_trgm, pg_visibility, pg_walinspect, pgaudit, pgcrypto, pgjwt, pgmq, pgroonga, pgroonga_database, pgrouting, pgrowlocks, pgsodium, pgstattuple, pgtap, plcoffee, pljava, plls, plpgsql, plpgsql_check, plv8, postgis, postgis_raster, postgis_sfcgal, postgis_tiger_geocoder, postgis_topology, postgres_fdw, refint, rum, seg, sslinfo, tealbase_vault, supautils, tablefunc, tcn, timescaledb, tsm_system_rows, tsm_system_time, unaccent, uuid-ossp, vector, wrappers, xml2 +# omitted because may be unsafe: adminpack, amcheck, file_fdw, lo, old_snapshot, pageinspect, pg_buffercache, pg_freespacemap, pg_surgery, pg_visibility # omitted because deprecated: intagg, xml2 -supautils.privileged_extensions = 
'address_standardizer, address_standardizer_data_us, autoinc, bloom, btree_gin, btree_gist, citext, cube, dblink, dict_int, dict_xsyn, earthdistance, fuzzystrmatch, hstore, http, insert_username, intarray, isn, ltree, moddatetime, pg_cron, pg_graphql, pg_hashids, pg_jsonschema, pg_net, pg_stat_monitor, pg_stat_statements, pg_trgm, pg_walinspect, pgaudit, pgcrypto, pgjwt, pgroonga, pgroonga_database, pgrouting, pgrowlocks, pgsodium, pgtap, vector, plcoffee, pljava, plls, plpgsql, plpgsql_check, plv8, postgis, postgis_raster, postgis_sfcgal, postgis_tiger_geocoder, postgis_topology, postgres_fdw, refint, rum, seg, sslinfo, tealbase_vault, supautils, tablefunc, tcn, timescaledb, tsm_system_rows, tsm_system_time, unaccent, uuid-ossp, wrappers' +# omitted because doesn't require superuser: pgmq +supautils.privileged_extensions = 'address_standardizer, address_standardizer_data_us, autoinc, bloom, btree_gin, btree_gist, citext, cube, dblink, dict_int, dict_xsyn, earthdistance, fuzzystrmatch, hstore, http, hypopg, index_advisor, insert_username, intarray, isn, ltree, moddatetime, orioledb, pg_cron, pg_graphql, pg_hashids, pg_jsonschema, pg_net, pg_repack, pg_stat_monitor, pg_stat_statements, pg_tle, pg_trgm, pg_walinspect, pgaudit, pgcrypto, pgjwt, pg_prewarm, pgroonga, pgroonga_database, pgrouting, pgrowlocks, pgstattuple, pgsodium, pgtap, plcoffee, pljava, plls, plpgsql, plpgsql_check, plv8, postgis, postgis_raster, postgis_sfcgal, postgis_tiger_geocoder, postgis_topology, postgres_fdw, refint, rum, seg, sslinfo, tealbase_vault, supautils, tablefunc, tcn, timescaledb, tsm_system_rows, tsm_system_time, unaccent, uuid-ossp, vector, wrappers' supautils.privileged_extensions_custom_scripts_path = '/etc/postgresql-custom/extension-custom-scripts' supautils.privileged_extensions_superuser = 'tealbase_admin' supautils.privileged_role = 'postgres' -supautils.privileged_role_allowed_configs = 'pgaudit.log, pgaudit.log_catalog, pgaudit.log_client, pgaudit.log_level, 
pgaudit.log_relation, pgaudit.log_rows, pgaudit.log_statement, pgaudit.log_statement_once, pgaudit.role, session_replication_role, track_io_timing' -supautils.reserved_memberships = 'pg_read_server_files, pg_write_server_files, pg_execute_server_program, authenticator' -supautils.reserved_roles = 'tealbase_admin, tealbase_auth_admin, tealbase_storage_admin, tealbase_replication_admin, dashboard_user, pgbouncer, service_role, authenticator, authenticated, anon' +supautils.privileged_role_allowed_configs = 'auto_explain.log_min_duration, auto_explain.log_nested_statements, log_lock_waits, log_min_messages, pg_net.batch_size, pg_net.ttl, pgaudit.log, pgaudit.log_catalog, pgaudit.log_client, pgaudit.log_level, pgaudit.log_relation, pgaudit.log_rows, pgaudit.log_statement, pgaudit.log_statement_once, pgaudit.role, pgrst.*, plan_filter.*, safeupdate.enabled, session_replication_role, track_io_timing' +supautils.reserved_memberships = 'pg_read_server_files, pg_write_server_files, pg_execute_server_program, tealbase_admin, tealbase_auth_admin, tealbase_storage_admin, tealbase_read_only_user, tealbase_realtime_admin, tealbase_replication_admin, dashboard_user, pgbouncer, authenticator' +supautils.reserved_roles = 'tealbase_admin, tealbase_auth_admin, tealbase_storage_admin, tealbase_read_only_user, tealbase_realtime_admin, tealbase_replication_admin, dashboard_user, pgbouncer, service_role*, authenticator*, authenticated*, anon*' diff --git a/ansible/files/postgresql_config/tmpfiles.postgresql.conf b/ansible/files/postgresql_config/tmpfiles.postgresql.conf new file mode 100644 index 0000000..b5ea549 --- /dev/null +++ b/ansible/files/postgresql_config/tmpfiles.postgresql.conf @@ -0,0 +1,5 @@ +# unchanged from upstream package +d /run/postgresql 2775 postgres postgres - - +# Log directory - ensure that our logging setup gets preserved +# and that vector can keep writing to a file here as well +d /var/log/postgresql 1775 postgres postgres - - diff --git 
a/ansible/files/postgresql_extension_custom_scripts/before-create.sql b/ansible/files/postgresql_extension_custom_scripts/before-create.sql new file mode 100644 index 0000000..f2f2386 --- /dev/null +++ b/ansible/files/postgresql_extension_custom_scripts/before-create.sql @@ -0,0 +1,84 @@ +-- If the following are true: +-- * the extension to be created is a TLE +-- * the extension is created with `cascade` +-- +-- then we pre-`create` all nested extension dependencies which are part of +-- `supautils.privileged_extensions`. This is because supautils can't intercept +-- the extension creation for dependencies - it can only intercept the `create +-- extension` statement. +do $$ +declare + _extname text := @extname@; + _extschema text := @extschema@; + _extversion text := @extversion@; + _extcascade bool := @extcascade@; + _r record; +begin + if not _extcascade then + return; + end if; + + if not exists (select from pg_extension where extname = 'pg_tle') then + return; + end if; + + if not exists (select from pgtle.available_extensions() where name = _extname) then + return; + end if; + + if _extversion is null then + select default_version + from pgtle.available_extensions() + where name = _extname + into _extversion; + end if; + + if _extschema is null then + select schema + from pgtle.available_extension_versions() + where name = _extname and version = _extversion + into _extschema; + end if; + + for _r in ( + with recursive available_extensions(name, default_version) as ( + select name, default_version + from pg_available_extensions + union + select name, default_version + from pgtle.available_extensions() + ) + , available_extension_versions(name, version, requires) as ( + select name, version, requires + from pg_available_extension_versions + union + select name, version, requires + from pgtle.available_extension_versions() + ) + , all_dependencies(name, dependency) as ( + select e.name, unnest(ev.requires) as dependency + from available_extensions e + join 
available_extension_versions ev on ev.name = e.name and ev.version = e.default_version + ) + , dependencies(name) AS ( + select unnest(requires) + from available_extension_versions + where name = _extname and version = _extversion + union + select all_dependencies.dependency + from all_dependencies + join dependencies d on d.name = all_dependencies.name + ) + select name + from dependencies + intersect + select name + from regexp_split_to_table(current_setting('supautils.privileged_extensions', true), '\s*,\s*') as t(name) + ) loop + if _extschema is null then + execute(format('create extension if not exists %I cascade', _r.name)); + else + execute(format('create extension if not exists %I schema %I cascade', _r.name, _extschema)); + end if; + end loop; +end $$; diff --git a/ansible/files/postgresql_extension_custom_scripts/dblink/after-create.sql b/ansible/files/postgresql_extension_custom_scripts/dblink/after-create.sql new file mode 100644 index 0000000..c2e5269 --- /dev/null +++ b/ansible/files/postgresql_extension_custom_scripts/dblink/after-create.sql @@ -0,0 +1,14 @@ +do $$ +declare + r record; +begin + for r in (select oid, (aclexplode(proacl)).grantee from pg_proc where proname = 'dblink_connect_u') loop + continue when r.grantee = 'tealbase_admin'::regrole; + execute( + format( + 'revoke all on function %s(%s) from %s;', r.oid::regproc, pg_get_function_identity_arguments(r.oid), r.grantee::regrole + ) + ); + end loop; +end +$$; diff --git a/ansible/files/postgresql_extension_custom_scripts/pg_cron/after-create.sql b/ansible/files/postgresql_extension_custom_scripts/pg_cron/after-create.sql new file mode 100644 index 0000000..e89ca56 --- /dev/null +++ b/ansible/files/postgresql_extension_custom_scripts/pg_cron/after-create.sql @@ -0,0 +1,13 @@ +grant usage on schema cron to postgres with grant option; +grant all on all functions in schema cron to postgres with grant option; + +alter default privileges for user tealbase_admin in schema cron grant all + on 
sequences to postgres with grant option; +alter default privileges for user tealbase_admin in schema cron grant all + on tables to postgres with grant option; +alter default privileges for user tealbase_admin in schema cron grant all + on functions to postgres with grant option; + +grant all privileges on all tables in schema cron to postgres with grant option; +revoke all on table cron.job from postgres; +grant select on table cron.job to postgres with grant option; diff --git a/ansible/files/postgresql_extension_custom_scripts/pg_tle/after-create.sql b/ansible/files/postgresql_extension_custom_scripts/pg_tle/after-create.sql new file mode 100644 index 0000000..eb8aeff --- /dev/null +++ b/ansible/files/postgresql_extension_custom_scripts/pg_tle/after-create.sql @@ -0,0 +1 @@ +grant pgtle_admin to postgres; diff --git a/ansible/files/postgresql_extension_custom_scripts/pgmq/after-create.sql b/ansible/files/postgresql_extension_custom_scripts/pgmq/after-create.sql new file mode 100644 index 0000000..8b126d4 --- /dev/null +++ b/ansible/files/postgresql_extension_custom_scripts/pgmq/after-create.sql @@ -0,0 +1,19 @@ +do $$ +declare + extoid oid := (select oid from pg_extension where extname = 'pgmq'); + r record; +begin + set local search_path = ''; + update pg_extension set extowner = 'postgres'::regrole where extname = 'pgmq'; + for r in (select * from pg_depend where refobjid = extoid) loop + if r.classid = 'pg_type'::regclass then + execute(format('alter type %s owner to postgres;', r.objid::regtype)); + elsif r.classid = 'pg_proc'::regclass then + execute(format('alter function %s(%s) owner to postgres;', r.objid::regproc, pg_get_function_identity_arguments(r.objid))); + elsif r.classid = 'pg_class'::regclass then + execute(format('alter table %s owner to postgres;', r.objid::regclass)); + else + raise exception 'error on pgmq after-create script: unexpected object type %', r.classid; + end if; + end loop; +end $$; diff --git 
a/ansible/files/postgresql_extension_custom_scripts/postgis_tiger_geocoder/after-create.sql b/ansible/files/postgresql_extension_custom_scripts/postgis_tiger_geocoder/after-create.sql new file mode 100644 index 0000000..0bf02d4 --- /dev/null +++ b/ansible/files/postgresql_extension_custom_scripts/postgis_tiger_geocoder/after-create.sql @@ -0,0 +1,10 @@ +-- These schemas are created by extension to house all tiger related functions, owned by tealbase_admin +grant usage on schema tiger, tiger_data to postgres with grant option; +-- Give postgres permission to all existing entities, also allows postgres to grant other roles +grant all on all tables in schema tiger, tiger_data to postgres with grant option; +grant all on all routines in schema tiger, tiger_data to postgres with grant option; +grant all on all sequences in schema tiger, tiger_data to postgres with grant option; +-- Update default privileges so that new entities are also accessible by postgres +alter default privileges in schema tiger, tiger_data grant all on tables to postgres with grant option; +alter default privileges in schema tiger, tiger_data grant all on routines to postgres with grant option; +alter default privileges in schema tiger, tiger_data grant all on sequences to postgres with grant option; diff --git a/ansible/files/postgresql_extension_custom_scripts/postgres_fdw/after-create.sql b/ansible/files/postgresql_extension_custom_scripts/postgres_fdw/after-create.sql new file mode 100644 index 0000000..1e83ee9 --- /dev/null +++ b/ansible/files/postgresql_extension_custom_scripts/postgres_fdw/after-create.sql @@ -0,0 +1,21 @@ +do $$ +declare + is_super boolean; +begin + is_super = ( + select usesuper + from pg_user + where usename = 'postgres' + ); + + -- Need to be superuser to own FDWs, so we temporarily make postgres superuser. 
+ if not is_super then + alter role postgres superuser; + end if; + + alter foreign data wrapper postgres_fdw owner to postgres; + + if not is_super then + alter role postgres nosuperuser; + end if; +end $$; diff --git a/ansible/files/start-envoy.sh b/ansible/files/start-envoy.sh new file mode 100644 index 0000000..edd6fe0 --- /dev/null +++ b/ansible/files/start-envoy.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +set -eou pipefail + +if [[ $(cat /sys/module/ipv6/parameters/disable) = 1 ]]; then + sed -i -e "s/address: '::'/address: '0.0.0.0'/" -e 's/ipv4_compat: true/ipv4_compat: false/' /etc/envoy/lds.yaml +else + sed -i -e "s/address: '0.0.0.0'/address: '::'/" -e 's/ipv4_compat: false/ipv4_compat: true/' /etc/envoy/lds.yaml +fi + +# Workaround using `tee` to get `/dev/stdout` access logging to work, see: +# https://github.com/envoyproxy/envoy/issues/8297#issuecomment-620659781 +exec /opt/envoy --config-path /etc/envoy/envoy.yaml --restart-epoch "${RESTART_EPOCH}" 2>&1 | tee diff --git a/ansible/files/sysstat.sysstat b/ansible/files/sysstat.sysstat new file mode 100644 index 0000000..52b7d07 --- /dev/null +++ b/ansible/files/sysstat.sysstat @@ -0,0 +1,36 @@ +# How long to keep log files (in days). +# Used by sa2(8) script +# If value is greater than 28, then use sadc's option -D to prevent older +# data files from being overwritten. See sadc(8) and sysstat(5) manual pages. +HISTORY=7 + +# Compress (using xz, gzip or bzip2) sa and sar files older than (in days): +COMPRESSAFTER=10 + +# Parameters for the system activity data collector (see sadc(8) manual page) +# which are used for the generation of log files. +# By default contains the `-S DISK' option responsible for generating disk +# statisitcs. Use `-S XALL' to collect all available statistics. +SADC_OPTIONS="-S DISK" + +# Directory where sa and sar files are saved. The directory must exist. +SA_DIR=/var/log/sysstat + +# Compression program to use. 
+ZIP="xz" + +# By default sa2 script generates yesterday's summary, since the cron job +# usually runs right after midnight. If you want sa2 to generate the summary +# of the same day (for example when cron job runs at 23:53) set this variable. +#YESTERDAY=no + +# By default sa2 script generates reports files (the so called sarDD files). +# Set this variable to false to disable reports generation. +#REPORTS=false + +# The sa1 and sa2 scripts generate system activity data and report files in +# the /var/log/sysstat directory. By default the files are created with umask 0022 +# and are therefore readable for all users. Change this variable to restrict +# the permissions on the files (e.g. use 0027 to adhere to more strict +# security standards). +UMASK=0022 diff --git a/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.service b/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.service new file mode 100644 index 0000000..5e70943 --- /dev/null +++ b/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.service @@ -0,0 +1,11 @@ +[Unit] +Description=Check if systemd-networkd has broken NDisc routes and fix +Requisite=systemd-networkd.service +After=systemd-networkd.service + +[Service] +Type=oneshot +# This needs to be root for the service restart to work +User=root +Group=root +ExecStart=/usr/local/bin/systemd-networkd-check-and-fix.sh diff --git a/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.sh b/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.sh new file mode 100644 index 0000000..af00b41 --- /dev/null +++ b/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +# Check for occurrences of an NDisc log error +# NOTE: --since timer flag must match the cadence of systemd timer unit. Risk of repeat matches and restart loop +journalctl --no-pager --unit systemd-networkd --since "1 minutes ago" --grep "Could not set NDisc route" >/dev/null +NDISC_ERROR=$? 
+ +if systemctl is-active --quiet systemd-networkd.service && [ "${NDISC_ERROR}" == 0 ]; then + echo "$(date) systemd-network running but NDisc routes are broken. Restarting systemd.networkd.service" + /usr/bin/systemctl restart systemd-networkd.service + exit # no need to check further +fi + +# check for routes +ROUTES=$(ip -6 route list) + +if ! echo "${ROUTES}" | grep default >/dev/null || ! echo "${ROUTES}" | grep "::1 dev lo">/dev/null; then + echo "IPv6 routing table messed up. Restarting systemd.networkd.service" + /usr/bin/systemctl restart systemd-networkd.service +fi diff --git a/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.timer b/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.timer new file mode 100644 index 0000000..93c0836 --- /dev/null +++ b/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.timer @@ -0,0 +1,9 @@ +[Unit] +Description=Check if systemd-networkd has broken NDisc routes and fix + +[Timer] +# NOTE: cadence must match that of the journalctl search (--since). Risk of repeat matches and restart loop +OnCalendar=minutely + +[Install] +WantedBy=timers.target diff --git a/ansible/files/systemd-resolved.conf b/ansible/files/systemd-resolved.conf new file mode 100644 index 0000000..9280d88 --- /dev/null +++ b/ansible/files/systemd-resolved.conf @@ -0,0 +1,8 @@ +# the default is RestartSec=0. If the service fails to start because +# of a systemic issue (e.g. rare case when disk is full) it will +# quickly hit the burst limit (default of 5 failures within 10secs) +# and thereafter be placed in a failed state. By increasing the +# restart interval, we avoid that, and ensure that the service will be +# started back up once any underlying issues are resolved. 
+[Service] +RestartSec=3 diff --git a/ansible/manifest-playbook.yml b/ansible/manifest-playbook.yml new file mode 100644 index 0000000..da79c58 --- /dev/null +++ b/ansible/manifest-playbook.yml @@ -0,0 +1,75 @@ +- hosts: localhost + gather_facts: no + + vars_files: + - ./vars.yml + + tasks: + - name: Write out image manifest + action: template src=files/manifest.json dest=./image-manifest-{{ ami_release_version }}.json + + - name: Upload image manifest + shell: | + aws s3 cp ./image-manifest-{{ ami_release_version }}.json s3://{{ internal_artifacts_bucket }}/manifests/postgres-{{ ami_release_version }}/software-manifest.json + + # upload software artifacts of interest + # Generally - download, extract, repack as xz archive, upload + # currently, we upload gotrue, adminapi, postgrest + - name: gotrue - download commit archive + get_url: + url: "https://github.com/tealbase/gotrue/releases/download/v{{ gotrue_release }}/auth-v{{ gotrue_release }}-arm64.tar.gz" + dest: /tmp/auth-v{{ gotrue_release }}-arm64.tar.gz + checksum: "{{ gotrue_release_checksum }}" + timeout: 60 + + - name: PostgREST - download ubuntu binary archive (arm) + get_url: + url: "https://github.com/PostgREST/postgrest/releases/download/v{{ postgrest_release }}/postgrest-v{{ postgrest_release }}-ubuntu-aarch64.tar.xz" + dest: /tmp/postgrest-{{ postgrest_release }}-arm64.tar.xz + checksum: "{{ postgrest_arm_release_checksum }}" + timeout: 60 + + - name: Download adminapi archive + get_url: + url: "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/tealbase-admin-api/v{{ adminapi_release }}/tealbase-admin-api_{{ adminapi_release }}_linux_arm64.tar.gz" + dest: "/tmp/adminapi.tar.gz" + timeout: 90 + + - name: adminapi - unpack archive in /tmp + unarchive: + remote_src: yes + src: /tmp/adminapi.tar.gz + dest: /tmp + + - name: adminapi - pack archive + shell: | + cd /tmp && tar -cJf tealbase-admin-api-{{ adminapi_release }}-arm64.tar.xz tealbase-admin-api + + - name: Download admin-mgr archive + 
get_url: + url: "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/admin-mgr/v{{ adminmgr_release }}/admin-mgr_{{ adminmgr_release }}_linux_arm64.tar.gz" + dest: "/tmp/admin-mgr.tar.gz" + timeout: 90 + + - name: admin-mgr - unpack archive in /tmp + unarchive: + remote_src: yes + src: /tmp/admin-mgr.tar.gz + dest: /tmp + + - name: admin-mgr - pack archive + shell: | + cd /tmp && tar -cJf admin-mgr-{{ adminmgr_release }}-arm64.tar.xz admin-mgr + + - name: upload archives + shell: | + aws s3 cp /tmp/{{ item.file }} s3://{{ internal_artifacts_bucket }}/upgrades/{{ item.service }}/{{ item.file }} + with_items: + - service: gotrue + file: auth-v{{ gotrue_release }}-arm64.tar.gz + - service: postgrest + file: postgrest-{{ postgrest_release }}-arm64.tar.xz + - service: tealbase-admin-api + file: tealbase-admin-api-{{ adminapi_release }}-arm64.tar.xz + - service: admin-mgr + file: admin-mgr-{{ adminmgr_release }}-arm64.tar.xz diff --git a/ansible/playbook-docker.yml b/ansible/playbook-docker.yml deleted file mode 100644 index c5fc60e..0000000 --- a/ansible/playbook-docker.yml +++ /dev/null @@ -1,19 +0,0 @@ -- name: Build tealbase Postgres - hosts: localhost - gather_facts: false - - vars_files: - - ./vars.yml - - tasks: - - name: Setup container - import_tasks: tasks/docker/setup.yml - - - name: Install Postgres extensions - import_tasks: tasks/setup-extensions.yml - - - name: Finalize docker - import_tasks: tasks/docker/finalize.yml - - - name: Cleanup container - import_tasks: tasks/docker/cleanup.yml diff --git a/ansible/playbook.yml b/ansible/playbook.yml index 77f4cbb..aba045c 100644 --- a/ansible/playbook.yml +++ b/ansible/playbook.yml @@ -3,7 +3,6 @@ pre_tasks: - import_tasks: tasks/setup-system.yml - vars_files: - ./vars.yml @@ -14,7 +13,7 @@ dest: "00-schema.sql", } - { source: "stat_extension.sql", dest: "01-extension.sql" } - + environment: PATH: /usr/lib/postgresql/bin:{{ ansible_env.PATH }} @@ -30,68 +29,87 @@ - name: Install Postgres from source 
import_tasks: tasks/setup-postgres.yml - - name: Install Postgres extensions - import_tasks: tasks/setup-extensions.yml - - name: Install PgBouncer import_tasks: tasks/setup-pgbouncer.yml tags: - install-pgbouncer - install-tealbase-internal + when: debpkg_mode or nixpkg_mode - name: Install WAL-G import_tasks: tasks/setup-wal-g.yml + when: debpkg_mode or nixpkg_mode - name: Install Gotrue import_tasks: tasks/setup-gotrue.yml tags: - install-gotrue - install-tealbase-internal - + when: debpkg_mode or nixpkg_mode + - name: Install PostgREST import_tasks: tasks/setup-postgrest.yml tags: - install-postgrest - install-tealbase-internal + when: debpkg_mode or nixpkg_mode + + - name: Install Envoy + import_tasks: tasks/setup-envoy.yml + tags: + - install-tealbase-internal + when: debpkg_mode or nixpkg_mode - name: Install Kong import_tasks: tasks/setup-kong.yml tags: - install-tealbase-internal + when: debpkg_mode or nixpkg_mode - name: Install nginx import_tasks: tasks/setup-nginx.yml tags: - install-tealbase-internal + when: debpkg_mode or nixpkg_mode - name: Install tealbase specific content import_tasks: tasks/setup-tealbase-internal.yml tags: - install-tealbase-internal + when: debpkg_mode or nixpkg_mode - - name: Start Postgres Database - systemd: - name: postgresql - state: started - when: not ebssurrogate_mode + - name: Fix IPv6 NDisc issues + import_tasks: tasks/fix_ipv6_ndisc.yml + tags: + - install-tealbase-internal + when: debpkg_mode or nixpkg_mode - name: Start Postgres Database without Systemd become: yes become_user: postgres shell: - cmd: /usr/bin/pg_ctl -D /var/lib/postgresql/data start - when: ebssurrogate_mode + cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data start + when: debpkg_mode - name: Adjust APT update intervals copy: src: files/apt_periodic dest: /etc/apt/apt.conf.d/10periodic - + when: debpkg_mode or nixpkg_mode + - name: Transfer init SQL files copy: src: files/{{ item.source }} dest: /tmp/{{ item.dest }} loop: "{{ sql_files 
}}" + when: debpkg_mode or stage2_nix + + - name: Create postgres role + become: yes + become_user: postgres + shell: + cmd: /usr/lib/postgresql/bin/psql --username=tealbase_admin -d postgres -c "create role postgres superuser login; alter database postgres owner to postgres;" + when: debpkg_mode or stage2_nix - name: Execute init SQL files become: yes @@ -99,25 +117,30 @@ shell: cmd: /usr/lib/postgresql/bin/psql -f /tmp/{{ item.dest }} loop: "{{ sql_files }}" + when: debpkg_mode or stage2_nix - name: Delete SQL scripts file: path: /tmp/{{ item.dest }} state: absent loop: "{{ sql_files }}" + when: debpkg_mode or stage2_nix - name: First boot optimizations import_tasks: tasks/internal/optimizations.yml tags: - install-tealbase-internal - + when: debpkg_mode or stage2_nix + - name: Finalize AMI import_tasks: tasks/finalize-ami.yml tags: - install-tealbase-internal - + when: debpkg_mode or nixpkg_mode + - name: Enhance fail2ban import_tasks: tasks/setup-fail2ban.yml + when: debpkg_mode or nixpkg_mode # Install EC2 instance connect # Only for AWS images @@ -138,29 +161,61 @@ update_cache: yes cache_valid_time: 3600 - # Put PG binaries in a directory under $PATH - - name: Find all files in /usr/lib/postgresql/bin - find: - paths: /usr/lib/postgresql/bin - register: postgresql_bin - - name: Clean out build dependencies import_tasks: tasks/clean-build-dependencies.yml - - name: Create symbolic links for Postgres binaries to /usr/bin/ + - name: Restart Postgres Database without Systemd become: yes + become_user: postgres shell: - cmd: "for fl in /usr/lib/postgresql/bin/* ; do ln -sf $fl /usr/bin/$(basename $fl) ; done" + cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data restart -o "-c shared_preload_libraries='pg_tle'" + when: debpkg_mode - name: Run migrations import_tasks: tasks/setup-migrations.yml + tags: + - migrations + when: debpkg_mode or stage2_nix - name: Stop Postgres Database without Systemd become: yes become_user: postgres shell: - cmd: 
/usr/bin/pg_ctl -D /var/lib/postgresql/data stop - when: ebssurrogate_mode + cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data stop + when: debpkg_mode - name: Run unit tests import_tasks: tasks/test-image.yml + tags: + - unit-tests + when: debpkg_mode or stage2_nix + + - name: Collect Postgres binaries + import_tasks: tasks/internal/collect-pg-binaries.yml + tags: + - collect-binaries + when: debpkg_mode + + - name: Install osquery from nixpkgs binary cache + become: yes + shell: | + sudo -u ubuntu bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:nixos/nixpkgs/f98ec4f73c762223d62bee706726138cb6ea27cc#osquery" + when: stage2_nix + + - name: Run osquery permission checks + become: yes + shell: | + sudo -u ubuntu bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && /usr/bin/python3 /tmp/ansible-playbook/ansible/files/permission_check.py" + when: stage2_nix + + - name: Remove osquery + become: yes + shell: | + sudo -u ubuntu bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile remove osquery" + when: stage2_nix + + - name: nix collect garbage + become: yes + shell: | + sudo -u ubuntu bash -c ". 
/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix-collect-garbage -d" + when: stage2_nix diff --git a/ansible/tasks/clean-build-dependencies.yml b/ansible/tasks/clean-build-dependencies.yml index 5bbcc80..43ec051 100644 --- a/ansible/tasks/clean-build-dependencies.yml +++ b/ansible/tasks/clean-build-dependencies.yml @@ -11,7 +11,11 @@ - g++-10 - g++-9 - gcc-10 + - make + - manpages + - manpages-dev - ninja-build + - patch - python2 state: absent autoremove: yes diff --git a/ansible/tasks/docker/cleanup.yml b/ansible/tasks/docker/cleanup.yml deleted file mode 100644 index aca7cc8..0000000 --- a/ansible/tasks/docker/cleanup.yml +++ /dev/null @@ -1,18 +0,0 @@ -- name: Cleanup - remove build dependencies - apt: - pkg: - - python3 - - rsync - - ca-certificates - - build-essential - - postgresql-server-dev-{{ postgresql_major }} - - curl - - git-core - - gpp - - cpp - - pkg-config - - apt-transport-https - - cmake - - ninja-build - - python - state: absent diff --git a/ansible/tasks/docker/finalize.yml b/ansible/tasks/docker/finalize.yml deleted file mode 100644 index 017a1d7..0000000 --- a/ansible/tasks/docker/finalize.yml +++ /dev/null @@ -1,24 +0,0 @@ -- name: PG logging conf - template: - src: files/postgresql_config/postgresql-stdout-log.conf - dest: /etc/postgresql/logging.conf - group: postgres - -- name: import pgsodium_getkey_urandom.sh - template: - src: files/pgsodium_getkey_urandom.sh.j2 - dest: "{{ pg_bindir }}/pgsodium_getkey.sh" - owner: postgres - group: postgres - mode: 0700 - -- name: allow pgsodium key generation - become: yes - file: - path: '{{ item }}' - recurse: yes - owner: postgres - group: postgres - mode: '0774' - with_items: - - /etc/postgresql-custom diff --git a/ansible/tasks/docker/setup.yml b/ansible/tasks/docker/setup.yml deleted file mode 100644 index 9d0062e..0000000 --- a/ansible/tasks/docker/setup.yml +++ /dev/null @@ -1,70 +0,0 @@ -- name: Install Python3 - raw: export DEBIAN_FRONTEND=noninteractive ; sh -c "apt update 
&& apt upgrade -y && apt install python3 -y" - timeout: 300 - -- name: Setup - install common dependencies - apt: - pkg: - - rsync - - ca-certificates - - build-essential - - postgresql-server-dev-{{ postgresql_major }} - - curl - - git-core - - gpp - - cpp - - pkg-config - - apt-transport-https - - cmake - - ninja-build - - python - - systemtap-sdt-dev - - ccache - - clang - update_cache: yes - install_recommends: no - -# Find platform architecture and set as a variable -- name: Setup - finding platform architecture - shell: if [ $(uname -m) = "aarch64" ]; then echo "arm64"; else echo "amd64"; fi - register: platform_output -- set_fact: - platform: "{{ platform_output.stdout }}" - -- name: determining number of processors - shell: nproc - register: nproc_output -- set_fact: - parallel_jobs: "{{ nproc_output.stdout }}" - -- name: Setup - import postgresql.conf - template: - src: files/postgresql_config/postgresql.conf.j2 - dest: /etc/postgresql/postgresql.conf - -- name: Setup - import postgresql.conf - synchronize: - src: files/postgresql_config/pg_hba.conf.j2 - dest: /etc/postgresql/pg_hba.conf - -- name: Setup - import postgresql.conf - synchronize: - src: files/postgresql_config/pg_ident.conf.j2 - dest: /etc/postgresql/pg_ident.conf - -- set_fact: - regex_string: "#unix_socket_directories = '/tmp'" - -- name: Setup - modify unix_socket_directories - become: yes - replace: - path: /etc/postgresql/postgresql.conf - regexp: '{{ regex_string }}' - replace: unix_socket_directories = '/var/run/postgresql' - -- name: Setup - modify unix_socket_directories - become: yes - replace: - path: /etc/postgresql/postgresql.conf - regexp: '{{ regex_string }}' - replace: unix_socket_directories = '/var/run/postgresql' diff --git a/ansible/tasks/finalize-ami.yml b/ansible/tasks/finalize-ami.yml index 269cde1..411ca33 100644 --- a/ansible/tasks/finalize-ami.yml +++ b/ansible/tasks/finalize-ami.yml @@ -53,6 +53,11 @@ - { file: "logrotate-walg.conf" } - { file: 
"logrotate-postgres-auth.conf" } +- name: Ensure default Postgres logrotate config is removed + file: + path: /etc/logrotate.d/postgresql-common + state: absent + - name: Disable cron access copy: src: files/cron.deny @@ -62,7 +67,7 @@ shell: cmd: | cp /usr/lib/systemd/system/logrotate.timer /etc/systemd/system/logrotate.timer - sed -i -e 's;daily;*:0/10;' /etc/systemd/system/logrotate.timer + sed -i -e 's;daily;*:0/5;' /etc/systemd/system/logrotate.timer systemctl reenable logrotate.timer become: yes @@ -73,3 +78,4 @@ owner: postgres group: postgres mode: 0700 + when: debpkg_mode or stage2_nix diff --git a/ansible/tasks/fix_ipv6_ndisc.yml b/ansible/tasks/fix_ipv6_ndisc.yml new file mode 100644 index 0000000..7489a2f --- /dev/null +++ b/ansible/tasks/fix_ipv6_ndisc.yml @@ -0,0 +1,33 @@ +--- +- name: fix Network - systemd timer file + copy: + dest: /etc/systemd/system/systemd-networkd-check-and-fix.timer + src: "files/systemd-networkd/systemd-networkd-check-and-fix.timer" + owner: root + group: root + mode: 0644 + +- name: fix Network - systemd service file + copy: + dest: /etc/systemd/system/systemd-networkd-check-and-fix.service + src: "files/systemd-networkd/systemd-networkd-check-and-fix.service" + owner: root + group: root + mode: 0644 + +- name: fix Network - detect script + copy: + dest: /usr/local/bin/systemd-networkd-check-and-fix.sh + src: "files/systemd-networkd/systemd-networkd-check-and-fix.sh" + owner: root + group: root + mode: 0700 + +- name: fix Network - reload systemd + systemd: + daemon_reload: yes + +- name: fix Network - enable systemd timer + systemd: + name: systemd-networkd-check-and-fix.timer + enabled: true diff --git a/ansible/tasks/internal/admin-api.yml b/ansible/tasks/internal/admin-api.yml index 05c2f06..d050d54 100644 --- a/ansible/tasks/internal/admin-api.yml +++ b/ansible/tasks/internal/admin-api.yml @@ -1,7 +1,7 @@ - name: adminapi - system user user: name: adminapi - groups: 
root,admin,kong,pgbouncer,postgres,postgrest,systemd-journal,wal-g + groups: root,admin,envoy,kong,pgbouncer,postgres,postgrest,systemd-journal,vector,wal-g append: yes - name: Move shell scripts to /root dir @@ -13,17 +13,13 @@ loop: - { file: "grow_fs.sh" } - { file: "manage_readonly_mode.sh" } - - { file: "pg_upgrade_check.sh" } - - { file: "pg_upgrade_complete.sh" } - - { file: "pg_upgrade_initiate.sh" } - - { file: "pg_upgrade_prepare.sh" } - - { file: "pg_upgrade_pgsodium_getkey.sh" } - { file: "pg_egress_collect.pl" } - name: give adminapi user permissions copy: src: files/adminapi.sudoers.conf dest: /etc/sudoers.d/adminapi + mode: "0644" - name: perms for adminapi shell: | @@ -58,11 +54,36 @@ owner: adminapi state: directory +- name: adminapi - pg_upgrade scripts dir + file: + path: /etc/adminapi/pg_upgrade_scripts + owner: adminapi + state: directory + +- name: Move shell scripts to /etc/adminapi/pg_upgrade_scripts/ + copy: + src: "files/admin_api_scripts/pg_upgrade_scripts/{{ item.file }}" + dest: "/etc/adminapi/pg_upgrade_scripts/{{ item.file }}" + mode: "0755" + owner: adminapi + loop: + - { file: "check.sh" } + - { file: "complete.sh" } + - { file: "initiate.sh" } + - { file: "prepare.sh" } + - { file: "pgsodium_getkey.sh" } + - { file: "common.sh" } + - name: adminapi - create service file template: src: files/adminapi.service.j2 dest: /etc/systemd/system/adminapi.service +- name: adminapi - create service file for commence backup process + template: + src: files/commence-backup.service.j2 + dest: /etc/systemd/system/commence-backup.service + - name: UFW - Allow connections to adminapi ports ufw: rule: allow diff --git a/ansible/tasks/internal/collect-pg-binaries.yml b/ansible/tasks/internal/collect-pg-binaries.yml new file mode 100644 index 0000000..7f652f7 --- /dev/null +++ b/ansible/tasks/internal/collect-pg-binaries.yml @@ -0,0 +1,49 @@ +- name: Collect Postgres binaries - create collection directory + file: + path: /tmp/pg_binaries/{{ 
postgresql_major }}/ + state: directory + +- name: Collect Postgres binaries - collect binaries and libraries + copy: + remote_src: yes + src: /usr/lib/postgresql/{{ postgresql_major }}/{{ item }}/ + dest: /tmp/pg_binaries/{{ postgresql_major }}/{{ item }}/ + with_items: + - bin + - lib + +- name: Collect Postgres libraries - collect libraries which are in /usr/lib/postgresql/lib/ + copy: + remote_src: yes + src: /usr/lib/postgresql/lib/ + dest: /tmp/pg_binaries/{{ postgresql_major }}/lib/ + +- name: Collect Postgres libraries - collect libraries which are in /var/lib/postgresql/extension/ + copy: + remote_src: yes + src: /var/lib/postgresql/extension/ + dest: /tmp/pg_binaries/{{ postgresql_major }}/lib/ + +- name: Collect Postgres libraries - collect latest libpq + copy: + remote_src: yes + src: /usr/lib/aarch64-linux-gnu/libpq.so.5 + dest: /tmp/pg_binaries/{{ postgresql_major }}/lib/libpq.so.5 + +- name: Collect Postgres binaries - collect shared files + copy: + remote_src: yes + src: /usr/share/postgresql/{{ postgresql_major }}/ + dest: /tmp/pg_binaries/{{ postgresql_major }}/share/ + +- name: Collect Postgres binaries - create tarfile + archive: + path: /tmp/pg_binaries/ + dest: /tmp/pg_binaries.tar.gz + remove: yes + +- name: Fetch tarfile to local + fetch: + src: /tmp/pg_binaries.tar.gz + dest: /tmp/ + flat: true diff --git a/ansible/tasks/internal/install-salt.yml b/ansible/tasks/internal/install-salt.yml new file mode 100644 index 0000000..73cd6ee --- /dev/null +++ b/ansible/tasks/internal/install-salt.yml @@ -0,0 +1,47 @@ +- name: Add apt repository for Saltstack (arm) + block: + - name: Ensure /etc/apt/keyrings directory exists + file: + path: /etc/apt/keyrings + state: directory + mode: '0755' + + - name: salt gpg key + ansible.builtin.get_url: + url: https://packages.broadcom.com/artifactory/api/security/keypair/SaltProjectKey/public + dest: /etc/apt/keyrings/salt-archive-keyring-2023.pgp + mode: '0644' + + - name: salt apt repo + 
ansible.builtin.apt_repository: + repo: "deb [signed-by=/etc/apt/keyrings/salt-archive-keyring-2023.pgp arch=arm64] https://packages.broadcom.com/artifactory/saltproject-deb/ stable main" + filename: 'salt.list' + state: present + when: platform == "arm64" + +- name: Add apt repository for Saltstack (amd) + block: + - name: Ensure /etc/apt/keyrings directory exists + file: + path: /etc/apt/keyrings + state: directory + mode: '0755' + + - name: salt gpg key + ansible.builtin.get_url: + url: https://packages.broadcom.com/artifactory/api/security/keypair/SaltProjectKey/public + dest: /etc/apt/keyrings/salt-archive-keyring-2023.pgp + mode: '0644' + + - name: salt apt repo + ansible.builtin.apt_repository: + repo: "deb [signed-by=/etc/apt/keyrings/salt-archive-keyring-2023.pgp arch=amd64] https://packages.broadcom.com/artifactory/saltproject-deb/ stable main" + filename: 'salt.list' + state: present + when: platform == "amd64" + +- name: Salt minion install + apt: + name: salt-minion + state: present + update_cache: yes diff --git a/ansible/tasks/internal/optimizations.yml b/ansible/tasks/internal/optimizations.yml index 895accc..42a0a24 100644 --- a/ansible/tasks/internal/optimizations.yml +++ b/ansible/tasks/internal/optimizations.yml @@ -1,39 +1,31 @@ -- name: ensure services are stopped - community.general.snap: - name: amazon-ssm-agent - state: absent - failed_when: not ebssurrogate_mode - -- name: ensure services are stopped and disabled for first boot +- name: ensure services are stopped and disabled for first boot debian build systemd: enabled: no name: '{{ item }}' state: stopped with_items: - - snapd - postgresql - pgbouncer - fail2ban - motd-news - vector - failed_when: not ebssurrogate_mode - -- name: Remove snapd - apt: - state: absent - pkg: - - snapd - failed_when: not ebssurrogate_mode + - lvm2-monitor + - salt-minion + when: debpkg_mode -- name: ensure services are stopped and disabled for first boot +- name: ensure services are stopped and disabled for 
first boot nix build systemd: enabled: no name: '{{ item }}' state: stopped - masked: yes with_items: - - lvm2-monitor - failed_when: not ebssurrogate_mode + - postgresql + - pgbouncer + - fail2ban + - motd-news + - vector + - salt-minion + when: stage2_nix - name: disable man-db become: yes @@ -44,4 +36,4 @@ - man-db - popularity-contest - ubuntu-advantage-tools - failed_when: not ebssurrogate_mode + when: debpkg_mode or stage2_nix diff --git a/ansible/tasks/internal/postgres-exporter.yml b/ansible/tasks/internal/postgres-exporter.yml index b4c1aed..0292157 100644 --- a/ansible/tasks/internal/postgres-exporter.yml +++ b/ansible/tasks/internal/postgres-exporter.yml @@ -3,16 +3,22 @@ rule: allow port: "9187" -- name: create directories +- name: create directories - systemd unit file: state: directory - path: "{{ item }}" + path: /etc/systemd/system/postgres_exporter.service.d owner: root mode: '0700' become: yes - with_items: - - /opt/postgres_exporter - - /etc/systemd/system/postgres_exporter.service.d + +- name: create directories - service files + file: + state: directory + path: /opt/postgres_exporter + owner: postgres + group: postgres + mode: '0775' + become: yes - name: download postgres exporter get_url: diff --git a/ansible/tasks/internal/postgresql-prestart.yml b/ansible/tasks/internal/postgresql-prestart.yml new file mode 100644 index 0000000..46671d5 --- /dev/null +++ b/ansible/tasks/internal/postgresql-prestart.yml @@ -0,0 +1,7 @@ +- name: postgres_prestart - create service file + template: + src: files/postgres_prestart.sh.j2 + dest: /usr/local/bin/postgres_prestart.sh + mode: a+x + owner: root + group: root diff --git a/ansible/tasks/postgres-extensions/01-postgis.yml b/ansible/tasks/postgres-extensions/01-postgis.yml index 1381740..ae3d4e5 100644 --- a/ansible/tasks/postgres-extensions/01-postgis.yml +++ b/ansible/tasks/postgres-extensions/01-postgis.yml @@ -30,7 +30,7 @@ - name: postgis - download SFCGAL dependency get_url: - url: 
"https://gitlab.com/Oslandia/SFCGAL/-/archive/v{{ sfcgal_release }}/SFCGAL-v{{ sfcgal_release }}.tar.gz" + url: "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/sfcgal/SFCGAL-v{{ sfcgal_release }}.tar.gz" dest: /tmp/SFCGAL-v{{ sfcgal_release }}.tar.gz checksum: "{{ sfcgal_release_checksum }}" timeout: 60 @@ -49,7 +49,8 @@ become: yes - name: postgis - build SFCGAL - make: + community.general.make: + target: all chdir: /tmp/SFCGAL-v{{ sfcgal_release }} jobs: "{{ parallel_jobs | default(omit) }}" become: yes @@ -78,7 +79,8 @@ become: yes - name: postgis - build - make: + community.general.make: + target: all chdir: /tmp/postgis-{{ postgis_release }} jobs: "{{ parallel_jobs | default(omit) }}" become: yes diff --git a/ansible/tasks/postgres-extensions/02-pgrouting.yml b/ansible/tasks/postgres-extensions/02-pgrouting.yml index 328d3e8..746870a 100644 --- a/ansible/tasks/postgres-extensions/02-pgrouting.yml +++ b/ansible/tasks/postgres-extensions/02-pgrouting.yml @@ -34,7 +34,8 @@ become: yes - name: pgRouting - build - make: + community.general.make: + target: all chdir: /tmp/pgrouting-{{ pgrouting_release }}/build jobs: "{{ parallel_jobs | default(omit) }}" become: yes diff --git a/ansible/tasks/postgres-extensions/04-pg_cron.yml b/ansible/tasks/postgres-extensions/04-pg_cron.yml index 787fe1d..d9a11c0 100644 --- a/ansible/tasks/postgres-extensions/04-pg_cron.yml +++ b/ansible/tasks/postgres-extensions/04-pg_cron.yml @@ -24,13 +24,6 @@ target: install become: yes -- name: pg_cron - set cron.database_name - become: yes - lineinfile: - path: /etc/postgresql/postgresql.conf - state: present - line: cron.database_name = 'postgres' - - name: pg_cron - cleanup file: state: absent diff --git a/ansible/tasks/postgres-extensions/06-pgjwt.yml b/ansible/tasks/postgres-extensions/06-pgjwt.yml index 4acc13c..61890bf 100644 --- a/ansible/tasks/postgres-extensions/06-pgjwt.yml +++ b/ansible/tasks/postgres-extensions/06-pgjwt.yml @@ -3,7 +3,7 @@ git: repo: 
https://github.com/michelp/pgjwt.git dest: /tmp/pgjwt - version: master + version: "{{ pgjwt_release }}" - name: pgjwt - install make: diff --git a/ansible/tasks/postgres-extensions/11-wal2json.yml b/ansible/tasks/postgres-extensions/11-wal2json.yml index 7d6db24..c5abde9 100644 --- a/ansible/tasks/postgres-extensions/11-wal2json.yml +++ b/ansible/tasks/postgres-extensions/11-wal2json.yml @@ -3,7 +3,7 @@ git: repo: https://github.com/eulerto/wal2json.git dest: /tmp/wal2json - version: "{{ wal2json_commit_sha }}" + version: "wal2json_{{ wal2json_release }}" - name: wal2json - install make: diff --git a/ansible/tasks/postgres-extensions/13-plv8.yml b/ansible/tasks/postgres-extensions/13-plv8.yml index 0a84daf..9f11735 100644 --- a/ansible/tasks/postgres-extensions/13-plv8.yml +++ b/ansible/tasks/postgres-extensions/13-plv8.yml @@ -26,7 +26,7 @@ git: repo: https://github.com/plv8/plv8.git dest: /tmp/plv8 - version: "{{ plv8_commit_version }}" + version: "v{{ plv8_release }}" become: yes - name: Create a symbolic link @@ -37,6 +37,13 @@ when: platform == "arm64" ignore_errors: yes # not needed for docker build +- name: plv8 - enable ccache + become: yes + replace: + path: /tmp/plv8/Makefiles/Makefile.docker + regexp: "^GN_ARGS =" + replace: GN_ARGS = cc_wrapper=\"env CCACHE_SLOPPINESS=time_macros ccache\" + - name: plv8 - build make: chdir: /tmp/plv8 diff --git a/ansible/tasks/postgres-extensions/14-pg_plan_filter.yml b/ansible/tasks/postgres-extensions/14-pg_plan_filter.yml index a776329..0fa0990 100644 --- a/ansible/tasks/postgres-extensions/14-pg_plan_filter.yml +++ b/ansible/tasks/postgres-extensions/14-pg_plan_filter.yml @@ -3,7 +3,7 @@ git: repo: https://github.com/pgexperts/pg_plan_filter.git dest: /tmp/pg_plan_filter - version: "{{ pg_plan_filter_commit_version }}" + version: "{{ pg_plan_filter_release }}" become: yes - name: pg_plan_filter - build diff --git a/ansible/tasks/postgres-extensions/17-pg_hashids.yml 
b/ansible/tasks/postgres-extensions/17-pg_hashids.yml index 4f751de..8bd0291 100644 --- a/ansible/tasks/postgres-extensions/17-pg_hashids.yml +++ b/ansible/tasks/postgres-extensions/17-pg_hashids.yml @@ -3,7 +3,7 @@ git: repo: https://github.com/iCyberon/pg_hashids.git dest: /tmp/pg_hashids - version: master + version: "{{ pg_hashids_release }}" - name: pg_hashids - build make: diff --git a/ansible/tasks/postgres-extensions/18-pgsodium.yml b/ansible/tasks/postgres-extensions/18-pgsodium.yml index a808e11..95bc9a6 100644 --- a/ansible/tasks/postgres-extensions/18-pgsodium.yml +++ b/ansible/tasks/postgres-extensions/18-pgsodium.yml @@ -7,7 +7,7 @@ - name: libsodium - download libsodium get_url: - url: "https://download.libsodium.org/libsodium/releases/libsodium-{{ libsodium_release }}.tar.gz" + url: "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/libsodium/libsodium-{{ libsodium_release }}.tar.gz" dest: /tmp/libsodium-{{ libsodium_release }}.tar.gz checksum: "{{ libsodium_release_checksum }}" timeout: 60 @@ -53,7 +53,6 @@ - name: pgsodium - build make: chdir: /tmp/pgsodium-{{ pgsodium_release }} - jobs: "{{ parallel_jobs | default(omit) }}" become: yes - name: pgsodium - install diff --git a/ansible/tasks/postgres-extensions/19-pg_graphql.yml b/ansible/tasks/postgres-extensions/19-pg_graphql.yml index f72edfe..2a2c113 100644 --- a/ansible/tasks/postgres-extensions/19-pg_graphql.yml +++ b/ansible/tasks/postgres-extensions/19-pg_graphql.yml @@ -1,3 +1,3 @@ - name: install pg_graphql ansible.builtin.apt: - deb: "https://github.com/tealbase/pg_graphql/releases/download/{{ pg_graphql_release }}/pg_graphql-{{ pg_graphql_release }}-pg{{ postgresql_major }}-{{ platform }}-linux-gnu.deb" + deb: "https://github.com/tealbase/pg_graphql/releases/download/v{{ pg_graphql_release }}/pg_graphql-v{{ pg_graphql_release }}-pg{{ postgresql_major }}-{{ platform }}-linux-gnu.deb" diff --git a/ansible/tasks/postgres-extensions/21-auto_explain.yml 
b/ansible/tasks/postgres-extensions/21-auto_explain.yml deleted file mode 100644 index b6a16fa..0000000 --- a/ansible/tasks/postgres-extensions/21-auto_explain.yml +++ /dev/null @@ -1,7 +0,0 @@ - -- name: auto_explain - set auto_explain.log_min_duration - become: yes - lineinfile: - path: /etc/postgresql/postgresql.conf - state: present - line: auto_explain.log_min_duration = 10s diff --git a/ansible/tasks/postgres-extensions/22-pg_jsonschema.yml b/ansible/tasks/postgres-extensions/22-pg_jsonschema.yml index 0d78442..fe5824f 100644 --- a/ansible/tasks/postgres-extensions/22-pg_jsonschema.yml +++ b/ansible/tasks/postgres-extensions/22-pg_jsonschema.yml @@ -1,3 +1,3 @@ - name: install pg_jsonschema ansible.builtin.apt: - deb: "https://github.com/tealbase/pg_jsonschema/releases/download/{{ pg_jsonschema_release }}/pg_jsonschema-{{ pg_jsonschema_release }}-pg{{ postgresql_major }}-{{ platform }}-linux-gnu.deb" + deb: "https://github.com/tealbase/pg_jsonschema/releases/download/v{{ pg_jsonschema_release }}/pg_jsonschema-v{{ pg_jsonschema_release }}-pg{{ postgresql_major }}-{{ platform }}-linux-gnu.deb" diff --git a/ansible/tasks/postgres-extensions/24-pgroonga.yml b/ansible/tasks/postgres-extensions/24-pgroonga.yml index 37c7a28..f8baaa6 100644 --- a/ansible/tasks/postgres-extensions/24-pgroonga.yml +++ b/ansible/tasks/postgres-extensions/24-pgroonga.yml @@ -33,7 +33,8 @@ become: yes - name: groonga - build - make: + community.general.make: + target: all chdir: /tmp/groonga-{{ groonga_release }} jobs: "{{ parallel_jobs | default(omit) }}" become: yes @@ -59,7 +60,8 @@ become: yes - name: pgroonga - build - make: + community.general.make: + target: all chdir: /tmp/pgroonga-{{ pgroonga_release }} jobs: "{{ parallel_jobs | default(omit) }}" become: yes diff --git a/ansible/tasks/postgres-extensions/25-wrappers.yml b/ansible/tasks/postgres-extensions/25-wrappers.yml index 375c553..1aada50 100644 --- a/ansible/tasks/postgres-extensions/25-wrappers.yml +++ 
b/ansible/tasks/postgres-extensions/25-wrappers.yml @@ -1,3 +1,3 @@ - name: install wrappers ansible.builtin.apt: - deb: "https://github.com/tealbase/wrappers/releases/download/{{ wrappers_release }}/wrappers-{{ wrappers_release }}-pg{{ postgresql_major }}-{{ platform }}-linux-gnu.deb" + deb: "https://github.com/tealbase/wrappers/releases/download/v{{ wrappers_release }}/wrappers-v{{ wrappers_release }}-pg{{ postgresql_major }}-{{ platform }}-linux-gnu.deb" diff --git a/ansible/tasks/postgres-extensions/26-hypopg.yml b/ansible/tasks/postgres-extensions/26-hypopg.yml index eeb29be..4a9afcf 100644 --- a/ansible/tasks/postgres-extensions/26-hypopg.yml +++ b/ansible/tasks/postgres-extensions/26-hypopg.yml @@ -3,7 +3,7 @@ git: repo: https://github.com/HypoPG/hypopg.git dest: /tmp/hypopg - version: "{{ hypopg_commit_sha }}" + version: "{{ hypopg_release }}" - name: hypopg - install make: diff --git a/ansible/tasks/postgres-extensions/27-pg_repack.yml b/ansible/tasks/postgres-extensions/27-pg_repack.yml index 3a2aa96..81ca801 100644 --- a/ansible/tasks/postgres-extensions/27-pg_repack.yml +++ b/ansible/tasks/postgres-extensions/27-pg_repack.yml @@ -1,37 +1,38 @@ # pg_repack -- name: pg_repack - download & install dependencies - apt: - pkg: - - liblz4-dev - - libz-dev - - libzstd-dev - - libreadline-dev - update_cache: yes - install_recommends: no + - name: pg_repack - download & install dependencies + apt: + pkg: + - liblz4-dev + - libz-dev + - libzstd-dev + - libreadline-dev + update_cache: yes + install_recommends: no -- name: pg_repack - download latest release - git: - repo: https://github.com/reorg/pg_repack.git - dest: /tmp/pg_repack - version: "ver_{{ pg_repack_release }}" - become: yes + - name: pg_repack - download latest release + git: + repo: https://github.com/reorg/pg_repack.git + dest: /tmp/pg_repack + version: "ver_{{ pg_repack_release }}" + become: yes -- name: pg_repack - build - make: - chdir: /tmp/pg_repack - params: - USE_PGXS: 1 - become: yes + - 
name: pg_repack - build + make: + chdir: /tmp/pg_repack + params: + USE_PGXS: 1 + become: yes -- name: pg_repack - install - make: - chdir: /tmp/pg_repack - target: install - params: - USE_PGXS: 1 - become: yes + - name: pg_repack - install + make: + chdir: /tmp/pg_repack + target: install + params: + USE_PGXS: 1 + become: yes + + - name: pg_repack - cleanup + file: + state: absent + path: /tmp/pg_repack -- name: pg_repack - cleanup - file: - state: absent - path: /tmp/pg_repack diff --git a/ansible/tasks/postgres-extensions/28-pgvector.yml b/ansible/tasks/postgres-extensions/28-pgvector.yml index 05b523d..a673ab2 100644 --- a/ansible/tasks/postgres-extensions/28-pgvector.yml +++ b/ansible/tasks/postgres-extensions/28-pgvector.yml @@ -3,9 +3,9 @@ git: repo: https://github.com/pgvector/pgvector.git dest: /tmp/pgvector - version: '{{ pgvector_release }}' + version: 'v{{ pgvector_release }}' become: yes - + - name: pgvector - build make: chdir: /tmp/pgvector diff --git a/ansible/tasks/postgres-extensions/29-pg_tle.yml b/ansible/tasks/postgres-extensions/29-pg_tle.yml new file mode 100644 index 0000000..ea0b199 --- /dev/null +++ b/ansible/tasks/postgres-extensions/29-pg_tle.yml @@ -0,0 +1,12 @@ +# pg_tle +- name: pg_tle - download + git: + repo: https://github.com/aws/pg_tle.git + dest: /tmp/pg_tle + version: v{{ pg_tle_release }} + +- name: pg_tle - install + make: + chdir: /tmp/pg_tle + target: install + become: yes diff --git a/ansible/tasks/setup-docker.yml b/ansible/tasks/setup-docker.yml new file mode 100644 index 0000000..7b37f70 --- /dev/null +++ b/ansible/tasks/setup-docker.yml @@ -0,0 +1,80 @@ +- name: Copy extension packages + copy: + src: files/extensions/ + dest: /tmp/extensions/ + when: debpkg_mode + +# Builtin apt module does not support wildcard for deb paths +- name: Install extensions + shell: | + set -e + apt-get update + apt-get install -y --no-install-recommends /tmp/extensions/*.deb + when: debpkg_mode + +- name: pgsodium - determine postgres bin 
directory + shell: pg_config --bindir + register: pg_bindir_output + when: debpkg_mode + +- set_fact: + pg_bindir: "{{ pg_bindir_output.stdout }}" + when: debpkg_mode + +- name: pgsodium - set pgsodium.getkey_script + become: yes + lineinfile: + path: /etc/postgresql/postgresql.conf + state: present + # script is expected to be placed by finalization tasks for different target platforms + line: pgsodium.getkey_script= '{{ pg_bindir }}/pgsodium_getkey.sh' + when: debpkg_mode + +# supautils +- name: supautils - add supautils to session_preload_libraries + become: yes + replace: + path: /etc/postgresql/postgresql.conf + regexp: "#session_preload_libraries = ''" + replace: session_preload_libraries = 'supautils' + when: debpkg_mode or stage2_nix + +- name: supautils - write custom supautils.conf + template: + src: "files/postgresql_config/supautils.conf.j2" + dest: /etc/postgresql-custom/supautils.conf + mode: 0664 + owner: postgres + group: postgres + when: debpkg_mode or stage2_nix + +- name: supautils - copy extension custom scripts + copy: + src: files/postgresql_extension_custom_scripts/ + dest: /etc/postgresql-custom/extension-custom-scripts + become: yes + when: debpkg_mode or stage2_nix + +- name: supautils - chown extension custom scripts + file: + mode: 0775 + owner: postgres + group: postgres + path: /etc/postgresql-custom/extension-custom-scripts + recurse: yes + become: yes + when: debpkg_mode or stage2_nix + +- name: supautils - include /etc/postgresql-custom/supautils.conf in postgresql.conf + become: yes + replace: + path: /etc/postgresql/postgresql.conf + regexp: "#include = '/etc/postgresql-custom/supautils.conf'" + replace: "include = '/etc/postgresql-custom/supautils.conf'" + when: debpkg_mode or stage2_nix + +- name: Cleanup - extension packages + file: + path: /tmp/extensions + state: absent + when: debpkg_mode diff --git a/ansible/tasks/setup-envoy.yml b/ansible/tasks/setup-envoy.yml new file mode 100644 index 0000000..9843b55 --- /dev/null +++ 
b/ansible/tasks/setup-envoy.yml @@ -0,0 +1,60 @@ +- name: Envoy - system user + ansible.builtin.user: + name: envoy + +- name: Envoy - download binary + ansible.builtin.get_url: + checksum: "{{ envoy_release_checksum }}" + dest: /opt/envoy + group: envoy + mode: u+x + owner: envoy + # yamllint disable-line rule:line-length + url: "https://github.com/envoyproxy/envoy/releases/download/v{{ envoy_release }}/envoy-{{ envoy_release }}-linux-aarch_64" + +- name: Envoy - download hot restarter script + ansible.builtin.get_url: + checksum: "{{ envoy_hot_restarter_release_checksum }}" + dest: /opt/envoy-hot-restarter.py + group: envoy + mode: u+x + owner: envoy + # yamllint disable-line rule:line-length + url: https://raw.githubusercontent.com/envoyproxy/envoy/v{{ envoy_release }}/restarter/hot-restarter.py + +- name: Envoy - bump up ulimit + community.general.pam_limits: + domain: envoy + limit_item: nofile + limit_type: soft + value: 4096 + +- name: Envoy - create script to start envoy + ansible.builtin.copy: + dest: /opt/start-envoy.sh + group: envoy + mode: u+x + owner: envoy + src: files/start-envoy.sh + +- name: Envoy - create configuration files + ansible.builtin.copy: + dest: /etc/envoy/ + directory_mode: u=rwx,g=rwx,o=rx + group: envoy + mode: u=rw,g=rw,o=r + owner: envoy + src: files/envoy_config/ + +- name: Envoy - create service file + ansible.builtin.copy: + dest: /etc/systemd/system/envoy.service + mode: u=rw,g=r,o=r + src: files/envoy.service + +- name: Envoy - disable service + ansible.builtin.systemd: + daemon_reload: true + enabled: false + name: envoy + state: stopped diff --git a/ansible/tasks/setup-extensions.yml b/ansible/tasks/setup-extensions.yml index b5b0b8c..a560ae8 100644 --- a/ansible/tasks/setup-extensions.yml +++ b/ansible/tasks/setup-extensions.yml @@ -39,6 +39,8 @@ - name: Install pljava import_tasks: tasks/postgres-extensions/12-pljava.yml + tags: + - legacy-incompatible - name: Install pg_plan_filter import_tasks: 
tasks/postgres-extensions/14-pg_plan_filter.yml @@ -57,15 +59,14 @@ - name: Install pg_graphql import_tasks: tasks/postgres-extensions/19-pg_graphql.yml + tags: + - legacy-incompatible - name: Install pg_stat_monitor import_tasks: tasks/postgres-extensions/20-pg_stat_monitor.yml -- name: Install auto_explain - import_tasks: tasks/postgres-extensions/21-auto_explain.yml - -# - name: Install vault -# import_tasks: tasks/postgres-extensions/23-vault.yml +- name: Install vault + import_tasks: tasks/postgres-extensions/23-vault.yml - name: Install PGroonga import_tasks: tasks/postgres-extensions/24-pgroonga.yml @@ -75,13 +76,16 @@ - name: Install hypopg import_tasks: tasks/postgres-extensions/26-hypopg.yml - -- name: Install pg_repack - import_tasks: tasks/postgres-extensions/27-pg_repack.yml + - name: Install pg_repack + import_tasks: tasks/postgres-extensions/27-pg_repack.yml + - name: Install pgvector import_tasks: tasks/postgres-extensions/28-pgvector.yml +- name: Install Trusted Language Extensions + import_tasks: tasks/postgres-extensions/29-pg_tle.yml + - name: Verify async task status import_tasks: tasks/postgres-extensions/99-finish_async_tasks.yml when: async_mode diff --git a/ansible/tasks/setup-fail2ban.yml b/ansible/tasks/setup-fail2ban.yml index abd26cf..ee0029d 100644 --- a/ansible/tasks/setup-fail2ban.yml +++ b/ansible/tasks/setup-fail2ban.yml @@ -5,16 +5,19 @@ path: /etc/fail2ban/jail.conf regexp: bantime = 10m replace: bantime = 3600 + when: debpkg_mode or nixpkg_mode - name: Configure journald copy: src: files/fail2ban_config/jail-ssh.conf dest: /etc/fail2ban/jail.d/sshd.local + when: debpkg_mode or nixpkg_mode - name: configure fail2ban to use nftables copy: src: files/fail2ban_config/jail.local dest: /etc/fail2ban/jail.local + when: debpkg_mode or nixpkg_mode # postgresql - name: import jail.d/postgresql.conf @@ -22,12 +25,14 @@ src: files/fail2ban_config/jail-postgresql.conf.j2 dest: /etc/fail2ban/jail.d/postgresql.conf become: yes + when: 
debpkg_mode or nixpkg_mode - name: import filter.d/postgresql.conf template: src: files/fail2ban_config/filter-postgresql.conf.j2 dest: /etc/fail2ban/filter.d/postgresql.conf become: yes + when: debpkg_mode or nixpkg_mode - name: create overrides dir file: @@ -36,11 +41,13 @@ group: root path: /etc/systemd/system/fail2ban.service.d mode: '0700' + when: debpkg_mode or nixpkg_mode - name: Custom systemd overrides copy: src: files/fail2ban_config/fail2ban.service.conf dest: /etc/systemd/system/fail2ban.service.d/overrides.conf + when: debpkg_mode or nixpkg_mode - name: add in tealbase specific ignore filters lineinfile: @@ -56,15 +63,18 @@ become: yes tags: - install-tealbase-internal + when: debpkg_mode or nixpkg_mode # Restart - name: fail2ban - restart systemd: name: fail2ban state: restarted + when: debpkg_mode or nixpkg_mode - name: fail2ban - disable service systemd: name: fail2ban enabled: no daemon_reload: yes + when: debpkg_mode or nixpkg_mode \ No newline at end of file diff --git a/ansible/tasks/setup-gotrue.yml b/ansible/tasks/setup-gotrue.yml index cb20007..19f733a 100644 --- a/ansible/tasks/setup-gotrue.yml +++ b/ansible/tasks/setup-gotrue.yml @@ -19,7 +19,7 @@ - name: gotrue - download commit archive get_url: - url: "https://github.com/tealbase/gotrue/releases/download/{{ gotrue_release }}/gotrue-{{ gotrue_release }}-{{ arch }}.tar.gz" + url: "https://github.com/tealbase/gotrue/releases/download/v{{ gotrue_release }}/auth-v{{ gotrue_release }}-{{ arch }}.tar.gz" dest: /tmp/gotrue.tar.gz checksum: "{{ gotrue_release_checksum }}" @@ -49,6 +49,11 @@ src: files/gotrue.service.j2 dest: /etc/systemd/system/gotrue.service +- name: gotrue - create optimizations file + template: + src: files/gotrue-optimizations.service.j2 + dest: /etc/systemd/system/gotrue-optimizations.service + - name: gotrue - reload systemd systemd: daemon_reload: yes diff --git a/ansible/tasks/setup-kong.yml b/ansible/tasks/setup-kong.yml index 09d6d2b..b34f96e 100644 --- 
a/ansible/tasks/setup-kong.yml +++ b/ansible/tasks/setup-kong.yml @@ -12,7 +12,7 @@ - name: Kong - download deb package get_url: - url: "https://download.konghq.com/gateway-2.x-ubuntu-{{ kong_release_target }}/pool/all/k/kong/{{ kong_deb }}" + url: "https://packages.konghq.com/public/gateway-28/deb/ubuntu/pool/{{ kong_release_target }}/main/k/ko/kong_2.8.1/{{ kong_deb }}" dest: /tmp/kong.deb checksum: "{{ kong_deb_checksum }}" diff --git a/ansible/tasks/setup-migrations.yml b/ansible/tasks/setup-migrations.yml index 570f776..6eea684 100644 --- a/ansible/tasks/setup-migrations.yml +++ b/ansible/tasks/setup-migrations.yml @@ -1,7 +1,7 @@ - name: Run migrate.sh script shell: ./migrate.sh register: retval - when: ebssurrogate_mode + when: debpkg_mode or stage2_nix args: chdir: /tmp/migrations/db failed_when: retval.rc != 0 @@ -10,4 +10,4 @@ file: path: "/root/MIGRATION-AMI" state: touch - when: ebssurrogate_mode + when: debpkg_mode or stage2_nix diff --git a/ansible/tasks/setup-nginx.yml b/ansible/tasks/setup-nginx.yml index 22b9486..77fb770 100644 --- a/ansible/tasks/setup-nginx.yml +++ b/ansible/tasks/setup-nginx.yml @@ -37,7 +37,8 @@ become: yes - name: nginx - build - make: + community.general.make: + target: build chdir: /tmp/nginx-{{ nginx_release }} jobs: "{{ parallel_jobs | default(omit) }}" become: yes diff --git a/ansible/tasks/setup-pgbouncer.yml b/ansible/tasks/setup-pgbouncer.yml index 5fa7608..4381ba2 100644 --- a/ansible/tasks/setup-pgbouncer.yml +++ b/ansible/tasks/setup-pgbouncer.yml @@ -98,18 +98,6 @@ dest: /etc/tmpfiles.d/pgbouncer.conf become: yes -- name: PgBouncer - add permissions for pgbouncer user - become: yes - lineinfile: - path: /etc/postgresql/pg_hba.conf - state: present - insertafter: '# Default:' - line: "{{ item }}" - with_items: - - "host all pgbouncer 0.0.0.0/0 reject" - - "host all pgbouncer 127.0.0.1/32 scram-sha-256" - - "# Connection configuration for pgbouncer user" - - name: PgBouncer - By default allow ssl connections. 
become: yes copy: diff --git a/ansible/tasks/setup-postgres.yml b/ansible/tasks/setup-postgres.yml index fe70f38..cbd7424 100644 --- a/ansible/tasks/setup-postgres.yml +++ b/ansible/tasks/setup-postgres.yml @@ -1,102 +1,96 @@ -# Downloading dependencies -- name: Postgres dependencies - apt: - pkg: - - build-essential - - pkg-config - - libreadline-dev - - zlib1g-dev - - flex - - bison - - libxml2-dev - - libxslt-dev - - libssl-dev - - libsystemd-dev - - libxml2-utils - - uuid-dev - - xsltproc - - ssl-cert - - liblz4-dev - - libicu-dev - -- name: Download LLVM & Clang - apt: - pkg: - - llvm-11-dev - - clang-11 +- name: Postgres - copy package + copy: + src: files/postgres/ + dest: /tmp/build/ + when: debpkg_mode + +- name: Postgres - add PPA + apt_repository: + repo: "deb [ trusted=yes ] file:///tmp/build ./" + state: present + when: debpkg_mode -- name: Download GCC 10 +- name: Postgres - install commons apt: - pkg: - - gcc-10 - - g++-10 + name: postgresql-common + install_recommends: no + when: debpkg_mode -- name: Switch to GCC 10 +- name: Do not create main cluster shell: - cmd: update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 --slave /usr/bin/g++ g++ /usr/bin/g++-10 --slave /usr/bin/gcov gcov /usr/bin/gcov-10 + cmd: sed -ri 's/#(create_main_cluster) .*$/\1 = false/' /etc/postgresql-common/createcluster.conf + when: debpkg_mode + +- name: Postgres - install server + apt: + name: postgresql-{{ postgresql_major }}={{ postgresql_release }}-1.pgdg20.04+1 + install_recommends: no + when: debpkg_mode + +- name: Postgres - remove PPA + apt_repository: + repo: "deb [ trusted=yes ] file:///tmp/build ./" + state: absent + when: debpkg_mode -# Setup permissions -- name: Update permissions for /var/tmp directory +- name: Postgres - cleanup package file: - path: /var/tmp/ - owner: root - group: root - mode: '1777' + path: /tmp/build + state: absent + when: debpkg_mode + +- name: install locales + apt: + name: locales + state: present become: yes + when: 
stage2_nix -# Building Postgres from source -- name: Postgres - download latest release - get_url: - url: https://ftp.postgresql.org/pub/source/v{{ postgresql_release }}/postgresql-{{ postgresql_release }}.tar.gz - dest: /tmp - checksum: "{{ postgresql_release_checksum }}" - timeout: 60 - -- name: Postgres - unpack archive - unarchive: - remote_src: yes - src: /tmp/postgresql-{{ postgresql_release }}.tar.gz - dest: /tmp - -- name: Setting CFLAGS (arm) - set_fact: - cflags: "-moutline-atomics -mtune=native -march=native -mcpu=native -fsigned-char" - when: platform == "arm64" - -- name: Setting CFLAGS (x86) - set_fact: - cflags: "-fsigned-char" - when: platform == "amd64" - -- name: Postgres - configure - shell: - cmd: CFLAGS='{{ cflags }}' LLVM_CONFIG=/usr/bin/llvm-config-11 CLANG=/usr/bin/clang-11 ./configure --with-llvm --with-openssl --with-systemd --with-uuid=e2fs --with-libxml --with-icu --with-lz4 --exec-prefix=/usr/lib/postgresql --datarootdir=/var/lib/postgresql - chdir: /tmp/postgresql-{{ postgresql_release }} +- name: configure locales + command: echo "C.UTF-8 UTF-8" > /etc/locale.gen && echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen + become: yes + when: stage2_nix + +- name: locale-gen + command: sudo locale-gen + when: stage2_nix -- name: Postgres - build - make: - target: world-bin - chdir: /tmp/postgresql-{{ postgresql_release }} - jobs: "{{ parallel_jobs | default(omit) }}" +- name: update-locale + command: sudo update-locale + when: stage2_nix -- name: Postgres - install - make: - target: install-world-bin - chdir: /tmp/postgresql-{{ postgresql_release }} +- name: Create symlink to /usr/lib/postgresql/bin + shell: + cmd: ln -s /usr/lib/postgresql/{{ postgresql_major }}/bin /usr/lib/postgresql/bin + when: debpkg_mode -- name: Create postgres group +- name: create ssl-cert group group: - name: postgres + name: ssl-cert state: present + when: nixpkg_mode +# the old method of installing from debian creates this group, but we must create it explicitly +# 
for the nix built version -# Create postgres user -- name: Create postgres user - user: +- name: create postgres group + group: name: postgres - shell: /bin/false - comment: Postgres user - group: postgres - groups: postgres,ssl-cert + state: present + when: nixpkg_mode + +- name: create postgres user + shell: adduser --system --home /var/lib/postgresql --no-create-home --shell /bin/bash --group --gecos "PostgreSQL administrator" postgres + args: + executable: /bin/bash + become: yes + when: nixpkg_mode + +- name: add postgres user to postgres group + shell: usermod -a -G ssl-cert postgres + args: + executable: /bin/bash + become: yes + when: nixpkg_mode - name: Create relevant directories file: @@ -106,10 +100,10 @@ owner: postgres group: postgres with_items: - - '/etc/postgresql' - - '/etc/postgresql-custom' + - '/home/postgres' - '/var/log/postgresql' - '/var/lib/postgresql' + when: debpkg_mode or nixpkg_mode - name: Allow adminapi to write custom config file: @@ -122,6 +116,7 @@ with_items: - '/etc/postgresql' - '/etc/postgresql-custom' + when: debpkg_mode or nixpkg_mode - name: create placeholder config files file: @@ -133,6 +128,7 @@ with_items: - 'generated-optimizations.conf' - 'custom-overrides.conf' + when: debpkg_mode or nixpkg_mode # Move Postgres configuration files into /etc/postgresql # Add postgresql.conf @@ -141,6 +137,7 @@ src: files/postgresql_config/postgresql.conf.j2 dest: /etc/postgresql/postgresql.conf group: postgres + when: debpkg_mode or nixpkg_mode # Add pg_hba.conf - name: import pg_hba.conf @@ -148,6 +145,7 @@ src: files/postgresql_config/pg_hba.conf.j2 dest: /etc/postgresql/pg_hba.conf group: postgres + when: debpkg_mode or nixpkg_mode # Add pg_ident.conf - name: import pg_ident.conf @@ -155,20 +153,27 @@ src: files/postgresql_config/pg_ident.conf.j2 dest: /etc/postgresql/pg_ident.conf group: postgres + when: debpkg_mode or nixpkg_mode -- name: Find all files in /usr/lib/postgresql/bin - find: - paths: /usr/lib/postgresql/bin - 
register: postgresql_bin +# Add custom config for read replicas set up +- name: Move custom read-replica.conf file to /etc/postgresql-custom/read-replica.conf + template: + src: "files/postgresql_config/custom_read_replica.conf.j2" + dest: /etc/postgresql-custom/read-replica.conf + mode: 0664 + owner: postgres + group: postgres + when: debpkg_mode or nixpkg_mode -- name: Create symbolic links for Postgres binaries to /usr/bin/ - become: yes - file: - src: "{{ item.path }}" - path: "/usr/bin/{{ item.path | basename }}" - state: link - force: yes - with_items: "{{ postgresql_bin.files }}" +# Install extensions before init +- name: Install Postgres extensions + import_tasks: tasks/setup-docker.yml + when: debpkg_mode or stage2_nix + +#stage 2 postgres tasks +- name: stage2 postgres tasks + import_tasks: tasks/stage2-setup-postgres.yml + when: stage2_nix # init DB - name: Create directory on data volume @@ -181,6 +186,7 @@ mode: 0750 with_items: - "/data/pgdata" + when: debpkg_mode or nixpkg_mode - name: Link database data_dir to data volume directory file: @@ -188,26 +194,60 @@ path: "/var/lib/postgresql/data" state: link force: yes + when: debpkg_mode or nixpkg_mode - name: Initialize the database become: yes become_user: postgres - shell: - cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data initdb -o "--allow-group-access" + shell: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data initdb -o "--allow-group-access" -o "--username=tealbase_admin" + vars: + ansible_command_timeout: 60 + when: debpkg_mode + +- name: Initialize the database stage2_nix + become: yes + become_user: postgres + shell: source /var/lib/postgresql/.bashrc && /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data initdb -o "--allow-group-access" -o "--username=tealbase_admin" + args: + executable: /bin/bash + environment: + LANG: en_US.UTF-8 + LANGUAGE: en_US.UTF-8 + LC_ALL: en_US.UTF-8 + LC_CTYPE: en_US.UTF-8 + LOCALE_ARCHIVE: /usr/lib/locale/locale-archive vars: 
ansible_command_timeout: 60 # Circumvents the following error: # "Timeout (12s) waiting for privilege escalation prompt" + when: stage2_nix - name: copy PG systemd unit template: src: files/postgresql_config/postgresql.service.j2 dest: /etc/systemd/system/postgresql.service + when: debpkg_mode or stage2_nix - name: copy optimizations systemd unit template: src: files/database-optimizations.service.j2 dest: /etc/systemd/system/database-optimizations.service + when: debpkg_mode or stage2_nix + +- name: Restart Postgres Database without Systemd + become: yes + become_user: postgres + shell: | + source /var/lib/postgresql/.bashrc + /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data start + environment: + LANG: en_US.UTF-8 + LANGUAGE: en_US.UTF-8 + LC_ALL: en_US.UTF-8 + LC_CTYPE: en_US.UTF-8 + LOCALE_ARCHIVE: /usr/lib/locale/locale-archive + when: stage2_nix + # Reload - name: System - systemd reload @@ -215,3 +255,33 @@ enabled: yes name: postgresql daemon_reload: yes + when: debpkg_mode or stage2_nix + +- name: Make sure .bashrc exists + file: + path: /var/lib/postgresql/.bashrc + state: touch + owner: postgres + group: postgres + when: nixpkg_mode + +- name: Add LOCALE_ARCHIVE to .bashrc + lineinfile: + dest: "/var/lib/postgresql/.bashrc" + line: 'export LOCALE_ARCHIVE=/usr/lib/locale/locale-archive' + create: yes + become: yes + when: nixpkg_mode + +- name: Add LANG items to .bashrc + lineinfile: + dest: "/var/lib/postgresql/.bashrc" + line: "{{ item }}" + loop: + - 'export LANG="en_US.UTF-8"' + - 'export LANGUAGE="en_US.UTF-8"' + - 'export LC_ALL="en_US.UTF-8"' + - 'export LANG="en_US.UTF-8"' + - 'export LC_CTYPE="en_US.UTF-8"' + become: yes + when: nixpkg_mode diff --git a/ansible/tasks/setup-postgrest.yml b/ansible/tasks/setup-postgrest.yml index 57b76e1..a98d199 100644 --- a/ansible/tasks/setup-postgrest.yml +++ b/ansible/tasks/setup-postgrest.yml @@ -1,6 +1,20 @@ - name: PostgREST - system user user: name=postgrest +- name: PostgREST - add Postgres PPA 
gpg key + apt_key: + url: https://www.postgresql.org/media/keys/ACCC4CF8.asc + state: present + +- name: PostgREST - add Postgres PPA + apt_repository: + repo: "deb http://apt.postgresql.org/pub/repos/apt/ focal-pgdg {{ postgresql_major }}" + state: present + +- name: PostgREST - update apt cache + apt: + update_cache: yes + # libpq is a C library that enables user programs to communicate with # the PostgreSQL database server. - name: PostgREST - system dependencies @@ -9,9 +23,20 @@ - libpq5 - libnuma-dev +- name: PostgREST - remove Postgres PPA gpg key + apt_key: + url: https://www.postgresql.org/media/keys/ACCC4CF8.asc + state: absent + +- name: PostgREST - remove Postgres PPA + apt_repository: + repo: "deb http://apt.postgresql.org/pub/repos/apt/ focal-pgdg {{ postgresql_major }}" + state: absent + - name: postgis - ensure dependencies do not get autoremoved shell: | set -e + apt-mark manual libpq5* apt-mark manual libnuma* apt-mark auto libnuma*-dev diff --git a/ansible/tasks/setup-system.yml b/ansible/tasks/setup-system.yml index 38fc7a9..0783813 100644 --- a/ansible/tasks/setup-system.yml +++ b/ansible/tasks/setup-system.yml @@ -1,6 +1,6 @@ - name: System - apt update and apt upgrade apt: update_cache=yes upgrade=yes - when: not ebssurrogate_mode + when: debpkg_mode or nixpkg_mode # SEE http://archive.vn/DKJjs#parameter-upgrade - name: Install required security updates @@ -8,13 +8,14 @@ pkg: - tzdata - linux-libc-dev - + when: debpkg_mode or nixpkg_mode # SEE https://github.com/georchestra/ansible/issues/55#issuecomment-588313638 # Without this, a similar error is faced - name: Install Ansible dependencies apt: pkg: - acl + when: debpkg_mode or nixpkg_mode - name: Install security tools apt: @@ -23,6 +24,7 @@ - fail2ban update_cache: yes cache_valid_time: 3600 + when: debpkg_mode or nixpkg_mode - name: Use nftables backend shell: | @@ -31,17 +33,44 @@ update-alternatives --set arptables /usr/sbin/arptables-nft update-alternatives --set ebtables 
/usr/sbin/ebtables-nft systemctl restart ufw + when: debpkg_mode or nixpkg_mode +- name: Create Sysstat log directory + file: + path: /var/log/sysstat + state: directory + when: debpkg_mode or nixpkg_mode + - name: Install other useful tools apt: pkg: + - bwm-ng + - htop + - net-tools + - ngrep - sysstat + - vim-tiny update_cache: yes + when: debpkg_mode or nixpkg_mode + +- name: Configure sysstat + copy: + src: files/sysstat.sysstat + dest: /etc/sysstat/sysstat + when: debpkg_mode or nixpkg_mode + +- name: Configure default sysstat + copy: + src: files/default.sysstat + dest: /etc/default/sysstat + when: debpkg_mode or nixpkg_mode + - name: Adjust APT update intervals copy: src: files/apt_periodic dest: /etc/apt/apt.conf.d/10periodic + when: debpkg_mode or nixpkg_mode # Find platform architecture and set as a variable - name: finding platform architecture @@ -55,35 +84,57 @@ tags: - update - update-only + when: debpkg_mode or nixpkg_mode or stage2_nix + +- name: create overrides dir + file: + state: directory + owner: root + group: root + path: /etc/systemd/system/systemd-resolved.service.d + mode: '0700' + when: debpkg_mode or nixpkg_mode + +- name: Custom systemd overrides for resolved + copy: + src: files/systemd-resolved.conf + dest: /etc/systemd/system/systemd-resolved.service.d/override.conf + when: debpkg_mode or nixpkg_mode - name: System - Create services.slice template: src: files/services.slice.j2 dest: /etc/systemd/system/services.slice - when: not ebssurrogate_mode + when: debpkg_mode or nixpkg_mode + - name: System - systemd reload systemd: daemon_reload=yes + when: debpkg_mode or nixpkg_mode - name: Configure journald copy: src: files/journald.conf dest: /etc/systemd/journald.conf + when: debpkg_mode or nixpkg_mode - name: reload systemd-journald systemd: name: systemd-journald state: restarted + when: debpkg_mode or nixpkg_mode - name: Configure logind copy: src: files/logind.conf dest: /etc/systemd/logind.conf + when: debpkg_mode or nixpkg_mode - 
name: reload systemd-logind systemd: name: systemd-logind state: restarted + when: debpkg_mode or nixpkg_mode - name: enable timestamps for shell history copy: @@ -93,12 +144,42 @@ mode: 0644 owner: root group: root + when: debpkg_mode or nixpkg_mode - name: set hosts file copy: content: | 127.0.0.1 localhost + ::1 localhost dest: /etc/hosts mode: 0644 owner: root group: root + when: debpkg_mode or stage2_nix + +#Set Sysctl params for restarting the OS on oom after 10 +- name: Set vm.panic_on_oom=1 + ansible.builtin.sysctl: + name: vm.panic_on_oom + value: '1' + state: present + reload: yes + when: debpkg_mode or nixpkg_mode + +- name: Set kernel.panic=10 + ansible.builtin.sysctl: + name: kernel.panic + value: '10' + state: present + reload: yes + when: debpkg_mode or nixpkg_mode + +- name: configure system + ansible.posix.sysctl: + name: 'net.core.somaxconn' + value: 16834 + +- name: configure system + ansible.posix.sysctl: + name: 'net.ipv4.ip_local_port_range' + value: '1025 65000' diff --git a/ansible/tasks/setup-tealbase-internal.yml b/ansible/tasks/setup-tealbase-internal.yml index c9113e2..aea3a78 100644 --- a/ansible/tasks/setup-tealbase-internal.yml +++ b/ansible/tasks/setup-tealbase-internal.yml @@ -29,6 +29,10 @@ shell: "/tmp/aws/install --update" become: true +- name: AWS CLI - configure ipv6 support for s3 + shell: | + aws configure set default.s3.use_dualstack_endpoint true + - name: install Vector for logging become: yes apt: @@ -52,6 +56,17 @@ src: files/vector.service.j2 dest: /etc/systemd/system/vector.service +- name: configure tmpfiles for postgres - overwrites upstream package + template: + src: files/postgresql_config/tmpfiles.postgresql.conf + dest: /etc/tmpfiles.d/postgresql-common.conf + +- name: fix permissions for vector config to be managed + shell: + cmd: | + chown -R vector:vector /etc/vector + chmod 0775 /etc/vector + - name: vector - reload systemd systemd: daemon_reload: yes @@ -80,9 +95,6 @@ - name: Install Postgres exporter 
import_tasks: internal/postgres-exporter.yml -- name: Install supautils - import_tasks: internal/supautils.yml - - name: Install admin-mgr import_tasks: internal/admin-mgr.yml @@ -94,3 +106,11 @@ - name: Install pg_egress_collect import_tasks: internal/pg_egress_collect.yml + +- name: Install PostgreSQL prestart script + import_tasks: internal/postgresql-prestart.yml + +- name: Install salt minion + import_tasks: internal/install-salt.yml + tags: + - aws-only diff --git a/ansible/tasks/setup-wal-g.yml b/ansible/tasks/setup-wal-g.yml index 439bb28..bbc64cd 100644 --- a/ansible/tasks/setup-wal-g.yml +++ b/ansible/tasks/setup-wal-g.yml @@ -27,7 +27,7 @@ git: repo: https://github.com/wal-g/wal-g.git dest: /tmp/wal-g - version: "{{ wal_g_release }}" + version: "v{{ wal_g_release }}" become: yes - name: wal-g - pg_clean @@ -53,7 +53,7 @@ ignore_errors: yes - name: wal-g - build and install - make: + community.general.make: chdir: /tmp/wal-g target: pg_install jobs: "{{ parallel_jobs | default(omit) }}" diff --git a/ansible/tasks/stage2-setup-postgres.yml b/ansible/tasks/stage2-setup-postgres.yml new file mode 100644 index 0000000..df7b7f0 --- /dev/null +++ b/ansible/tasks/stage2-setup-postgres.yml @@ -0,0 +1,234 @@ +# - name: Install openjdk11 for pljava from nix binary cache +# become: yes +# shell: | +# sudo -u postgres bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install nixpkgs#openjdk11" +# It was decided to leave pljava disabled at https://github.com/tealbase/postgres/pull/690 therefore removing this task +- name: Install Postgres from nix binary cache + become: yes + shell: | + sudo -u postgres bash -c ". 
/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:tealbase/postgres/{{ git_commit_sha }}#psql_15/bin" +#TODO (samrose) switch pg_prove sourcing to develop branch once PR is merged + when: stage2_nix + +- name: Install pg_prove from nix binary cache + become: yes + shell: | + sudo -u postgres bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:tealbase/postgres/{{ git_commit_sha }}#pg_prove" + when: stage2_nix + +- name: Install tealbase-groonga from nix binary cache + become: yes + shell: | + sudo -u postgres bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:tealbase/postgres/{{ git_commit_sha }}#tealbase-groonga" + when: stage2_nix + +- name: Set ownership and permissions for /etc/ssl/private + become: yes + file: + path: /etc/ssl/private + owner: root + group: postgres + mode: '0750' + when: stage2_nix + +- name: Set permissions for postgresql.env + become: yes + file: + path: /etc/environment.d/postgresql.env + owner: postgres + group: postgres + mode: '0644' + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/bin directory exists + file: + path: /usr/lib/postgresql/bin + state: directory + owner: postgres + group: postgres + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/share directory exists + file: + path: /usr/lib/postgresql/share/postgresql + state: directory + owner: postgres + group: postgres + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/share/contrib directory exists + file: + path: /usr/lib/postgresql/share/postgresql/contrib + state: directory + owner: postgres + group: postgres + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/share/timezonesets directory exists + file: + path: /usr/lib/postgresql/share/postgresql/timezonesets + state: directory + owner: postgres + group: postgres + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/share/tsearch_data directory exists + file: + path: 
/usr/lib/postgresql/share/postgresql/tsearch_data + state: directory + owner: postgres + group: postgres + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/share/extension directory exists + file: + path: /usr/lib/postgresql/share/postgresql/extension + state: directory + owner: postgres + group: postgres + when: stage2_nix + +# - name: Ensure /usr/lib/postgresql/share/postgresql/pljava directory exists +# file: +# path: /usr/lib/postgresql/share/postgresql/pljava +# state: directory +# owner: postgres +# group: postgres +# when: stage2_nix +# It was decided to leave pljava disabled at https://github.com/tealbase/postgres/pull/690 therefore removing this task + +- name: import pgsodium_getkey script + template: + src: /tmp/ansible-playbook/ansible/files/pgsodium_getkey_readonly.sh.j2 + dest: "/usr/lib/postgresql/bin/pgsodium_getkey.sh" + owner: postgres + group: postgres + mode: 0700 + when: stage2_nix + +- name: Create symbolic links from /var/lib/postgresql/.nix-profile/bin to /usr/lib/postgresql/bin + file: + src: "{{ item }}" + dest: "/usr/lib/postgresql/bin/{{ item | basename }}" + state: link + with_fileglob: + - "/var/lib/postgresql/.nix-profile/bin/*" + become: yes + when: stage2_nix + +- name: Check if /usr/bin/pg_config exists + stat: + path: /usr/bin/pg_config + register: pg_config_stat + when: stage2_nix + +- name: Remove existing /usr/bin/pg_config if it is not a symlink + file: + path: /usr/bin/pg_config + state: absent + when: pg_config_stat.stat.exists and not pg_config_stat.stat.islnk and stage2_nix + become: yes + +- name: Create symbolic links from /var/lib/postgresql/.nix-profile/bin to /usr/bin + file: + src: "{{ item }}" + dest: "/usr/bin/{{ item | basename }}" + state: link + with_fileglob: + - "/var/lib/postgresql/.nix-profile/bin/*" + become: yes + when: stage2_nix + +- name: Ensure postgres user has ownership of symlink + file: + path: "/usr/bin/{{ item | basename }}" + owner: postgres + group: postgres + with_fileglob: + - 
"/var/lib/postgresql/.nix-profile/bin/*" + become: yes + when: stage2_nix + +# - name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/pljava to /usr/lib/postgresql/share/postgresql/pljava +# file: +# src: "{{ item }}" +# dest: "/usr/lib/postgresql/share/postgresql/pljava/{{ item | basename }}" +# state: link +# with_fileglob: +# - "/var/lib/postgresql/.nix-profile/share/pljava/*" +# become: yes +# It was decided to leave pljava disabled at https://github.com/tealbase/postgres/pull/690 therefore removing this task + +- name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql to /usr/lib/postgresql/share/postgresql + file: + src: "{{ item }}" + dest: "/usr/lib/postgresql/share/postgresql/{{ item | basename }}" + state: link + with_fileglob: + - "/var/lib/postgresql/.nix-profile/share/postgresql/*" + become: yes + when: stage2_nix + +- name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql/extension to /usr/lib/postgresql/share/postgresql/extension + file: + src: "{{ item }}" + dest: "/usr/lib/postgresql/share/postgresql/extension/{{ item | basename }}" + state: link + with_fileglob: + - "/var/lib/postgresql/.nix-profile/share/postgresql/extension/*" + become: yes + when: stage2_nix + +- name: create destination directory + file: + path: /usr/lib/postgresql/share/postgresql/contrib/ + state: directory + recurse: yes + when: stage2_nix + +- name: Recursively create symbolic links and set permissions for the contrib/postgis-* dir + shell: > + sudo mkdir -p /usr/lib/postgresql/share/postgresql/contrib && \ + sudo find /var/lib/postgresql/.nix-profile/share/postgresql/contrib/ -mindepth 1 -type d -exec sh -c 'for dir do sudo ln -s "$dir" "/usr/lib/postgresql/share/postgresql/contrib/$(basename "$dir")"; done' sh {} + \ + && chown -R postgres:postgres "/usr/lib/postgresql/share/postgresql/contrib/" + become: yes + when: stage2_nix + +- name: Create symbolic links from 
/var/lib/postgresql/.nix-profile/share/postgresql/timezonesets to /usr/lib/postgresql/share/postgresql/timeszonesets + file: + src: "{{ item }}" + dest: "/usr/lib/postgresql/share/postgresql/timezonesets/{{ item | basename }}" + state: link + with_fileglob: + - "/var/lib/postgresql/.nix-profile/share/postgresql/timezonesets/*" + become: yes + when: stage2_nix + +- name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql/tsearch_data to /usr/lib/postgresql/share/postgresql/tsearch_data + file: + src: "{{ item }}" + dest: "/usr/lib/postgresql/share/postgresql/tsearch_data/{{ item | basename }}" + state: link + with_fileglob: + - "/var/lib/postgresql/.nix-profile/share/postgresql/tsearch_data/*" + become: yes + when: stage2_nix + +- set_fact: + pg_bindir: "/usr/lib/postgresql/bin" + when: stage2_nix + +- name: pgsodium - set pgsodium.getkey_script + become: yes + lineinfile: + path: /etc/postgresql/postgresql.conf + state: present + # script is expected to be placed by finalization tasks for different target platforms + line: pgsodium.getkey_script= '{{ pg_bindir }}/pgsodium_getkey.sh' + when: stage2_nix + +- name: Append GRN_PLUGINS_DIR to /etc/environment.d/postgresql.env + ansible.builtin.lineinfile: + path: /etc/environment.d/postgresql.env + line: 'GRN_PLUGINS_DIR=/var/lib/postgresql/.nix-profile/lib/groonga/plugins' + become: yes \ No newline at end of file diff --git a/ansible/tasks/test-image.yml b/ansible/tasks/test-image.yml index 36c0046..3b3edc2 100644 --- a/ansible/tasks/test-image.yml +++ b/ansible/tasks/test-image.yml @@ -2,45 +2,61 @@ apt: pkg: - libtap-parser-sourcehandler-pgtap-perl + when: debpkg_mode - name: Temporarily disable PG Sodium references in config become: yes become_user: postgres shell: cmd: sed -i.bak -e "s/pg_net,\ pgsodium,\ timescaledb/pg_net,\ timescaledb/g" -e "s/pgsodium.getkey_script=/#pgsodium.getkey_script=/g" /etc/postgresql/postgresql.conf - when: ebssurrogate_mode + when: debpkg_mode or stage2_nix 
- name: Start Postgres Database to load all extensions. become: yes become_user: postgres shell: - cmd: /usr/bin/pg_ctl -D /var/lib/postgresql/data start "-o -c config_file=/etc/postgresql/postgresql.conf" - when: ebssurrogate_mode + cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data start "-o -c config_file=/etc/postgresql/postgresql.conf" + when: debpkg_mode -- name: Enable pgTAP extension - shell: /usr/lib/postgresql/bin/psql -U postgres -h localhost -d postgres -c "CREATE extension pgtap"; - when: ebssurrogate_mode - -- name: Create function for testing extensions - shell: /usr/lib/postgresql/bin/psql -U postgres -h localhost -d postgres -f /tmp/unit-tests/test-extensions.sql; - when: ebssurrogate_mode +- name: Stop Postgres Database in stage 2 + become: yes + become_user: postgres + shell: source /var/lib/postgresql/.bashrc && /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data stop + args: + executable: /bin/bash + environment: + LANG: en_US.UTF-8 + LANGUAGE: en_US.UTF-8 + LC_ALL: en_US.UTF-8 + LC_CTYPE: en_US.UTF-8 + LOCALE_ARCHIVE: /usr/lib/locale/locale-archive + when: stage2_nix -- name: Extension Installation tests - shell: /usr/bin/pg_prove -U tealbase_admin -h localhost -d postgres -v /tmp/unit-tests/verify-extensions.sql - register: retval - when: ebssurrogate_mode +- name: Start Postgres Database to load all extensions. 
+ become: yes + become_user: postgres + shell: source /var/lib/postgresql/.bashrc && /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data start "-o -c config_file=/etc/postgresql/postgresql.conf" + args: + executable: /bin/bash + environment: + LANG: en_US.UTF-8 + LANGUAGE: en_US.UTF-8 + LC_ALL: en_US.UTF-8 + LC_CTYPE: en_US.UTF-8 + LOCALE_ARCHIVE: /usr/lib/locale/locale-archive + when: stage2_nix -- name: Run Unit tests (with filename unit-test-*) on Postgres Database +- name: Run Unit tests (with filename unit-test-*) on Postgres Database shell: /usr/bin/pg_prove -U postgres -h localhost -d postgres -v /tmp/unit-tests/unit-test-*.sql register: retval failed_when: retval.rc != 0 - when: ebssurrogate_mode + when: debpkg_mode or stage2_nix - name: Run migrations tests - shell: /usr/bin/pg_prove -U postgres -h localhost -d postgres -v tests/test.sql + shell: /usr/bin/pg_prove -U tealbase_admin -h localhost -d postgres -v tests/test.sql register: retval failed_when: retval.rc != 0 - when: ebssurrogate_mode + when: debpkg_mode or stage2_nix args: chdir: /tmp/migrations @@ -49,19 +65,11 @@ become_user: postgres shell: cmd: mv /etc/postgresql/postgresql.conf.bak /etc/postgresql/postgresql.conf - when: ebssurrogate_mode - -- name: Drop pgTap extension - shell: /usr/lib/postgresql/bin/psql -U postgres -h localhost -d postgres -c "DROP extension if exists pgtap"; - when: ebssurrogate_mode - -- name: Drop extension test function - shell: /usr/lib/postgresql/bin/psql -U postgres -h localhost -d postgres -c "DROP FUNCTION IF EXISTS install_available_extensions_and_test"; - when: ebssurrogate_mode + when: debpkg_mode or stage2_nix - name: Reset db stats shell: /usr/lib/postgresql/bin/psql --no-password --no-psqlrc -d postgres -h localhost -U tealbase_admin -c 'SELECT pg_stat_statements_reset(); SELECT pg_stat_reset();' - when: ebssurrogate_mode + when: debpkg_mode or stage2_nix - name: remove pg_prove apt: @@ -69,20 +77,11 @@ - libtap-parser-sourcehandler-pgtap-perl 
state: absent autoremove: yes + when: debpkg_mode -- name: Get pg_config file details - ansible.builtin.stat: - path: /usr/bin/pg_config - register: st - -- name: Ensure pg_config points to our install of PG - ansible.builtin.fail: - msg: "Incorrect symlink for pg_config" - when: st.stat.lnk_target != '/usr/lib/postgresql/bin/pg_config' - -- name: Stop Postgres Database +- name: Stop Postgres Database become: yes become_user: postgres shell: - cmd: /usr/bin/pg_ctl -D /var/lib/postgresql/data stop - when: ebssurrogate_mode + cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data stop + when: debpkg_mode or stage2_nix diff --git a/ansible/vars.yml b/ansible/vars.yml index 55eda23..09d0125 100644 --- a/ansible/vars.yml +++ b/ansible/vars.yml @@ -7,23 +7,32 @@ postgresql_release: "15.1" postgresql_release_checksum: sha256:ea2cf059a85882654b989acd07edc121833164a30340faee0d3615cf7058e66c # Non Postgres Extensions -pgbouncer_release: "1.17.0" -pgbouncer_release_checksum: sha256:657309b7bc5c7a85cbf70a9a441b535f7824123081eabb7ba86d00349a256e23 +pgbouncer_release: "1.19.0" +pgbouncer_release_checksum: sha256:af0b05e97d0e1fd9ad45fe00ea6d2a934c63075f67f7e2ccef2ca59e3d8ce682 -postgrest_release: "10.1.2" -postgrest_arm_release_checksum: sha1:bc1a811dc0482853b226c644551f290411573f23 -postgrest_x86_release_checksum: sha1:96844c1c66d16d6bde41b4c34173f92fe4a3385b +# to get these use +# wget https://github.com/PostgREST/postgrest/releases/download/v12.2.3/postgrest-v12.2.3-ubuntu-aarch64.tar.xz -q -O- | sha1sum +# wget https://github.com/PostgREST/postgrest/releases/download/v12.2.3/postgrest-v12.2.3-linux-static-x64.tar.xz -q -O- | sha1sum +postgrest_release: "12.2.3" +postgrest_arm_release_checksum: sha1:fbfd6613d711ce1afa25c42d5df8f1b017f396f9 +postgrest_x86_release_checksum: sha1:61c513f91a8931be4062587b9d4a18b42acf5c05 -gotrue_release: v2.40.1 -gotrue_release_checksum: sha1:aa650eae81bca18ccb575a2c28bff90480a91eae +gotrue_release: 2.165.1 +gotrue_release_checksum: 
sha1:bbd62327d8612ac756177dde81d5368b660ca4c8 aws_cli_release: "2.2.7" +salt_minion_version: 3007 + golang_version: "1.19.3" golang_version_checksum: arm64: sha256:99de2fe112a52ab748fb175edea64b313a0c8d51d6157dba683a6be163fd5eab amd64: sha256:74b9640724fd4e6bb0ed2a1bc44ae813a03f1e72a4c76253e2d5c015494430ba +envoy_release: 1.28.0 +envoy_release_checksum: sha1:b0a06e9cfb170f1993f369beaa5aa9d7ec679ce5 +envoy_hot_restarter_release_checksum: sha1:6d43b89d266fb2427a4b51756b649883b0617eda + kong_release_target: focal # if it works, it works kong_deb: kong_2.8.1_arm64.deb kong_deb_checksum: sha1:2086f6ccf8454fe64435252fea4d29d736d7ec61 @@ -31,18 +40,18 @@ kong_deb_checksum: sha1:2086f6ccf8454fe64435252fea4d29d736d7ec61 nginx_release: 1.22.0 nginx_release_checksum: sha1:419efb77b80f165666e2ee406ad8ae9b845aba93 -wal_g_release: "v2.0.1" +wal_g_release: "2.0.1" sfcgal_release: "1.3.10" -sfcgal_release_checksum: sha1:f4add34a00afb0b5f594685fc646565a2bda259b +sfcgal_release_checksum: sha256:4e39b3b2adada6254a7bdba6d297bb28e1a9835a9f879b74f37e2dab70203232 -postgres_exporter_release: "0.9.0" +postgres_exporter_release: "0.15.0" postgres_exporter_release_checksum: - arm64: sha256:d869c16791481dc8475487ad84ae4371a63f9b399898ca1c666eead5cccf7182 - amd64: sha256:ff541bd3ee19c0ae003d71424a75edfcc8695e828dd20d5b4555ce433c89d60b + arm64: sha256:29ba62d538b92d39952afe12ee2e1f4401250d678ff4b354ff2752f4321c87a0 + amd64: sha256:cb89fc5bf4485fb554e0d640d9684fae143a4b2d5fa443009bd29c59f9129e84 -adminapi_release: 0.34.0 -adminmgr_release: 0.3.0 +adminapi_release: 0.71.1 +adminmgr_release: 0.24.0 # Postgres Extensions postgis_release: "3.3.2" @@ -54,12 +63,14 @@ pgrouting_release_checksum: sha256:a4e034efee8cf67582b67033d9c3ff714a09d8f542533 pgtap_release: "1.2.0" pgtap_release_checksum: sha256:9c7c3de67ea41638e14f06da5da57bac6f5bd03fea05c165a0ec862205a5c052 -pg_cron_release: "1.4.2" -pg_cron_release_checksum: sha256:3652722ea98d94d8e27bf5e708dd7359f55a818a43550d046c5064c98876f1a8 
+pg_cron_release: "1.6.2" +pg_cron_release_checksum: sha256:9f4eb3193733c6fa93a6591406659aac54b82c24a5d91ffaf4ec243f717d94a0 pgaudit_release: "1.7.0" pgaudit_release_checksum: sha256:8f4a73e451c88c567e516e6cba7dc1e23bc91686bb6f1f77f8f3126d428a8bd8 +pgjwt_release: 9742dab1b2f297ad3811120db7b21451bca2d3c9 + pgsql_http_release: "1.5.0" pgsql_http_release_checksum: sha256:43efc9e82afcd110f205b86b8d28d1355d39b6b134161e9661a33a1346818f5d @@ -67,57 +78,76 @@ plpgsql_check_release: "2.2.5" plpgsql_check_release_checksum: sha256:6c3a3c5faf3f9689425c6db8a6b20bf4cd5e7144a055e29538eae980c7232573 pg_safeupdate_release: "1.4" -pg_safeupdate_release_checksum: sha1:942dacd0ebce6123944212ffb3d6b5a0c09174f9 +pg_safeupdate_release_checksum: sha256:ff01d3d444d35924bd3d745c5695696292e2855042da4c30fe728fb3b6648122 timescaledb_release: "2.9.1" +timescaledb_release_checksum: sha256:883638f2e79d25ec88ee58f603f3c81c999b6364cb4c799919d363f04089b47b -wal2json_commit_sha: 770872b890f9e122290f178e7c7bfa19ec7afa94 +wal2json_release: "2_5" +wal2json_release_checksum: sha256:b516653575541cf221b99cf3f8be9b6821f6dbcfc125675c85f35090f824f00e -supautils_release: "1.7.0" -supautils_release_checksum: sha256:e2353040262bd7a1720099f4a03ec485b05c74a202956aa1361e422f8765c6b3 +supautils_release: "2.5.0" +supautils_release_arm64_deb_checksum: sha256:406e4a816f719bd6c4b2143e9bb38078fbe60d7e85018ec0aed5d76924e28000 +supautils_release_amd64_deb_checksum: sha256:71f182b478d8aaf167609dd382875cdce3fbe992e888988b3d51cdad39e08202 +supautils_release_tar_checksum: sha256:07c41244e4374248da9c2df2822152f3ae8f1e74c8a92d361300480193219b63 -pljava_release: "a5bfeca83cea2c4b844758a9c76db337392892e3" -pljava_release_checksum: sha1:550bea791c404c9d62050fd9c330e162bab20763 +pljava_release: master +pljava_release_checksum: sha256:e99b1c52f7b57f64c8986fe6ea4a6cc09d78e779c1643db060d0ac66c93be8b6 -plv8_commit_version: bcddd92f71530e117f2f98b92d206dafe824f73a +plv8_release: "3.1.5" +plv8_release_checksum: 
sha256:2edf9a219844b2b6abae09c0bdb840c5b0d6e3dd418631744c7326c0b107cc10 -pg_plan_filter_commit_version: 5081a7b5cb890876e67d8e7486b6a64c38c9a492 +pg_plan_filter_release: 5081a7b5cb890876e67d8e7486b6a64c38c9a492 -pg_net_release: "0.7.1" -pg_net_release_checksum: sha1:1df576745a320b6faa78cff5067cad9bdd4cfbcb +pg_net_release: "0.9.2" +# To obtain the checksum use `wget https://github.com/tealbase/pg_net/archive/refs/tags/v0.9.2.tar.gz -q -O- | sha256sum` +pg_net_release_checksum: sha256:268c87c09ccd26e6566d2522cb02ba7918b4cbda37eb5076d2e790bbd994a087 rum_release: "1.3.13" rum_release_checksum: sha256:6ab370532c965568df6210bd844ac6ba649f53055e48243525b0b7e5c4d69a7d +pg_hashids_release: cd0e1b31d52b394a0df64079406a14a4f7387cd6 + vector_x86_deb: "https://packages.timber.io/vector/0.22.3/vector_0.22.3-1_amd64.deb" vector_arm_deb: "https://packages.timber.io/vector/0.22.3/vector_0.22.3-1_arm64.deb" libsodium_release: "1.0.18" -libsodium_release_checksum: sha1:795b73e3f92a362fabee238a71735579bf46bb97 +libsodium_release_checksum: sha256:6f504490b342a4f8a4c4a02fc9b866cbef8622d5df4e5452b46be121e46636c1 -pgsodium_release: "3.1.5" -pgsodium_release_checksum: sha256:bec847388a5db2a60ea9d991962ce27954d91b4c41cbcc7bd8e34472c69114d1 +pgsodium_release: "3.1.8" +pgsodium_release_checksum: sha256:4d027aeee5163f3f33740d269938a120d1593a41c3701c920d2a1de80aa97486 -pg_graphql_release: "v1.1.0" +pg_graphql_release: "1.5.7" -pg_jsonschema_release: "v0.1.4" +pg_jsonschema_release: "0.2.0" pg_stat_monitor_release: "1.1.1" +pg_stat_monitor_release_checksum: sha256:1756a02d5a6dd66b892d15920257c69a17a67d48d3d4e2f189b681b83001ec2a + +vault_release: "0.2.9" +vault_release_checksum: sha256:1e813216395c59bb94c92be47ce8b70ba19ccc0efbcdb1fb14ed6d34a42c6cdb + +groonga_release: "13.0.1" +groonga_release_checksum: sha256:1c2d1a6981c1ad3f02a11aff202b15ba30cb1c6147f1fa9195b519a2b728f8ba + +pgroonga_release: "3.0.7" +pgroonga_release_checksum: 
sha256:885ff3878cc30e9030e5fc56d561bc8b66df3ede1562c9d802bc0ea04fe5c203 -vault_release: "0.2.8" -vault_release_checksum: sha256:842cdee6d5b586b1baacccfaa08b45d56566987af87952a5fe5ee80b24400754 +wrappers_release: "0.4.2" -groonga_release: "12.0.8" -groonga_release_checksum: sha1:32aee787efffc2a22760fde946fb6462286074e2 +hypopg_release: "1.4.1" +hypopg_release_checksum: sha256:9afe6357fd389d8d33fad81703038ce520b09275ec00153c6c89282bcdedd6bc -pgroonga_release: "2.4.0" -pgroonga_release_checksum: sha1:235d67e8487b318e656d4d3016a49c14fae0512d +pg_repack_release: "1.5.0" +pg_repack_release_checksum: sha256:9a14d6a95bfa29f856aa10538238622c1f351d38eb350b196c06720a878ccc52 -wrappers_release: "v0.1.7" +pgvector_release: "0.8.0" +pgvector_release_checksum: sha256:867a2c328d4928a5a9d6f052cd3bc78c7d60228a9b914ad32aa3db88e9de27b0 -hypopg_commit_sha: 57d711bc4e37164c8edac81580a5f477e2a33d86 +pg_tle_release: "1.3.2" +pg_tle_release_checksum: sha256:d04f72d88b21b954656609743560684ac42645b64a36c800d4d2f84d1f180de1 -pg_repack_release: "1.4.8" -pg_repack_release_checksum: sha1:74e54f43f6c062644e442224eacc2a2bc95a04ef +index_advisor_release: "0.2.0" +index_advisor_checksum: sha256:2d3642012a9185cda51f1e82ba43d64a81b24a2655a3ac3afdcbbd95d46a1a27 -pgvector_release: "v0.4.0" +pg_backtrace_release: "1.1" diff --git a/common-nix.vars.pkr.hcl b/common-nix.vars.pkr.hcl new file mode 100644 index 0000000..587e717 --- /dev/null +++ b/common-nix.vars.pkr.hcl @@ -0,0 +1 @@ +postgres-version = "15.6.1.146" diff --git a/common.vars.pkr.hcl b/common.vars.pkr.hcl index a1eee1c..871647b 100644 --- a/common.vars.pkr.hcl +++ b/common.vars.pkr.hcl @@ -1 +1 @@ -postgres-version = "15.1.0.38" +postgres-version = "15.1.1.95" diff --git a/docker-compose.yaml b/docker-compose.yaml deleted file mode 100644 index c572b9d..0000000 --- a/docker-compose.yaml +++ /dev/null @@ -1,28 +0,0 @@ -version: "3.8" - -services: - db: - image: tealbase/postgres:latest - build: . 
- restart: "no" - healthcheck: - test: pg_isready -U postgres -h localhost - interval: 2s - timeout: 2s - retries: 10 - environment: - POSTGRES_HOST: /var/run/postgresql - POSTGRES_PASSWORD: password - - pg_prove: - image: horrendo/pg_prove - depends_on: - db: - condition: service_healthy - environment: - PGHOST: db - PGUSER: postgres - PGPASSWORD: password - volumes: - - ./migrations/tests:/tests - command: pg_prove /tests/test.sql diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 0000000..116377b --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,78 @@ +ARG ubuntu_release=focal +FROM ubuntu:${ubuntu_release} as base + +ARG ubuntu_release=focal +ARG ubuntu_release_no=20.04 +ARG postgresql_major=15 +ARG postgresql_release=${postgresql_major}.1 + +FROM base as pg-source + +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + gnupg \ + dpkg-dev \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* + +# Add Postgres PPA
# In the off-chance that the key in the repository expires, it can be replaced by running the following in the repository's root:
# gpg --keyserver hkps://keyserver.ubuntu.com --recv-keys $NEW_POSTGRESQL_GPG_KEY
# gpg --export --armor $NEW_POSTGRESQL_GPG_KEY > postgresql.gpg.key
+COPY postgresql.gpg.key /tmp/postgresql.gpg.key +RUN apt-key add /tmp/postgresql.gpg.key && \ + echo "deb https://apt-archive.postgresql.org/pub/repos/apt ${ubuntu_release}-pgdg-archive main" > /etc/apt/sources.list.d/pgdg.list && \ + echo "deb-src https://apt-archive.postgresql.org/pub/repos/apt ${ubuntu_release}-pgdg-archive main" >> /etc/apt/sources.list.d/pgdg.list + +# Create local PPA +WORKDIR /tmp/build +RUN echo "deb [ trusted=yes ] file:///tmp/build ./" > /etc/apt/sources.list.d/temp.list && \ + dpkg-scanpackages . 
> Packages && \ + apt-get -o Acquire::GzipIndexes=false update + +ENV DEBIAN_FRONTEND=noninteractive +ENV PYTHONDONTWRITEBYTECODE=1 +ENV DEB_BUILD_OPTIONS="nocheck parallel=$(nproc)" + +# Configure processor optimised build +ARG CPPFLAGS="" +ENV DEB_CPPFLAGS_APPEND="${CPPFLAGS} -fsigned-char" +ENV DEB_CFLAGS_APPEND="-g3" +ARG DEB_BUILD_PROFILES="pkg.postgresql.nozstd" +ENV DEB_BUILD_PROFILES="${DEB_BUILD_PROFILES}" + +RUN apt-get -o Acquire::GzipIndexes=false update && apt-get build-dep -y postgresql-common pgdg-keyring && \ + apt-get source --compile postgresql-common pgdg-keyring && \ + dpkg-scanpackages . > Packages && \ + apt-get -o Acquire::GzipIndexes=false update + +RUN apt-get build-dep -y "postgresql-${postgresql_major}=${postgresql_release}-1.pgdg${ubuntu_release_no}+1" && \ + apt-get source --compile "postgresql-${postgresql_major}=${postgresql_release}-1.pgdg${ubuntu_release_no}+1" && \ + dpkg-scanpackages . > Packages && \ + apt-get -o Acquire::GzipIndexes=false update + +# Remove source directories +RUN rm -rf /tmp/build/*/ + +FROM base as pg + +# Inherit args from base stage +ARG postgresql_major +ARG postgresql_release + +COPY --from=pg-source /tmp/build /tmp/build + +ENV DEBIAN_FRONTEND=noninteractive +RUN echo "deb [ trusted=yes ] file:///tmp/build ./" > /etc/apt/sources.list.d/temp.list && \ + apt-get -o Acquire::GzipIndexes=false update && \ + apt-get install -y --no-install-recommends postgresql-common && \ + sed -ri 's/#(create_main_cluster) .*$/\1 = false/' /etc/postgresql-common/createcluster.conf && \ + apt-get install -y --no-install-recommends "postgresql-${postgresql_major}=${postgresql_release}-1.pgdg${ubuntu_release_no}+1" && \ + rm -rf /var/lib/apt/lists/* && \ + rm -rf /tmp/build /etc/apt/sources.list.d/temp.list + +ENV PATH $PATH:/usr/lib/postgresql/${postgresql_major}/bin + +FROM scratch as pg-deb + +COPY --from=pg-source /tmp/build /tmp diff --git a/docker/all-in-one/Dockerfile b/docker/all-in-one/Dockerfile new file mode 100644 
index 0000000..d93e46f --- /dev/null +++ b/docker/all-in-one/Dockerfile @@ -0,0 +1,311 @@ +ARG postgres_version=15.1.1.49 + +ARG pgbouncer_release=1.18.0 +ARG postgrest_release=10.1.2 +ARG gotrue_release=2.130.0 +ARG adminapi_release=0.64.1 +ARG adminmgr_release=0.22.1 +ARG vector_release=0.22.3 +ARG postgres_exporter_release=0.15.0 +ARG envoy_release=1.28.0 + +# Update `gateway-28` in the URL below if upgrading above v2.8.x. +ARG kong_release=2.8.1 + +FROM tealbase/postgres:${postgres_version} as base +ARG TARGETARCH +ARG postgresql_major + +FROM base as builder +# Install build dependencies +RUN apt-get update && apt-get install -y \ + postgresql-server-dev-${postgresql_major} \ + build-essential \ + checkinstall \ + pkg-config \ + cmake \ + && rm -rf /var/lib/apt/lists/* + +#################### +# Install pgbouncer +#################### +FROM builder as pgbouncer-source +# Download and extract +ARG pgbouncer_release +ADD "https://www.pgbouncer.org/downloads/files/${pgbouncer_release}/pgbouncer-${pgbouncer_release}.tar.gz" /tmp/pgbouncer.tar.gz +RUN tar -xvf /tmp/pgbouncer.tar.gz -C /tmp && \ + rm -rf /tmp/pgbouncer.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y \ + libevent-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/pgbouncer-${pgbouncer_release} +RUN ./configure --prefix=/usr/local +RUN make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --requires=libevent-2.1-7 --nodoc + +FROM base as pgbouncer +# Download pre-built packages +RUN apt-get update && apt-get install -y --no-install-recommends --download-only \ + pgbouncer \ + && rm -rf /var/lib/apt/lists/* +RUN mv /var/cache/apt/archives/*.deb /tmp/ + +#################### +# Install PostgREST +#################### +FROM postgrest/postgrest:v${postgrest_release} as pgrst + +#################### +# Install GoTrue +#################### +FROM tealbase/gotrue:v${gotrue_release} as gotrue + 
+#################### +# Install Envoy +#################### +FROM envoyproxy/envoy:v${envoy_release} as envoy + +#################### +# Install Kong +#################### +FROM base as kong +ARG kong_release +ADD "https://packages.konghq.com/public/gateway-28/deb/ubuntu/pool/focal/main/k/ko/kong_${kong_release}/kong_${kong_release}_${TARGETARCH}.deb" \ + /tmp/kong.deb + +#################### +# Install admin api +#################### +FROM base as adminapi +ARG adminapi_release +ADD "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/tealbase-admin-api/v${adminapi_release}/tealbase-admin-api_${adminapi_release}_linux_${TARGETARCH}.tar.gz" /tmp/tealbase-admin-api.tar.gz +RUN tar -xvf /tmp/tealbase-admin-api.tar.gz -C /tmp && \ + rm -rf /tmp/tealbase-admin-api.tar.gz + +#################### +# Install admin mgr +#################### +FROM base as adminmgr +ARG adminmgr_release +ADD "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/admin-mgr/v${adminmgr_release}/admin-mgr_${adminmgr_release}_linux_${TARGETARCH}.tar.gz" /tmp/admin-mgr.tar.gz +RUN tar -xvf /tmp/admin-mgr.tar.gz -C /tmp && \ + rm -rf /tmp/admin-mgr.tar.gz + +#################### +# Install Prometheus Exporter +#################### +FROM base as exporter +ARG postgres_exporter_release +ADD "https://github.com/prometheus-community/postgres_exporter/releases/download/v${postgres_exporter_release}/postgres_exporter-${postgres_exporter_release}.linux-${TARGETARCH}.tar.gz" /tmp/postgres_exporter.tar.gz +RUN tar -xvf /tmp/postgres_exporter.tar.gz -C /tmp --strip-components 1 && \ + rm -rf /tmp/postgres_exporter.tar.gz + +#################### +# Install vector +#################### +FROM base as vector +ARG vector_release +ADD "https://packages.timber.io/vector/${vector_release}/vector_${vector_release}-1_${TARGETARCH}.deb" /tmp/vector.deb + +#################### +# Install supervisord +#################### +FROM base as supervisor +# Download pre-built packages +RUN apt-get update -y && 
apt-get install -y --no-install-recommends --download-only \ + supervisor \ + && rm -rf /var/lib/apt/lists/* +RUN mv /var/cache/apt/archives/*.deb /tmp/ + +#################### +# Create the final image for production +#################### +FROM base as production + +# Copy dependencies from previous build stages +COPY --from=pgbouncer /tmp/*.deb /tmp/ +COPY --from=vector /tmp/*.deb /tmp/ +COPY --from=kong /tmp/*.deb /tmp/ +COPY --from=supervisor /tmp/*.deb /tmp/ + +# Install runtime dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + /tmp/*.deb \ + # For health check + curl \ + # For parsing init payload + jq \ + # Security tools + fail2ban \ + # sudo + sudo \ + vim-tiny \ + less \ + libnuma1 \ + logrotate \ + dumb-init \ + # pg_egress_collect deps + tcpdump libio-async-perl \ + && rm -rf /var/lib/apt/lists/* /tmp/* \ + && mkdir -p /dist \ + && mkdir -p /data/opt && chmod go+rwx /data/opt + +#################### +# Install salt +#################### +ENV DEBIAN_FRONTEND noninteractive +ENV SALT_VERSION 3006 + +# Install one-dir salt +RUN mkdir /etc/apt/keyrings \ + && curl -fsSL -o /etc/apt/keyrings/salt-archive-keyring-2023-arm.gpg https://repo.saltproject.io/salt/py3/ubuntu/20.04/arm64/SALT-PROJECT-GPG-PUBKEY-2023.gpg \ + && echo "deb [signed-by=/etc/apt/keyrings/salt-archive-keyring-2023-arm.gpg arch=arm64] https://repo.saltproject.io/salt/py3/ubuntu/20.04/arm64/$SALT_VERSION focal main" | tee /etc/apt/sources.list.d/salt.list \ + && curl -fsSL -o /etc/apt/keyrings/salt-archive-keyring-2023-amd.gpg https://repo.saltproject.io/salt/py3/ubuntu/20.04/amd64/SALT-PROJECT-GPG-PUBKEY-2023.gpg \ + && echo "deb [signed-by=/etc/apt/keyrings/salt-archive-keyring-2023-amd.gpg arch=amd64] https://repo.saltproject.io/salt/py3/ubuntu/20.04/amd64/$SALT_VERSION focal main" | tee -a /etc/apt/sources.list.d/salt.list \ + && apt-get clean && apt-get update \ + && apt-get install -y salt-minion + +ADD docker/all-in-one/etc/salt/minion /etc/salt/minion 
+ + +# Copy single binary dependencies +COPY --from=pgrst /bin/postgrest /dist/ +COPY --from=gotrue /usr/local/bin/auth /dist/gotrue +COPY --from=gotrue /usr/local/etc/auth /opt/gotrue/ +COPY --from=envoy /usr/local/bin/envoy /dist/ +COPY --from=adminapi /tmp/tealbase-admin-api /dist/ +COPY --chown=root:root --from=adminmgr /tmp/admin-mgr /dist/ +COPY --from=exporter /tmp/postgres_exporter /opt/postgres_exporter/ +COPY docker/all-in-one/opt/postgres_exporter /opt/postgres_exporter/ + +# Configuring dangling symlinks for binaries +RUN ln -s /data/opt/tealbase-admin-api /opt/tealbase-admin-api \ + && ln -s /data/opt/postgrest /opt/postgrest \ + && ln -s /data/opt/gotrue /opt/gotrue/gotrue \ + && ln -s /data/opt/admin-mgr /usr/bin/admin-mgr + +# Scripts for adminapi +COPY ansible/files/admin_api_scripts /root +COPY --chown=adminapi:adminapi docker/all-in-one/etc/adminapi /etc/adminapi +COPY --chmod=644 docker/all-in-one/etc/sudoers.d /etc/sudoers.d/ + +# Script for pg_egress_collect +COPY --chown=adminapi:adminapi docker/all-in-one/opt/pg_egress_collect /opt/pg_egress_collect + +# Customizations for pgbouncer +COPY docker/all-in-one/etc/pgbouncer /etc/pgbouncer +COPY docker/all-in-one/etc/pgbouncer-custom /etc/pgbouncer-custom +COPY docker/all-in-one/etc/tmpfiles.d /etc/tmpfiles.d + +# Customizations for postgres +COPY --chown=postgres:postgres docker/all-in-one/etc/postgresql/pg_hba.conf /etc/postgresql/ +COPY --chown=postgres:postgres docker/all-in-one/etc/postgresql/logging.conf /etc/postgresql/ +COPY --chown=postgres:postgres docker/all-in-one/etc/postgresql-custom /etc/postgresql-custom +COPY --chown=postgres:postgres docker/all-in-one/etc/postgresql.schema.sql /etc/postgresql.schema.sql + +# Customizations for postgres_exporter +COPY --chown=postgres:postgres docker/all-in-one/opt/postgres_exporter/queries.yml /opt/postgres_exporter/queries.yml + +# Customizations for fail2ban +COPY docker/all-in-one/etc/fail2ban/filter.d /etc/fail2ban/filter.d/ +COPY 
docker/all-in-one/etc/fail2ban/jail.d /etc/fail2ban/jail.d/ + +# Customizations for postgrest +COPY --chown=postgrest:postgrest docker/all-in-one/etc/postgrest/bootstrap.sh /etc/postgrest/bootstrap.sh +COPY --chown=postgrest:postgrest docker/all-in-one/etc/postgrest/base.conf /etc/postgrest/base.conf +COPY --chown=postgrest:postgrest docker/all-in-one/etc/postgrest/generated.conf /etc/postgrest/generated.conf + +# Customizations for logrotate +COPY docker/all-in-one/etc/logrotate.d/walg.conf /etc/logrotate.d/walg.conf +COPY docker/all-in-one/etc/logrotate.d/postgresql.conf /etc/logrotate.d/postgresql.conf + +# Customizations for gotrue +COPY docker/all-in-one/etc/gotrue.env /etc/gotrue.env + +# Customizations for envoy +ARG envoy_release +ADD --chmod=755 --chown=envoy:envoy "https://raw.githubusercontent.com/envoyproxy/envoy/v${envoy_release}/restarter/hot-restarter.py" /opt/envoy-hot-restarter.py +COPY --chmod=775 --chown=envoy:envoy ansible/files/envoy_config/ /etc/envoy/ +COPY --chmod=755 --chown=envoy:envoy ansible/files/start-envoy.sh /opt/ + +# Customizations for kong +COPY docker/all-in-one/etc/kong/kong.conf /etc/kong/kong.conf +COPY docker/all-in-one/etc/kong/kong.yml /etc/kong/kong.yml + +# Customizations for vector +COPY --chown=vector:vector docker/all-in-one/etc/vector/vector.yaml /etc/vector/vector.yaml + +# Customizations for supervisor +COPY docker/all-in-one/etc/supervisor /etc/supervisor + +# Customizations for supa-shutdown +COPY --chown=adminapi:adminapi docker/all-in-one/etc/supa-shutdown /etc/supa-shutdown +COPY docker/all-in-one/configure-shim.sh /usr/local/bin/configure-shim.sh + +# Configure service ports +ENV PGRST_SERVER_PORT=3000 +ENV PGRST_ADMIN_SERVER_PORT=3001 +EXPOSE ${PGRST_SERVER_PORT} + +ENV GOTRUE_SITE_URL=http://localhost:${PGRST_SERVER_PORT} +ENV GOTRUE_API_PORT=9999 +EXPOSE ${GOTRUE_API_PORT} + +ENV ENVOY_HTTP_PORT=8000 +ENV ENVOY_HTTPS_PORT=8443 + +ENV KONG_HTTP_PORT=8000 +ENV KONG_HTTPS_PORT=8443 + +ENV 
HTTP_PORT=${ENVOY_HTTP_PORT:-${KONG_HTTP_PORT}} +ENV HTTPS_PORT=${ENVOY_HTTPS_PORT:-${KONG_HTTPS_PORT}} +EXPOSE ${HTTP_PORT} ${HTTPS_PORT} + +ENV ADMIN_API_CERT_DIR=/etc/ssl/adminapi +ENV ADMIN_API_PORT=8085 +EXPOSE ${ADMIN_API_PORT} + +ENV PGBOUNCER_PORT=6543 +EXPOSE ${PGBOUNCER_PORT} + +ENV PGEXPORTER_PORT=9187 +EXPOSE ${PGEXPORTER_PORT} + +ENV VECTOR_API_PORT=9001 + +# Create system users +RUN useradd --create-home --shell /bin/bash postgrest && \ + useradd --create-home --shell /bin/bash gotrue && \ + useradd --create-home --shell /bin/bash envoy && \ + useradd --create-home --shell /bin/bash pgbouncer -G postgres,ssl-cert && \ + useradd --create-home --shell /bin/bash adminapi -G root,envoy,kong,pgbouncer,postgres,postgrest,wal-g && \ + usermod --append --shell /bin/bash -G postgres vector +RUN mkdir -p /etc/wal-g && \ + chown -R adminapi:adminapi /etc/wal-g && \ + chmod g+w /etc/wal-g +RUN mkdir -p /var/log/wal-g \ + && chown -R postgres:postgres /var/log/wal-g \ + && chmod +x /dist/admin-mgr \ + && chmod ug+s /dist/admin-mgr \ + && touch /etc/wal-g/config.json \ + && chown adminapi:adminapi /etc/wal-g/config.json \ + && echo '{"WALG_S3_PREFIX": "s3://foo/bar/"}' > /etc/wal-g/config.json +RUN chown -R adminapi:adminapi /etc/adminapi +RUN sed -i "s;#include = '/etc/postgresql-custom/generated-optimizations.conf';include = '/etc/postgresql-custom/generated-optimizations.conf';" /etc/postgresql/postgresql.conf + +# Add healthcheck and entrypoint scripts +COPY docker/all-in-one/healthcheck.sh /usr/local/bin/ +HEALTHCHECK --interval=3s --timeout=2s --start-period=4s --retries=10 CMD [ "healthcheck.sh" ] + +COPY docker/all-in-one/init /init +COPY docker/all-in-one/entrypoint.sh /usr/local/bin/ +COPY docker/all-in-one/postgres-entrypoint.sh /usr/local/bin/ +COPY docker/all-in-one/shutdown.sh /usr/local/bin/supa-shutdown.sh +COPY docker/all-in-one/run-logrotate.sh /usr/local/bin/run-logrotate.sh + +ENTRYPOINT [ "/usr/bin/dumb-init" ] + +CMD [ "entrypoint.sh"] diff --git 
a/docker/all-in-one/README.md b/docker/all-in-one/README.md new file mode 100644 index 0000000..47fa762 --- /dev/null +++ b/docker/all-in-one/README.md @@ -0,0 +1,59 @@ +# tealbase All-in-One + +All tealbase backend services bundled in a single Docker image for quick local testing and edge deployment. + +## Build + +```bash +# cwd: repo root +docker build -f docker/all-in-one/Dockerfile -t tealbase/all-in-one . +``` + +## Run + +```bash +docker run --rm -it \ + -e POSTGRES_PASSWORD=postgres \ + -e JWT_SECRET=super-secret-jwt-token-with-at-least-32-characters-long \ + -e ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE \ + -e SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q \ + -e ADMIN_API_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoic3VwYWJhc2VfYWRtaW4iLCJpc3MiOiJzdXBhYmFzZS1kZW1vIiwiaWF0IjoxNjQxNzY5MjAwLCJleHAiOjE3OTk1MzU2MDB9.Y9mSNVuTw2TdfryoaqM5wySvwQemGGWfSe9ixcklVfM \ + -e DATA_VOLUME_MOUNTPOINT=/data \ + -e MACHINE_TYPE=shared_cpu_1x_512m \ + -p 5432:5432 \ + -p 8000:8000 \ + tealbase/all-in-one +``` + +Use bind mount to start from an existing physical backup: `-v $(pwd)/data:/var/lib/postgresql/data` + +Alternatively, the container may be initialised using a payload tarball. 
+ +```bash +docker run --rm \ + -e POSTGRES_PASSWORD=postgres \ + -e INIT_PAYLOAD_PRESIGNED_URL= \ + -p 5432:5432 \ + -p 8000:8000 \ + -it tealbase/all-in-one +``` + +## Test + +```bash +curl -H "apikey: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE" \ + localhost:8000/rest/v1/ | jq +``` + +## TODO + +- [x] optimise admin config +- [x] propagate shutdown signals +- [x] add http health checks +- [x] generate dynamic JWT +- [ ] ufw / nftables +- [x] log rotation +- [x] egress metrics +- [x] vector +- [ ] apparmor +- [x] wal-g diff --git a/docker/all-in-one/configure-shim.sh b/docker/all-in-one/configure-shim.sh new file mode 100755 index 0000000..f42f155 --- /dev/null +++ b/docker/all-in-one/configure-shim.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +INITIAL_BINARY_PATH=$1 +SYMLINK_PATH=$2 + +SYMLINK_TARGET=$(readlink -m "$SYMLINK_PATH") + +if [ ! 
-f "$SYMLINK_TARGET" ]; then + cp "$INITIAL_BINARY_PATH" "$SYMLINK_TARGET" + + PERMS=$(stat -c "%a" "$INITIAL_BINARY_PATH") + chmod "$PERMS" "$SYMLINK_TARGET" + + OWNER_GROUP=$(stat -c "%u:%g" "$INITIAL_BINARY_PATH") + chown "$OWNER_GROUP" "$SYMLINK_TARGET" +fi diff --git a/docker/all-in-one/entrypoint.sh b/docker/all-in-one/entrypoint.sh new file mode 100755 index 0000000..586ead7 --- /dev/null +++ b/docker/all-in-one/entrypoint.sh @@ -0,0 +1,366 @@ +#!/bin/bash +set -eou pipefail + +START_TIME=$(date +%s%N) + +PG_CONF=/etc/postgresql/postgresql.conf +SUPERVISOR_CONF=/etc/supervisor/supervisord.conf + +export DATA_VOLUME_MOUNTPOINT=${DATA_VOLUME_MOUNTPOINT:-/data} +export CONFIGURED_FLAG_PATH=${CONFIGURED_FLAG_PATH:-$DATA_VOLUME_MOUNTPOINT/machine.configured} + +export MAX_IDLE_TIME_MINUTES=${MAX_IDLE_TIME_MINUTES:-5} + +function calculate_duration { + local start_time=$1 + local end_time=$2 + + local duration=$((end_time - start_time)) + local milliseconds=$((duration / 1000000)) + + echo "$milliseconds" +} + +# Ref: https://gist.github.com/sj26/88e1c6584397bb7c13bd11108a579746 +function retry { + # Pass 0 for unlimited retries + local retries=$1 + shift + + local start=$EPOCHSECONDS + local count=0 + until "$@"; do + exit=$? + # Reset count if service has been running for more than 2 minutes + local elapsed=$((EPOCHSECONDS - start)) + if [ $elapsed -gt 120 ]; then + count=0 + fi + # Exponential backoff up to n tries + local wait=$((2 ** count)) + count=$((count + 1)) + if [ $count -ge "$retries" ] && [ "$retries" -gt 0 ]; then + echo "Retry $count/$retries exited $exit, no more retries left." + return $exit + fi + echo "Retry $count/$retries exited $exit, retrying in $wait seconds..." 
+ sleep $wait + start=$EPOCHSECONDS + done + return 0 +} + +function configure_services { + # Start services after migrations are run + for file in /init/configure-*.sh; do + retry 0 "$file" + done +} + +function enable_swap { + fallocate -l 1G /mnt/swapfile + chmod 600 /mnt/swapfile + mkswap /mnt/swapfile + swapon /mnt/swapfile +} + +function push_lsn_checkpoint_file { + if [ "${PLATFORM_DEPLOYMENT:-}" != "true" ]; then + echo "Skipping push of LSN checkpoint file" + return + fi + + /usr/bin/admin-mgr lsn-checkpoint-push --immediately || echo "Failed to push LSN checkpoint" +} + +function graceful_shutdown { + echo "$(date): Received SIGINT. Shutting down." + + # Postgres ships the latest WAL file using archive_command during shutdown, in a blocking operation + # This is to ensure that the WAL file is shipped, just in case + sleep 0.2 + push_lsn_checkpoint_file +} + +function enable_autoshutdown { + sed -i "s/autostart=.*/autostart=true/" /etc/supervisor/base-services/supa-shutdown.conf +} + +function enable_lsn_checkpoint_push { + sed -i "s/autostart=.*/autostart=true/" /etc/supervisor/base-services/lsn-checkpoint-push.conf + sed -i "s/autorestart=.*/autorestart=true/" /etc/supervisor/base-services/lsn-checkpoint-push.conf +} + +function disable_fail2ban { + sed -i "s/autostart=.*/autostart=false/" /etc/supervisor/services/fail2ban.conf + sed -i "s/autorestart=.*/autorestart=false/" /etc/supervisor/services/fail2ban.conf +} + +function setup_postgres { + tar -xzvf "$INIT_PAYLOAD_PATH" -C / ./etc/postgresql.schema.sql + mv /etc/postgresql.schema.sql /docker-entrypoint-initdb.d/migrations/99-schema.sql + + tar -xzvf "$INIT_PAYLOAD_PATH" -C / ./etc/postgresql-custom/pgsodium_root.key + echo "include = '/etc/postgresql-custom/postgresql-platform-defaults.conf'" >>$PG_CONF + + # TODO (darora): walg enablement is temporarily performed here until changes from https://github.com/tealbase/postgres/pull/639 get picked up + # other things will still be needed in the future 
(auth_delay config) + sed -i \ + -e "s|#include = '/etc/postgresql-custom/custom-overrides.conf'|include = '/etc/postgresql-custom/custom-overrides.conf'|g" \ + -e "s|#include = '/etc/postgresql-custom/wal-g.conf'|include = '/etc/postgresql-custom/wal-g.conf'|g" \ + -e "s|shared_preload_libraries = '\(.*\)'|shared_preload_libraries = '\1, auth_delay'|" \ + -e "/# Automatically generated optimizations/i auth_delay.milliseconds = '3000'" \ + "${PG_CONF}" + + # Setup ssl certs + mkdir -p /etc/ssl/certs/postgres + tar -xzvf "$INIT_PAYLOAD_PATH" -C /etc/ssl/certs/postgres/ --strip-components 2 ./ssl/server.crt + tar -xzvf "$INIT_PAYLOAD_PATH" -C /etc/ssl/certs/postgres/ --strip-components 2 ./ssl/ca.crt + tar -xzvf "$INIT_PAYLOAD_PATH" -C /etc/ssl/private/ --strip-components 2 ./ssl/server.key + # tar -xzvf "$INIT_PAYLOAD_PATH" -C /etc/ssl/certs/postgres/ ./ssl/server-intermediate.srl + + PGSSLROOTCERT=/etc/ssl/certs/postgres/ca.crt + PGSSLCERT=/etc/ssl/certs/postgres/server.crt + PGSSLKEY=/etc/ssl/private/server.key + chown root:postgres $PGSSLROOTCERT $PGSSLKEY $PGSSLCERT + chmod 640 $PGSSLROOTCERT $PGSSLKEY $PGSSLCERT + + # Change ssl back to on in postgres.conf + sed -i -e "s|ssl = off|ssl = on|g" \ + -e "s|ssl_ca_file = ''|ssl_ca_file = '$PGSSLROOTCERT'|g" \ + -e "s|ssl_cert_file = ''|ssl_cert_file = '$PGSSLCERT'|g" \ + -e "s|ssl_key_file = ''|ssl_key_file = '$PGSSLKEY'|g" \ + $PG_CONF + + if [ "${DATA_VOLUME_MOUNTPOINT}" ]; then + mkdir -p "${DATA_VOLUME_MOUNTPOINT}/opt" + /usr/local/bin/configure-shim.sh /dist/tealbase-admin-api /opt/tealbase-admin-api + /opt/tealbase-admin-api optimize db --destination-config-file-path /etc/postgresql-custom/generated-optimizations.conf + + # Preserve postgresql configs across restarts + POSTGRESQL_CUSTOM_DIR="${DATA_VOLUME_MOUNTPOINT}/etc/postgresql-custom" + + mkdir -p "${POSTGRESQL_CUSTOM_DIR}" + + if [ ! 
-f "${CONFIGURED_FLAG_PATH}" ]; then + echo "Copying existing custom postgresql config from /etc/postgresql-custom to ${POSTGRESQL_CUSTOM_DIR}" + cp -R "/etc/postgresql-custom/." "${POSTGRESQL_CUSTOM_DIR}/" + fi + + rm -rf "/etc/postgresql-custom" + ln -s "${POSTGRESQL_CUSTOM_DIR}" "/etc/postgresql-custom" + chown -R postgres:postgres "/etc/postgresql-custom" + chown -R postgres:postgres "${POSTGRESQL_CUSTOM_DIR}" + chmod g+rx "${POSTGRESQL_CUSTOM_DIR}" + + # Preserve wal-g configs across restarts + WALG_CONF_DIR="${DATA_VOLUME_MOUNTPOINT}/etc/wal-g" + mkdir -p "${WALG_CONF_DIR}" + + if [ ! -f "${CONFIGURED_FLAG_PATH}" ]; then + echo "Copying existing custom wal-g config from /etc/wal-g to ${WALG_CONF_DIR}" + cp -R "/etc/wal-g/." "${WALG_CONF_DIR}/" + fi + + rm -rf "/etc/wal-g" + ln -s "${WALG_CONF_DIR}" "/etc/wal-g" + chown -R adminapi:adminapi "/etc/wal-g" + chown -R adminapi:adminapi "${WALG_CONF_DIR}" + chmod g+rx "/etc/wal-g" + chmod g+rx "${WALG_CONF_DIR}" + fi + DURATION=$(calculate_duration "$START_TIME" "$(date +%s%N)") + echo "E: Execution time to setting up postgresql: $DURATION milliseconds" +} + +function setup_credentials { + # Load credentials from init json + tar -xzvf "$INIT_PAYLOAD_PATH" -C / ./tmp/init.json + export ANON_KEY=${ANON_KEY:-$(jq -r '.["anon_key"]' /tmp/init.json)} + export SERVICE_ROLE_KEY=${SERVICE_ROLE_KEY:-$(jq -r '.["service_key"]' /tmp/init.json)} + export ADMIN_API_KEY=${ADMIN_API_KEY:-$(jq -r '.["tealbase_admin_key"]' /tmp/init.json)} + export JWT_SECRET=${JWT_SECRET:-$(jq -r '.["jwt_secret"]' /tmp/init.json)} + DURATION=$(calculate_duration "$START_TIME" "$(date +%s%N)") + echo "E: Execution time to setting up credentials: $DURATION milliseconds" +} + +function report_health { + if [ -z "${REPORTING_TOKEN:-}" ]; then + echo "Skipped health reporting: missing REPORTING_TOKEN" + exit 0 + fi + if [ -d "$ADMIN_API_CERT_DIR" ]; then + retry 10 curl -sSkf "https://localhost:$ADMIN_API_PORT/health-reporter/send" -X POST -H "apikey: 
$ADMIN_API_KEY" + else + retry 10 curl -sSf "http://localhost:$ADMIN_API_PORT/health-reporter/send" -X POST -H "apikey: $ADMIN_API_KEY" + fi +} + +function run_prelaunch_hooks { + if [ -f "/etc/postgresql-custom/supautils.conf" ]; then + sed -i -e 's/dblink, //' "/etc/postgresql-custom/supautils.conf" + fi +} + +function start_supervisor { + # Start health reporting + report_health & + + # Start supervisord + /usr/bin/supervisord -c $SUPERVISOR_CONF +} + +DELEGATED_ARCHIVE_PATH=/data/delegated-init.tar.gz +DELEGATED_ENTRY_PATH=/data/delegated-entry.sh + +function fetch_and_execute_delegated_payload { + curl -s --time-cond $DELEGATED_ARCHIVE_PATH -o $DELEGATED_ARCHIVE_PATH "$DELEGATED_INIT_LOCATION" + + if [ ! -f $DELEGATED_ARCHIVE_PATH ]; then + echo "No delegated payload found, bailing" + return + fi + + # only extract a valid archive + if tar -tzf "$DELEGATED_ARCHIVE_PATH" &>/dev/null; then + TAR_MTIME_EPOCH=$(tar -tvzf "$DELEGATED_ARCHIVE_PATH" delegated-entry.sh | awk '{print $4, $5}' | xargs -I {} date -d {} +%s) + + if [ -f $DELEGATED_ENTRY_PATH ]; then + FILE_MTIME_EPOCH=$(stat -c %Y "$DELEGATED_ENTRY_PATH") + + if [ "$TAR_MTIME_EPOCH" -gt "$FILE_MTIME_EPOCH" ]; then + tar -xvzf "$DELEGATED_ARCHIVE_PATH" -C /data + else + echo "TAR archive is not newer, skipping extraction" + fi + else + tar -xvzf "$DELEGATED_ARCHIVE_PATH" -C /data + fi + else + echo "Invalid TAR archive" + return + fi + + # Run our delegated entry script here + if [ -f "$DELEGATED_ENTRY_PATH" ]; then + chmod +x $DELEGATED_ENTRY_PATH + bash -c "$DELEGATED_ENTRY_PATH $START_TIME" + fi +} + +# Increase max number of open connections +ulimit -n 65536 + +# Update pgsodium root key +if [ "${PGSODIUM_ROOT_KEY:-}" ]; then + echo "${PGSODIUM_ROOT_KEY}" >/etc/postgresql-custom/pgsodium_root.key +fi + +# Update pgdata directory +if [ "${PGDATA_REAL:-}" ]; then + mkdir -p "${PGDATA_REAL}" + chown -R postgres:postgres "${PGDATA_REAL}" + chmod -R g+rx "${PGDATA_REAL}" +fi + +if [ "${PGDATA:-}" ]; then + 
if [ "${PGDATA_REAL:-}" ]; then + mkdir -p "$(dirname "${PGDATA}")" + rm -rf "${PGDATA}" + ln -s "${PGDATA_REAL}" "${PGDATA}" + chmod -R g+rx "${PGDATA}" + else + mkdir -p "$PGDATA" + chown postgres:postgres "$PGDATA" + fi + sed -i "s|data_directory = '.*'|data_directory = '$PGDATA'|g" $PG_CONF +fi + +# Download and extract init payload from s3 +export INIT_PAYLOAD_PATH=${INIT_PAYLOAD_PATH:-/tmp/payload.tar.gz} + +if [ "${INIT_PAYLOAD_PRESIGNED_URL:-}" ]; then + curl -fsSL "$INIT_PAYLOAD_PRESIGNED_URL" -o "/tmp/payload.tar.gz" || true + if [ -f "/tmp/payload.tar.gz" ] && [ "/tmp/payload.tar.gz" != "$INIT_PAYLOAD_PATH" ]; then + mv "/tmp/payload.tar.gz" "$INIT_PAYLOAD_PATH" + fi +fi + +if [ "${DATA_VOLUME_MOUNTPOINT}" ]; then + BASE_LOGS_FOLDER="${DATA_VOLUME_MOUNTPOINT}/logs" + + for folder in "postgresql" "services" "wal-g"; do + mkdir -p "${BASE_LOGS_FOLDER}/${folder}" + rm -rf "/var/log/${folder}" + ln -s "${BASE_LOGS_FOLDER}/${folder}" "/var/log/${folder}" + done + + chown -R postgres:postgres "${BASE_LOGS_FOLDER}" + + mkdir -p "${DATA_VOLUME_MOUNTPOINT}/etc/logrotate" +fi + +# Process init payload +if [ -f "$INIT_PAYLOAD_PATH" ]; then + setup_credentials + setup_postgres +else + echo "Skipped extracting init payload: $INIT_PAYLOAD_PATH does not exist" +fi + +mkdir -p /var/log/services + +SUPERVISOR_CONF=/etc/supervisor/supervisord.conf +find /etc/supervisor/ -type d -exec chmod 0770 {} + +find /etc/supervisor/ -type f -exec chmod 0660 {} + + +# Start services in the background +if [ "${POSTGRES_ONLY:-}" == "true" ]; then + sed -i "s| - postgrest| # - postgrest|g" /etc/adminapi/adminapi.yaml + sed -i "s|files = services/\*.conf base-services/\*.conf|files = base-services/\*.conf|g" $SUPERVISOR_CONF + /init/configure-adminapi.sh +else + sed -i "s| # - postgrest| - postgrest|g" /etc/adminapi/adminapi.yaml + sed -i "s|files = base-services/\*.conf|files = services/\*.conf base-services/\*.conf|g" $SUPERVISOR_CONF + configure_services +fi + +if [ 
"${AUTOSHUTDOWN_ENABLED:-}" == "true" ]; then + enable_autoshutdown +fi + +if [ "${ENVOY_ENABLED:-}" == "true" ]; then + sed -i "s/autostart=.*/autostart=true/" /etc/supervisor/services/envoy.conf + sed -i "s/autostart=.*/autostart=false/" /etc/supervisor/services/kong.conf + sed -i "s/kong/envoy/" /etc/supervisor/services/group.conf +fi + +if [ "${FAIL2BAN_DISABLED:-}" == "true" ]; then + disable_fail2ban +fi + +if [ "${GOTRUE_DISABLED:-}" == "true" ]; then + sed -i "s/autostart=.*/autostart=false/" /etc/supervisor/services/gotrue.conf + sed -i "s/autorestart=.*/autorestart=false/" /etc/supervisor/services/gotrue.conf +fi + +if [ "${PLATFORM_DEPLOYMENT:-}" == "true" ]; then + if [ "${SWAP_DISABLED:-}" != "true" ]; then + enable_swap + fi + enable_lsn_checkpoint_push + + trap graceful_shutdown SIGINT +fi + +touch "$CONFIGURED_FLAG_PATH" +run_prelaunch_hooks + +if [ -n "${DELEGATED_INIT_LOCATION:-}" ]; then + fetch_and_execute_delegated_payload +else + DURATION=$(calculate_duration "$START_TIME" "$(date +%s%N)") + echo "E: Execution time to starting supervisor: $DURATION milliseconds" + start_supervisor + push_lsn_checkpoint_file +fi diff --git a/docker/all-in-one/etc/adminapi/adminapi.yaml b/docker/all-in-one/etc/adminapi/adminapi.yaml new file mode 100644 index 0000000..682f4ad --- /dev/null +++ b/docker/all-in-one/etc/adminapi/adminapi.yaml @@ -0,0 +1,76 @@ +port: 8085 +host: 0.0.0.0 +ref: {{ .ProjectRef }} +jwt_secret: {{ .JwtSecret }} +metric_collectors: + - filesystem + - meminfo + - netdev + - loadavg + - cpu + - diskstats + - vmstat +node_exporter_additional_args: + - "--collector.filesystem.ignored-mount-points=^/(boot|sys|dev|run).*" + - "--collector.netdev.device-exclude=lo" +# cert_path: /etc/ssl/adminapi/server.crt +# key_path: /etc/ssl/adminapi/server.key +upstream_metrics_refresh_duration: 60s +pgbouncer_endpoints: + - "postgres://pgbouncer:{{ .PgbouncerPassword }}@localhost:6543/pgbouncer" +fail2ban_socket: /var/run/fail2ban/fail2ban.sock 
+upstream_metrics_sources: + - name: system + url: "https://localhost:8085/metrics" + labels_to_attach: + - name: tealbase_project_ref + value: {{ .ProjectRef }} + - name: service_type + value: db + skip_tls_verify: true + - name: postgresql + url: "http://localhost:9187/metrics" + labels_to_attach: + - name: tealbase_project_ref + value: {{ .ProjectRef }} + - name: service_type + value: postgresql + - name: gotrue + url: "http://localhost:9122/metrics" + labels_to_attach: + - name: tealbase_project_ref + value: {{ .ProjectRef }} + - name: service_type + value: gotrue +monitoring: + disk_usage: + enabled: true +upgrades_config: + region: us-east-1 + s3_bucket_name: tealbase-internal-artifacts-prod-bucket + common_prefix: upgrades + destination_dir: /tmp +firewall: + enabled: true + internal_ports: + - 9187 + - 8085 + - 9122 + privileged_ports: + - 22 + privileged_ports_allowlist: + - 0.0.0.0/0 + filtered_ports: + - 5432 + - 6543 + unfiltered_ports: + - 80 + - 443 + managed_rules_file: /etc/nftables/tealbase_managed.conf +pg_egress_collect_path: /tmp/pg_egress_collect.txt +health_reporting: + api_url: {{ .tealbaseUrl }} + project_token: {{ .ReportingToken }} + check_services: + # - postgres + # - postgrest diff --git a/docker/all-in-one/etc/fail2ban/filter.d/pgbouncer.conf b/docker/all-in-one/etc/fail2ban/filter.d/pgbouncer.conf new file mode 100644 index 0000000..b2d59c1 --- /dev/null +++ b/docker/all-in-one/etc/fail2ban/filter.d/pgbouncer.conf @@ -0,0 +1,2 @@ +[Definition] +failregex = ^.+@:.+error: password authentication failed$ diff --git a/docker/all-in-one/etc/fail2ban/filter.d/postgresql.conf b/docker/all-in-one/etc/fail2ban/filter.d/postgresql.conf new file mode 100644 index 0000000..c17b51e --- /dev/null +++ b/docker/all-in-one/etc/fail2ban/filter.d/postgresql.conf @@ -0,0 +1,8 @@ +[Definition] +failregex = ^.*,.*,.*,.*,":.*password authentication failed for user.*$ +ignoreregex = ^.*,.*,.*,.*,"127\.0\.0\.1.*password authentication failed for user.*$ + 
^.*,.*,.*,.*,":.*password authentication failed for user ""tealbase_admin".*$ + ^.*,.*,.*,.*,":.*password authentication failed for user ""tealbase_auth_admin".*$ + ^.*,.*,.*,.*,":.*password authentication failed for user ""tealbase_storage_admin".*$ + ^.*,.*,.*,.*,":.*password authentication failed for user ""authenticator".*$ + ^.*,.*,.*,.*,":.*password authentication failed for user ""pgbouncer".*$ diff --git a/docker/all-in-one/etc/fail2ban/jail.d/jail.local b/docker/all-in-one/etc/fail2ban/jail.d/jail.local new file mode 100644 index 0000000..44e8210 --- /dev/null +++ b/docker/all-in-one/etc/fail2ban/jail.d/jail.local @@ -0,0 +1,4 @@ +[DEFAULT] + +banaction = nftables-multiport +banaction_allports = nftables-allports diff --git a/docker/all-in-one/etc/fail2ban/jail.d/pgbouncer.conf b/docker/all-in-one/etc/fail2ban/jail.d/pgbouncer.conf new file mode 100644 index 0000000..c8b3c49 --- /dev/null +++ b/docker/all-in-one/etc/fail2ban/jail.d/pgbouncer.conf @@ -0,0 +1,7 @@ +[pgbouncer] +enabled = true +port = 6543 +protocol = tcp +filter = pgbouncer +logpath = /var/log/services/pgbouncer.log +maxretry = 3 diff --git a/docker/all-in-one/etc/fail2ban/jail.d/postgresql.conf b/docker/all-in-one/etc/fail2ban/jail.d/postgresql.conf new file mode 100644 index 0000000..0ec1819 --- /dev/null +++ b/docker/all-in-one/etc/fail2ban/jail.d/postgresql.conf @@ -0,0 +1,8 @@ +[postgresql] +enabled = true +port = 5432 +protocol = tcp +filter = postgresql +logpath = /var/log/postgresql/auth-failures.csv +maxretry = 3 +ignoreip = 192.168.0.0/16 172.17.1.0/20 diff --git a/docker/all-in-one/etc/fail2ban/jail.d/sshd.local b/docker/all-in-one/etc/fail2ban/jail.d/sshd.local new file mode 100644 index 0000000..7033738 --- /dev/null +++ b/docker/all-in-one/etc/fail2ban/jail.d/sshd.local @@ -0,0 +1,3 @@ +[sshd] + +enabled = false diff --git a/docker/all-in-one/etc/gotrue.env b/docker/all-in-one/etc/gotrue.env new file mode 100644 index 0000000..3c53d17 --- /dev/null +++ 
b/docker/all-in-one/etc/gotrue.env @@ -0,0 +1,9 @@ +API_EXTERNAL_URL=api_external_url +GOTRUE_API_HOST=gotrue_api_host +GOTRUE_SITE_URL=gotrue_site_url +GOTRUE_DB_DRIVER=postgres +GOTRUE_DB_DATABASE_URL=postgres://tealbase_auth_admin@localhost/postgres?sslmode=disable +GOTRUE_DB_MIGRATIONS_PATH=/opt/gotrue/migrations +GOTRUE_JWT_ADMIN_ROLES=tealbase_admin,service_role +GOTRUE_JWT_AUD=authenticated +GOTRUE_JWT_SECRET=gotrue_jwt_secret diff --git a/docker/all-in-one/etc/kong/kong.conf b/docker/all-in-one/etc/kong/kong.conf new file mode 100644 index 0000000..4778902 --- /dev/null +++ b/docker/all-in-one/etc/kong/kong.conf @@ -0,0 +1,37 @@ +database = off +declarative_config = /etc/kong/kong.yml + +# plugins defined in the dockerfile +plugins = request-transformer,cors,key-auth,basic-auth,http-log,ip-restriction,rate-limiting + +admin_listen = off +proxy_listen = 0.0.0.0:80 reuseport backlog=16384, 0.0.0.0:443 http2 ssl reuseport backlog=16384, [::]:80 reuseport backlog=16384, [::]:443 http2 ssl reuseport backlog=16384 + +nginx_http_log_format = custom_log '$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_time $request_length' +nginx_http_client_body_buffer_size = 512k +proxy_access_log = off +dns_stale_ttl = 60 +nginx_proxy_proxy_max_temp_file_size = 0 +nginx_proxy_proxy_buffer_size = 128k +nginx_proxy_proxy_buffers = 4 256k +nginx_proxy_proxy_busy_buffers_size = 256k +nginx_proxy_proxy_read_timeout = 120s +nginx_proxy_proxy_ssl_verify = off +nginx_http_gzip=on +nginx_http_gzip_comp_level=6 +nginx_http_gzip_min_length=256 +nginx_http_gzip_proxied=any +nginx_http_gzip_vary=on +nginx_http_gzip_types=text/plain application/xml application/openapi+json application/json + +# the upstream requests will be timed out after 60s idle anyway +# this ensures that we're not unnecessarily cycling them +upstream_keepalive_max_requests = 0 +# the pool size can be (and ought to be) scaled up on larger instances 
+upstream_keepalive_pool_size = 500 + +nginx_events_use = epoll +# can be tuned to be higher on larger boxes (4096 is totally fine) +nginx_events_worker_connections = 1024 +anonymous_reports = off +headers = latency_tokens diff --git a/docker/all-in-one/etc/kong/kong.yml b/docker/all-in-one/etc/kong/kong.yml new file mode 100644 index 0000000..f87f12b --- /dev/null +++ b/docker/all-in-one/etc/kong/kong.yml @@ -0,0 +1,88 @@ +# ############################################################################################## +# Updating this file also requires a corresponding update in worker/src/lib/config-utils/kong.ts +# ############################################################################################## +_format_version: '1.1' +services: + - { + name: auth-v1-open, + url: 'http://localhost:9999/verify', + routes: [{ name: auth-v1-open, strip_path: true, paths: [/auth/v1/verify] }], + plugins: [{ name: cors }], + } + - { + name: auth-v1-open-callback, + url: 'http://localhost:9999/callback', + routes: [{ name: auth-v1-open-callback, strip_path: true, paths: [/auth/v1/callback] }], + plugins: [{ name: cors }], + } + - { + name: auth-v1-open-authorize, + url: 'http://localhost:9999/authorize', + routes: [{ name: auth-v1-open-authorize, strip_path: true, paths: [/auth/v1/authorize] }], + plugins: [{ name: cors }], + } + - { + name: auth-v1-open-saml, + url: 'http://localhost:9999/sso/saml/', + routes: [{ name: auth-v1-open-saml, strip_path: true, paths: [/auth/v1/sso/saml/] }], + plugins: [{ name: cors }], + } + - { + name: auth-v1, + url: 'http://localhost:9999/', + routes: [{ name: auth-v1, strip_path: true, paths: [/auth/v1/] }], + plugins: [{ name: cors }, { name: key-auth, config: { hide_credentials: false } }], + } + - { + name: rest-v1-admin, + url: 'http://localhost:3001/', + routes: [{ name: rest-admin-v1, strip_path: true, paths: [/rest-admin/v1/] }], + plugins: [{ name: cors }, { name: key-auth, config: { hide_credentials: true } }], + } + - { + 
name: rest-v1, + url: 'http://localhost:3000/', + routes: [{ name: rest-v1, strip_path: true, paths: [/rest/v1/] }], + plugins: [{ name: cors }, { name: key-auth, config: { hide_credentials: true } }], + } + - { + name: graphql-v1, + url: 'http://localhost:3000/rpc/graphql', + routes: [{ name: graphql-v1, strip_path: true, paths: [/graphql/v1] }], + plugins: + [ + { name: cors }, + { name: key-auth, config: { hide_credentials: true } }, + { + name: request-transformer, + config: { add: { headers: [Content-Profile:graphql_public] } }, + }, + ], + } + - { + name: admin-v1, + url: 'https://localhost:8085/', + routes: [{ name: admin-v1, strip_path: true, paths: [/admin/v1/] }], + plugins: [{ name: cors }, { name: key-auth, config: { hide_credentials: false } }], + } + - { + name: admin-v1-user-routes, + url: 'https://localhost:8085/privileged', + routes: [{ name: admin-v1-user-routes, strip_path: true, paths: [/customer/v1/privileged] }], + plugins: [{ name: cors }, { name: basic-auth, config: { hide_credentials: false } }], + } + - { + name: admin-v1-metrics, + url: 'https://localhost:8085/metrics/aggregated', + routes: [{ name: admin-v1-metrics, strip_path: true, paths: [/tealbase-internal/metrics] }], + plugins: [{ name: cors }, { name: ip-restriction, config: { allow: [10.0.0.0/8] } }], + } +consumers: + - { username: anon-key, keyauth_credentials: [{ key: anon_key }] } + - { username: service_role-key, keyauth_credentials: [{ key: service_key }] } + - { username: tealbase-admin-key, keyauth_credentials: [{ key: tealbase_admin_key }] } +basicauth_credentials: + - consumer: service_role-key + username: 'service_role' + password: service_key +plugins: [] diff --git a/docker/all-in-one/etc/logrotate.d/postgresql.conf b/docker/all-in-one/etc/logrotate.d/postgresql.conf new file mode 100644 index 0000000..6e2b882 --- /dev/null +++ b/docker/all-in-one/etc/logrotate.d/postgresql.conf @@ -0,0 +1,11 @@ +/var/log/postgresql/postgresql.csv { + size 50M + rotate 4 + compress + 
delaycompress + notifempty + missingok + postrotate + sudo -u postgres /usr/lib/postgresql/15/bin/pg_ctl -D /var/lib/postgresql/data logrotate + endscript +} diff --git a/docker/all-in-one/etc/logrotate.d/walg.conf b/docker/all-in-one/etc/logrotate.d/walg.conf new file mode 100644 index 0000000..49eeb59 --- /dev/null +++ b/docker/all-in-one/etc/logrotate.d/walg.conf @@ -0,0 +1,9 @@ +/var/log/wal-g/*.log { + size 50M + rotate 3 + copytruncate + delaycompress + compress + notifempty + missingok +} diff --git a/docker/all-in-one/etc/pgbouncer-custom/custom-overrides.ini b/docker/all-in-one/etc/pgbouncer-custom/custom-overrides.ini new file mode 100644 index 0000000..e69de29 diff --git a/docker/all-in-one/etc/pgbouncer-custom/generated-optimizations.ini b/docker/all-in-one/etc/pgbouncer-custom/generated-optimizations.ini new file mode 100644 index 0000000..e69de29 diff --git a/docker/all-in-one/etc/pgbouncer-custom/ssl-config.ini b/docker/all-in-one/etc/pgbouncer-custom/ssl-config.ini new file mode 100644 index 0000000..69a8025 --- /dev/null +++ b/docker/all-in-one/etc/pgbouncer-custom/ssl-config.ini @@ -0,0 +1,4 @@ +client_tls_sslmode = allow +client_tls_ca_file = /etc/ssl/certs/postgres/ca.crt +client_tls_key_file = /etc/ssl/private/server.key +client_tls_cert_file = /etc/ssl/certs/postgres/server.crt diff --git a/docker/all-in-one/etc/pgbouncer/pgbouncer.ini b/docker/all-in-one/etc/pgbouncer/pgbouncer.ini new file mode 100644 index 0000000..5a36ac1 --- /dev/null +++ b/docker/all-in-one/etc/pgbouncer/pgbouncer.ini @@ -0,0 +1,363 @@ +;;; +;;; PgBouncer configuration file +;;; + +;; database name = connect string +;; +;; connect string params: +;; dbname= host= port= user= password= auth_user= +;; client_encoding= datestyle= timezone= +;; pool_size= reserve_pool= max_db_connections= +;; pool_mode= connect_query= application_name= +[databases] +* = host=localhost auth_user=pgbouncer + +;; foodb over Unix socket +;foodb = + +;; redirect bardb to bazdb on localhost 
+;bardb = host=localhost dbname=bazdb + +;; access to dest database will go with single user +;forcedb = host=localhost port=300 user=baz password=foo client_encoding=UNICODE datestyle=ISO connect_query='SELECT 1' + +;; use custom pool sizes +;nondefaultdb = pool_size=50 reserve_pool=10 + +;; use auth_user with auth_query if user not present in auth_file +;; auth_user must exist in auth_file +; foodb = auth_user=bar + +;; fallback connect string +;* = host=testserver + +;; User-specific configuration +[users] + +;user1 = pool_mode=transaction max_user_connections=10 + +;; Configuration section +[pgbouncer] + +;;; +;;; Administrative settings +;;; + +pidfile = /var/run/pgbouncer/pgbouncer.pid + +;;; +;;; Where to wait for clients +;;; + +;; IP address or * which means all IPs +listen_addr = * +listen_port = 6543 + +;; Unix socket is also used for -R. +;; On Debian it should be /var/run/postgresql +unix_socket_dir = /tmp +;unix_socket_mode = 0777 +;unix_socket_group = + +;;; +;;; TLS settings for accepting clients +;;; + +;; disable, allow, require, verify-ca, verify-full +;client_tls_sslmode = disable + +;; Path to file that contains trusted CA certs +;client_tls_ca_file = + +;; Private key and cert to present to clients. +;; Required for accepting TLS connections from clients. +;client_tls_key_file = +;client_tls_cert_file = + +;; fast, normal, secure, legacy, +;client_tls_ciphers = fast + +;; all, secure, tlsv1.0, tlsv1.1, tlsv1.2, tlsv1.3 +;client_tls_protocols = secure + +;; none, auto, legacy +;client_tls_dheparams = auto + +;; none, auto, +;client_tls_ecdhcurve = auto + +;;; +;;; TLS settings for connecting to backend databases +;;; + +;; disable, allow, require, verify-ca, verify-full +;server_tls_sslmode = disable + +;; Path to that contains trusted CA certs +;server_tls_ca_file = + +;; Private key and cert to present to backend. +;; Needed only if backend server require client cert. 
+;server_tls_key_file = +;server_tls_cert_file = + +;; all, secure, tlsv1.0, tlsv1.1, tlsv1.2, tlsv1.3 +;server_tls_protocols = secure + +;; fast, normal, secure, legacy, +;server_tls_ciphers = fast + +;;; +;;; Authentication settings +;;; + +;; any, trust, plain, md5, cert, hba, pam +auth_type = scram-sha-256 +auth_file = /etc/pgbouncer/userlist.txt + +;; Path to HBA-style auth config +;auth_hba_file = + +;; Query to use to fetch password from database. Result +;; must have 2 columns - username and password hash. +auth_query = SELECT * FROM pgbouncer.get_auth($1) + +;;; +;;; Users allowed into database 'pgbouncer' +;;; + +;; comma-separated list of users who are allowed to change settings +admin_users = pgbouncer + +;; comma-separated list of users who are just allowed to use SHOW command +stats_users = pgbouncer + +;;; +;;; Pooler personality questions +;;; + +;; When server connection is released back to pool: +;; session - after client disconnects (default) +;; transaction - after transaction finishes +;; statement - after statement finishes +pool_mode = transaction + +;; Query for cleaning connection immediately after releasing from +;; client. No need to put ROLLBACK here, pgbouncer does not reuse +;; connections where transaction is left open. +;server_reset_query = DISCARD ALL + +;; Whether server_reset_query should run in all pooling modes. If it +;; is off, server_reset_query is used only for session-pooling. +;server_reset_query_always = 0 + +;; Comma-separated list of parameters to ignore when given in startup +;; packet. Newer JDBC versions require the extra_float_digits here. +ignore_startup_parameters = extra_float_digits + +;; When taking idle server into use, this query is run first. +;server_check_query = select 1 + +;; If server was used more recently that this many seconds ago, +; skip the check query. Value 0 may or may not run in immediately. 
+;server_check_delay = 30 + +;; Close servers in session pooling mode after a RECONNECT, RELOAD, +;; etc. when they are idle instead of at the end of the session. +;server_fast_close = 0 + +;; Use as application_name on server. +;application_name_add_host = 0 + +;; Period for updating aggregated stats. +;stats_period = 60 + +;;; +;;; Connection limits +;;; + +;; Total number of clients that can connect +;max_client_conn = 100 + +;; Default pool size. 20 is good number when transaction pooling +;; is in use, in session pooling it needs to be the number of +;; max clients you want to handle at any moment +default_pool_size = 15 + +;; Minimum number of server connections to keep in pool. +;min_pool_size = 0 + +; how many additional connection to allow in case of trouble +;reserve_pool_size = 0 + +;; If a clients needs to wait more than this many seconds, use reserve +;; pool. +;reserve_pool_timeout = 5 + +;; Maximum number of server connections for a database +;max_db_connections = 0 + +;; Maximum number of server connections for a user +;max_user_connections = 0 + +;; If off, then server connections are reused in LIFO manner +;server_round_robin = 0 + +;;; +;;; Logging +;;; + +;; Syslog settings +;syslog = 0 +;syslog_facility = daemon +;syslog_ident = pgbouncer + +;; log if client connects or server connection is made +;log_connections = 1 + +;; log if and why connection was closed +;log_disconnections = 1 + +;; log error messages pooler sends to clients +;log_pooler_errors = 1 + +;; write aggregated stats into log +;log_stats = 1 + +;; Logging verbosity. Same as -v switch on command line. +;verbose = 0 + +;;; +;;; Timeouts +;;; + +;; Close server connection if its been connected longer. +;server_lifetime = 3600 + +;; Close server connection if its not been used in this time. Allows +;; to clean unnecessary connections from pool after peak. +;server_idle_timeout = 600 + +;; Cancel connection attempt if server does not answer takes longer. 
+;server_connect_timeout = 15 + +;; If server login failed (server_connect_timeout or auth failure) +;; then wait this many second. +;server_login_retry = 15 + +;; Dangerous. Server connection is closed if query does not return in +;; this time. Should be used to survive network problems, _not_ as +;; statement_timeout. (default: 0) +;query_timeout = 0 + +;; Dangerous. Client connection is closed if the query is not +;; assigned to a server in this time. Should be used to limit the +;; number of queued queries in case of a database or network +;; failure. (default: 120) +;query_wait_timeout = 120 + +;; Dangerous. Client connection is closed if no activity in this +;; time. Should be used to survive network problems. (default: 0) +;client_idle_timeout = 0 + +;; Disconnect clients who have not managed to log in after connecting +;; in this many seconds. +;client_login_timeout = 60 + +;; Clean automatically created database entries (via "*") if they stay +;; unused in this many seconds. +; autodb_idle_timeout = 3600 + +;; Close connections which are in "IDLE in transaction" state longer +;; than this many seconds. +;idle_transaction_timeout = 0 + +;; How long SUSPEND/-R waits for buffer flush before closing +;; connection. +;suspend_timeout = 10 + +;;; +;;; Low-level tuning options +;;; + +;; buffer for streaming packets +;pkt_buf = 4096 + +;; man 2 listen +;listen_backlog = 128 + +;; Max number pkt_buf to process in one event loop. +;sbuf_loopcnt = 5 + +;; Maximum PostgreSQL protocol packet size. +;max_packet_size = 2147483647 + +;; Set SO_REUSEPORT socket option +;so_reuseport = 0 + +;; networking options, for info: man 7 tcp + +;; Linux: Notify program about new connection only if there is also +;; data received. (Seconds to wait.) On Linux the default is 45, on +;; other OS'es 0. 
+;tcp_defer_accept = 0 + +;; In-kernel buffer size (Linux default: 4096) +;tcp_socket_buffer = 0 + +;; whether tcp keepalive should be turned on (0/1) +;tcp_keepalive = 1 + +;; The following options are Linux-specific. They also require +;; tcp_keepalive=1. + +;; Count of keepalive packets +;tcp_keepcnt = 0 + +;; How long the connection can be idle before sending keepalive +;; packets +;tcp_keepidle = 0 + +;; The time between individual keepalive probes +;tcp_keepintvl = 0 + +;; How long may transmitted data remain unacknowledged before TCP +;; connection is closed (in milliseconds) +;tcp_user_timeout = 0 + +;; DNS lookup caching time +;dns_max_ttl = 15 + +;; DNS zone SOA lookup period +;dns_zone_check_period = 0 + +;; DNS negative result caching time +;dns_nxdomain_ttl = 15 + +;; Custom resolv.conf file, to set custom DNS servers or other options +;; (default: empty = use OS settings) +;resolv_conf = /etc/pgbouncer/resolv.conf + +;;; +;;; Random stuff +;;; + +;; Hackish security feature. Helps against SQL injection: when PQexec +;; is disabled, multi-statement cannot be made. +;disable_pqexec = 0 + +;; Config file to use for next RELOAD/SIGHUP +;; By default contains config file from command line. +;conffile + +;; Windows service name to register as. job_name is alias for +;; service_name, used by some Skytools scripts. 
+;service_name = pgbouncer +;job_name = pgbouncer + +;; Read additional config from other file +;%include /etc/pgbouncer/pgbouncer-other.ini + +%include /etc/pgbouncer-custom/generated-optimizations.ini +%include /etc/pgbouncer-custom/custom-overrides.ini +# %include /etc/pgbouncer-custom/ssl-config.ini diff --git a/docker/all-in-one/etc/pgbouncer/userlist.txt b/docker/all-in-one/etc/pgbouncer/userlist.txt new file mode 100644 index 0000000..e69de29 diff --git a/docker/all-in-one/etc/postgresql-custom/custom-overrides.conf b/docker/all-in-one/etc/postgresql-custom/custom-overrides.conf new file mode 100644 index 0000000..e69de29 diff --git a/docker/all-in-one/etc/postgresql-custom/generated-optimizations.conf b/docker/all-in-one/etc/postgresql-custom/generated-optimizations.conf new file mode 100644 index 0000000..e69de29 diff --git a/docker/all-in-one/etc/postgresql-custom/postgresql-platform-defaults.conf b/docker/all-in-one/etc/postgresql-custom/postgresql-platform-defaults.conf new file mode 100644 index 0000000..e62a1de --- /dev/null +++ b/docker/all-in-one/etc/postgresql-custom/postgresql-platform-defaults.conf @@ -0,0 +1,9 @@ +# these get imported _after_ the user specified overrides +row_security = on +wal_level = logical +max_wal_senders = 10 +max_replication_slots = 5 +log_connections = on +statement_timeout = 120000 +jit = off +pgaudit.log = 'ddl' diff --git a/docker/all-in-one/etc/postgresql.schema.sql b/docker/all-in-one/etc/postgresql.schema.sql new file mode 100644 index 0000000..ecffdf7 --- /dev/null +++ b/docker/all-in-one/etc/postgresql.schema.sql @@ -0,0 +1,16 @@ +\set admin_pass `echo "${tealbase_ADMIN_PASSWORD:-$POSTGRES_PASSWORD}"` +\set pgrst_pass `echo "${AUTHENTICATOR_PASSWORD:-$POSTGRES_PASSWORD}"` +\set pgbouncer_pass `echo "${PGBOUNCER_PASSWORD:-$POSTGRES_PASSWORD}"` +\set auth_pass `echo "${tealbase_AUTH_ADMIN_PASSWORD:-$POSTGRES_PASSWORD}"` +\set storage_pass `echo "${tealbase_STORAGE_ADMIN_PASSWORD:-$POSTGRES_PASSWORD}"` +\set 
replication_pass `echo "${tealbase_REPLICATION_ADMIN_PASSWORD:-$POSTGRES_PASSWORD}"` +\set read_only_pass `echo "${tealbase_READ_ONLY_USER_PASSWORD:-$POSTGRES_PASSWORD}"` + +ALTER USER tealbase_admin WITH PASSWORD :'admin_pass'; +ALTER USER authenticator WITH PASSWORD :'pgrst_pass'; +ALTER USER pgbouncer WITH PASSWORD :'pgbouncer_pass'; +ALTER USER tealbase_auth_admin WITH PASSWORD :'auth_pass'; +ALTER USER tealbase_storage_admin WITH PASSWORD :'storage_pass'; +ALTER USER tealbase_replication_admin WITH PASSWORD :'replication_pass'; +ALTER ROLE tealbase_read_only_user WITH PASSWORD :'read_only_pass'; +ALTER ROLE tealbase_admin SET search_path TO "$user",public,auth,extensions; diff --git a/docker/all-in-one/etc/postgresql/logging.conf b/docker/all-in-one/etc/postgresql/logging.conf new file mode 100644 index 0000000..b8d64da --- /dev/null +++ b/docker/all-in-one/etc/postgresql/logging.conf @@ -0,0 +1,33 @@ +# - Where to Log - + +log_destination = 'csvlog' # Valid values are combinations of + # stderr, csvlog, syslog, and eventlog, + # depending on platform. csvlog + # requires logging_collector to be on. + +# This is used when logging to stderr: +logging_collector = on # Enable capturing of stderr and csvlog + # into log files. Required to be on for + # csvlogs. + # (change requires restart) + +# These are only used if logging_collector is on: +log_directory = '/var/log/postgresql' # directory where log files are written, + # can be absolute or relative to PGDATA +log_filename = 'postgresql.log' # log file name pattern, + # can include strftime() escapes +log_file_mode = 0640 # creation mode for log files, + # begin with 0 to use octal notation +log_rotation_age = 0 # Automatic rotation of logfiles will + # happen after that time. 0 disables. +log_rotation_size = 0 # Automatic rotation of logfiles will + # happen after that much log output. + # 0 disables. 
+#log_truncate_on_rotation = off # If on, an existing log file with the + # same name as the new log file will be + # truncated rather than appended to. + # But such truncation only occurs on + # time-driven rotation, not on restarts + # or size-driven rotation. Default is + # off, meaning append to existing files + # in all cases. diff --git a/docker/all-in-one/etc/postgresql/pg_hba.conf b/docker/all-in-one/etc/postgresql/pg_hba.conf new file mode 100755 index 0000000..76bd2f0 --- /dev/null +++ b/docker/all-in-one/etc/postgresql/pg_hba.conf @@ -0,0 +1,94 @@ +# PostgreSQL Client Authentication Configuration File +# =================================================== +# +# Refer to the "Client Authentication" section in the PostgreSQL +# documentation for a complete description of this file. A short +# synopsis follows. +# +# This file controls: which hosts are allowed to connect, how clients +# are authenticated, which PostgreSQL user names they can use, which +# databases they can access. Records take one of these forms: +# +# local DATABASE USER METHOD [OPTIONS] +# host DATABASE USER ADDRESS METHOD [OPTIONS] +# hostssl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostgssenc DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnogssenc DATABASE USER ADDRESS METHOD [OPTIONS] +# +# (The uppercase items must be replaced by actual values.) +# +# The first field is the connection type: "local" is a Unix-domain +# socket, "host" is either a plain or SSL-encrypted TCP/IP socket, +# "hostssl" is an SSL-encrypted TCP/IP socket, and "hostnossl" is a +# non-SSL TCP/IP socket. Similarly, "hostgssenc" uses a +# GSSAPI-encrypted TCP/IP socket, while "hostnogssenc" uses a +# non-GSSAPI socket. +# +# DATABASE can be "all", "sameuser", "samerole", "replication", a +# database name, or a comma-separated list thereof. The "all" +# keyword does not match "replication". 
Access to replication +# must be enabled in a separate record (see example below). +# +# USER can be "all", a user name, a group name prefixed with "+", or a +# comma-separated list thereof. In both the DATABASE and USER fields +# you can also write a file name prefixed with "@" to include names +# from a separate file. +# +# ADDRESS specifies the set of hosts the record matches. It can be a +# host name, or it is made up of an IP address and a CIDR mask that is +# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that +# specifies the number of significant bits in the mask. A host name +# that starts with a dot (.) matches a suffix of the actual host name. +# Alternatively, you can write an IP address and netmask in separate +# columns to specify the set of hosts. Instead of a CIDR-address, you +# can write "samehost" to match any of the server's own IP addresses, +# or "samenet" to match any address in any subnet that the server is +# directly connected to. +# +# METHOD can be "trust", "reject", "md5", "password", "scram-sha-256", +# "gss", "sspi", "ident", "peer", "pam", "ldap", "radius" or "cert". +# Note that "password" sends passwords in clear text; "md5" or +# "scram-sha-256" are preferred since they send encrypted passwords. +# +# OPTIONS are a set of options for the authentication in the format +# NAME=VALUE. The available options depend on the different +# authentication methods -- refer to the "Client Authentication" +# section in the documentation for a list of which options are +# available for which authentication methods. +# +# Database and user names containing spaces, commas, quotes and other +# special characters must be quoted. Quoting one of the keywords +# "all", "sameuser", "samerole" or "replication" makes the name lose +# its special character, and just match a database or username with +# that name. +# +# This file is read on server startup and when the server receives a +# SIGHUP signal. 
If you edit the file on a running system, you have to +# SIGHUP the server for the changes to take effect, run "pg_ctl reload", +# or execute "SELECT pg_reload_conf()". +# +# Put your actual configuration here +# ---------------------------------- +# +# If you want to allow non-local connections, you need to add more +# "host" records. In that case you will also need to make PostgreSQL +# listen on a non-local interface via the listen_addresses +# configuration parameter, or via the -i or -h command line switches. + +# TYPE DATABASE USER ADDRESS METHOD + +# trust local connections +local all tealbase_admin scram-sha-256 +local all all peer map=tealbase_map +host all all 127.0.0.1/32 trust +host all all ::1/128 trust + +# IPv4 external connections +host all all 10.0.0.0/8 scram-sha-256 +host all all 172.16.0.0/12 scram-sha-256 +host all all 192.168.0.0/16 scram-sha-256 +host all all 0.0.0.0/0 scram-sha-256 + +# IPv6 external connections +host all all ::0/0 scram-sha-256 diff --git a/docker/all-in-one/etc/postgrest/base.conf b/docker/all-in-one/etc/postgrest/base.conf new file mode 100644 index 0000000..e5120ed --- /dev/null +++ b/docker/all-in-one/etc/postgrest/base.conf @@ -0,0 +1,7 @@ +server-port="pgrst_server_port" +admin-server-port="pgrst_admin_server_port" +db-schema="pgrst_db_schemas" +db-extra-search-path="pgrst_db_extra_search_path" +db-anon-role="pgrst_db_anon_role" +jwt-secret="pgrst_jwt_secret" +db-uri="postgres://authenticator@localhost:5432/postgres?application_name=postgrest" diff --git a/docker/all-in-one/etc/postgrest/bootstrap.sh b/docker/all-in-one/etc/postgrest/bootstrap.sh new file mode 100755 index 0000000..9ac21d2 --- /dev/null +++ b/docker/all-in-one/etc/postgrest/bootstrap.sh @@ -0,0 +1,8 @@ +#! 
/usr/bin/env bash +set -euo pipefail +set -x + +cd "$(dirname "$0")" +cat "$@" > merged.conf + +/opt/postgrest merged.conf diff --git a/docker/all-in-one/etc/postgrest/generated.conf b/docker/all-in-one/etc/postgrest/generated.conf new file mode 100644 index 0000000..e69de29 diff --git a/docker/all-in-one/etc/salt/minion b/docker/all-in-one/etc/salt/minion new file mode 100644 index 0000000..29d8406 --- /dev/null +++ b/docker/all-in-one/etc/salt/minion @@ -0,0 +1,71 @@ +# Minions can connect to multiple masters simultaneously (all masters +# are "hot"), or can be configured to failover if a master becomes +# unavailable. Multiple hot masters are configured by setting this +# value to "str". Failover masters can be requested by setting +# to "failover". MAKE SURE TO SET master_alive_interval if you are +# using failover. +# Setting master_type to 'disable' lets you have a running minion (with engines and +# beacons) without a master connection +master_type: disable + +# The minion can locally cache the return data from jobs sent to it, this +# can be a good way to keep track of jobs the minion has executed +# (on the minion side). By default this feature is disabled, to enable, set +# cache_jobs to True. +cache_jobs: True + +# The minion can take a while to start up when lspci and/or dmidecode is used +# to populate the grains for the minion. Set this to False if you do not need +# GPU hardware grains for your minion. +enable_gpu_grains: False + +# Backup files that are replaced by file.managed and file.recurse under +# 'cachedir'/file_backup relative to their original location and appended +# with a timestamp. The only valid setting is "minion". Disabled by default. 
+# +# Alternatively this can be specified for each file in state files: +# /etc/ssh/sshd_config: +# file.managed: +# - source: salt://ssh/sshd_config +# - backup: minion +# +backup_mode: minion + +##### File Directory Settings ##### +########################################## +# The Salt Minion can redirect all file server operations to a local directory, +# this allows for the same state tree that is on the master to be used if +# copied completely onto the minion. This is a literal copy of the settings on +# the master but used to reference a local directory on the minion. + +# Set the file client. The client defaults to looking on the master server for +# files, but can be directed to look at the local file directory setting +# defined below by setting it to "local". Setting a local file_client runs the +# minion in masterless mode. +file_client: local + +# The file directory works on environments passed to the minion, each environment +# can have multiple root directories, the subdirectories in the multiple file +# roots cannot match, otherwise the downloaded files will not be able to be +# reliably ensured. A base environment is required to house the top file. +# Example: +# file_roots: +# base: +# - /srv/salt/ +# dev: +# - /srv/salt/dev/services +# - /srv/salt/dev/states +# prod: +# - /srv/salt/prod/services +# - /srv/salt/prod/states +# +file_roots: + base: + - /data/salt/state + +# The Salt pillar is searched for locally if file_client is set to local. 
If +# this is the case, and pillar data is defined, then the pillar_roots need to +# also be configured on the minion: +pillar_roots: + base: + - /data/salt/pillar diff --git a/docker/all-in-one/etc/sudoers.d/adminapi b/docker/all-in-one/etc/sudoers.d/adminapi new file mode 100644 index 0000000..e386014 --- /dev/null +++ b/docker/all-in-one/etc/sudoers.d/adminapi @@ -0,0 +1,27 @@ +Cmnd_Alias ENVOY = /usr/bin/supervisorctl start services\:envoy, /usr/bin/supervisorctl stop services\:envoy, /usr/bin/supervisorctl restart services\:envoy, /usr/bin/supervisorctl status services\:envoy +Cmnd_Alias KONG = /usr/bin/supervisorctl start services\:kong, /usr/bin/supervisorctl stop services\:kong, /usr/bin/supervisorctl restart services\:kong, /usr/bin/supervisorctl status services\:kong +Cmnd_Alias POSTGREST = /usr/bin/supervisorctl start services\:postgrest, /usr/bin/supervisorctl stop services\:postgrest, /usr/bin/supervisorctl restart services\:postgrest, /usr/bin/supervisorctl status services\:postgrest +Cmnd_Alias GOTRUE = /usr/bin/supervisorctl start services\:gotrue, /usr/bin/supervisorctl stop services\:gotrue, /usr/bin/supervisorctl restart services\:gotrue, /usr/bin/supervisorctl status services\:gotrue +Cmnd_Alias PGBOUNCER = /usr/bin/supervisorctl start pgbouncer, /usr/bin/supervisorctl stop pgbouncer, /usr/bin/supervisorctl restart pgbouncer, /usr/bin/supervisorctl status pgbouncer + +%adminapi ALL= NOPASSWD: /root/grow_fs.sh +%adminapi ALL= NOPASSWD: /root/manage_readonly_mode.sh +%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/prepare.sh +%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/initiate.sh +%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/complete.sh +%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/check.sh +%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/common.sh +%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/pgsodium_getkey.sh +%adminapi ALL= NOPASSWD: /usr/bin/supervisorctl reread 
+%adminapi ALL= NOPASSWD: /usr/bin/supervisorctl update +%adminapi ALL= NOPASSWD: /usr/bin/supervisorctl restart postgresql +%adminapi ALL= NOPASSWD: /usr/bin/supervisorctl status postgresql +%adminapi ALL= NOPASSWD: /usr/bin/supervisorctl restart adminapi +%adminapi ALL= NOPASSWD: /usr/bin/supervisorctl restart services\:* +%adminapi ALL= NOPASSWD: /usr/sbin/nft -f /etc/nftables/tealbase_managed.conf +%adminapi ALL= NOPASSWD: /usr/bin/admin-mgr +%adminapi ALL= NOPASSWD: ENVOY +%adminapi ALL= NOPASSWD: KONG +%adminapi ALL= NOPASSWD: POSTGREST +%adminapi ALL= NOPASSWD: GOTRUE +%adminapi ALL= NOPASSWD: PGBOUNCER diff --git a/docker/all-in-one/etc/supa-shutdown/shutdown.conf b/docker/all-in-one/etc/supa-shutdown/shutdown.conf new file mode 100644 index 0000000..384b935 --- /dev/null +++ b/docker/all-in-one/etc/supa-shutdown/shutdown.conf @@ -0,0 +1 @@ +SHUTDOWN_IDLE_TIME_MINUTES= diff --git a/docker/all-in-one/etc/supervisor/base-services/adminapi.conf b/docker/all-in-one/etc/supervisor/base-services/adminapi.conf new file mode 100644 index 0000000..2f5d0cd --- /dev/null +++ b/docker/all-in-one/etc/supervisor/base-services/adminapi.conf @@ -0,0 +1,10 @@ +[program:adminapi] +command=/opt/tealbase-admin-api +user=adminapi +autorestart=true +autostart=true +startretries=1000 +stdout_logfile=/var/log/services/adminapi.log +redirect_stderr=true +stdout_logfile_maxbytes=10MB +priority=50 diff --git a/docker/all-in-one/etc/supervisor/base-services/logrotate.conf b/docker/all-in-one/etc/supervisor/base-services/logrotate.conf new file mode 100644 index 0000000..a1ccea6 --- /dev/null +++ b/docker/all-in-one/etc/supervisor/base-services/logrotate.conf @@ -0,0 +1,11 @@ +[program:logrotate] +command=/usr/local/bin/run-logrotate.sh +autostart=true +autorestart=true +user=root +startretries=1000 +stdout_logfile=/var/log/services/logrotate.log +redirect_stderr=true +stdout_logfile_maxbytes=10MB +priority=50 +environment=DATA_VOLUME_MOUNTPOINT="%(ENV_DATA_VOLUME_MOUNTPOINT)s" diff 
--git a/docker/all-in-one/etc/supervisor/base-services/lsn-checkpoint-push.conf b/docker/all-in-one/etc/supervisor/base-services/lsn-checkpoint-push.conf new file mode 100644 index 0000000..36abcf8 --- /dev/null +++ b/docker/all-in-one/etc/supervisor/base-services/lsn-checkpoint-push.conf @@ -0,0 +1,10 @@ +[program:lsn-checkpoint-push] +command=/usr/bin/admin-mgr lsn-checkpoint-push --watch +user=root +autorestart=false +autostart=false +startretries=1000 +stdout_logfile=/var/log/services/lsn-push.log +redirect_stderr=true +stdout_logfile_maxbytes=10MB +priority=50 diff --git a/docker/all-in-one/etc/supervisor/base-services/pg_egress_collect.conf b/docker/all-in-one/etc/supervisor/base-services/pg_egress_collect.conf new file mode 100644 index 0000000..d340a9c --- /dev/null +++ b/docker/all-in-one/etc/supervisor/base-services/pg_egress_collect.conf @@ -0,0 +1,10 @@ +[program:pg_egress_collect] +command=/bin/bash -c "tcpdump -s 128 -Q out -nn -tt -vv -p -l 'tcp and (port 5432 or port 6543)' | perl /opt/pg_egress_collect/pg_egress_collect.pl" +user=root +autorestart=true +autostart=true +startretries=1000 +stdout_logfile=/var/log/services/pg_egress_collect.log +redirect_stderr=true +stdout_logfile_maxbytes=10MB +priority=50 diff --git a/docker/all-in-one/etc/supervisor/base-services/postgresql.conf b/docker/all-in-one/etc/supervisor/base-services/postgresql.conf new file mode 100644 index 0000000..a8b5c5d --- /dev/null +++ b/docker/all-in-one/etc/supervisor/base-services/postgresql.conf @@ -0,0 +1,13 @@ +[program:postgresql] +command=/usr/local/bin/postgres-entrypoint.sh postgres -D /etc/postgresql +user=postgres +stopsignal=INT +autorestart=true +autostart=true +startretries=1000 +priority=1 +# Inherit env vars from https://github.com/tealbase/postgres/blob/develop/Dockerfile#L800 +environment=POSTGRES_PASSWORD="%(ENV_POSTGRES_PASSWORD)s",POSTGRES_HOST="%(ENV_POSTGRES_HOST)s",HOME="/var/lib/postgresql" +stdout_logfile=/var/log/postgresql/init.log 
+redirect_stderr=true +stdout_logfile_maxbytes=10MB diff --git a/docker/all-in-one/etc/supervisor/base-services/supa-shutdown.conf b/docker/all-in-one/etc/supervisor/base-services/supa-shutdown.conf new file mode 100644 index 0000000..06b24a7 --- /dev/null +++ b/docker/all-in-one/etc/supervisor/base-services/supa-shutdown.conf @@ -0,0 +1,11 @@ +[program:supa-shutdown] +command=/usr/local/bin/supa-shutdown.sh +user=root +autorestart=true +autostart=false +startretries=1000 +stdout_logfile=/var/log/services/supa-shutdown.log +redirect_stderr=true +stdout_logfile_maxbytes=10MB +priority=50 +environment=MAX_IDLE_TIME_MINUTES="%(ENV_MAX_IDLE_TIME_MINUTES)s" diff --git a/docker/all-in-one/etc/supervisor/services/envoy.conf b/docker/all-in-one/etc/supervisor/services/envoy.conf new file mode 100644 index 0000000..2b33807 --- /dev/null +++ b/docker/all-in-one/etc/supervisor/services/envoy.conf @@ -0,0 +1,10 @@ +[program:envoy] +command=/opt/envoy-hot-restarter.py /opt/start-envoy.sh +user=envoy +autorestart=true +autostart=false +stopasgroup=true +startretries=1000 +stdout_logfile=/var/log/services/envoy.log +redirect_stderr=true +stdout_logfile_maxbytes=10MB diff --git a/docker/all-in-one/etc/supervisor/services/exporter.conf b/docker/all-in-one/etc/supervisor/services/exporter.conf new file mode 100644 index 0000000..fbe53d7 --- /dev/null +++ b/docker/all-in-one/etc/supervisor/services/exporter.conf @@ -0,0 +1,11 @@ +[program:exporter] +command=/opt/postgres_exporter/postgres_exporter --disable-settings-metrics --extend.query-path=/opt/postgres_exporter/queries.yml --disable-default-metrics --no-collector.locks --no-collector.replication --no-collector.replication_slot --no-collector.stat_bgwriter --no-collector.stat_database --no-collector.stat_user_tables --no-collector.statio_user_tables --no-collector.wal +user=root +autorestart=true +autostart=true +startretries=1000 +environment=DATA_SOURCE_NAME="host=localhost dbname=postgres sslmode=disable user=tealbase_admin 
pg_stat_statements.track=none application_name=postgres_exporter" +stdout_logfile=/var/log/services/exporter.log +redirect_stderr=true +stdout_logfile_maxbytes=10MB +priority=150 diff --git a/docker/all-in-one/etc/supervisor/services/fail2ban.conf b/docker/all-in-one/etc/supervisor/services/fail2ban.conf new file mode 100644 index 0000000..8000386 --- /dev/null +++ b/docker/all-in-one/etc/supervisor/services/fail2ban.conf @@ -0,0 +1,9 @@ +[program:fail2ban] +command=/usr/bin/fail2ban-client -f start +user=root +autorestart=true +autostart=true +stdout_logfile=/var/log/services/fail2ban.log +redirect_stderr=true +stdout_logfile_maxbytes=10MB +priority=200 diff --git a/docker/all-in-one/etc/supervisor/services/gotrue.conf b/docker/all-in-one/etc/supervisor/services/gotrue.conf new file mode 100644 index 0000000..679057e --- /dev/null +++ b/docker/all-in-one/etc/supervisor/services/gotrue.conf @@ -0,0 +1,10 @@ +[program:gotrue] +directory=/opt/gotrue +command=/opt/gotrue/gotrue --config /etc/gotrue.env +user=gotrue +startretries=1000 +autorestart=true +autostart=true +stdout_logfile=/var/log/services/gotrue.log +redirect_stderr=true +stdout_logfile_maxbytes=10MB diff --git a/docker/all-in-one/etc/supervisor/services/group.conf b/docker/all-in-one/etc/supervisor/services/group.conf new file mode 100644 index 0000000..ef6673d --- /dev/null +++ b/docker/all-in-one/etc/supervisor/services/group.conf @@ -0,0 +1,3 @@ +[group:services] +programs=gotrue,kong,postgrest +priority=100 diff --git a/docker/all-in-one/etc/supervisor/services/kong.conf b/docker/all-in-one/etc/supervisor/services/kong.conf new file mode 100644 index 0000000..04f5219 --- /dev/null +++ b/docker/all-in-one/etc/supervisor/services/kong.conf @@ -0,0 +1,11 @@ +[program:kong] +command=/init/start-kong.sh +user=kong +autorestart=true +autostart=true +stopasgroup=true +startretries=1000 +environment=KONG_NGINX_DAEMON="off" +stdout_logfile=/var/log/services/kong.log +redirect_stderr=true 
+stdout_logfile_maxbytes=10MB diff --git a/docker/all-in-one/etc/supervisor/services/pgbouncer.conf b/docker/all-in-one/etc/supervisor/services/pgbouncer.conf new file mode 100644 index 0000000..6926c34 --- /dev/null +++ b/docker/all-in-one/etc/supervisor/services/pgbouncer.conf @@ -0,0 +1,10 @@ +[program:pgbouncer] +command=/usr/sbin/pgbouncer /etc/pgbouncer/pgbouncer.ini +user=pgbouncer +stopsignal=INT +autorestart=false +autostart=false +stdout_logfile=/var/log/services/pgbouncer.log +redirect_stderr=true +stdout_logfile_maxbytes=10MB +priority=150 diff --git a/docker/all-in-one/etc/supervisor/services/postgrest.conf b/docker/all-in-one/etc/supervisor/services/postgrest.conf new file mode 100644 index 0000000..ad43b52 --- /dev/null +++ b/docker/all-in-one/etc/supervisor/services/postgrest.conf @@ -0,0 +1,10 @@ +[program:postgrest] +command=/etc/postgrest/bootstrap.sh /etc/postgrest/generated.conf /etc/postgrest/base.conf +user=postgrest +autorestart=true +autostart=true +stopasgroup=true +startretries=1000 +stdout_logfile=/var/log/services/postgrest.log +redirect_stderr=true +stdout_logfile_maxbytes=10MB diff --git a/docker/all-in-one/etc/supervisor/supervisord.conf b/docker/all-in-one/etc/supervisor/supervisord.conf new file mode 100644 index 0000000..d64f40f --- /dev/null +++ b/docker/all-in-one/etc/supervisor/supervisord.conf @@ -0,0 +1,170 @@ +; Sample supervisor config file. +; +; For more information on the config file, please see: +; http://supervisord.org/configuration.html +; +; Notes: +; - Shell expansion ("~" or "$HOME") is not supported. Environment +; variables can be expanded using this syntax: "%(ENV_HOME)s". +; - Quotes around values are not supported, except in the case of +; the environment= options as shown below. +; - Comments must have a leading space: "a=b ;comment" not "a=b;comment". +; - Command will be truncated if it looks like a config file comment, e.g. +; "command=bash -c 'foo ; bar'" will truncate to "command=bash -c 'foo ". 
+; +; Warning: +; Paths throughout this example file use /tmp because it is available on most +; systems. You will likely need to change these to locations more appropriate +; for your system. Some systems periodically delete older files in /tmp. +; Notably, if the socket file defined in the [unix_http_server] section below +; is deleted, supervisorctl will be unable to connect to supervisord. + +[unix_http_server] +file=/tmp/supervisor.sock ; the path to the socket file +chmod=0760 ; socket file mode (default 0700) +chown=root:root ; socket file uid:gid owner +;username=user ; default is no username (open server) +;password=123 ; default is no password (open server) + +; Security Warning: +; The inet HTTP server is not enabled by default. The inet HTTP server is +; enabled by uncommenting the [inet_http_server] section below. The inet +; HTTP server is intended for use within a trusted environment only. It +; should only be bound to localhost or only accessible from within an +; isolated, trusted network. The inet HTTP server does not support any +; form of encryption. The inet HTTP server does not use authentication +; by default (see the username= and password= options to add authentication). +; Never expose the inet HTTP server to the public internet. 
+ +;[inet_http_server] ; inet (TCP) server disabled by default +;port=127.0.0.1:9001 ; ip_address:port specifier, *:port for all iface +;username=user ; default is no username (open server) +;password=123 ; default is no password (open server) + +[supervisord] +logfile=/tmp/supervisord.log ; main log file; default $CWD/supervisord.log +logfile_maxbytes=50MB ; max main logfile bytes b4 rotation; default 50MB +logfile_backups=10 ; # of main logfile backups; 0 means none, default 10 +loglevel=info ; log level; default info; others: debug,warn,trace +pidfile=/tmp/supervisord.pid ; supervisord pidfile; default supervisord.pid +nodaemon=true ; start in foreground if true; default false +silent=false ; no logs to stdout if true; default false +minfds=1024 ; min. avail startup file descriptors; default 1024 +minprocs=200 ; min. avail process descriptors;default 200 +user=root ; setuid to this UNIX account at startup; recommended if root +;umask=022 ; process file creation umask; default 022 +;identifier=supervisor ; supervisord identifier, default is 'supervisor' +;directory=/tmp ; default is not to cd during start +;nocleanup=true ; don't clean up tempfiles at start; default false +;childlogdir=/tmp ; 'AUTO' child log dir, default $TEMP +;environment=KEY="value" ; key value pairs to add to environment +;strip_ansi=false ; strip ansi escape codes in logs; def. false + +; The rpcinterface:supervisor section must remain in the config file for +; RPC (supervisorctl/web interface) to work. Additional interfaces may be +; added by defining them in separate [rpcinterface:x] sections. + +[rpcinterface:supervisor] +supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface + +; The supervisorctl section configures how supervisorctl will connect to +; supervisord. configure it match the settings in either the unix_http_server +; or inet_http_server section. 
+ +[supervisorctl] +serverurl=unix:///tmp/supervisor.sock ; use a unix:// URL for a unix socket +;serverurl=http://127.0.0.1:9001 ; use an http:// url to specify an inet socket +;username=chris ; should be same as in [*_http_server] if set +;password=123 ; should be same as in [*_http_server] if set +;prompt=mysupervisor ; cmd line prompt (default "supervisor") +;history_file=~/.sc_history ; use readline history if available + +; The sample program section below shows all possible program subsection values. +; Create one or more 'real' program: sections to be able to control them under +; supervisor. + +;[program:theprogramname] +;command=/bin/cat ; the program (relative uses PATH, can take args) +;process_name=%(program_name)s ; process_name expr (default %(program_name)s) +;numprocs=1 ; number of processes copies to start (def 1) +;directory=/tmp ; directory to cwd to before exec (def no cwd) +;umask=022 ; umask for process (default None) +;priority=999 ; the relative start priority (default 999) +;autostart=true ; start at supervisord start (default: true) +;startsecs=1 ; # of secs prog must stay up to be running (def. 
1) +;startretries=3 ; max # of serial start failures when starting (default 3) +;autorestart=unexpected ; when to restart if exited after running (def: unexpected) +;exitcodes=0 ; 'expected' exit codes used with autorestart (default 0) +;stopsignal=QUIT ; signal used to kill process (default TERM) +;stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10) +;stopasgroup=false ; send stop signal to the UNIX process group (default false) +;killasgroup=false ; SIGKILL the UNIX process group (def false) +;user=chrism ; setuid to this UNIX account to run the program +;redirect_stderr=true ; redirect proc stderr to stdout (default false) +;stdout_logfile=/a/path ; stdout log path, NONE for none; default AUTO +;stdout_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB) +;stdout_logfile_backups=10 ; # of stdout logfile backups (0 means none, default 10) +;stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0) +;stdout_events_enabled=false ; emit events on stdout writes (default false) +;stdout_syslog=false ; send stdout to syslog with process name (default false) +;stderr_logfile=/a/path ; stderr log path, NONE for none; default AUTO +;stderr_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB) +;stderr_logfile_backups=10 ; # of stderr logfile backups (0 means none, default 10) +;stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0) +;stderr_events_enabled=false ; emit events on stderr writes (default false) +;stderr_syslog=false ; send stderr to syslog with process name (default false) +;environment=A="1",B="2" ; process environment additions (def no adds) +;serverurl=AUTO ; override serverurl computation (childutils) + +; The sample eventlistener section below shows all possible eventlistener +; subsection values. Create one or more 'real' eventlistener: sections to be +; able to handle event notifications sent by supervisord. 
+ +;[eventlistener:theeventlistenername] +;command=/bin/eventlistener ; the program (relative uses PATH, can take args) +;process_name=%(program_name)s ; process_name expr (default %(program_name)s) +;numprocs=1 ; number of processes copies to start (def 1) +;events=EVENT ; event notif. types to subscribe to (req'd) +;buffer_size=10 ; event buffer queue size (default 10) +;directory=/tmp ; directory to cwd to before exec (def no cwd) +;umask=022 ; umask for process (default None) +;priority=-1 ; the relative start priority (default -1) +;autostart=true ; start at supervisord start (default: true) +;startsecs=1 ; # of secs prog must stay up to be running (def. 1) +;startretries=3 ; max # of serial start failures when starting (default 3) +;autorestart=unexpected ; autorestart if exited after running (def: unexpected) +;exitcodes=0 ; 'expected' exit codes used with autorestart (default 0) +;stopsignal=QUIT ; signal used to kill process (default TERM) +;stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10) +;stopasgroup=false ; send stop signal to the UNIX process group (default false) +;killasgroup=false ; SIGKILL the UNIX process group (def false) +;user=chrism ; setuid to this UNIX account to run the program +;redirect_stderr=false ; redirect_stderr=true is not allowed for eventlisteners +;stdout_logfile=/a/path ; stdout log path, NONE for none; default AUTO +;stdout_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB) +;stdout_logfile_backups=10 ; # of stdout logfile backups (0 means none, default 10) +;stdout_events_enabled=false ; emit events on stdout writes (default false) +;stdout_syslog=false ; send stdout to syslog with process name (default false) +;stderr_logfile=/a/path ; stderr log path, NONE for none; default AUTO +;stderr_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB) +;stderr_logfile_backups=10 ; # of stderr logfile backups (0 means none, default 10) +;stderr_events_enabled=false ; emit events on 
stderr writes (default false) +;stderr_syslog=false ; send stderr to syslog with process name (default false) +;environment=A="1",B="2" ; process environment additions +;serverurl=AUTO ; override serverurl computation (childutils) + +; The sample group section below shows all possible group values. Create one +; or more 'real' group: sections to create "heterogeneous" process groups. + +;[group:thegroupname] +;programs=progname1,progname2 ; each refers to 'x' in [program:x] definitions +;priority=999 ; the relative start priority (default 999) + +; The [include] section can just contain the "files" setting. This +; setting can list multiple files (separated by whitespace or +; newlines). It can also contain wildcards. The filenames are +; interpreted as relative to this file. Included files *cannot* +; include files themselves. + +[include] +files = base-services/*.conf diff --git a/docker/all-in-one/etc/tmpfiles.d/pgbouncer.conf b/docker/all-in-one/etc/tmpfiles.d/pgbouncer.conf new file mode 100644 index 0000000..d5d2cd4 --- /dev/null +++ b/docker/all-in-one/etc/tmpfiles.d/pgbouncer.conf @@ -0,0 +1,2 @@ +# Directory for PostgreSQL sockets, lockfiles and stats tempfiles +d /run/pgbouncer 2775 pgbouncer postgres - - \ No newline at end of file diff --git a/docker/all-in-one/etc/vector/vector.yaml b/docker/all-in-one/etc/vector/vector.yaml new file mode 100644 index 0000000..8bcf867 --- /dev/null +++ b/docker/all-in-one/etc/vector/vector.yaml @@ -0,0 +1,306 @@ +data_dir: /var/lib/vector +sources: + gotrue_log: + type: file + include: + - /var/log/services/gotrue.log + + postgrest_log: + type: file + include: + - /var/log/services/postgrest.log + + pgbouncer_log: + type: file + include: + - /var/log/services/pgbouncer.log + + pitr_log: + type: file + include: + - /var/log/wal-g/pitr.log + read_from: end + + postgres_log: + type: file + include: + - /var/log/postgresql/postgres*.csv + read_from: end + multiline: + start_pattern: '^20[0-9][0-9]-[0-1][0-9]-[0-3][0-9] 
[0-2][0-9]:[0-5][0-9]:[0-5][0-9].[0-9]{3} UTC,"' + mode: halt_before + condition_pattern: '^20[0-9][0-9]-[0-1][0-9]-[0-3][0-9] [0-2][0-9]:[0-5][0-9]:[0-5][0-9].[0-9]{3} UTC,"' + timeout_ms: 500 + +transforms: + csv_parse: + type: remap + inputs: + - postgres_log + source: |- + csv_data = parse_csv!(.message) + .metadata.parsed.timestamp = csv_data[0] + .metadata.parsed.user_name = csv_data[1] + .metadata.parsed.database_name = csv_data[2] + .metadata.parsed.process_id = to_int(csv_data[3]) ?? null + .metadata.parsed.connection_from = csv_data[4] + .metadata.parsed.session_id = csv_data[5] + .metadata.parsed.session_line_num = to_int(csv_data[6]) ?? null + .metadata.parsed.command_tag = csv_data[7] + .metadata.parsed.session_start_time = csv_data[8] + .metadata.parsed.virtual_transaction_id = csv_data[9] + .metadata.parsed.transaction_id = to_int(csv_data[10]) ?? null + .metadata.parsed.error_severity = csv_data[11] + .metadata.parsed.sql_state_code = csv_data[12] + .metadata.parsed.message = csv_data[13] + .metadata.parsed.detail = csv_data[14] + .metadata.parsed.hint = csv_data[15] + .metadata.parsed.internal_query = csv_data[16] + .metadata.parsed.internal_query_pos = to_int(csv_data[17]) ?? null + .metadata.parsed.context = csv_data[18] + .metadata.parsed.query = csv_data[19] + .metadata.parsed.query_pos = to_int(csv_data[20]) ?? null + .metadata.parsed.location = csv_data[21] + .metadata.parsed.application_name = csv_data[22] + .metadata.parsed.backend_type = csv_data[23] + .metadata.parsed.leader_pid = to_int(csv_data[24]) ?? null + .metadata.parsed.query_id = to_int(csv_data[25]) ?? null + + z_ts = replace!(.metadata.parsed.timestamp, " UTC", "Z") + iso8601_ts = replace(z_ts, " ", "T") + + .timestamp = iso8601_ts + + # Sends original csv log line duplicating data. Used for QA. 
+ # .metadata.parsed_from = .message + + .message = del(.metadata.parsed.message) + .metadata.host = del(.host) + del(.file) + del(.source_type) + + drop_metrics: + type: filter + inputs: + - csv_parse + condition: > + .metadata.parsed.application_name != "postgres_exporter" && .metadata.parsed.application_name != "realtime_rls" && !contains!(.message, "disconnection: session time") + + add_project_ref: + type: add_fields + inputs: + - drop_metrics + fields: + project: {{ .ProjectRef }} + + auth_failures: + type: filter + inputs: + - postgres_log + condition: >- + contains!(.message, "password authentication failed for user") + + filter_pgbouncer_stats: + type: filter + inputs: + - pgbouncer_log + condition: >- + !starts_with!(.message, "stats:") && !starts_with!(.message, "kernel file descriptor limit") && !contains!(.message, "FIXME") + + filter_postgrest_stats: + type: filter + inputs: + - postgrest_log + condition: >- + !starts_with!(.message, "+") && !starts_with!(.message, "INFO:") && !contains!(.message, "Admin server listening") + + gotrue_to_object: + inputs: + - gotrue_log + type: remap + source: |2- + .project = "{{ .ProjectRef }}" + + .parsed, err = parse_json(.message) + if err == null { + .metadata = .parsed + .metadata.msg = .parsed.msg + .timestamp = del(.metadata.time) + } + del(.parsed) + .metadata.host = del(.host) + + del(.source_type) + del(.PRIORITY) + del(.SYSLOG_FACILITY) + del(.SYSLOG_IDENTIFIER) + del(._BOOT_ID) + del(._CAP_EFFECTIVE) + del(._CMDLINE) + del(._COMM) + del(._EXE) + del(._GID) + del(._MACHINE_ID) + del(._PID) + del(._SELINUX_CONTEXT) + del(._STREAM_ID) + del(._SYSTEMD_CGROUP) + del(._SYSTEMD_INVOCATION_ID) + del(._SYSTEMD_SLICE) + del(._SYSTEMD_UNIT) + del(._TRANSPORT) + del(._UID) + del(.__MONOTONIC_TIMESTAMP) + del(.__REALTIME_TIMESTAMP) + + postgrest_to_object: + inputs: + - filter_postgrest_stats + type: remap + source: |2- + .project = "{{ .ProjectRef }}" + + # removes timestamp embedded in log since Vector already sends 
it + .message = replace!(.message, r'^\d+/\w+/\d+:\d+:\d+:\d+\s\+\d+:\s', "") + .metadata.host = del(.host) + del(.source_type) + del(.PRIORITY) + del(.SYSLOG_FACILITY) + del(.SYSLOG_IDENTIFIER) + del(._BOOT_ID) + del(._CAP_EFFECTIVE) + del(._CMDLINE) + del(._COMM) + del(._EXE) + del(._GID) + del(._MACHINE_ID) + del(._PID) + del(._SELINUX_CONTEXT) + del(._STREAM_ID) + del(._SYSTEMD_CGROUP) + del(._SYSTEMD_INVOCATION_ID) + del(._SYSTEMD_SLICE) + del(._SYSTEMD_UNIT) + del(._TRANSPORT) + del(._UID) + del(.__MONOTONIC_TIMESTAMP) + del(.__REALTIME_TIMESTAMP) + + pgbouncer_to_object: + inputs: + - filter_pgbouncer_stats + type: remap + source: |2- + .project = "{{ .ProjectRef }}" + .metadata.host = del(.host) + del(.source_type) + del(.PRIORITY) + del(.SYSLOG_IDENTIFIER) + del(._BOOT_ID) + del(._CAP_EFFECTIVE) + del(._CMDLINE) + del(._COMM) + del(._EXE) + del(._GID) + del(._MACHINE_ID) + del(._PID) + del(._SELINUX_CONTEXT) + del(._SOURCE_REALTIME_TIMESTAMP) + del(._SYSTEMD_CGROUP) + del(._SYSTEMD_INVOCATION_ID) + del(._SYSTEMD_SLICE) + del(._SYSTEMD_UNIT) + del(._TRANSPORT) + del(._UID) + del(.__MONOTONIC_TIMESTAMP) + del(.__REALTIME_TIMESTAMP) + + pitr_to_object: + inputs: + - pitr_log + type: remap + source: |2- + .project = "{{ .ProjectRef }}" + + .parsed, err = parse_key_value(.message) + if err == null { + .metadata = .parsed + .metadata.host = del(.host) + .message = del(.metadata.msg) + .timestamp = del(.metadata.time) + } + + del(.parsed) + del(.source_type) + del(.file) + + filter_pitr_error: + inputs: + - pitr_to_object + type: filter + condition: > + .metadata.level != "info" + +sinks: + http_gotrue: + type: "http" + inputs: + - gotrue_to_object + encoding: + codec: "json" + method: "post" + compression: none + request: + retry_max_duration_secs: 10 + uri: "https://{{ .LogflareHost }}/logs?api_key={{ .ApiKey }}&source={{ .GotrueSource }}" + + http_postgrest: + type: http + inputs: + - postgrest_to_object + encoding: + codec: "json" + method: "post" + 
compression: none + request: + retry_max_duration_secs: 10 + uri: "https://{{ .LogflareHost }}/logs?api_key={{ .ApiKey }}&source={{ .PostgrestSource }}" + + http_pgbouncer: + type: http + inputs: + - pgbouncer_to_object + encoding: + codec: json + compression: none + uri: "https://{{ .LogflareHost }}/logs?api_key={{ .ApiKey }}&source={{ .PgbouncerSource }}" + + http_pitr_error: + type: http + inputs: + - filter_pitr_error + encoding: + codec: json + compression: none + uri: "https://{{ .LogflareHost }}/logs?api_key={{ .ApiKey }}&source={{ .PitrErrorsSource }}" + + http_postgres: + type: http + inputs: + - add_project_ref + encoding: + codec: "json" + method: "post" + compression: none + request: + retry_max_duration_secs: 10 + uri: "https://{{ .LogflareHost }}/logs?api_key={{ .ApiKey }}&source={{ .DbSource }}" + + file_postgres: + type: file + inputs: + - auth_failures + encoding: + codec: text + path: >- + /var/log/postgresql/auth-failures.csv diff --git a/docker/all-in-one/healthcheck.sh b/docker/all-in-one/healthcheck.sh new file mode 100755 index 0000000..030c6d5 --- /dev/null +++ b/docker/all-in-one/healthcheck.sh @@ -0,0 +1,46 @@ +#!/bin/bash +set -eou pipefail + +# database up +pg_isready -U postgres -h localhost -p 5432 + +if [ -f "/tmp/init.json" ]; then + ADMIN_API_KEY=${ADMIN_API_KEY:-$(jq -r '.["tealbase_admin_key"]' /tmp/init.json)} +fi + +# adminapi up +if [ -d "$ADMIN_API_CERT_DIR" ]; then + curl -sSkf "https://localhost:$ADMIN_API_PORT/health" -H "apikey: $ADMIN_API_KEY" +else + curl -sSf "http://localhost:$ADMIN_API_PORT/health" -H "apikey: $ADMIN_API_KEY" +fi + +if [ "${POSTGRES_ONLY:-}" ]; then + exit 0 +fi + +# postgrest up +curl -sSfI "http://localhost:$PGRST_ADMIN_SERVER_PORT/ready" + +# gotrue up +curl -sSf "http://localhost:$GOTRUE_API_PORT/health" + +if [ "${ENVOY_ENABLED:-}" == "true" ]; then + # envoy up + curl -sSfI "http://localhost:$ENVOY_HTTP_PORT/health" +else + # kong up + kong health +fi + +# fail2ban up +fail2ban-client status + 
+# prometheus exporter up +curl -sSfI "http://localhost:$PGEXPORTER_PORT/metrics" + +# vector is up (if starting logflare) +# TODO: make this non-conditional once we set up local logflare for testinfra +if [ -n "${LOGFLARE_API_KEY:-}" ]; then + curl -sSfI "http://localhost:$VECTOR_API_PORT/health" +fi diff --git a/docker/all-in-one/init/configure-admin-mgr.sh b/docker/all-in-one/init/configure-admin-mgr.sh new file mode 100755 index 0000000..98ebf6c --- /dev/null +++ b/docker/all-in-one/init/configure-admin-mgr.sh @@ -0,0 +1,8 @@ +#!/bin/bash +set -eou pipefail + +touch "/var/log/wal-g/pitr.log" +chown postgres:postgres "/var/log/wal-g/pitr.log" +chmod 0666 "/var/log/wal-g/pitr.log" + +/usr/local/bin/configure-shim.sh /dist/admin-mgr /usr/bin/admin-mgr diff --git a/docker/all-in-one/init/configure-adminapi.sh b/docker/all-in-one/init/configure-adminapi.sh new file mode 100755 index 0000000..3c82898 --- /dev/null +++ b/docker/all-in-one/init/configure-adminapi.sh @@ -0,0 +1,56 @@ +#!/bin/bash +set -eou pipefail + +ADMIN_API_CONF=/etc/adminapi/adminapi.yaml +touch /var/log/services/adminapi.log + +ADMINAPI_CUSTOM_DIR="${DATA_VOLUME_MOUNTPOINT}/etc/adminapi" + +/usr/local/bin/configure-shim.sh /dist/tealbase-admin-api /opt/tealbase-admin-api + +if [ -f "${INIT_PAYLOAD_PATH:-}" ]; then + echo "init adminapi payload" + tar -xzvf "$INIT_PAYLOAD_PATH" -C / ./etc/adminapi/adminapi.yaml + chown adminapi:adminapi ./etc/adminapi/adminapi.yaml + + mkdir -p $ADMIN_API_CERT_DIR + tar -xzvf "$INIT_PAYLOAD_PATH" -C $ADMIN_API_CERT_DIR --strip-components 2 ./ssl/server.crt + tar -xzvf "$INIT_PAYLOAD_PATH" -C $ADMIN_API_CERT_DIR --strip-components 2 ./ssl/server.key + chown -R adminapi:root $ADMIN_API_CERT_DIR + chmod 700 -R $ADMIN_API_CERT_DIR +else + PROJECT_REF=${PROJECT_REF:-default} + PGBOUNCER_PASSWORD=${PGBOUNCER_PASSWORD:-$POSTGRES_PASSWORD} + tealbase_URL=${tealbase_URL:-https://api.tealbase.io/system} + REPORTING_TOKEN=${REPORTING_TOKEN:-token} + + sed -i "s|{{ .JwtSecret 
}}|$JWT_SECRET|g" $ADMIN_API_CONF + sed -i "s|{{ .PgbouncerPassword }}|$PGBOUNCER_PASSWORD|g" $ADMIN_API_CONF + sed -i "s|{{ .ProjectRef }}|$PROJECT_REF|g" $ADMIN_API_CONF + sed -i "s|{{ .tealbaseUrl }}|$tealbase_URL|g" $ADMIN_API_CONF + sed -i "s|{{ .ReportingToken }}|$REPORTING_TOKEN|g" $ADMIN_API_CONF +fi + +# Allow adminapi to write to /etc and manage Postgres configs +chmod g+w /etc +chmod -R 0775 /etc/postgresql +chmod -R 0775 /etc/postgresql-custom +chmod -R 0775 /etc/pgbouncer-custom + +# Update api port +sed -i "s|^port: .*$|port: ${ADMIN_API_PORT:-8085}|g" $ADMIN_API_CONF + +if [ "${DATA_VOLUME_MOUNTPOINT}" ]; then + mkdir -p "${ADMINAPI_CUSTOM_DIR}" + if [ ! -f "${CONFIGURED_FLAG_PATH}" ]; then + echo "Copying existing custom adminapi config from /etc/adminapi to ${ADMINAPI_CUSTOM_DIR}" + cp -R "/etc/adminapi/." "${ADMINAPI_CUSTOM_DIR}/" + fi + + rm -rf "/etc/adminapi" + ln -s "${ADMINAPI_CUSTOM_DIR}" "/etc/adminapi" + chown -R adminapi:adminapi "/etc/adminapi" + + chown -R adminapi:adminapi "${ADMINAPI_CUSTOM_DIR}" + chmod g+wrx "${ADMINAPI_CUSTOM_DIR}" +fi diff --git a/docker/all-in-one/init/configure-autoshutdown.sh b/docker/all-in-one/init/configure-autoshutdown.sh new file mode 100755 index 0000000..66343e5 --- /dev/null +++ b/docker/all-in-one/init/configure-autoshutdown.sh @@ -0,0 +1,21 @@ +#!/bin/bash +set -eou pipefail + +mkdir -p /etc/supa-shutdown + +AUTOSHUTDOWN_CUSTOM_DIR="${DATA_VOLUME_MOUNTPOINT}/etc/supa-shutdown" +if [ "${DATA_VOLUME_MOUNTPOINT}" ]; then + mkdir -p "${AUTOSHUTDOWN_CUSTOM_DIR}" + + AUTOSHUTDOWN_CUSTOM_CONFIG_FILE_PATH="${AUTOSHUTDOWN_CUSTOM_DIR}/shutdown.conf" + if [ ! 
-f "${AUTOSHUTDOWN_CUSTOM_CONFIG_FILE_PATH}" ]; then + echo "Copying existing custom shutdown config from /etc/supa-shutdown to ${AUTOSHUTDOWN_CUSTOM_CONFIG_FILE_PATH}" + cp "/etc/supa-shutdown/shutdown.conf" "${AUTOSHUTDOWN_CUSTOM_CONFIG_FILE_PATH}" + fi + + rm -f "/etc/supa-shutdown/shutdown.conf" + ln -s "${AUTOSHUTDOWN_CUSTOM_CONFIG_FILE_PATH}" "/etc/supa-shutdown/shutdown.conf" + chmod g+wrx "${AUTOSHUTDOWN_CUSTOM_DIR}" + chown -R adminapi:adminapi "/etc/supa-shutdown/shutdown.conf" + chown -R adminapi:adminapi "${AUTOSHUTDOWN_CUSTOM_CONFIG_FILE_PATH}" +fi diff --git a/docker/all-in-one/init/configure-envoy.sh b/docker/all-in-one/init/configure-envoy.sh new file mode 100755 index 0000000..06dbafc --- /dev/null +++ b/docker/all-in-one/init/configure-envoy.sh @@ -0,0 +1,53 @@ +#!/bin/bash +set -eou pipefail + +if [[ "${ENVOY_ENABLED:-}" != "true" ]]; then + exit +fi + +ENVOY_CDS_CONF=/etc/envoy/cds.yaml +ENVOY_LDS_CONF=/etc/envoy/lds.yaml +touch /var/log/services/envoy.log + +/usr/local/bin/configure-shim.sh /dist/envoy /opt/envoy + +if [[ -n "${DATA_VOLUME_MOUNTPOINT}" ]]; then + ENVOY_CUSTOM_DIR="${DATA_VOLUME_MOUNTPOINT}/etc/envoy" + mkdir -p "${ENVOY_CUSTOM_DIR}" + if [[ ! -f "${CONFIGURED_FLAG_PATH}" ]]; then + echo "Copying existing custom envoy config from /etc/envoy/ to ${ENVOY_CUSTOM_DIR}" + cp -R "/etc/envoy/." "${ENVOY_CUSTOM_DIR}/" + fi + + rm -rf "/etc/envoy" + ln -s "${ENVOY_CUSTOM_DIR}" "/etc/envoy" + chown -R envoy:envoy "/etc/envoy" + chmod -R g+w "/etc/envoy" + + chown -R envoy:envoy "${ENVOY_CUSTOM_DIR}" + chmod -R g+w "${ENVOY_CUSTOM_DIR}" + chmod g+rx "${ENVOY_CUSTOM_DIR}" +fi + +if [[ -f "${INIT_PAYLOAD_PATH:-}" ]]; then + echo "init envoy payload" + tar -xzvhf "${INIT_PAYLOAD_PATH}" -C / ./etc/envoy/ + chown -HR envoy:envoy /etc/envoy + chmod -HR g+w /etc/envoy +fi + +# Inject project specific configuration +# "c2VydmljZV9yb2xlOnNlcnZpY2Vfa2V5" is base64-encoded "service_role:service_key". 
+sed -i -e "s|anon_key|${ANON_KEY}|g" \ + -e "s|service_key|${SERVICE_ROLE_KEY}|g" \ + -e "s|tealbase_admin_key|${ADMIN_API_KEY}|g" \ + -e "s|c2VydmljZV9yb2xlOnNlcnZpY2Vfa2V5|$(echo -n "service_role:${SERVICE_ROLE_KEY}" | base64 --wrap 0)|g" \ + "${ENVOY_LDS_CONF}" + +# Update Envoy ports +sed -i "s|port_value: 80$|port_value: ${ENVOY_HTTP_PORT}|g" "${ENVOY_LDS_CONF}" +sed -i "s|port_value: 443$|port_value: ${ENVOY_HTTPS_PORT}|g" "${ENVOY_LDS_CONF}" +sed -i "s|port_value: 3000$|port_value: ${PGRST_SERVER_PORT}|g" "${ENVOY_CDS_CONF}" +sed -i "s|port_value: 3001$|port_value: ${PGRST_ADMIN_SERVER_PORT}|g" "${ENVOY_CDS_CONF}" +sed -i "s|port_value: 8085$|port_value: ${ADMIN_API_PORT}|g" "${ENVOY_CDS_CONF}" +sed -i "s|port_value: 9999$|port_value: ${GOTRUE_API_PORT}|g" "${ENVOY_CDS_CONF}" diff --git a/docker/all-in-one/init/configure-exporter.sh b/docker/all-in-one/init/configure-exporter.sh new file mode 100755 index 0000000..93498c4 --- /dev/null +++ b/docker/all-in-one/init/configure-exporter.sh @@ -0,0 +1,5 @@ +#!/bin/bash +set -eou pipefail + +touch /var/log/services/exporter.log + diff --git a/docker/all-in-one/init/configure-fail2ban.sh b/docker/all-in-one/init/configure-fail2ban.sh new file mode 100755 index 0000000..39b0a27 --- /dev/null +++ b/docker/all-in-one/init/configure-fail2ban.sh @@ -0,0 +1,6 @@ +#!/bin/bash +set -eou pipefail + +mkdir -p /var/run/fail2ban +touch /var/log/services/fail2ban.log +touch /var/log/postgresql/auth-failures.csv diff --git a/docker/all-in-one/init/configure-gotrue.sh b/docker/all-in-one/init/configure-gotrue.sh new file mode 100755 index 0000000..5fe4ad2 --- /dev/null +++ b/docker/all-in-one/init/configure-gotrue.sh @@ -0,0 +1,40 @@ +#!/bin/bash +set -eou pipefail + +touch /var/log/services/gotrue.log + +GOTRUE_CUSTOM_DIR="${DATA_VOLUME_MOUNTPOINT}/etc/gotrue" +GOTRUE_CUSTOM_CONFIG_FILE_PATH="${DATA_VOLUME_MOUNTPOINT}/etc/gotrue/gotrue.env" + +/usr/local/bin/configure-shim.sh /dist/gotrue /opt/gotrue/gotrue + +if [ 
"${DATA_VOLUME_MOUNTPOINT}" ]; then + mkdir -p "${GOTRUE_CUSTOM_DIR}" + chmod g+wrx "${GOTRUE_CUSTOM_DIR}" + chown adminapi:adminapi "${GOTRUE_CUSTOM_DIR}" + + if [ ! -f "${CONFIGURED_FLAG_PATH}" ]; then + echo "Copying existing GoTrue config from /etc/gotrue.env to ${GOTRUE_CUSTOM_CONFIG_FILE_PATH}" + cp "/etc/gotrue.env" "${GOTRUE_CUSTOM_CONFIG_FILE_PATH}" + fi + + rm -f "/etc/gotrue.env" + ln -s "${GOTRUE_CUSTOM_CONFIG_FILE_PATH}" "/etc/gotrue.env" + chown -R adminapi:adminapi "/etc/gotrue.env" + + chown -R adminapi:adminapi "${GOTRUE_CUSTOM_CONFIG_FILE_PATH}" + chmod g+rx "${GOTRUE_CUSTOM_CONFIG_FILE_PATH}" +fi + +if [ -f "${INIT_PAYLOAD_PATH:-}" ]; then + if [ ! -f "${CONFIGURED_FLAG_PATH}" ]; then + echo "init gotrue payload" + tar -h --overwrite -xzvf "$INIT_PAYLOAD_PATH" -C / ./etc/gotrue.env + chown -R adminapi:adminapi /etc/gotrue.env + fi +else + sed -i "s|api_external_url|${API_EXTERNAL_URL:-http://localhost}|g" /etc/gotrue.env + sed -i "s|gotrue_api_host|${GOTRUE_API_HOST:-0.0.0.0}|g" /etc/gotrue.env + sed -i "s|gotrue_site_url|$GOTRUE_SITE_URL|g" /etc/gotrue.env + sed -i "s|gotrue_jwt_secret|$JWT_SECRET|g" /etc/gotrue.env +fi diff --git a/docker/all-in-one/init/configure-kong.sh b/docker/all-in-one/init/configure-kong.sh new file mode 100755 index 0000000..9ca16be --- /dev/null +++ b/docker/all-in-one/init/configure-kong.sh @@ -0,0 +1,48 @@ +#!/bin/bash +set -eou pipefail + +KONG_CONF=/etc/kong/kong.yml +KONG_CUSTOM_DIR="${DATA_VOLUME_MOUNTPOINT}/etc/kong" + +touch /var/log/services/kong.log + +if [ -f "${INIT_PAYLOAD_PATH:-}" ]; then + echo "init kong payload" + # Setup ssl termination + tar -xzvf "$INIT_PAYLOAD_PATH" -C / ./etc/kong/ + chown -R adminapi:adminapi ./etc/kong/kong.yml + chown -R adminapi:adminapi ./etc/kong/*pem + echo "ssl_cipher_suite = intermediate" >> /etc/kong/kong.conf + echo "ssl_cert = /etc/kong/fullChain.pem" >> /etc/kong/kong.conf + echo "ssl_cert_key = /etc/kong/privKey.pem" >> /etc/kong/kong.conf +else + # Default gateway 
config + export KONG_DNS_ORDER=LAST,A,CNAME + export KONG_PROXY_ERROR_LOG=syslog:server=unix:/dev/log + export KONG_ADMIN_ERROR_LOG=syslog:server=unix:/dev/log +fi + +# Inject project specific configuration +sed -i -e "s|anon_key|$ANON_KEY|g" \ + -e "s|service_key|$SERVICE_ROLE_KEY|g" \ + -e "s|tealbase_admin_key|$ADMIN_API_KEY|g" \ + $KONG_CONF + +# Update kong ports +sed -i "s|:80 |:$KONG_HTTP_PORT |g" /etc/kong/kong.conf +sed -i "s|:443 |:$KONG_HTTPS_PORT |g" /etc/kong/kong.conf + +if [ "${DATA_VOLUME_MOUNTPOINT}" ]; then + mkdir -p "${KONG_CUSTOM_DIR}" + if [ ! -f "${CONFIGURED_FLAG_PATH}" ]; then + echo "Copying existing custom kong config from /etc/kong/kong.yml to ${KONG_CUSTOM_DIR}" + cp /etc/kong/kong.yml "${KONG_CUSTOM_DIR}/kong.yml" + fi + + rm -rf "/etc/kong/kong.yml" + ln -s "${KONG_CUSTOM_DIR}/kong.yml" "/etc/kong/kong.yml" + chown -R adminapi:adminapi "/etc/kong/kong.yml" + + chown -R adminapi:adminapi "${KONG_CUSTOM_DIR}" + chmod g+wrx "${KONG_CUSTOM_DIR}" +fi \ No newline at end of file diff --git a/docker/all-in-one/init/configure-pg_egress_collect.sh b/docker/all-in-one/init/configure-pg_egress_collect.sh new file mode 100755 index 0000000..17051ab --- /dev/null +++ b/docker/all-in-one/init/configure-pg_egress_collect.sh @@ -0,0 +1,14 @@ +#!/bin/bash +set -eou pipefail + +PG_EGRESS_COLLECT_FILE=/tmp/pg_egress_collect.txt + +if [ "${DATA_VOLUME_MOUNTPOINT:-}" != "" ]; then + if [ ! 
-L $PG_EGRESS_COLLECT_FILE ]; then + if [ -f $PG_EGRESS_COLLECT_FILE ]; then + rm -f $PG_EGRESS_COLLECT_FILE + fi + touch "${DATA_VOLUME_MOUNTPOINT}/pg_egress_collect.txt" + ln -s "${DATA_VOLUME_MOUNTPOINT}/pg_egress_collect.txt" $PG_EGRESS_COLLECT_FILE + fi +fi diff --git a/docker/all-in-one/init/configure-pgbouncer.sh b/docker/all-in-one/init/configure-pgbouncer.sh new file mode 100755 index 0000000..2d66203 --- /dev/null +++ b/docker/all-in-one/init/configure-pgbouncer.sh @@ -0,0 +1,46 @@ +#!/bin/bash +set -eou pipefail + +touch /var/log/services/pgbouncer.log + +mkdir -p /var/run/pgbouncer +chown pgbouncer:postgres /var/run/pgbouncer + +PGBOUNCER_CONF=/etc/pgbouncer/pgbouncer.ini + +if [ -f "${INIT_PAYLOAD_PATH:-}" ]; then + echo "init pgbouncer payload" + sed -i -E "s|^# (%include /etc/pgbouncer-custom/ssl-config.ini)$|\1|g" $PGBOUNCER_CONF + + tar -xzvf "$INIT_PAYLOAD_PATH" -C /etc/pgbouncer/ --strip-components 3 ./etc/pgbouncer/userlist.txt + chown -R pgbouncer:pgbouncer /etc/pgbouncer/userlist.txt +fi + +if [ "${DATA_VOLUME_MOUNTPOINT}" ]; then + /opt/tealbase-admin-api optimize pgbouncer --destination-config-file-path /etc/pgbouncer-custom/generated-optimizations.ini + + # Preserve pgbouncer configs across restarts + PGBOUNCER_DIR="${DATA_VOLUME_MOUNTPOINT}/etc/pgbouncer" + PGBOUNCER_CUSTOM_DIR="${DATA_VOLUME_MOUNTPOINT}/etc/pgbouncer-custom" + + mkdir -p "${PGBOUNCER_DIR}" + mkdir -p "${PGBOUNCER_CUSTOM_DIR}" + + if [ ! -f "${CONFIGURED_FLAG_PATH}" ]; then + echo "Copying existing custom pgbouncer config from /etc/pgbouncer-custom to ${PGBOUNCER_CUSTOM_DIR}" + cp -R "/etc/pgbouncer-custom/." "${PGBOUNCER_CUSTOM_DIR}/" + cp -R "/etc/pgbouncer/." 
"${PGBOUNCER_DIR}/" + fi + + rm -rf "/etc/pgbouncer-custom" + ln -s "${PGBOUNCER_CUSTOM_DIR}" "/etc/pgbouncer-custom" + chown -R pgbouncer:pgbouncer "/etc/pgbouncer-custom" + chown -R pgbouncer:pgbouncer "${PGBOUNCER_CUSTOM_DIR}" + chmod -R g+wrx "${PGBOUNCER_CUSTOM_DIR}" + + rm -rf "/etc/pgbouncer" + ln -s "${PGBOUNCER_DIR}" "/etc/pgbouncer" + chown -R pgbouncer:pgbouncer "/etc/pgbouncer" + chown -R pgbouncer:pgbouncer "${PGBOUNCER_DIR}" + chmod -R g+wrx "${PGBOUNCER_DIR}" +fi diff --git a/docker/all-in-one/init/configure-postgrest.sh b/docker/all-in-one/init/configure-postgrest.sh new file mode 100755 index 0000000..178e64f --- /dev/null +++ b/docker/all-in-one/init/configure-postgrest.sh @@ -0,0 +1,41 @@ +#!/bin/bash +set -eou pipefail + +touch /var/log/services/postgrest.log + +# Default in-database config +sed -i "s|pgrst_server_port|${PGRST_SERVER_PORT:-3000}|g" /etc/postgrest/base.conf +sed -i "s|pgrst_admin_server_port|${PGRST_ADMIN_SERVER_PORT:-3001}|g" /etc/postgrest/base.conf +sed -i "s|pgrst_db_schemas|${PGRST_DB_SCHEMAS:-public,storage,graphql_public}|g" /etc/postgrest/base.conf +sed -i "s|pgrst_db_extra_search_path|${PGRST_DB_SCHEMAS:-public,extensions}|g" /etc/postgrest/base.conf +sed -i "s|pgrst_db_anon_role|${PGRST_DB_ANON_ROLE:-anon}|g" /etc/postgrest/base.conf +sed -i "s|pgrst_jwt_secret|$JWT_SECRET|g" /etc/postgrest/base.conf + +/usr/local/bin/configure-shim.sh /dist/postgrest /opt/postgrest + +if [ -f "${INIT_PAYLOAD_PATH:-}" ]; then + echo "init postgrest payload" + tar -xzvf "$INIT_PAYLOAD_PATH" -C / ./etc/postgrest/base.conf + chown -R postgrest:postgrest /etc/postgrest +fi + +if [ "${DATA_VOLUME_MOUNTPOINT}" ]; then + POSTGREST_CUSTOM_DIR="${DATA_VOLUME_MOUNTPOINT}/etc/postgrest" + mkdir -p "${POSTGREST_CUSTOM_DIR}" + if [ ! -f "${CONFIGURED_FLAG_PATH}" ]; then + echo "Copying existing custom PostgREST config from /etc/postgrest/ to ${POSTGREST_CUSTOM_DIR}" + cp -R "/etc/postgrest/." 
"${POSTGREST_CUSTOM_DIR}/" + fi + + rm -rf "/etc/postgrest" + ln -s "${POSTGREST_CUSTOM_DIR}" "/etc/postgrest" + chown -R postgrest:postgrest "/etc/postgrest" + + chown -R postgrest:postgrest "${POSTGREST_CUSTOM_DIR}" + chmod g+wrx "${POSTGREST_CUSTOM_DIR}" +fi + +PGRST_CONF=/etc/postgrest/generated.conf + +/opt/tealbase-admin-api optimize postgrest --destination-config-file-path $PGRST_CONF +cat /etc/postgrest/base.conf >> $PGRST_CONF diff --git a/docker/all-in-one/init/configure-vector.sh b/docker/all-in-one/init/configure-vector.sh new file mode 100755 index 0000000..9177a0f --- /dev/null +++ b/docker/all-in-one/init/configure-vector.sh @@ -0,0 +1,56 @@ +#!/bin/bash +set -eou pipefail + +VECTOR_CONF=/etc/vector/vector.yaml +touch /var/log/services/vector.log + +if [ -f "${INIT_PAYLOAD_PATH:-}" ]; then + echo "init vector payload" + tar -xzvf "$INIT_PAYLOAD_PATH" -C /etc/vector/ --strip-components 2 ./tmp/init.json + PROJECT_REF=$(jq -r '.["project_ref"]' /etc/vector/init.json) + LOGFLARE_DB_SOURCE=$(jq -r '.["logflare_db_source"]' /etc/vector/init.json) + LOGFLARE_GOTRUE_SOURCE=$(jq -r '.["logflare_gotrue_source"]' /etc/vector/init.json) + LOGFLARE_POSTGREST_SOURCE=$(jq -r '.["logflare_postgrest_source"]' /etc/vector/init.json) + LOGFLARE_PGBOUNCER_SOURCE=$(jq -r '.["logflare_pgbouncer_source"]' /etc/vector/init.json) + LOGFLARE_PITR_ERRORS_SOURCE=$(jq -r '.["logflare_pitr_errors_source"]' /etc/vector/init.json) + LOGFLARE_API_KEY=$(jq -r '.["logflare_api_key"]' /etc/vector/init.json) +fi + +# Exit early if not starting logflare +if [ -z "${LOGFLARE_API_KEY:-}" ]; then + echo "Skipped starting vector: missing LOGFLARE_API_KEY" + exit 0 +fi + +# Add vector to support both base-services and services config +cat <<EOF > /etc/supervisor/services/vector.conf + +[program:vector] +command=/usr/bin/vector --config-yaml /etc/vector/vector.yaml +user=root +autorestart=true +stdout_logfile=/var/log/services/vector.log +redirect_stderr=true +stdout_logfile_maxbytes=10MB
+priority=250 + +EOF + +VECTOR_API_PORT=${VECTOR_API_PORT:-9001} +PROJECT_REF=${PROJECT_REF:-default} +LOGFLARE_HOST=${LOGFLARE_HOST:-api.logflare.app} +LOGFLARE_DB_SOURCE=${LOGFLARE_DB_SOURCE:-postgres.logs} +LOGFLARE_GOTRUE_SOURCE=${LOGFLARE_GOTRUE_SOURCE:-gotrue.logs.prod} +LOGFLARE_POSTGREST_SOURCE=${LOGFLARE_POSTGREST_SOURCE:-postgREST.logs.prod} +LOGFLARE_PGBOUNCER_SOURCE=${LOGFLARE_PGBOUNCER_SOURCE:-pgbouncer.logs.prod} +LOGFLARE_PITR_ERRORS_SOURCE=${LOGFLARE_PITR_ERRORS_SOURCE:-pitr_errors.logs.prod} + +sed -i "s|{{ .ApiPort }}|$VECTOR_API_PORT|g" $VECTOR_CONF +sed -i "s|{{ .ProjectRef }}|$PROJECT_REF|g" $VECTOR_CONF +sed -i "s|{{ .LogflareHost }}|$LOGFLARE_HOST|g" $VECTOR_CONF +sed -i "s|{{ .ApiKey }}|$LOGFLARE_API_KEY|g" $VECTOR_CONF +sed -i "s|{{ .DbSource }}|$LOGFLARE_DB_SOURCE|g" $VECTOR_CONF +sed -i "s|{{ .GotrueSource }}|$LOGFLARE_GOTRUE_SOURCE|g" $VECTOR_CONF +sed -i "s|{{ .PostgrestSource }}|$LOGFLARE_POSTGREST_SOURCE|g" $VECTOR_CONF +sed -i "s|{{ .PgbouncerSource }}|$LOGFLARE_PGBOUNCER_SOURCE|g" $VECTOR_CONF +sed -i "s|{{ .PitrErrorsSource }}|$LOGFLARE_PITR_ERRORS_SOURCE|g" $VECTOR_CONF diff --git a/docker/all-in-one/init/start-kong.sh b/docker/all-in-one/init/start-kong.sh new file mode 100755 index 0000000..7418d26 --- /dev/null +++ b/docker/all-in-one/init/start-kong.sh @@ -0,0 +1,7 @@ +#!/bin/bash +set -eou pipefail + +# In the event of a restart, properly stop any running kong instances first +# Confirmed by running /usr/local/bin/kong health +trap '/usr/local/bin/kong quit' EXIT +/usr/local/bin/kong start diff --git a/docker/all-in-one/opt/pg_egress_collect/pg_egress_collect.pl b/docker/all-in-one/opt/pg_egress_collect/pg_egress_collect.pl new file mode 100644 index 0000000..2acc98a --- /dev/null +++ b/docker/all-in-one/opt/pg_egress_collect/pg_egress_collect.pl @@ -0,0 +1,126 @@ +#!/usr/bin/env perl + +# This script receives tcpdump output through STDIN and does: +# +# 1.
extract outgoing TCP packet length on the 1st non-loopback device port 5432 and 6543 +# 2. sum the length up to one minute +# 3. save the total length to file (default is /tmp/pg_egress_collect.txt) per minute +# +# Usage: +# +# tcpdump -s 128 -Q out -nn -tt -vv -p -l 'tcp and (port 5432 or port 6543)' | perl pg_egress_collect.pl -o /tmp/output.txt +# + +use POSIX; +use List::Util qw(sum); +use Getopt::Long 'HelpMessage'; +use IO::Async::Loop; +use IO::Async::Stream; +use IO::Async::Timer::Periodic; + +use strict; +use warnings; + +# total captured packets length in a time frame +my $captured_len = 0; + +# extract tcp packet length captured by tcpdump +# +# Sample input lines: +# +# 1674013833.940253 IP (tos 0x0, ttl 64, id 0, offset 0, flags [DF], proto TCP (6), length 60) +# 10.112.101.122.5432 > 220.235.16.223.62599: Flags [S.], cksum 0x5de3 (incorrect -> 0x63da), seq 2314200657, ack 2071735457, win 62643, options [mss 8961,sackOK,TS val 3358598837 ecr 1277499190,nop,wscale 7], length 0 +# 1674013833.989257 IP (tos 0x0, ttl 64, id 24975, offset 0, flags [DF], proto TCP (6), length 52) +# 10.112.101.122.5432 > 220.235.16.223.62599: Flags [.], cksum 0x5ddb (incorrect -> 0xa25b), seq 1, ack 9, win 490, options [nop,nop,TS val 3358598885 ecr 1277499232], length 0 +sub extract_packet_length { + my ($line) = @_; + + #print("debug: >> " .
$line); + + if ($line =~ /^\s+\d+\.\d+\.\d+\.\d+\..*, length (\d+)$/) { + # extract tcp packet length and add it up + my $len = $1; + $captured_len += $len; + } +} + +# write total length to file +sub write_file { + my ($output) = @_; + + my $now = strftime "%F %T", localtime time; + print "[$now] write captured len $captured_len to $output\n"; + + open(my $fh, "+>", $output) or die "Could not open file '$output' $!"; + print $fh "$captured_len"; + close($fh) or die "Could not write file '$output' $!"; +} + +# main +sub main { + # get arguments + GetOptions( + "interval:i" => \(my $interval = 60), + "output:s" => \(my $output = "/tmp/pg_egress_collect.txt"), + "help" => sub { HelpMessage(0) }, + ) or HelpMessage(1); + + my $loop = IO::Async::Loop->new; + + # tcpdump extractor + my $extractor = IO::Async::Stream->new_for_stdin( + on_read => sub { + my ($self, $buffref, $eof) = @_; + + while($$buffref =~ s/^(.*\n)//) { + my $line = $1; + extract_packet_length($line); + } + + return 0; + }, + ); + + # schedule file writer per minute + my $writer = IO::Async::Timer::Periodic->new( + interval => $interval, + on_tick => sub { + write_file($output); + + # reset total captured length + $captured_len = 0; + }, + ); + $writer->start; + + print "pg_egress_collect started, egress data will be saved to $output at interval $interval seconds.\n"; + + $loop->add($extractor); + $loop->add($writer); + $loop->run; +} + +main(); + +__END__ + +=head1 NAME + +pg_egress_collect.pl - collect egress from tcpdump output, extract TCP packet length, aggregate in specified interval and write to output file. 
+ +=head1 SYNOPSIS + +pg_egress_collect.pl [-i interval] [-o output] + +Options: + + -i, --interval interval + output file write interval, in seconds, default is 60 seconds + + -o, --output output + output file path, default is /tmp/pg_egress_collect.txt + + -h, --help + print this help message + +=cut diff --git a/docker/all-in-one/opt/postgres_exporter/queries.yml b/docker/all-in-one/opt/postgres_exporter/queries.yml new file mode 100644 index 0000000..c9652e3 --- /dev/null +++ b/docker/all-in-one/opt/postgres_exporter/queries.yml @@ -0,0 +1,345 @@ +set_timeout: + master: true + cache_seconds: 5 + query: "set statement_timeout to '20s'" + +pg_database: + master: true + cache_seconds: 60 + query: "SELECT SUM(pg_database_size(pg_database.datname)) / (1024 * 1024) as size_mb FROM pg_database" + metrics: + - size_mb: + usage: "GAUGE" + description: "Disk space used by the database" + +pg_stat_bgwriter: + master: true + cache_seconds: 60 + query: | + select checkpoints_timed as checkpoints_timed_total, + checkpoints_req as checkpoints_req_total, + checkpoint_write_time as checkpoint_write_time_total, + checkpoint_sync_time as checkpoint_sync_time_total, + buffers_checkpoint as buffers_checkpoint_total, + buffers_clean as buffers_clean_total, + maxwritten_clean as maxwritten_clean_total, + buffers_backend as buffers_backend_total, + buffers_backend_fsync as buffers_backend_fsync_total, + buffers_alloc as buffers_alloc_total, + stats_reset + from pg_stat_bgwriter + metrics: + - checkpoints_timed_total: + usage: "COUNTER" + description: "Scheduled checkpoints performed" + - checkpoints_req_total: + usage: "COUNTER" + description: "Requested checkpoints performed" + - checkpoint_write_time_total: + usage: "COUNTER" + description: "Time spent writing checkpoint files to disk" + - checkpoint_sync_time_total: + usage: "COUNTER" + description: "Time spent synchronizing checkpoint files to disk" + - buffers_checkpoint_total: + usage: "COUNTER" + description: "Buffers written 
during checkpoints" + - buffers_clean_total: + usage: "COUNTER" + description: "Buffers written by bg writter" + - maxwritten_clean_total: + usage: "COUNTER" + description: "Number of times bg writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend_total: + usage: "COUNTER" + description: "Buffers written directly by a backend" + - buffers_backend_fsync_total: + usage: "COUNTER" + description: "fsync calls executed by a backend directly" + - buffers_alloc_total: + usage: "COUNTER" + description: "Buffers allocated" + - stats_reset: + usage: "COUNTER" + description: "Most recent stat reset time" + +pg_stat_database: + master: true + cache_seconds: 60 + query: | + SELECT sum(numbackends) as num_backends, + sum(xact_commit) as xact_commit_total, + sum(xact_rollback) as xact_rollback_total, + sum(blks_read) as blks_read_total, + sum(blks_hit) as blks_hit_total, + sum(tup_returned) as tup_returned_total, + sum(tup_fetched) as tup_fetched_total, + sum(tup_inserted) as tup_inserted_total, + sum(tup_updated) as tup_updated_total, + sum(tup_deleted) as tup_deleted_total, + sum(conflicts) as conflicts_total, + sum(temp_files) as temp_files_total, + sum(temp_bytes) as temp_bytes_total, + sum(deadlocks) as deadlocks_total, + max(stats_reset) as most_recent_reset + FROM pg_stat_database + metrics: + - num_backends: + usage: "GAUGE" + description: "The number of active backends" + - xact_commit_total: + usage: "COUNTER" + description: "Transactions committed" + - xact_rollback_total: + usage: "COUNTER" + description: "Transactions rolled back" + - blks_read_total: + usage: "COUNTER" + description: "Number of disk blocks read" + - blks_hit_total: + usage: "COUNTER" + description: "Disk blocks found in buffer cache" + - tup_returned_total: + usage: "COUNTER" + description: "Rows returned by queries" + - tup_fetched_total: + usage: "COUNTER" + description: "Rows fetched by queries" + - tup_inserted_total: + usage: "COUNTER" + description: "Rows 
inserted" + - tup_updated_total: + usage: "COUNTER" + description: "Rows updated" + - tup_deleted_total: + usage: "COUNTER" + description: "Rows deleted" + - conflicts_total: + usage: "COUNTER" + description: "Queries canceled due to conflicts with recovery" + - temp_files_total: + usage: "COUNTER" + description: "Temp files created by queries" + - temp_bytes_total: + usage: "COUNTER" + description: "Temp data written by queries" + - deadlocks_total: + usage: "COUNTER" + description: "Deadlocks detected" + - most_recent_reset: + usage: "COUNTER" + description: "The most recent time one of the databases had its statistics reset" + +pg_stat_database_conflicts: + master: true + cache_seconds: 60 + query: | + SELECT sum(confl_tablespace) as confl_tablespace_total, + sum(confl_lock) as confl_lock_total, + sum(confl_snapshot) as confl_snapshot_total, + sum(confl_bufferpin) as confl_bufferpin_total, + sum(confl_deadlock) as confl_deadlock_total + from pg_stat_database_conflicts + metrics: + - confl_tablespace_total: + usage: "COUNTER" + description: "Queries cancelled due to dropped tablespaces" + - confl_lock_total: + usage: "COUNTER" + description: "Queries cancelled due to lock timeouts" + - confl_snapshot_total: + usage: "COUNTER" + description: "Queries cancelled due to old snapshots" + - confl_bufferpin_total: + usage: "COUNTER" + description: "Queries cancelled due to pinned buffers" + - confl_deadlock_total: + usage: "COUNTER" + description: "Queries cancelled due to deadlocks" + +pg_stat_statements: + master: true + cache_seconds: 60 + query: "SELECT sum(calls) as total_queries, sum(total_exec_time / 1000) as total_time_seconds FROM extensions.pg_stat_statements t1 JOIN pg_database t3 ON (t1.dbid=t3.oid)" + metrics: + - total_queries: + usage: "COUNTER" + description: "Number of times executed" + - total_time_seconds: + usage: "COUNTER" + description: "Total time spent, in seconds" + +pg_ls_archive_statusdir: + master: true + cache_seconds: 60 + query: "select 
count(*) as wal_pending_count from pg_ls_archive_statusdir() where name like '%.ready'" + metrics: + - wal_pending_count: + usage: "COUNTER" + description: "Number of not yet archived WAL files" + +auth_users: + master: true + cache_seconds: 21600 # 6 hours + query: "select count(id) as user_count from auth.users" + metrics: + - user_count: + usage: "GAUGE" + description: "Number of users in the project db" + +realtime: + master: true + cache_seconds: 60 + query: "select count(1) as postgres_changes_total_subscriptions, count(distinct subscription_id) as postgres_changes_client_subscriptions from realtime.subscription" + metrics: + - postgres_changes_total_subscriptions: + usage: "GAUGE" + description: "Total subscription records listening for Postgres changes" + - postgres_changes_client_subscriptions: + usage: "GAUGE" + description: "Client subscriptions listening for Postgres changes" + +replication: + master: true + cache_seconds: 60 + query: "SELECT slot_name, pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn) AS realtime_lag_bytes, active AS realtime_slot_status FROM pg_replication_slots WHERE slot_name LIKE ANY (ARRAY['realtime', 'realtime_rls', 'tealbase_realtime_replication_slot%'])" + metrics: + - realtime_slot_name: + usage: "LABEL" + description: "Replication Slot Name for Realtime" + - realtime_lag_bytes: + usage: "GAUGE" + description: "Replication Lag for Realtime" + - realtime_slot_status: + usage: "GAUGE" + description: "Replication Slot Active Status" + +replication_slots: + master: true + cache_seconds: 60 + query: "SELECT max(pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn)) AS max_lag_bytes FROM pg_replication_slots" + metrics: + - max_lag_bytes: + usage: "GAUGE" + description: "Max Replication Lag" + +storage: + master: true + cache_seconds: 60 + query: "select sum(size) / (1024 * 1024) as storage_size_mb from storage.get_size_by_bucket()" + metrics: + - storage_size_mb: + usage: "GAUGE" + description: "The total size used for all storage 
buckets, in mb" + +tealbase_usage_metrics: + # pg_stat_statements collects metrics from all databases on the cluster, so querying just the master db should be sufficient + master: true + cache_seconds: 60 + query: | + select sum(calls) as user_queries_total + from extensions.pg_stat_statements + where query <> 'SELECT version()' + and query <> 'BEGIN ISOLATION LEVEL READ COMMITTED READ ONLY' + and query <> 'COMMIT' + and query <> 'SET client_encoding = ''UTF8''' + and query <> 'SET client_min_messages TO WARNING' + and query <> 'LISTEN "ddl_command_end"' + and query <> 'LISTEN "pgrst"' + and query <> 'SELECT * FROM migrations ORDER BY id' + and query <> 'SELECT COUNT(*) = $1 FROM pg_publication WHERE pubname = $2' + and query <> 'SELECT COUNT(*) >= $1 FROM pg_replication_slots WHERE slot_name = $2' + and query <> 'SELECT EXISTS (SELECT schema_migrations.* FROM schema_migrations AS schema_migrations WHERE version = $1)' + and query <> 'SELECT current_setting($1)::integer, current_setting($2)' + and query <> 'SELECT pg_advisory_unlock($1)' + and query <> 'SELECT pg_try_advisory_lock($1)' + and query <> 'SELECT slot_name, pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn) FROM pg_replication_slots' + and query <> 'SELECT typname::text, oid::int4, typarray::int4 FROM pg_type WHERE typname IN ($1,$2) ORDER BY typname' + and query <> 'select * from schema_migrations' + and query <> 'set local schema ''''' + and query <> 'SELECT SUM(pg_database_size(pg_database.datname)) / ($1 * $2) as size_mb FROM pg_database' + and query not like 'select set_config(%' + and query not like '%LATERAL (SELECT * FROM pg_namespace WHERE pg_namespace.oid = other.relnamespace) AS ns2%' + and query not like '%LEFT JOIN (pg_collation co JOIN pg_namespace nco ON co.collnamespace = nco.oid)%' + and query not like '%LEFT JOIN pg_description as d ON d.objoid = p.oid%' + and query not like '%LEFT JOIN pg_description as d on d.objoid = c.oid%' + and query not like '%-- CTE to replace 
information_schema.key_column_usage to remove owner limit%' + and query not like '%join pg_namespace sch on sch.oid = tbl.relnamespace%' + and query not like '%select setdatabase, unnest(setconfig) as setting from pg_catalog.pg_db_role_setting%' + and lower(trim(regexp_replace(regexp_replace(query, E'\n', ' ', 'g'), E'\\s+', ' ', 'g'))) not in + ('with rows as ( select id from net.http_request_queue order by id limit $1 ) delete from net.http_request_queue q using rows where q.id = rows.id returning q.id, q.method, q.url, timeout_milliseconds, array(select key || $2 || value from jsonb_each_text(q.headers)), q.body', + 'with rows as ( select ctid from net._http_response where created < now() - $1 order by created limit $2 ) delete from net._http_response r using rows where r.ctid = rows.ctid', + -- version of query above before https://github.com/tealbase/pg_net/commit/eaa721e11761da07d01fc04b5114c90cd7973b83 + 'with rows as ( select ctid from net._http_response where created < $1 - $2 order by created limit $3 ) delete from net._http_response r using rows where r.ctid = rows.ctid', + 'select exists ( select $2 from pg_catalog.pg_class c where c.relname = $1 and c.relkind = $3 )', + 'select description from pg_namespace n left join pg_description d on d.objoid = n.oid where n.nspname = $1', + 'select concat(schemaname, $1, tablename, $2, policyname) as policy from pg_policies order by 1 desc', + 'select concat(table_schema, $1, table_name) as table from information_schema.tables where table_schema not like $2 and table_schema <> $3 order by 1 desc', + 'select concat(conrelid::regclass, $1, conname) as fk from pg_constraint where contype = $2 order by 1 desc', + 'select datname from pg_database where datallowconn = $1 order by oid asc', + 'select count(*) > $1 as pgsodium_enabled from pg_extension where extname = $2', + 'select count(*) > $1 as keys_created from pgsodium.key') + and query <> 'insert into schema_migrations (version) values ($1)' + -- temporarily 
included for older versions of pg_net + and query not like 'SELECT%FROM net.http_request_queue%' + and query not like 'DELETE FROM net.http_request_queue%' + and query not like '%source: project usage%' + and query not like 'select name, setting from pg_settings where name in ($1, $2)%' + and userid not in (select oid from pg_roles where rolname in ('authenticator', 'pgbouncer', 'tealbase_admin', 'tealbase_storage_admin')) + metrics: + - user_queries_total: + usage: "COUNTER" + description: "The total number of user queries executed" + +pg_settings: + master: true + cache-seconds: 30 + query: "SELECT COUNT(*) as default_transaction_read_only FROM pg_settings WHERE name = 'default_transaction_read_only' AND setting = 'on';" + metrics: + - default_transaction_read_only: + usage: "GAUGE" + description: "Default transaction mode set to read only" + +pg_status: + master: true + cache-seconds: 60 + query: "SELECT CASE WHEN pg_is_in_recovery() = false THEN 0 ELSE 1 END as in_recovery" + metrics: + - in_recovery: + usage: "GAUGE" + description: "Database in recovery" + +# specific to read replicas +# for primary databases, all columns will always return a value of 0 +# --- +# for checking replication lag (physical_replication_lag_seconds) +# we firstly check if the replica is connected to its primary +# and if last WAL received is equivalent to last WAL replayed +# if so return 0 +# otherwise calculate replication lag as per usual +physical_replication_lag: + master: true + cache-seconds: 60 + query: | + select + case + when (select count(*) from pg_stat_wal_receiver) = 1 and pg_last_wal_receive_lsn() = pg_last_wal_replay_lsn() + then 0 + else coalesce(extract(epoch from now() - pg_last_xact_replay_timestamp()),0) + end as physical_replication_lag_seconds, + case + when pg_is_in_recovery() + then case when pg_is_wal_replay_paused() = false then 0 else 1 end + else 0 + end as is_wal_replay_paused, + (select count(*) from pg_stat_wal_receiver) as is_connected_to_primary + 
metrics: + - physical_replication_lag_seconds: + usage: "GAUGE" + description: "Physical replication lag in seconds" + - is_wal_replay_paused: + usage: "GAUGE" + description: "Check if WAL replay has been paused" + - is_connected_to_primary: + usage: "GAUGE" + description: "Monitor connection to the primary database" diff --git a/docker/all-in-one/postgres-entrypoint.sh b/docker/all-in-one/postgres-entrypoint.sh new file mode 100755 index 0000000..2e4d657 --- /dev/null +++ b/docker/all-in-one/postgres-entrypoint.sh @@ -0,0 +1,358 @@ +#!/usr/bin/env bash + +# Downloaded from https://github.com/docker-library/postgres/raw/master/15/bullseye/docker-entrypoint.sh +# Changes needed to make adminapi able to read the recovery.signal file: +# -44: chmod 00700 "$PGDATA" || : +# +44: chmod 00750 "$PGDATA" || : +# +# We're already including the original file in the base postgres Docker image. + +set -Eeo pipefail + +# usage: file_env VAR [DEFAULT] +# ie: file_env 'XYZ_DB_PASSWORD' 'example' +# (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of +# "$XYZ_DB_PASSWORD" from a file, especially for Docker's secrets feature) +file_env() { + local var="$1" + local fileVar="${var}_FILE" + local def="${2:-}" + if [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then + printf >&2 'error: both %s and %s are set (but are exclusive)\n' "$var" "$fileVar" + exit 1 + fi + local val="$def" + if [ "${!var:-}" ]; then + val="${!var}" + elif [ "${!fileVar:-}" ]; then + val="$(< "${!fileVar}")" + fi + export "$var"="$val" + unset "$fileVar" +} + +# check to see if this file is being run or sourced from another script +_is_sourced() { + # https://unix.stackexchange.com/a/215279 + [ "${#FUNCNAME[@]}" -ge 2 ] \ + && [ "${FUNCNAME[0]}" = '_is_sourced' ] \ + && [ "${FUNCNAME[1]}" = 'source' ] +} + +# used to create initial postgres directories and if run as root, ensure ownership to the "postgres" user +docker_create_db_directories() { + local user; user="$(id -u)" + + mkdir -p "$PGDATA" + # ignore 
failure since there are cases where we can't chmod (and PostgreSQL might fail later anyhow - it's picky about permissions of this directory) + chmod 00750 "$PGDATA" || : + + # ignore failure since it will be fine when using the image provided directory; see also https://github.com/docker-library/postgres/pull/289 + mkdir -p /var/run/postgresql || : + chmod 03775 /var/run/postgresql || : + + # Create the transaction log directory before initdb is run so the directory is owned by the correct user + if [ -n "${POSTGRES_INITDB_WALDIR:-}" ]; then + mkdir -p "$POSTGRES_INITDB_WALDIR" + if [ "$user" = '0' ]; then + find "$POSTGRES_INITDB_WALDIR" \! -user postgres -exec chown postgres '{}' + + fi + chmod 700 "$POSTGRES_INITDB_WALDIR" + fi + + # allow the container to be started with `--user` + if [ "$user" = '0' ]; then + find "$PGDATA" \! -user postgres -exec chown postgres '{}' + + find /var/run/postgresql \! -user postgres -exec chown postgres '{}' + + fi +} + +# initialize empty PGDATA directory with new database via 'initdb' +# arguments to `initdb` can be passed via POSTGRES_INITDB_ARGS or as arguments to this function +# `initdb` automatically creates the "postgres", "template0", and "template1" dbnames +# this is also where the database user is created, specified by `POSTGRES_USER` env +docker_init_database_dir() { + # "initdb" is particular about the current user existing in "/etc/passwd", so we use "nss_wrapper" to fake that if necessary + # see https://github.com/docker-library/postgres/pull/253, https://github.com/docker-library/postgres/issues/359, https://cwrap.org/nss_wrapper.html + local uid; uid="$(id -u)" + if ! 
getent passwd "$uid" &> /dev/null; then + # see if we can find a suitable "libnss_wrapper.so" (https://salsa.debian.org/sssd-team/nss-wrapper/-/commit/b9925a653a54e24d09d9b498a2d913729f7abb15) + local wrapper + for wrapper in {/usr,}/lib{/*,}/libnss_wrapper.so; do + if [ -s "$wrapper" ]; then + NSS_WRAPPER_PASSWD="$(mktemp)" + NSS_WRAPPER_GROUP="$(mktemp)" + export LD_PRELOAD="$wrapper" NSS_WRAPPER_PASSWD NSS_WRAPPER_GROUP + local gid; gid="$(id -g)" + printf 'postgres:x:%s:%s:PostgreSQL:%s:/bin/false\n' "$uid" "$gid" "$PGDATA" > "$NSS_WRAPPER_PASSWD" + printf 'postgres:x:%s:\n' "$gid" > "$NSS_WRAPPER_GROUP" + break + fi + done + fi + + if [ -n "${POSTGRES_INITDB_WALDIR:-}" ]; then + set -- --waldir "$POSTGRES_INITDB_WALDIR" "$@" + fi + + # --pwfile refuses to handle a properly-empty file (hence the "\n"): https://github.com/docker-library/postgres/issues/1025 + eval 'initdb --username="$POSTGRES_USER" --pwfile=<(printf "%s\n" "$POSTGRES_PASSWORD") '"$POSTGRES_INITDB_ARGS"' "$@"' + + # unset/cleanup "nss_wrapper" bits + if [[ "${LD_PRELOAD:-}" == */libnss_wrapper.so ]]; then + rm -f "$NSS_WRAPPER_PASSWD" "$NSS_WRAPPER_GROUP" + unset LD_PRELOAD NSS_WRAPPER_PASSWD NSS_WRAPPER_GROUP + fi +} + +# print large warning if POSTGRES_PASSWORD is long +# error if both POSTGRES_PASSWORD is empty and POSTGRES_HOST_AUTH_METHOD is not 'trust' +# print large warning if POSTGRES_HOST_AUTH_METHOD is set to 'trust' +# assumes database is not set up, ie: [ -z "$DATABASE_ALREADY_EXISTS" ] +docker_verify_minimum_env() { + # check password first so we can output the warning before postgres + # messes it up + if [ "${#POSTGRES_PASSWORD}" -ge 100 ]; then + cat >&2 <<-'EOWARN' + + WARNING: The supplied POSTGRES_PASSWORD is 100+ characters. + + This will not work if used via PGPASSWORD with "psql". 
+ + https://www.postgresql.org/message-id/flat/E1Rqxp2-0004Qt-PL%40wrigleys.postgresql.org (BUG #6412) + https://github.com/docker-library/postgres/issues/507 + + EOWARN + fi + if [ -z "$POSTGRES_PASSWORD" ] && [ 'trust' != "$POSTGRES_HOST_AUTH_METHOD" ]; then + # The - option suppresses leading tabs but *not* spaces. :) + cat >&2 <<-'EOE' + Error: Database is uninitialized and superuser password is not specified. + You must specify POSTGRES_PASSWORD to a non-empty value for the + superuser. For example, "-e POSTGRES_PASSWORD=password" on "docker run". + + You may also use "POSTGRES_HOST_AUTH_METHOD=trust" to allow all + connections without a password. This is *not* recommended. + + See PostgreSQL documentation about "trust": + https://www.postgresql.org/docs/current/auth-trust.html + EOE + exit 1 + fi + if [ 'trust' = "$POSTGRES_HOST_AUTH_METHOD" ]; then + cat >&2 <<-'EOWARN' + ******************************************************************************** + WARNING: POSTGRES_HOST_AUTH_METHOD has been set to "trust". This will allow + anyone with access to the Postgres port to access your database without + a password, even if POSTGRES_PASSWORD is set. See PostgreSQL + documentation about "trust": + https://www.postgresql.org/docs/current/auth-trust.html + In Docker's default configuration, this is effectively any other + container on the same system. + + It is not recommended to use POSTGRES_HOST_AUTH_METHOD=trust. Replace + it with "-e POSTGRES_PASSWORD=password" instead to set a password in + "docker run". 
+ ******************************************************************************** + EOWARN + fi +} + +# usage: docker_process_init_files [file [file [...]]] +# ie: docker_process_init_files /always-initdb.d/* +# process initializer files, based on file extensions and permissions +docker_process_init_files() { + # psql here for backwards compatibility "${psql[@]}" + psql=( docker_process_sql ) + + printf '\n' + local f + for f; do + case "$f" in + *.sh) + # https://github.com/docker-library/postgres/issues/450#issuecomment-393167936 + # https://github.com/docker-library/postgres/pull/452 + if [ -x "$f" ]; then + printf '%s: running %s\n' "$0" "$f" + "$f" + else + printf '%s: sourcing %s\n' "$0" "$f" + . "$f" + fi + ;; + *.sql) printf '%s: running %s\n' "$0" "$f"; docker_process_sql -f "$f"; printf '\n' ;; + *.sql.gz) printf '%s: running %s\n' "$0" "$f"; gunzip -c "$f" | docker_process_sql; printf '\n' ;; + *.sql.xz) printf '%s: running %s\n' "$0" "$f"; xzcat "$f" | docker_process_sql; printf '\n' ;; + *.sql.zst) printf '%s: running %s\n' "$0" "$f"; zstd -dc "$f" | docker_process_sql; printf '\n' ;; + *) printf '%s: ignoring %s\n' "$0" "$f" ;; + esac + printf '\n' + done +} + +# Execute sql script, passed via stdin (or -f flag of pqsl) +# usage: docker_process_sql [psql-cli-args] +# ie: docker_process_sql --dbname=mydb <<<'INSERT ...' 
+# ie: docker_process_sql -f my-file.sql +# ie: docker_process_sql > "$PGDATA/pg_hba.conf" +} + +# start socket-only postgresql server for setting up or running scripts +# all arguments will be passed along as arguments to `postgres` (via pg_ctl) +docker_temp_server_start() { + if [ "$1" = 'postgres' ]; then + shift + fi + + # internal start of server in order to allow setup using psql client + # does not listen on external TCP/IP and waits until start finishes + set -- "$@" -c listen_addresses='' -p "${PGPORT:-5432}" + + PGUSER="${PGUSER:-$POSTGRES_USER}" \ + pg_ctl -D "$PGDATA" \ + -o "$(printf '%q ' "$@")" \ + -w start +} + +# stop postgresql server after done setting up user and running scripts +docker_temp_server_stop() { + PGUSER="${PGUSER:-postgres}" \ + pg_ctl -D "$PGDATA" -m fast -w stop +} + +# check arguments for an option that would cause postgres to stop +# return true if there is one +_pg_want_help() { + local arg + for arg; do + case "$arg" in + # postgres --help | grep 'then exit' + # leaving out -C on purpose since it always fails and is unhelpful: + # postgres: could not access the server configuration file "/var/lib/postgresql/data/postgresql.conf": No such file or directory + -'?'|--help|--describe-config|-V|--version) + return 0 + ;; + esac + done + return 1 +} + +_main() { + # if first arg looks like a flag, assume we want to run postgres server + if [ "${1:0:1}" = '-' ]; then + set -- postgres "$@" + fi + + if [ "$1" = 'postgres' ] && ! 
_pg_want_help "$@"; then + docker_setup_env + # setup data directories and permissions (when run as root) + docker_create_db_directories + if [ "$(id -u)" = '0' ]; then + # then restart script as postgres user + exec gosu postgres "$BASH_SOURCE" "$@" + fi + + # only run initialization on an empty data directory + if [ -z "$DATABASE_ALREADY_EXISTS" ]; then + docker_verify_minimum_env + + # check dir permissions to reduce likelihood of half-initialized database + ls /docker-entrypoint-initdb.d/ > /dev/null + + docker_init_database_dir + pg_setup_hba_conf "$@" + + # PGPASSWORD is required for psql when authentication is required for 'local' connections via pg_hba.conf and is otherwise harmless + # e.g. when '--auth=md5' or '--auth-local=md5' is used in POSTGRES_INITDB_ARGS + export PGPASSWORD="${PGPASSWORD:-$POSTGRES_PASSWORD}" + docker_temp_server_start "$@" + + docker_setup_db + docker_process_init_files /docker-entrypoint-initdb.d/* + + docker_temp_server_stop + unset PGPASSWORD + + cat <<-'EOM' + + PostgreSQL init process complete; ready for start up. + + EOM + else + cat <<-'EOM' + + PostgreSQL Database directory appears to contain a database; Skipping initialization + + EOM + fi + fi + + exec "$@" +} + +if ! 
_is_sourced; then + _main "$@" +fi diff --git a/docker/all-in-one/run-logrotate.sh b/docker/all-in-one/run-logrotate.sh new file mode 100755 index 0000000..40805f8 --- /dev/null +++ b/docker/all-in-one/run-logrotate.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +set -eou pipefail + +while true; do + sleep 1800 + /usr/sbin/logrotate /etc/logrotate.conf --state "${DATA_VOLUME_MOUNTPOINT}/etc/logrotate/logrotate.state" --verbose +done diff --git a/docker/all-in-one/shutdown.sh b/docker/all-in-one/shutdown.sh new file mode 100755 index 0000000..c974b98 --- /dev/null +++ b/docker/all-in-one/shutdown.sh @@ -0,0 +1,96 @@ +#!/bin/bash + +# This script provides a method of shutting down the machine/container when the database has been idle +# for a certain amount of time (configurable via the MAX_IDLE_TIME_MINUTES env var) +# +# It checks for any active (non-idle) connections and for any connections which have been idle for more than MAX_IDLE_TIME_MINUTES. +# If there are no active connections and no idle connections, it then checks if the last disconnection event happened more than MAX_IDLE_TIME_MINUTES ago. +# +# If all of these conditions are met, then Postgres is shut down, allowing it to wrap up any pending transactions (such as WAL shipping) and gracefully exit. +# To terminate the machine/container, a SIGTERM signal is sent to the top-level process (supervisord) which will then shut down all other processes and exit.
+ +DEFAULT_MAX_IDLE_TIME_MINUTES=${MAX_IDLE_TIME_MINUTES:-5} +CONFIG_FILE_PATH=${CONFIG_FILE_PATH:-/etc/supa-shutdown/shutdown.conf} + +run_sql() { + psql -h localhost -U tealbase_admin -d postgres "$@" +} + +check_activity() { + pg_isready -h localhost > /dev/null 2>&1 || (echo "Postgres is not ready yet" && exit 1) + + QUERY=$(cat </dev/null || echo 0) + NOW=$(date +%s) + TIME_SINCE_LAST_DISCONNECT="$((NOW - LAST_DISCONNECT_TIME))" + + if [ $TIME_SINCE_LAST_DISCONNECT -gt "$((MAX_IDLE_TIME_MINUTES * 60))" ]; then + echo "$(date): No active connections for $MAX_IDLE_TIME_MINUTES minutes. Shutting down." + + supervisorctl stop postgresql + + # Postgres ships the latest WAL file using archive_command during shutdown, in a blocking operation + # This is to ensure that the WAL file is shipped, just in case + sleep 1 + + /usr/bin/admin-mgr lsn-checkpoint-push --immediately || echo "Failed to push LSN checkpoint" + + kill -s TERM "$(supervisorctl pid)" + fi +} + +# Wait for Postgres to be up +until pg_isready -h localhost > /dev/null 2>&1; + do sleep 3 +done + +# Enable logging of disconnections so the script can check when the last disconnection happened +run_sql -c "ALTER SYSTEM SET log_disconnections = 'on';" +run_sql -c "SELECT pg_reload_conf();" + +sleep $((DEFAULT_MAX_IDLE_TIME_MINUTES * 60)) +while true; do + if [ -f "$CONFIG_FILE_PATH" ]; then + source "$CONFIG_FILE_PATH" + + if [ -z "$SHUTDOWN_IDLE_TIME_MINUTES" ]; then + MAX_IDLE_TIME_MINUTES="$DEFAULT_MAX_IDLE_TIME_MINUTES" + else + MAX_IDLE_TIME_MINUTES="$SHUTDOWN_IDLE_TIME_MINUTES" + fi + else + MAX_IDLE_TIME_MINUTES="$DEFAULT_MAX_IDLE_TIME_MINUTES" + fi + + if [ "$MAX_IDLE_TIME_MINUTES" -gt 0 ] && [ "$MAX_IDLE_TIME_MINUTES" -lt 50000000 ]; then + check_activity + fi + + sleep 30 +done diff --git a/docker/nix/Dockerfile b/docker/nix/Dockerfile new file mode 100644 index 0000000..2269079 --- /dev/null +++ b/docker/nix/Dockerfile @@ -0,0 +1,16 @@ +FROM nixpkgs/nix-flakes + +RUN echo "substituters = 
https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com" >> /etc/nix/nix.conf + +RUN echo "trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=" >> /etc/nix/nix.conf + + +USER $USER + +RUN mkdir -p /workspace + +COPY ./ /workspace + +RUN chmod +x /workspace/docker/nix/build_nix.sh + +RUN chown -R $USER:$USER /workspace \ No newline at end of file diff --git a/docker/nix/build_nix.sh b/docker/nix/build_nix.sh new file mode 100644 index 0000000..5035be0 --- /dev/null +++ b/docker/nix/build_nix.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +set -eou pipefail + +nix --version +if [ -d "/workspace" ]; then + cd /workspace +fi +SYSTEM=$(nix-instantiate --eval -E builtins.currentSystem | tr -d '"') +nix build .#psql_15/bin -o psql_15 +nix flake check -L +nix copy --to s3://nix-postgres-artifacts?secret-key=nix-secret-key ./psql_15 +if [ "$SYSTEM" = "aarch64-linux" ]; then + nix build .#postgresql_15_debug -o ./postgresql_15_debug + nix build .#postgresql_15_src -o ./postgresql_15_src + nix copy --to s3://nix-postgres-artifacts?secret-key=nix-secret-key ./postgresql_15_debug-debug + nix copy --to s3://nix-postgres-artifacts?secret-key=nix-secret-key ./postgresql_15_src +fi diff --git a/docker/orioledb/Dockerfile b/docker/orioledb/Dockerfile new file mode 100644 index 0000000..5581b4e --- /dev/null +++ b/docker/orioledb/Dockerfile @@ -0,0 +1,1059 @@ +# syntax=docker/dockerfile:1.6 +ARG postgresql_major=15 +ARG postgresql_release=${postgresql_major}.1 + +# Bump default build arg to build a package from source +# Bump vars.yml to specify runtime package version +ARG sfcgal_release=1.3.10 +ARG postgis_release=3.3.2 +ARG pgrouting_release=3.4.1 +ARG pgtap_release=1.2.0 +ARG pg_cron_release=1.6.2 +ARG pgaudit_release=1.7.0 +ARG pgjwt_release=9742dab1b2f297ad3811120db7b21451bca2d3c9 +ARG pgsql_http_release=1.5.0 +ARG plpgsql_check_release=2.2.5 +ARG
pg_safeupdate_release=1.4 +ARG timescaledb_release=2.9.1 +ARG wal2json_release=2_5 +ARG pljava_release=1.6.4 +ARG plv8_release=3.1.5 +ARG pg_plan_filter_release=5081a7b5cb890876e67d8e7486b6a64c38c9a492 +ARG pg_net_release=0.9.2 +ARG rum_release=1.3.13 +ARG pg_hashids_release=cd0e1b31d52b394a0df64079406a14a4f7387cd6 +ARG libsodium_release=1.0.18 +ARG pgsodium_release=3.1.6 +ARG pg_graphql_release=1.5.1 +ARG pg_stat_monitor_release=1.1.1 +ARG pg_jsonschema_release=0.2.0 +ARG pg_repack_release=1.4.8 +ARG vault_release=0.2.8 +ARG groonga_release=12.0.8 +ARG pgroonga_release=2.4.0 +ARG wrappers_release=0.2.0 +ARG hypopg_release=1.3.1 +ARG pgvector_release=0.4.0 +ARG pg_tle_release=1.3.2 +ARG index_advisor_release=0.2.0 +ARG supautils_release=2.5.0 +ARG wal_g_release=2.0.1 + +#################### +# Install postgres +#################### +FROM orioledb/orioledb:latest-pg${postgresql_major}-ubuntu as base +# Redeclare args for use in subsequent stages +ARG TARGETARCH +ARG postgresql_major + +ENV PATH=$PATH:/usr/lib/postgresql/${postgresql_major}/bin +ENV PGDATA=/var/lib/postgresql/data + +# Make the "en_US.UTF-8" locale so postgres will be utf-8 enabled by default +# RUN localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 +ENV LANG=en_US.UTF-8 +ENV LC_CTYPE=C.UTF-8 +ENV LC_COLLATE=C + +FROM base as builder +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + build-essential \ + checkinstall \ + cmake \ + && rm -rf /var/lib/apt/lists/* /tmp/* + +FROM builder as ccache +# Cache large build artifacts +RUN apt-get update && apt-get install -y --no-install-recommends \ + clang \ + ccache \ + && rm -rf /var/lib/apt/lists/* +ENV CCACHE_DIR=/ccache +ENV PATH=/usr/lib/ccache:$PATH +# Used to update ccache +ARG CACHE_EPOCH + +FROM builder as rust-toolchain +ENV PATH=/root/.cargo/bin:$PATH +RUN apt-get update && apt-get install -y --no-install-recommends curl pkg-config && \ + curl --proto '=https' --tlsv1.2 
-sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --profile minimal --default-toolchain stable && \ + rustup --version && \ + rustc --version && \ + cargo --version + +RUN cargo install cargo-pgrx --version 0.10.2 --locked +RUN cargo pgrx init --pg${postgresql_major} $(which pg_config) + +#################### +# 01-postgis.yml +#################### +FROM ccache as sfcgal +# Download and extract +ARG sfcgal_release +ARG sfcgal_release_checksum +ADD --checksum=${sfcgal_release_checksum} \ + "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/sfcgal/SFCGAL-v${sfcgal_release}.tar.gz" \ + /tmp/sfcgal.tar.gz +RUN tar -xvf /tmp/sfcgal.tar.gz -C /tmp --one-top-level --strip-components 1 && \ + rm -rf /tmp/sfcgal.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + libcgal-dev \ + libboost-serialization1.71-dev \ + libmpfr-dev \ + libgmp-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/sfcgal/build +RUN cmake .. 
+RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=yes --fstrans=no --backup=no --pakdir=/tmp --pkgname=sfcgal --pkgversion=${sfcgal_release} --requires=libgmpxx4ldbl,libboost-serialization1.71.0,libmpfr6 --nodoc + +FROM sfcgal as postgis-source +# Download and extract +ARG postgis_release +ARG postgis_release_checksum +ADD --checksum=${postgis_release_checksum} \ + "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/postgis-${postgis_release}.tar.gz" \ + /tmp/postgis.tar.gz +RUN tar -xvf /tmp/postgis.tar.gz -C /tmp && \ + rm -rf /tmp/postgis.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + protobuf-c-compiler \ + libgeos-dev \ + libproj-dev \ + libgdal-dev \ + libjson-c-dev \ + libxml2-dev \ + libprotobuf-c-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/postgis-${postgis_release} +RUN ./configure --with-sfcgal +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --requires=libgeos-c1v5,libproj15,libjson-c4,libprotobuf-c1,libgdal26 --nodoc + +#################### +# 02-pgrouting.yml +#################### +FROM ccache as pgrouting-source +# Download and extract +ARG pgrouting_release +ARG pgrouting_release_checksum +ADD --checksum=${pgrouting_release_checksum} \ + "https://github.com/pgRouting/pgrouting/releases/download/v${pgrouting_release}/pgrouting-${pgrouting_release}.tar.gz" \ + /tmp/pgrouting.tar.gz +RUN tar -xvf /tmp/pgrouting.tar.gz -C /tmp && \ + rm -rf /tmp/pgrouting.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + libboost-all-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/pgrouting-${pgrouting_release}/build +RUN cmake 
-DBUILD_HTML=OFF -DBUILD_DOXY=OFF .. +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgname=pgrouting --pkgversion=${pgrouting_release} --nodoc + +#################### +# 03-pgtap.yml +#################### +FROM builder as pgtap-source +# Download and extract +ARG pgtap_release +ARG pgtap_release_checksum +ADD --checksum=${pgtap_release_checksum} \ + "https://github.com/theory/pgtap/archive/v${pgtap_release}.tar.gz" \ + /tmp/pgtap.tar.gz +RUN tar -xvf /tmp/pgtap.tar.gz -C /tmp && \ + rm -rf /tmp/pgtap.tar.gz +# Build from source +WORKDIR /tmp/pgtap-${pgtap_release} +RUN make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### +# 04-pg_cron.yml +#################### +FROM ccache as pg_cron-source +# Download and extract +ARG pg_cron_release +ARG pg_cron_release_checksum +ADD --checksum=${pg_cron_release_checksum} \ + "https://github.com/citusdata/pg_cron/archive/refs/tags/v${pg_cron_release}.tar.gz" \ + /tmp/pg_cron.tar.gz +RUN tar -xvf /tmp/pg_cron.tar.gz -C /tmp && \ + rm -rf /tmp/pg_cron.tar.gz +# Build from source +WORKDIR /tmp/pg_cron-${pg_cron_release} +# error: redefinition of typedef 'snapshot_hook_type' is a C11 feature [-Werror,-Wtypedef-redefinition] +RUN sed -i -e "s|-std=c99|-std=c11|g" Makefile +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### +# 05-pgaudit.yml +#################### +FROM ccache as pgaudit-source +# Download and extract +ARG pgaudit_release +ARG pgaudit_release_checksum +ADD --checksum=${pgaudit_release_checksum} \ + "https://github.com/pgaudit/pgaudit/archive/refs/tags/${pgaudit_release}.tar.gz" \ + 
/tmp/pgaudit.tar.gz +RUN tar -xvf /tmp/pgaudit.tar.gz -C /tmp && \ + rm -rf /tmp/pgaudit.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + libssl-dev \ + libkrb5-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/pgaudit-${pgaudit_release} +ENV USE_PGXS=1 +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### +# 06-pgjwt.yml +#################### +FROM builder as pgjwt-source +# Download and extract +ARG pgjwt_release +ADD "https://github.com/michelp/pgjwt.git#${pgjwt_release}" \ + /tmp/pgjwt-${pgjwt_release} +# Build from source +WORKDIR /tmp/pgjwt-${pgjwt_release} +RUN make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgversion=1 --nodoc + +#################### +# 07-pgsql-http.yml +#################### +FROM ccache as pgsql-http-source +# Download and extract +ARG pgsql_http_release +ARG pgsql_http_release_checksum +ADD --checksum=${pgsql_http_release_checksum} \ + "https://github.com/pramsey/pgsql-http/archive/refs/tags/v${pgsql_http_release}.tar.gz" \ + /tmp/pgsql-http.tar.gz +RUN tar -xvf /tmp/pgsql-http.tar.gz -C /tmp && \ + rm -rf /tmp/pgsql-http.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + libcurl4-gnutls-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/pgsql-http-${pgsql_http_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --requires=libcurl3-gnutls --nodoc + +#################### +# 08-plpgsql_check.yml +#################### +FROM ccache as plpgsql_check-source +# Download and extract +ARG 
plpgsql_check_release +ARG plpgsql_check_release_checksum +ADD --checksum=${plpgsql_check_release_checksum} \ + "https://github.com/okbob/plpgsql_check/archive/refs/tags/v${plpgsql_check_release}.tar.gz" \ + /tmp/plpgsql_check.tar.gz +RUN tar -xvf /tmp/plpgsql_check.tar.gz -C /tmp && \ + rm -rf /tmp/plpgsql_check.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + libicu-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/plpgsql_check-${plpgsql_check_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### +# 09-pg-safeupdate.yml +#################### +FROM ccache as pg-safeupdate-source +# Download and extract +ARG pg_safeupdate_release +ARG pg_safeupdate_release_checksum +ADD --checksum=${pg_safeupdate_release_checksum} \ + "https://github.com/eradman/pg-safeupdate/archive/refs/tags/${pg_safeupdate_release}.tar.gz" \ + /tmp/pg-safeupdate.tar.gz +RUN tar -xvf /tmp/pg-safeupdate.tar.gz -C /tmp && \ + rm -rf /tmp/pg-safeupdate.tar.gz +# Build from source +WORKDIR /tmp/pg-safeupdate-${pg_safeupdate_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### +# 10-timescaledb.yml +#################### +FROM ccache as timescaledb-source +# Download and extract +ARG timescaledb_release +ARG timescaledb_release_checksum +ADD --checksum=${timescaledb_release_checksum} \ + "https://github.com/timescale/timescaledb/archive/refs/tags/${timescaledb_release}.tar.gz" \ + /tmp/timescaledb.tar.gz +RUN tar -xvf /tmp/timescaledb.tar.gz -C /tmp && \ + rm -rf /tmp/timescaledb.tar.gz +# Build from source +WORKDIR 
/tmp/timescaledb-${timescaledb_release}/build +RUN cmake -DAPACHE_ONLY=1 .. +# error: too few arguments to function ‘table_tuple_update’ +# error: too few arguments to function ‘table_tuple_delete’ +RUN sed -i \ + -e "1981s|);|, NULL);|g" \ + -e "2567s|);|, NULL);|g" \ + ../src/nodes/hypertable_modify.c +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgname=timescaledb --pkgversion=${timescaledb_release} --nodoc + +#################### +# 11-wal2json.yml +#################### +FROM ccache as wal2json-source +# Download and extract +ARG wal2json_release +ARG wal2json_release_checksum +ADD --checksum=${wal2json_release_checksum} \ + "https://github.com/eulerto/wal2json/archive/refs/tags/wal2json_${wal2json_release}.tar.gz" \ + /tmp/wal2json.tar.gz +RUN tar -xvf /tmp/wal2json.tar.gz -C /tmp --one-top-level --strip-components 1 && \ + rm -rf /tmp/wal2json.tar.gz +# Build from source +WORKDIR /tmp/wal2json +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +ENV version=${wal2json_release} +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgversion="\${version/_/.}" --nodoc + +#################### +# 12-pljava.yml +#################### +FROM builder as pljava-source +# Download and extract +# TODO: revert to using main repo after PG15 support is merged: https://github.com/tada/pljava/pull/413 +ARG pljava_release=master +ARG pljava_release_checksum=sha256:e99b1c52f7b57f64c8986fe6ea4a6cc09d78e779c1643db060d0ac66c93be8b6 +ADD --checksum=${pljava_release_checksum} \ + "https://github.com/tealbase/pljava/archive/refs/heads/${pljava_release}.tar.gz" \ + /tmp/pljava.tar.gz +RUN tar -xvf /tmp/pljava.tar.gz -C /tmp && \ + rm -rf /tmp/pljava.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y 
--no-install-recommends \ + maven \ + default-jdk \ + libssl-dev \ + libkrb5-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/pljava-${pljava_release} +RUN mvn -T 1C clean install -Dmaven.test.skip -DskipTests -Dmaven.javadoc.skip=true +# Create debian package +RUN cp pljava-packaging/target/pljava-pg${postgresql_major}.jar /tmp/ + +#################### +# 13-plv8.yml +#################### +FROM ccache as plv8-source +# Download and extract +ARG plv8_release +ARG plv8_release_checksum +ADD --checksum=${plv8_release_checksum} \ + "https://github.com/plv8/plv8/archive/refs/tags/v${plv8_release}.tar.gz" \ + /tmp/plv8.tar.gz +RUN tar -xvf /tmp/plv8.tar.gz -C /tmp && \ + rm -rf /tmp/plv8.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + ca-certificates \ + pkg-config \ + ninja-build \ + git \ + libtinfo5 \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/plv8-${plv8_release} +ENV DOCKER=1 +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +FROM scratch as plv8-deb +COPY --from=plv8-source /tmp/*.deb /tmp/ + +FROM ghcr.io/tealbase/plv8:${plv8_release}-pg${postgresql_major} as plv8 + +#################### +# 14-pg_plan_filter.yml +#################### +FROM ccache as pg_plan_filter-source +# Download and extract +ARG pg_plan_filter_release +ADD "https://github.com/pgexperts/pg_plan_filter.git#${pg_plan_filter_release}" \ + /tmp/pg_plan_filter-${pg_plan_filter_release} +# Build from source +WORKDIR /tmp/pg_plan_filter-${pg_plan_filter_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgversion=1 --nodoc + +#################### +# 15-pg_net.yml +#################### 
+FROM ccache as pg_net-source +# Download and extract +ARG pg_net_release +ARG pg_net_release_checksum +ADD --checksum=${pg_net_release_checksum} \ + "https://github.com/tealbase/pg_net/archive/refs/tags/v${pg_net_release}.tar.gz" \ + /tmp/pg_net.tar.gz +RUN tar -xvf /tmp/pg_net.tar.gz -C /tmp && \ + rm -rf /tmp/pg_net.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + libcurl4-gnutls-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/pg_net-${pg_net_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --requires=libcurl3-gnutls --nodoc + +#################### +# 16-rum.yml +#################### +FROM ccache as rum-source +# Download and extract +ARG rum_release +ARG rum_release_checksum +ADD --checksum=${rum_release_checksum} \ + "https://github.com/postgrespro/rum/archive/refs/tags/${rum_release}.tar.gz" \ + /tmp/rum.tar.gz +RUN tar -xvf /tmp/rum.tar.gz -C /tmp && \ + rm -rf /tmp/rum.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + systemtap-sdt-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/rum-${rum_release} +RUN sed -i \ + # error: typedef redefinition with different types ('struct SortTuple' vs 'struct SortTuple') + -e "183s|^|// |g" \ + -e "184s|^|// |g" \ + -e "185s|^|// |g" \ + -e "186s|^|// |g" \ + -e "187s|^|// |g" \ + -e "188s|^|// |g" \ + -e "189s|^|// |g" \ + # error: static declaration of 'tuplesort_begin_common' follows non-static declaration + -e "621s|static ||g" \ + # error: static declaration of 'tuplesort_begin_common' follows non-static declaration + -e "846s|static ||g" \ + # error: static declaration of 'tuplesort_gettuple_common' follows non-static declaration + -e "2308s|static ||g" \ + src/tuplesort15.c +ENV USE_PGXS=1 
+RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### +# 17-pg_hashids.yml +#################### +FROM ccache as pg_hashids-source +# Download and extract +ARG pg_hashids_release +ADD "https://github.com/iCyberon/pg_hashids.git#${pg_hashids_release}" \ + /tmp/pg_hashids-${pg_hashids_release} +# Build from source +WORKDIR /tmp/pg_hashids-${pg_hashids_release} +RUN make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgversion=1 --nodoc + +#################### +# 18-pgsodium.yml +#################### +FROM ccache as libsodium +# Download and extract +ARG libsodium_release +ARG libsodium_release_checksum +ADD --checksum=${libsodium_release_checksum} \ + "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/libsodium/libsodium-${libsodium_release}.tar.gz" \ + /tmp/libsodium.tar.gz +RUN tar -xvf /tmp/libsodium.tar.gz -C /tmp && \ + rm -rf /tmp/libsodium.tar.gz +# Build from source +WORKDIR /tmp/libsodium-${libsodium_release} +RUN ./configure +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +RUN make install + +FROM libsodium as pgsodium-source +# Download and extract +ARG pgsodium_release +ARG pgsodium_release_checksum +ADD --checksum=${pgsodium_release_checksum} \ + "https://github.com/michelp/pgsodium/archive/refs/tags/v${pgsodium_release}.tar.gz" \ + /tmp/pgsodium.tar.gz +RUN tar -xvf /tmp/pgsodium.tar.gz -C /tmp && \ + rm -rf /tmp/pgsodium.tar.gz +# Build from source +WORKDIR /tmp/pgsodium-${pgsodium_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --requires=libsodium23 --nodoc + +#################### +# 
19-pg_graphql.yml +#################### +FROM rust-toolchain as pg_graphql-source +# Download and extract +ARG pg_graphql_release +ARG pg_graphql_release_checksum +ADD --checksum=${pg_graphql_release_checksum} \ + "https://github.com/tealbase/pg_graphql/archive/refs/tags/v${pg_graphql_release}.tar.gz" \ + /tmp/pg_graphql.tar.gz +RUN tar -xvf /tmp/pg_graphql.tar.gz -C /tmp && \ + rm -rf /tmp/pg_graphql.tar.gz +WORKDIR /tmp/pg_graphql-${pg_graphql_release} +RUN cargo pgrx package --no-default-features --features pg${postgresql_major} + +# Create installable package +RUN mkdir archive +RUN cp target/release/pg_graphql-pg${postgresql_major}/usr/local/share/postgresql/extension/pg_graphql* archive +RUN cp target/release/pg_graphql-pg${postgresql_major}/usr/local/lib/postgresql/pg_graphql.so archive + +# name of the package directory before packaging +ENV package_dir=pg_graphql-v${pg_graphql_release}-pg${postgresql_major}-${TARGETARCH}-linux-gnu + +## Copy files into directory structure +RUN mkdir -p ${package_dir}/usr/lib/postgresql/lib +RUN mkdir -p ${package_dir}/var/lib/postgresql/extension +RUN cp archive/*.so ${package_dir}/usr/lib/postgresql/lib +RUN cp archive/*.control ${package_dir}/var/lib/postgresql/extension +RUN cp archive/*.sql ${package_dir}/var/lib/postgresql/extension + +# symlinks to Copy files into directory structure +RUN mkdir -p ${package_dir}/usr/local/lib/postgresql +WORKDIR ${package_dir}/usr/local/lib/postgresql +RUN cp -s ../../../lib/postgresql/lib/*.so . +WORKDIR ../../../../.. + +RUN mkdir -p ${package_dir}/usr/local/share/postgresql/extension +WORKDIR ${package_dir}/usr/local/share/postgresql/extension +RUN cp -s ../../../../../var/lib/postgresql/extension/pg_graphql.control . +RUN cp -s ../../../../../var/lib/postgresql/extension/pg_graphql*.sql . +WORKDIR ../../../../../.. 
+ +RUN mkdir -p ${package_dir}/DEBIAN +RUN touch ${package_dir}/DEBIAN/control +RUN echo 'Package: pg-graphql' >> ${package_dir}/DEBIAN/control +RUN echo 'Version:' ${pg_graphql_release} >> ${package_dir}/DEBIAN/control +RUN echo "Architecture: ${TARGETARCH}" >> ${package_dir}/DEBIAN/control +RUN echo 'Maintainer: tealbase' >> ${package_dir}/DEBIAN/control +RUN echo 'Description: A PostgreSQL extension' >> ${package_dir}/DEBIAN/control + +# Create deb package +RUN chown -R root:root ${package_dir} +RUN chmod -R 00755 ${package_dir} +RUN dpkg-deb --build --root-owner-group ${package_dir} +RUN cp ./*.deb /tmp/pg_graphql.deb + +#################### +# 20-pg_stat_monitor.yml +#################### +FROM ccache as pg_stat_monitor-source +# Download and extract +ARG pg_stat_monitor_release +ARG pg_stat_monitor_release_checksum +ADD --checksum=${pg_stat_monitor_release_checksum} \ + "https://github.com/percona/pg_stat_monitor/archive/refs/tags/${pg_stat_monitor_release}.tar.gz" \ + /tmp/pg_stat_monitor.tar.gz +RUN tar -xvf /tmp/pg_stat_monitor.tar.gz -C /tmp && \ + rm -rf /tmp/pg_stat_monitor.tar.gz +# Build from source +WORKDIR /tmp/pg_stat_monitor-${pg_stat_monitor_release} +ENV USE_PGXS=1 +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### +# 22-pg_jsonschema.yml +#################### +FROM rust-toolchain as pg_jsonschema-source +# Download and extract +ARG pg_jsonschema_release +ARG pg_jsonschema_release_checksum +ADD --checksum=${pg_jsonschema_release_checksum} \ + "https://github.com/tealbase/pg_jsonschema/archive/refs/tags/v${pg_jsonschema_release}.tar.gz" \ + /tmp/pg_jsonschema.tar.gz +RUN tar -xvf /tmp/pg_jsonschema.tar.gz -C /tmp && \ + rm -rf /tmp/pg_jsonschema.tar.gz +WORKDIR /tmp/pg_jsonschema-${pg_jsonschema_release} +RUN cargo pgrx package --no-default-features --features 
pg${postgresql_major} + +# Create installable package +RUN mkdir archive +RUN cp target/release/pg_jsonschema-pg${postgresql_major}/usr/local/share/postgresql/extension/pg_jsonschema* archive +RUN cp target/release/pg_jsonschema-pg${postgresql_major}/usr/local/lib/postgresql/pg_jsonschema.so archive + +# name of the package directory before packaging +ENV package_dir=pg_jsonschema-v${pg_jsonschema_release}-pg${postgresql_major}-${TARGETARCH}-linux-gnu + +## Copy files into directory structure +RUN mkdir -p ${package_dir}/usr/lib/postgresql/lib +RUN mkdir -p ${package_dir}/var/lib/postgresql/extension +RUN cp archive/*.so ${package_dir}/usr/lib/postgresql/lib +RUN cp archive/*.control ${package_dir}/var/lib/postgresql/extension +RUN cp archive/*.sql ${package_dir}/var/lib/postgresql/extension + +# symlinks to Copy files into directory structure +RUN mkdir -p ${package_dir}/usr/local/lib/postgresql +WORKDIR ${package_dir}/usr/local/lib/postgresql +RUN cp -s ../../../lib/postgresql/lib/*.so . +WORKDIR ../../../../.. + +RUN mkdir -p ${package_dir}/usr/local/share/postgresql/extension +WORKDIR ${package_dir}/usr/local/share/postgresql/extension + +RUN cp -s ../../../../../var/lib/postgresql/extension/pg_jsonschema.control . +RUN cp -s ../../../../../var/lib/postgresql/extension/pg_jsonschema*.sql . +WORKDIR ../../../../../.. 
+ +RUN mkdir -p ${package_dir}/DEBIAN +RUN touch ${package_dir}/DEBIAN/control +RUN echo 'Package: pg-jsonschema' >> ${package_dir}/DEBIAN/control +RUN echo 'Version:' ${pg_jsonschema_release} >> ${package_dir}/DEBIAN/control +RUN echo "Architecture: ${TARGETARCH}" >> ${package_dir}/DEBIAN/control +RUN echo 'Maintainer: tealbase' >> ${package_dir}/DEBIAN/control +RUN echo 'Description: A PostgreSQL extension' >> ${package_dir}/DEBIAN/control + +# Create deb package +RUN chown -R root:root ${package_dir} +RUN chmod -R 00755 ${package_dir} +RUN dpkg-deb --build --root-owner-group ${package_dir} +RUN cp ./*.deb /tmp/pg_jsonschema.deb + +#################### +# 23-vault.yml +#################### +FROM builder as vault-source +# Download and extract +ARG vault_release +ARG vault_release_checksum +ADD --checksum=${vault_release_checksum} \ + "https://github.com/tealbase/vault/archive/refs/tags/v${vault_release}.tar.gz" \ + /tmp/vault.tar.gz +RUN tar -xvf /tmp/vault.tar.gz -C /tmp && \ + rm -rf /tmp/vault.tar.gz +# Build from source +WORKDIR /tmp/vault-${vault_release} +RUN make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### +# 24-pgroonga.yml +#################### +FROM ccache as groonga +# Download and extract +ARG groonga_release +ARG groonga_release_checksum +ADD --checksum=${groonga_release_checksum} \ + "https://packages.groonga.org/source/groonga/groonga-${groonga_release}.tar.gz" \ + /tmp/groonga.tar.gz +RUN tar -xvf /tmp/groonga.tar.gz -C /tmp && \ + rm -rf /tmp/groonga.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + zlib1g-dev \ + liblz4-dev \ + libzstd-dev \ + libmsgpack-dev \ + libzmq3-dev \ + libevent-dev \ + libmecab-dev \ + rapidjson-dev \ + pkg-config \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/groonga-${groonga_release} +RUN ./configure +RUN 
--mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=yes --fstrans=no --backup=no --pakdir=/tmp --requires=zlib1g,liblz4-1,libzstd1,libmsgpackc2,libzmq5,libevent-2.1-7,libmecab2 --nodoc + +FROM groonga as pgroonga-source +# Download and extract +ARG pgroonga_release +ARG pgroonga_release_checksum +ADD --checksum=${pgroonga_release_checksum} \ + "https://packages.groonga.org/source/pgroonga/pgroonga-${pgroonga_release}.tar.gz" \ + /tmp/pgroonga.tar.gz +RUN tar -xvf /tmp/pgroonga.tar.gz -C /tmp && \ + rm -rf /tmp/pgroonga.tar.gz +# Build from source +WORKDIR /tmp/pgroonga-${pgroonga_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --requires=mecab-naist-jdic --nodoc + +FROM scratch as pgroonga-deb +COPY --from=pgroonga-source /tmp/*.deb /tmp/ + +#################### +# 25-wrappers.yml +#################### +FROM rust-toolchain as wrappers-source +# Required by wrappers 0.2.0 +RUN cargo install cargo-pgrx --version 0.11.0 --locked +RUN cargo pgrx init --pg${postgresql_major} $(which pg_config) +# Download and extract +ARG wrappers_release +ARG wrappers_release_checksum +ADD --checksum=${wrappers_release_checksum} \ + "https://github.com/tealbase/wrappers/archive/refs/tags/v${wrappers_release}.tar.gz" \ + /tmp/wrappers.tar.gz +RUN tar -xvf /tmp/wrappers.tar.gz -C /tmp && \ + rm -rf /tmp/wrappers.tar.gz +WORKDIR /tmp/wrappers-${wrappers_release}/wrappers +RUN cargo pgrx package --no-default-features --features pg${postgresql_major},all_fdws + +ENV extension_dir=target/release/wrappers-pg${postgresql_major}/usr/local/share/postgresql/extension + +# copy schema file to version update sql files +# Note: some version numbers may be skipped +RUN cp ${extension_dir}/wrappers--${wrappers_release}.sql 
${extension_dir}/wrappers--0.1.6--${wrappers_release}.sql +RUN cp ${extension_dir}/wrappers--${wrappers_release}.sql ${extension_dir}/wrappers--0.1.7--${wrappers_release}.sql +RUN cp ${extension_dir}/wrappers--${wrappers_release}.sql ${extension_dir}/wrappers--0.1.8--${wrappers_release}.sql +RUN cp ${extension_dir}/wrappers--${wrappers_release}.sql ${extension_dir}/wrappers--0.1.9--${wrappers_release}.sql +RUN cp ${extension_dir}/wrappers--${wrappers_release}.sql ${extension_dir}/wrappers--0.1.10--${wrappers_release}.sql +RUN cp ${extension_dir}/wrappers--${wrappers_release}.sql ${extension_dir}/wrappers--0.1.11--${wrappers_release}.sql +RUN cp ${extension_dir}/wrappers--${wrappers_release}.sql ${extension_dir}/wrappers--0.1.14--${wrappers_release}.sql +RUN cp ${extension_dir}/wrappers--${wrappers_release}.sql ${extension_dir}/wrappers--0.1.15--${wrappers_release}.sql +RUN cp ${extension_dir}/wrappers--${wrappers_release}.sql ${extension_dir}/wrappers--0.1.16--${wrappers_release}.sql +RUN cp ${extension_dir}/wrappers--${wrappers_release}.sql ${extension_dir}/wrappers--0.1.17--${wrappers_release}.sql +RUN cp ${extension_dir}/wrappers--${wrappers_release}.sql ${extension_dir}/wrappers--0.1.18--${wrappers_release}.sql + +# Create installable package +RUN mkdir archive +RUN cp target/release/wrappers-pg${postgresql_major}/usr/local/share/postgresql/extension/wrappers* archive +RUN cp target/release/wrappers-pg${postgresql_major}/usr/local/lib/postgresql/wrappers-${wrappers_release}.so archive + +# name of the package directory before packaging +ENV package_dir=wrappers-v${wrappers_release}-pg${postgresql_major}-${TARGETARCH}-linux-gnu + +## Copy files into directory structure +RUN mkdir -p ${package_dir}/usr/lib/postgresql/lib +RUN mkdir -p ${package_dir}/var/lib/postgresql/extension +RUN cp archive/*.so ${package_dir}/usr/lib/postgresql/lib +RUN cp archive/*.control ${package_dir}/var/lib/postgresql/extension +RUN cp archive/*.sql 
${package_dir}/var/lib/postgresql/extension + +# symlinks to Copy files into directory structure +RUN mkdir -p ${package_dir}/usr/local/lib/postgresql +WORKDIR ${package_dir}/usr/local/lib/postgresql +RUN cp -s ../../../lib/postgresql/lib/*.so . +WORKDIR ../../../../.. + +RUN mkdir -p ${package_dir}/usr/local/share/postgresql/extension +WORKDIR ${package_dir}/usr/local/share/postgresql/extension + +RUN cp -s ../../../../../var/lib/postgresql/extension/wrappers.control . +RUN cp -s ../../../../../var/lib/postgresql/extension/wrappers*.sql . +WORKDIR ../../../../../.. + +RUN mkdir -p ${package_dir}/DEBIAN +RUN touch ${package_dir}/DEBIAN/control +RUN echo 'Package: wrappers' >> ${package_dir}/DEBIAN/control +RUN echo 'Version:' ${wrappers_release} >> ${package_dir}/DEBIAN/control +RUN echo "Architecture: ${TARGETARCH}" >> ${package_dir}/DEBIAN/control +RUN echo 'Maintainer: tealbase' >> ${package_dir}/DEBIAN/control +RUN echo 'Description: A PostgreSQL extension' >> ${package_dir}/DEBIAN/control + +# Create deb package +RUN chown -R root:root ${package_dir} +RUN chmod -R 00755 ${package_dir} +RUN dpkg-deb --build --root-owner-group ${package_dir} +RUN cp ./*.deb /tmp/wrappers.deb + +#################### +# 26-hypopg.yml +#################### +FROM ccache as hypopg-source +# Download and extract +ARG hypopg_release +ARG hypopg_release_checksum +ADD --checksum=${hypopg_release_checksum} \ + "https://github.com/HypoPG/hypopg/archive/refs/tags/${hypopg_release}.tar.gz" \ + /tmp/hypopg.tar.gz +RUN tar -xvf /tmp/hypopg.tar.gz -C /tmp && \ + rm -rf /tmp/hypopg.tar.gz +# Build from source +WORKDIR /tmp/hypopg-${hypopg_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### + # 27-pg_repack.yml + #################### + FROM ccache as pg_repack-source + ARG pg_repack_release + ARG 
pg_repack_release_checksum + ADD --checksum=${pg_repack_release_checksum} \ + "https://github.com/reorg/pg_repack/archive/refs/tags/ver_${pg_repack_release}.tar.gz" \ + /tmp/pg_repack.tar.gz + RUN tar -xvf /tmp/pg_repack.tar.gz -C /tmp && \ + rm -rf /tmp/pg_repack.tar.gz + # Install build dependencies + RUN apt-get update && apt-get install -y --no-install-recommends \ + liblz4-dev \ + libz-dev \ + libzstd-dev \ + libreadline-dev \ + && rm -rf /var/lib/apt/lists/* + # Build from source + WORKDIR /tmp/pg_repack-ver_${pg_repack_release} + ENV USE_PGXS=1 + RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) + # Create debian package + RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgversion=${pg_repack_release} --nodoc + +#################### +# 28-pgvector.yml +#################### +FROM ccache as pgvector-source +ARG pgvector_release +ARG pgvector_release_checksum +ADD --checksum=${pgvector_release_checksum} \ + "https://github.com/pgvector/pgvector/archive/refs/tags/v${pgvector_release}.tar.gz" \ + /tmp/pgvector.tar.gz +RUN tar -xvf /tmp/pgvector.tar.gz -C /tmp && \ + rm -rf /tmp/pgvector.tar.gz +# Build from source +WORKDIR /tmp/pgvector-${pgvector_release} +# error: the clang compiler does not support '-march=native' +RUN sed -i -e "s|-march=native||g" Makefile +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### +# 29-pg_tle.yml +#################### +FROM ccache as pg_tle-source +ARG pg_tle_release +ARG pg_tle_release_checksum +ADD --checksum=${pg_tle_release_checksum} \ + "https://github.com/aws/pg_tle/archive/refs/tags/v${pg_tle_release}.tar.gz" \ + /tmp/pg_tle.tar.gz +RUN tar -xvf /tmp/pg_tle.tar.gz -C /tmp && \ + rm -rf /tmp/pg_tle.tar.gz +RUN apt-get update && apt-get install -y 
--no-install-recommends \ + flex \ + libkrb5-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/pg_tle-${pg_tle_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +###################### +# 30-index_advisor.yml +###################### +FROM ccache as index_advisor +ARG index_advisor_release +ARG index_advisor_release_checksum +ADD --checksum=${index_advisor_release_checksum} \ + "https://github.com/olirice/index_advisor/archive/refs/tags/v${index_advisor_release}.tar.gz" \ + /tmp/index_advisor.tar.gz +RUN tar -xvf /tmp/index_advisor.tar.gz -C /tmp && \ + rm -rf /tmp/index_advisor.tar.gz +# Build from source +WORKDIR /tmp/index_advisor-${index_advisor_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### +# internal/supautils.yml +#################### +FROM ccache as supautils-source +ARG supautils_release +ARG supautils_release_tar_checksum +ADD --checksum=${supautils_release_tar_checksum} \ + "https://github.com/tealbase/supautils/archive/refs/tags/v${supautils_release}.tar.gz" \ + /tmp/supautils.tar.gz +RUN tar -xvf /tmp/supautils.tar.gz -C /tmp && \ + rm -rf /tmp/supautils.tar.gz +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + libicu-dev \ + && rm -rf /var/lib/apt/lists/* +# Build from source +WORKDIR /tmp/supautils-${supautils_release} +RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ + make -j$(nproc) +# Create debian package +RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc + +#################### +# setup-wal-g.yml +#################### +FROM base as walg +ARG 
wal_g_release +# ADD "https://github.com/wal-g/wal-g/releases/download/v${wal_g_release}/wal-g-pg-ubuntu-20.04-${TARGETARCH}.tar.gz" /tmp/wal-g.tar.gz +RUN arch=$([ "$TARGETARCH" = "arm64" ] && echo "aarch64" || echo "$TARGETARCH") && \ + apt-get update && apt-get install -y --no-install-recommends curl && \ + curl -kL "https://github.com/wal-g/wal-g/releases/download/v${wal_g_release}/wal-g-pg-ubuntu-20.04-${arch}.tar.gz" -o /tmp/wal-g.tar.gz && \ + tar -xvf /tmp/wal-g.tar.gz -C /tmp && \ + rm -rf /tmp/wal-g.tar.gz && \ + mv /tmp/wal-g-pg-ubuntu*20.04-$arch /tmp/wal-g + +#################### +# Collect extension packages +#################### +FROM scratch as extensions +COPY --from=postgis-source /tmp/*.deb /tmp/ +COPY --from=pgrouting-source /tmp/*.deb /tmp/ +COPY --from=pgtap-source /tmp/*.deb /tmp/ +COPY --from=pg_cron-source /tmp/*.deb /tmp/ +COPY --from=pgaudit-source /tmp/*.deb /tmp/ +COPY --from=pgjwt-source /tmp/*.deb /tmp/ +COPY --from=pgsql-http-source /tmp/*.deb /tmp/ +COPY --from=plpgsql_check-source /tmp/*.deb /tmp/ +COPY --from=pg-safeupdate-source /tmp/*.deb /tmp/ +COPY --from=timescaledb-source /tmp/*.deb /tmp/ +COPY --from=wal2json-source /tmp/*.deb /tmp/ +# COPY --from=pljava /tmp/*.deb /tmp/ +COPY --from=plv8 /tmp/*.deb /tmp/ +COPY --from=pg_plan_filter-source /tmp/*.deb /tmp/ +COPY --from=pg_net-source /tmp/*.deb /tmp/ +COPY --from=rum-source /tmp/*.deb /tmp/ +COPY --from=pgsodium-source /tmp/*.deb /tmp/ +COPY --from=pg_hashids-source /tmp/*.deb /tmp/ +COPY --from=pg_graphql-source /tmp/*.deb /tmp/ +COPY --from=pg_stat_monitor-source /tmp/*.deb /tmp/ +COPY --from=pg_jsonschema-source /tmp/*.deb /tmp/ +COPY --from=vault-source /tmp/*.deb /tmp/ +COPY --from=pgroonga-source /tmp/*.deb /tmp/ +COPY --from=wrappers-source /tmp/*.deb /tmp/ +COPY --from=hypopg-source /tmp/*.deb /tmp/ +COPY --from=pg_repack-source /tmp/*.deb /tmp/ +COPY --from=pgvector-source /tmp/*.deb /tmp/ +COPY --from=pg_tle-source /tmp/*.deb /tmp/ +COPY --from=index_advisor 
/tmp/*.deb /tmp/ +COPY --from=supautils-source /tmp/*.deb /tmp/ + +#################### +# Build final image +#################### +FROM base as production + +# Setup extensions +COPY --from=extensions /tmp /tmp +COPY --from=walg /tmp/wal-g /usr/local/bin/ + +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get update && apt-get install -y --no-install-recommends \ + /tmp/*.deb \ + # Needed for anything using libcurl + # https://github.com/tealbase/postgres/issues/573 + ca-certificates \ + && rm -rf /var/lib/apt/lists/* /tmp/* + +# Initialise configs +COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql.conf.j2 /etc/postgresql/postgresql.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_hba.conf.j2 /etc/postgresql/pg_hba.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_ident.conf.j2 /etc/postgresql/pg_ident.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql-stdout-log.conf /etc/postgresql/logging.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/supautils.conf.j2 /etc/postgresql-custom/supautils.conf +COPY --chown=postgres:postgres ansible/files/postgresql_extension_custom_scripts /etc/postgresql-custom/extension-custom-scripts +COPY --chown=postgres:postgres ansible/files/pgsodium_getkey_urandom.sh.j2 /usr/lib/postgresql/${postgresql_major}/bin/pgsodium_getkey.sh +COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_read_replica.conf.j2 /etc/postgresql-custom/read-replica.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_walg.conf.j2 /etc/postgresql-custom/wal-g.conf +COPY --chown=postgres:postgres ansible/files/walg_helper_scripts/wal_fetch.sh /home/postgres/wal_fetch.sh +COPY ansible/files/walg_helper_scripts/wal_change_ownership.sh /root/wal_change_ownership.sh + +RUN sed -i \ + -e "s|#unix_socket_directories = '/tmp'|unix_socket_directories = '/var/run/postgresql'|g" \ + -e "s|#session_preload_libraries = 
''|session_preload_libraries = 'supautils'|g" \ + -e "s|shared_preload_libraries = '\(.*\)'|shared_preload_libraries = '\1, orioledb'|g" \ + -e "s|#max_wal_size = 1GB|max_wal_size = 8GB|g" \ + -e "s|#include = '/etc/postgresql-custom/supautils.conf'|include = '/etc/postgresql-custom/supautils.conf'|g" \ + -e "s|#include = '/etc/postgresql-custom/wal-g.conf'|include = '/etc/postgresql-custom/wal-g.conf'|g" /etc/postgresql/postgresql.conf && \ + echo "pljava.libjvm_location = '/usr/lib/jvm/java-11-openjdk-${TARGETARCH}/lib/server/libjvm.so'" >> /etc/postgresql/postgresql.conf && \ + echo "pgsodium.getkey_script= '/usr/lib/postgresql/${postgresql_major}/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ + echo "orioledb.main_buffers = 1GB" >> /etc/postgresql/postgresql.conf && \ + echo "orioledb.undo_buffers = 256MB" >> /etc/postgresql/postgresql.conf && \ + useradd --create-home --shell /bin/bash wal-g -G postgres && \ + mkdir -p /etc/postgresql-custom && \ + chown postgres:postgres /etc/postgresql-custom + +# Include schema migrations +COPY migrations/db /docker-entrypoint-initdb.d/ +COPY ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql /docker-entrypoint-initdb.d/init-scripts/00-schema.sql +COPY ansible/files/stat_extension.sql /docker-entrypoint-initdb.d/migrations/00-extension.sql + +# Patch upstream entrypoint script +RUN sed -i \ + -e "s|su-exec|gosu|g" \ + -e "s|PGHOST= PGHOSTADDR=|PGHOST=\$POSTGRES_HOST|g" \ + /usr/local/bin/docker-entrypoint.sh && \ + mv /usr/local/bin/docker-entrypoint.sh /usr/local/bin/orioledb-entrypoint.sh + +COPY docker/orioledb/entrypoint.sh /usr/local/bin/docker-entrypoint.sh + +HEALTHCHECK --interval=2s --timeout=2s --retries=10 CMD pg_isready -U postgres -h localhost +STOPSIGNAL SIGINT +EXPOSE 5432 + +ENV POSTGRES_HOST=/var/run/postgresql +CMD ["postgres", "-D", "/etc/postgresql"] diff --git a/docker/orioledb/entrypoint.sh b/docker/orioledb/entrypoint.sh new file mode 100755 index 0000000..b9a460b --- /dev/null 
+++ b/docker/orioledb/entrypoint.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash +set -eou pipefail + +PG_CONF=/etc/postgresql/postgresql.conf + +if [ "${S3_ENABLED:-}" == "true" ]; then + echo "Enabling OrioleDB S3 Backend..." + + echo " +archive_mode = on +archive_library = 'orioledb' +max_worker_processes = 50 # should fit orioledb.s3_num_workers as long as other workers +orioledb.s3_num_workers = 20 # should be enough for comfortable work +orioledb.s3_mode = true +orioledb.s3_host = '$S3_HOST' # replace with your bucket URL, accelerated buckets are recommended +orioledb.s3_region = '$S3_REGION' # replace with your S3 region +orioledb.s3_accesskey = '$S3_ACCESS_KEY' # replace with your S3 key +orioledb.s3_secretkey = '$S3_SECRET_KEY' # replace with your S3 secret key +" >> "$PG_CONF" +else + echo "Disabling OrioleDB S3 Backend..." + + sed -i \ + -e "/^archive_mode = on/d" \ + -e "/^archive_library = 'orioledb'/d" \ + -e "/^max_worker_processes = 50/d" \ + -e "/^orioledb.s3_num_workers = /d" \ + -e "/^orioledb.s3_mode = /d" \ + -e "/^orioledb.s3_host = /d" \ + -e "/^orioledb.s3_region = /d" \ + -e "/^orioledb.s3_accesskey = /d" \ + -e "/^orioledb.s3_secretkey = /d" \ + "$PG_CONF" +fi + +orioledb-entrypoint.sh "$@" diff --git a/ebssurrogate/files/unit-tests/test-extensions.sql b/ebssurrogate/files/unit-tests/test-extensions.sql deleted file mode 100644 index 60c6507..0000000 --- a/ebssurrogate/files/unit-tests/test-extensions.sql +++ /dev/null @@ -1,20 +0,0 @@ -CREATE OR REPLACE FUNCTION install_available_extensions_and_test() RETURNS boolean AS $$ -DECLARE extension_name TEXT; -allowed_extentions TEXT[] := string_to_array(current_setting('supautils.privileged_extensions'), ','); -BEGIN - FOREACH extension_name IN ARRAY allowed_extentions - LOOP - SELECT trim(extension_name) INTO extension_name; - /* skip below extensions check for now */ - CONTINUE WHEN extension_name = 'pgsodium'; - CONTINUE WHEN extension_name = 'plpgsql' OR extension_name = 'plpgsql_check' OR 
extension_name = 'pgtap'; - CONTINUE WHEN extension_name = 'tealbase_vault' OR extension_name = 'wrappers'; - RAISE notice 'START TEST FOR: %', extension_name; - EXECUTE format('DROP EXTENSION IF EXISTS %s CASCADE', quote_ident(extension_name)); - EXECUTE format('CREATE EXTENSION %s CASCADE', quote_ident(extension_name)); - RAISE notice 'END TEST FOR: %', extension_name; - END LOOP; - RAISE notice 'EXTENSION TESTS COMPLETED..'; - return true; -END; -$$ LANGUAGE plpgsql; diff --git a/ebssurrogate/files/unit-tests/unit-test-01.sql b/ebssurrogate/files/unit-tests/unit-test-01.sql index 3b28abe..3f6ecdd 100644 --- a/ebssurrogate/files/unit-tests/unit-test-01.sql +++ b/ebssurrogate/files/unit-tests/unit-test-01.sql @@ -1,4 +1,5 @@ BEGIN; +CREATE EXTENSION IF NOT EXISTS pgtap; SELECT plan(8); -- Check installed extensions @@ -11,7 +12,8 @@ SELECT extensions_are( 'pg_graphql', 'pgcrypto', 'pgjwt', - 'uuid-ossp' + 'uuid-ossp', + 'tealbase_vault' ] ); diff --git a/ebssurrogate/files/unit-tests/verify-extensions.sql b/ebssurrogate/files/unit-tests/verify-extensions.sql deleted file mode 100644 index 81b2c7f..0000000 --- a/ebssurrogate/files/unit-tests/verify-extensions.sql +++ /dev/null @@ -1,12 +0,0 @@ -BEGIN; -SELECT plan(3); -SELECT has_function( - 'install_available_extensions_and_test' -); -SELECT function_returns( - 'install_available_extensions_and_test', - 'boolean' -); -SELECT ok(install_available_extensions_and_test(),'extension test'); -SELECT * FROM finish(); -ROLLBACK; diff --git a/ebssurrogate/scripts/chroot-bootstrap-nix.sh b/ebssurrogate/scripts/chroot-bootstrap-nix.sh new file mode 100755 index 0000000..e349556 --- /dev/null +++ b/ebssurrogate/scripts/chroot-bootstrap-nix.sh @@ -0,0 +1,219 @@ +#!/usr/bin/env bash +# +# This script runs inside chrooted environment. It installs grub and its +# Configuration file. 
+# + +set -o errexit +set -o pipefail +set -o xtrace + +export DEBIAN_FRONTEND=noninteractive + +export APT_OPTIONS="-oAPT::Install-Recommends=false \ + -oAPT::Install-Suggests=false \ + -oAcquire::Languages=none" + +if [ $(dpkg --print-architecture) = "amd64" ]; +then + ARCH="amd64"; +else + ARCH="arm64"; +fi + + + +function update_install_packages { + source /etc/os-release + + # Update APT with new sources + cat /etc/apt/sources.list + apt-get $APT_OPTIONS update && apt-get $APT_OPTIONS --yes dist-upgrade + + # Do not configure grub during package install + if [ "${ARCH}" = "amd64" ]; then + echo 'grub-pc grub-pc/install_devices_empty select true' | debconf-set-selections + echo 'grub-pc grub-pc/install_devices select' | debconf-set-selections + # Install various packages needed for a booting system + apt-get install -y \ + linux-aws \ + grub-pc \ + e2fsprogs + else + apt-get install -y e2fsprogs + fi + # Install standard packages + apt-get install -y \ + sudo \ + wget \ + cloud-init \ + acpid \ + ec2-hibinit-agent \ + ec2-instance-connect \ + hibagent \ + ncurses-term \ + ssh-import-id + + # apt upgrade + apt-get upgrade -y + + # Install OpenSSH and other packages + sudo add-apt-repository universe + apt-get update + apt-get install -y --no-install-recommends \ + openssh-server \ + git \ + ufw \ + cron \ + logrotate \ + fail2ban \ + locales \ + at \ + less \ + python3-systemd + + if [ "${ARCH}" = "arm64" ]; then + apt-get $APT_OPTIONS --yes install linux-aws initramfs-tools dosfstools + fi +} + +function setup_locale { +cat << EOF >> /etc/locale.gen +en_US.UTF-8 UTF-8 +EOF + +cat << EOF > /etc/default/locale +LANG="C.UTF-8" +LC_CTYPE="C.UTF-8" +EOF + locale-gen en_US.UTF-8 +} + +function setup_postgesql_env { + # Create the directory if it doesn't exist + sudo mkdir -p /etc/environment.d + + # Define the contents of the PostgreSQL environment file + cat <<EOF | sudo tee /etc/environment.d/postgresql.conf >/dev/null +LOCALE_ARCHIVE=/usr/lib/locale/locale-archive +LANG="en_US.UTF-8" +LANGUAGE="en_US.UTF-8"
+LC_ALL="en_US.UTF-8" +LC_CTYPE="en_US.UTF-8" +EOF +} + +function install_packages_for_build { + apt-get install -y --no-install-recommends linux-libc-dev \ + acl \ + magic-wormhole sysstat \ + build-essential libreadline-dev zlib1g-dev flex bison libxml2-dev libxslt-dev libssl-dev libsystemd-dev libpq-dev libxml2-utils uuid-dev xsltproc ssl-cert \ + gcc-10 g++-10 \ + libgeos-dev libproj-dev libgdal-dev libjson-c-dev libboost-all-dev libcgal-dev libmpfr-dev libgmp-dev cmake \ + libkrb5-dev \ + maven default-jre default-jdk \ + curl gpp apt-transport-https cmake libc++-dev libc++abi-dev libc++1 libglib2.0-dev libtinfo5 libc++abi1 ninja-build python \ + liblzo2-dev + + source /etc/os-release + + apt-get install -y --no-install-recommends llvm-11-dev clang-11 + # Mark llvm as manual to prevent auto removal + apt-mark manual libllvm11:arm64 +} + +function setup_apparmor { + apt-get install -y apparmor apparmor-utils auditd + + # Copy apparmor profiles + cp -rv /tmp/apparmor_profiles/* /etc/apparmor.d/ +} + +function setup_grub_conf_arm64 { +cat << EOF > /etc/default/grub +GRUB_DEFAULT=0 +GRUB_TIMEOUT=0 +GRUB_TIMEOUT_STYLE="hidden" +GRUB_DISTRIBUTOR="tealbase postgresql" +GRUB_CMDLINE_LINUX_DEFAULT="nomodeset console=tty1 console=ttyS0 ipv6.disable=0" +EOF +} + +# Install GRUB +function install_configure_grub { + if [ "${ARCH}" = "arm64" ]; then + apt-get $APT_OPTIONS --yes install cloud-guest-utils fdisk grub-efi-arm64 efibootmgr + setup_grub_conf_arm64 + rm -rf /etc/grub.d/30_os-prober + sleep 1 + fi + grub-install /dev/xvdf && update-grub +} + +# skip fsck for first boot +function disable_fsck { + touch /fastboot +} + +# Don't request hostname during boot but set hostname +function setup_hostname { + sed -i 's/gethostname()/ubuntu /g' /etc/dhcp/dhclient.conf + sed -i 's/host-name,//g' /etc/dhcp/dhclient.conf + echo "ubuntu" > /etc/hostname + chmod 644 /etc/hostname +} + +# Set options for the default interface +function setup_eth0_interface { +cat << EOF > 
/etc/netplan/eth0.yaml +network: + version: 2 + ethernets: + eth0: + dhcp4: true +EOF +} + +function disable_sshd_passwd_auth { + sed -i -E -e 's/^#?\s*PasswordAuthentication\s+(yes|no)\s*$/PasswordAuthentication no/g' \ + -e 's/^#?\s*ChallengeResponseAuthentication\s+(yes|no)\s*$/ChallengeResponseAuthentication no/g' \ + /etc/ssh/sshd_config +} + +function create_admin_account { + groupadd admin +} + +#Set default target as multi-user +function set_default_target { + rm -f /etc/systemd/system/default.target + ln -s /lib/systemd/system/multi-user.target /etc/systemd/system/default.target +} + +# Setup ccache +function setup_ccache { + apt-get install ccache -y + mkdir -p /tmp/ccache + export PATH=/usr/lib/ccache:$PATH + echo "PATH=$PATH" >> /etc/environment +} + +# Clear apt caches +function cleanup_cache { + apt-get clean +} + +update_install_packages +setup_locale +setup_postgesql_env +#install_packages_for_build +install_configure_grub +setup_apparmor +setup_hostname +create_admin_account +set_default_target +setup_eth0_interface +disable_sshd_passwd_auth +disable_fsck +#setup_ccache +cleanup_cache diff --git a/ebssurrogate/scripts/chroot-bootstrap.sh b/ebssurrogate/scripts/chroot-bootstrap.sh index c679714..8404bbc 100755 --- a/ebssurrogate/scripts/chroot-bootstrap.sh +++ b/ebssurrogate/scripts/chroot-bootstrap.sh @@ -24,6 +24,8 @@ fi function update_install_packages { + source /etc/os-release + # Update APT with new sources cat /etc/apt/sources.list apt-get $APT_OPTIONS update && apt-get $APT_OPTIONS --yes dist-upgrade @@ -43,6 +45,7 @@ function update_install_packages { # Install standard packages apt-get install -y \ sudo \ + wget \ cloud-init \ acpid \ ec2-hibinit-agent \ @@ -75,6 +78,10 @@ function update_install_packages { } function setup_locale { +cat << EOF >> /etc/locale.gen +en_US.UTF-8 UTF-8 +EOF + cat << EOF > /etc/default/locale LANG="C.UTF-8" LC_CTYPE="C.UTF-8" @@ -82,17 +89,11 @@ EOF localedef -i en_US -f UTF-8 en_US.UTF-8 } -# Disable IPV6 for 
ufw -function disable_ufw_ipv6 { - sed -i 's/IPV6=yes/IPV6=no/g' /etc/default/ufw -} - function install_packages_for_build { apt-get install -y --no-install-recommends linux-libc-dev \ acl \ magic-wormhole sysstat \ build-essential libreadline-dev zlib1g-dev flex bison libxml2-dev libxslt-dev libssl-dev libsystemd-dev libpq-dev libxml2-utils uuid-dev xsltproc ssl-cert \ - llvm-11-dev clang-11 \ gcc-10 g++-10 \ libgeos-dev libproj-dev libgdal-dev libjson-c-dev libboost-all-dev libcgal-dev libmpfr-dev libgmp-dev cmake \ libkrb5-dev \ @@ -100,6 +101,9 @@ function install_packages_for_build { curl gpp apt-transport-https cmake libc++-dev libc++abi-dev libc++1 libglib2.0-dev libtinfo5 libc++abi1 ninja-build python \ liblzo2-dev + source /etc/os-release + + apt-get install -y --no-install-recommends llvm-11-dev clang-11 # Mark llvm as manual to prevent auto removal apt-mark manual libllvm11:arm64 } @@ -117,26 +121,14 @@ GRUB_DEFAULT=0 GRUB_TIMEOUT=0 GRUB_TIMEOUT_STYLE="hidden" GRUB_DISTRIBUTOR="tealbase postgresql" -GRUB_CMDLINE_LINUX_DEFAULT="nomodeset console=tty1 console=ttyS0 ipv6.disable=1" -EOF -} - -function setup_grub_conf_amd64 { - mkdir -p /etc/default/grub.d - -cat << EOF > /etc/default/grub.d/50-aws-settings.cfg -GRUB_RECORDFAIL_TIMEOUT=0 -GRUB_TIMEOUT=0 -GRUB_CMDLINE_LINUX_DEFAULT=" root=/dev/nvme0n1p2 rootfstype=ext4 rw noatime,nodiratime,discard console=tty1 console=ttyS0 ip=dhcp tsc=reliable net.ifnames=0 quiet module_blacklist=psmouse,input_leds,autofs4 ipv6.disable=1 nvme_core.io_timeout=4294967295 systemd.hostname=ubuntu ipv6.disable=1" -GRUB_TERMINAL=console -GRUB_DISABLE_LINUX_UUID=true +GRUB_CMDLINE_LINUX_DEFAULT="nomodeset console=tty1 console=ttyS0 ipv6.disable=0" EOF } # Install GRUB function install_configure_grub { if [ "${ARCH}" = "arm64" ]; then - apt-get $APT_OPTIONS --yes install cloud-guest-utils fdisk grub-efi-arm64 + apt-get $APT_OPTIONS --yes install cloud-guest-utils fdisk grub-efi-arm64 efibootmgr setup_grub_conf_arm64 rm -rf 
/etc/grub.d/30_os-prober sleep 1 @@ -199,15 +191,14 @@ function cleanup_cache { update_install_packages setup_locale -install_packages_for_build +#install_packages_for_build install_configure_grub setup_apparmor setup_hostname create_admin_account set_default_target setup_eth0_interface -disable_ufw_ipv6 disable_sshd_passwd_auth disable_fsck -setup_ccache +#setup_ccache cleanup_cache diff --git a/ebssurrogate/scripts/surrogate-bootstrap-nix.sh b/ebssurrogate/scripts/surrogate-bootstrap-nix.sh new file mode 100755 index 0000000..4948277 --- /dev/null +++ b/ebssurrogate/scripts/surrogate-bootstrap-nix.sh @@ -0,0 +1,328 @@ +#!/usr/bin/env bash +# +# This script creates filesystem and setups up chrooted +# enviroment for further processing. It also runs +# ansible playbook and finally does system cleanup. +# +# Adapted from: https://github.com/jen20/packer-ubuntu-zfs + +set -o errexit +set -o pipefail +set -o xtrace + +if [ $(dpkg --print-architecture) = "amd64" ]; +then + ARCH="amd64"; +else + ARCH="arm64"; +fi + +function waitfor_boot_finished { + export DEBIAN_FRONTEND=noninteractive + + echo "args: ${ARGS}" + # Wait for cloudinit on the surrogate to complete before making progress + while [[ ! -f /var/lib/cloud/instance/boot-finished ]]; do + echo 'Waiting for cloud-init...' 
+ sleep 1 + done +} + +function install_packages { + # Setup Ansible on host VM + apt-get update && sudo apt-get install software-properties-common -y + add-apt-repository --yes --update ppa:ansible/ansible && sudo apt-get install ansible -y + ansible-galaxy collection install community.general + + # Update apt and install required packages + apt-get update + apt-get install -y \ + gdisk \ + e2fsprogs \ + debootstrap \ + nvme-cli +} + +# Partition the new root EBS volume +function create_partition_table { + + if [ "${ARCH}" = "arm64" ]; then + parted --script /dev/xvdf \ + mklabel gpt \ + mkpart UEFI 1MiB 100MiB \ + mkpart ROOT 100MiB 100% \ + set 1 esp on \ + set 1 boot on + parted --script /dev/xvdf print + else + sgdisk -Zg -n1:0:4095 -t1:EF02 -c1:GRUB -n2:0:0 -t2:8300 -c2:EXT4 /dev/xvdf + fi + + sleep 2 +} + +function device_partition_mappings { + # NVMe EBS launch device mappings (symlinks): /dev/nvme*n* to /dev/xvd* + declare -A blkdev_mappings + for blkdev in $(nvme list | awk '/^\/dev/ { print $1 }'); do # /dev/nvme*n* + # Mapping info from disk headers + header=$(nvme id-ctrl --raw-binary "${blkdev}" | cut -c3073-3104 | tr -s ' ' | sed 's/ $//g' | sed 's!/dev/!!') + mapping="/dev/${header%%[0-9]}" # normalize sda1 => sda + + # Create /dev/xvd* device symlink + if [[ ! -z "$mapping" ]] && [[ -b "${blkdev}" ]] && [[ ! -L "${mapping}" ]]; then + ln -s "$blkdev" "$mapping" + + blkdev_mappings["$blkdev"]="$mapping" + fi + done + + create_partition_table + + # NVMe EBS launch device partition mappings (symlinks): /dev/nvme*n*p* to /dev/xvd*[0-9]+ + declare -A partdev_mappings + for blkdev in "${!blkdev_mappings[@]}"; do # /dev/nvme*n* + mapping="${blkdev_mappings[$blkdev]}" + + # Create /dev/xvd*[0-9]+ partition device symlink + for partdev in "${blkdev}"p*; do + partnum=${partdev##*p} + if [[ ! 
-L "${mapping}${partnum}" ]]; then + ln -s "${blkdev}p${partnum}" "${mapping}${partnum}" + + partdev_mappings["${blkdev}p${partnum}"]="${mapping}${partnum}" + fi + done + done +} + + +#Download and install latest e2fsprogs for fast_commit feature,if required. +function format_and_mount_rootfs { + mkfs.ext4 -m0.1 /dev/xvdf2 + + mount -o noatime,nodiratime /dev/xvdf2 /mnt + if [ "${ARCH}" = "arm64" ]; then + mkfs.fat -F32 /dev/xvdf1 + mkdir -p /mnt/boot/efi + sleep 2 + mount /dev/xvdf1 /mnt/boot/efi + fi + + mkfs.ext4 /dev/xvdh + + # Explicitly reserving 100MiB worth of blocks for the data volume + RESERVED_DATA_VOLUME_BLOCK_COUNT=$((100 * 1024 * 1024 / 4096)) + tune2fs -r $RESERVED_DATA_VOLUME_BLOCK_COUNT /dev/xvdh + + mkdir -p /mnt/data + mount -o defaults,discard /dev/xvdh /mnt/data +} + +function create_swapfile { + fallocate -l 1G /mnt/swapfile + chmod 600 /mnt/swapfile + mkswap /mnt/swapfile +} + +function format_build_partition { + mkfs.ext4 -O ^has_journal /dev/xvdc +} +function pull_docker { + apt-get install -y docker.io + docker run -itd --name ccachedata "${DOCKER_IMAGE}:${DOCKER_IMAGE_TAG}" sh + docker exec -itd ccachedata mkdir -p /build/ccache +} + +# Create fstab +function create_fstab { + FMT="%-42s %-11s %-5s %-17s %-5s %s" +cat > "/mnt/etc/fstab" << EOF +$(printf "${FMT}" "# DEVICE UUID" "MOUNTPOINT" "TYPE" "OPTIONS" "DUMP" "FSCK") +$(findmnt -no SOURCE /mnt | xargs blkid -o export | awk -v FMT="${FMT}" '/^UUID=/ { printf(FMT, $0, "/", "ext4", "defaults,discard", "0", "1" ) }') +$(findmnt -no SOURCE /mnt/boot/efi | xargs blkid -o export | awk -v FMT="${FMT}" '/^UUID=/ { printf(FMT, $0, "/boot/efi", "vfat", "umask=0077", "0", "1" ) }') +$(findmnt -no SOURCE /mnt/data | xargs blkid -o export | awk -v FMT="${FMT}" '/^UUID=/ { printf(FMT, $0, "/data", "ext4", "defaults,discard", "0", "2" ) }') +$(printf "$FMT" "/swapfile" "none" "swap" "sw" "0" "0") +EOF + unset FMT +} + +function setup_chroot_environment { + UBUNTU_VERSION=$(lsb_release -cs) # 'focal' 
for Ubuntu 20.04 + + # Bootstrap Ubuntu into /mnt + debootstrap --arch ${ARCH} --variant=minbase "$UBUNTU_VERSION" /mnt + + # Update ec2-region + REGION=$(curl --silent --fail http://169.254.169.254/latest/meta-data/placement/availability-zone | sed -E 's|[a-z]+$||g') + sed -i "s/REGION/${REGION}/g" /tmp/sources.list + cp /tmp/sources.list /mnt/etc/apt/sources.list + + if [ "${ARCH}" = "arm64" ]; then + create_fstab + fi + + # Create mount points and mount the filesystem + mkdir -p /mnt/{dev,proc,sys} + mount --rbind /dev /mnt/dev + mount --rbind /proc /mnt/proc + mount --rbind /sys /mnt/sys + + # Create build mount point and mount + mkdir -p /mnt/tmp + mount /dev/xvdc /mnt/tmp + chmod 777 /mnt/tmp + + # Copy apparmor profiles + chmod 644 /tmp/apparmor_profiles/* + cp -r /tmp/apparmor_profiles /mnt/tmp/ + + # Copy migrations + cp -r /tmp/migrations /mnt/tmp/ + + # Copy unit tests + cp -r /tmp/unit-tests /mnt/tmp/ + + # Copy the bootstrap script into place and execute inside chroot + cp /tmp/chroot-bootstrap-nix.sh /mnt/tmp/chroot-bootstrap-nix.sh + chroot /mnt /tmp/chroot-bootstrap-nix.sh + rm -f /mnt/tmp/chroot-bootstrap-nix.sh + echo "${POSTGRES_tealbase_VERSION}" > /mnt/root/tealbase-release + + # Copy the nvme identification script into /sbin inside the chroot + mkdir -p /mnt/sbin + cp /tmp/ebsnvme-id /mnt/sbin/ebsnvme-id + chmod +x /mnt/sbin/ebsnvme-id + + # Copy the udev rules for identifying nvme devices into the chroot + mkdir -p /mnt/etc/udev/rules.d + cp /tmp/70-ec2-nvme-devices.rules \ + /mnt/etc/udev/rules.d/70-ec2-nvme-devices.rules + + #Copy custom cloud-init + rm -f /mnt/etc/cloud/cloud.cfg + cp /tmp/cloud.cfg /mnt/etc/cloud/cloud.cfg + + sleep 2 +} + +function download_ccache { + docker cp ccachedata:/build/ccache/. 
/mnt/tmp/ccache +} + +function execute_playbook { + +tee /etc/ansible/ansible.cfg < /mnt/root/tealbase-release # Copy the nvme identification script into /sbin inside the chroot mkdir -p /mnt/sbin @@ -211,8 +213,8 @@ callbacks_enabled = timer, profile_tasks, profile_roles EOF # Run Ansible playbook #export ANSIBLE_LOG_PATH=/tmp/ansible.log && export ANSIBLE_DEBUG=True && export ANSIBLE_REMOTE_TEMP=/mnt/tmp - export ANSIBLE_LOG_PATH=/tmp/ansible.log && export ANSIBLE_REMOTE_TEMP=/mnt/tmp - ansible-playbook -c chroot -i '/mnt,' /tmp/ansible-playbook/ansible/playbook.yml --extra-vars " $ARGS" + export ANSIBLE_LOG_PATH=/tmp/ansible.log && export ANSIBLE_REMOTE_TEMP=/mnt/tmp + ansible-playbook -c chroot -i '/mnt,' /tmp/ansible-playbook/ansible/playbook.yml --extra-vars '{"debpkg_mode": true, "nixpkg_mode": false, "stage2_nix": false}' $ARGS } function update_systemd_services { @@ -244,20 +246,30 @@ function clean_system { touch /mnt/var/log/auth.log touch /mnt/var/log/pgbouncer.log - chroot /mnt /usr/bin/chown pgbouncer:postgres /var/log/pgbouncer.log + if [ -f /usr/bin/chown ]; then + chroot /mnt /usr/bin/chown pgbouncer:postgres /var/log/pgbouncer.log + fi # Setup postgresql logs mkdir -p /mnt/var/log/postgresql - chroot /mnt /usr/bin/chown postgres:postgres /var/log/postgresql + if [ -f /usr/bin/chown ]; then + chroot /mnt /usr/bin/chown postgres:postgres /var/log/postgresql + fi # Setup wal-g logs mkdir /mnt/var/log/wal-g - touch /mnt/var/log/wal-g/{backup-push.log,backup-fetch.log,wal-push.log,wal-fetch.log} - chroot /mnt /usr/bin/chown -R postgres:postgres /var/log/wal-g - chroot /mnt /usr/bin/chmod -R 0300 /var/log/wal-g + touch /mnt/var/log/wal-g/{backup-push.log,backup-fetch.log,wal-push.log,wal-fetch.log,pitr.log} + + #Creatre Sysstat directory for SAR + mkdir /mnt/var/log/sysstat + + if [ -f /usr/bin/chown ]; then + chroot /mnt /usr/bin/chown -R postgres:postgres /var/log/wal-g + chroot /mnt /usr/bin/chmod -R 0300 /var/log/wal-g + fi - # audit logs directory 
for apparmor - mkdir /mnt/var/log/audit + # audit logs directory for apparmor + mkdir /mnt/var/log/audit # unwanted files rm -rf /mnt/var/lib/apt/lists/* diff --git a/flake.lock b/flake.lock new file mode 100644 index 0000000..3ee7c2d --- /dev/null +++ b/flake.lock @@ -0,0 +1,180 @@ +{ + "nodes": { + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1705309234, + "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_2": { + "inputs": { + "systems": "systems_2" + }, + "locked": { + "lastModified": 1694529238, + "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "ff7b65b44d01cf9ba6a71320833626af21126384", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "nix-editor": { + "inputs": { + "nixpkgs": "nixpkgs", + "utils": "utils" + }, + "locked": { + "lastModified": 1703105021, + "narHash": "sha256-Ne9NG7x45a8aJyAN+yYWbr/6mQHBVVkwZZ72EZHHRqw=", + "owner": "snowfallorg", + "repo": "nix-editor", + "rev": "b5017f8d61753ce6a3a1a2aa7e474d59146a8ae3", + "type": "github" + }, + "original": { + "owner": "snowfallorg", + "repo": "nix-editor", + "type": "github" + } + }, + "nix2container": { + "inputs": { + "flake-utils": "flake-utils_2", + "nixpkgs": "nixpkgs_2" + }, + "locked": { + "lastModified": 1708764364, + "narHash": "sha256-+pOtDvmuVTg0Gi58hKDUyrNla5NbyUvt3Xs3gLR0Fws=", + "owner": "nlewo", + "repo": "nix2container", + "rev": "c891f90d2e3c48a6b33466c96e4851e0fc0cf455", + "type": "github" + }, + "original": { + "owner": "nlewo", + "repo": "nix2container", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1675673983, + "narHash": 
"sha256-8hzNh1jtiPxL5r3ICNzSmpSzV7kGb3KwX+FS5BWJUTo=", + "owner": "nixos", + "repo": "nixpkgs", + "rev": "5a350a8f31bb7ef0c6e79aea3795a890cf7743d4", + "type": "github" + }, + "original": { + "owner": "nixos", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1697269602, + "narHash": "sha256-dSzV7Ud+JH4DPVD9od53EgDrxUVQOcSj4KGjggCDVJI=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "9cb540e9c1910d74a7e10736277f6eb9dff51c81", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_3": { + "locked": { + "lastModified": 1712666087, + "narHash": "sha256-WwjUkWsjlU8iUImbivlYxNyMB1L5YVqE8QotQdL9jWc=", + "owner": "nixos", + "repo": "nixpkgs", + "rev": "a76c4553d7e741e17f289224eda135423de0491d", + "type": "github" + }, + "original": { + "owner": "nixos", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "flake-utils": "flake-utils", + "nix-editor": "nix-editor", + "nix2container": "nix2container", + "nixpkgs": "nixpkgs_3" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + "systems_2": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + "utils": { + "locked": { + "lastModified": 1667395993, + "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", + "owner": "numtide", + "repo": "flake-utils", + 
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 0000000..b93c258 --- /dev/null +++ b/flake.nix @@ -0,0 +1,601 @@ +{ + description = "Prototype tooling for deploying PostgreSQL"; + + inputs = { + nixpkgs.url = "github:nixos/nixpkgs/nixpkgs-unstable"; + flake-utils.url = "github:numtide/flake-utils"; + nix2container.url = "github:nlewo/nix2container"; + nix-editor.url = "github:snowfallorg/nix-editor"; + }; + + outputs = { self, nixpkgs, flake-utils, nix2container, nix-editor, ...}: + let + gitRev = "vcs=${self.shortRev or "dirty"}+${builtins.substring 0 8 (self.lastModifiedDate or self.lastModified or "19700101")}"; + + ourSystems = with flake-utils.lib; [ + system.x86_64-linux + system.aarch64-linux + system.aarch64-darwin + ]; + in + flake-utils.lib.eachSystem ourSystems (system: + let + pgsqlDefaultPort = "5435"; + pgsqlSuperuser = "tealbase_admin"; + nix2img = nix2container.packages.${system}.nix2container; + + # The 'oriole_pkgs' variable holds all the upstream packages in nixpkgs, which + # we can use to build our own images; it is the common name to refer to + # a copy of nixpkgs which contains all its packages. + # it also serves as a base for importing the orioldb/postgres overlay to + #build the orioledb postgres patched version of postgresql16 + oriole_pkgs = import nixpkgs { + config = { allowUnfree = true; }; + inherit system; + overlays = [ + # NOTE (aseipp): add any needed overlays here. in theory we could + # pull them from the overlays/ directory automatically, but we don't + # want to have an arbitrary order, since it might matter. being + # explicit is better. 
+ (import ./nix/overlays/cargo-pgrx.nix) + (import ./nix/overlays/gdal-small.nix) + (import ./nix/overlays/psql_16-oriole.nix) + + ]; + }; + #This variable works the same as 'oriole_pkgs' but builds using the upstream + #nixpkgs builds of postgresql 15 and 16 + the overlays listed below + pkgs = import nixpkgs { + config = { + allowUnfree = true; + permittedInsecurePackages = [ + "v8-9.7.106.18" + ]; + }; + inherit system; + overlays = [ + # NOTE (aseipp): add any needed overlays here. in theory we could + # pull them from the overlays/ directory automatically, but we don't + # want to have an arbitrary order, since it might matter. being + # explicit is better. + (final: prev: { + postgresql = final.callPackage ./nix/postgresql/default.nix { + inherit (final) lib; + inherit (final) stdenv; + inherit (final) fetchurl; + inherit (final) makeWrapper; + inherit (final) callPackage; + }; + }) + (import ./nix/overlays/cargo-pgrx-0-11-3.nix) + # (import ./nix/overlays/postgis.nix) + #(import ./nix/overlays/gdal-small.nix) + + ]; + }; + postgresql_15 = pkgs.postgresql.postgresql_15; + postgresql = pkgs.postgresql.postgresql_15; + sfcgal = pkgs.callPackage ./nix/ext/sfcgal/sfcgal.nix { }; + pg_regress = pkgs.callPackage ./nix/ext/pg_regress.nix { inherit postgresql; }; + tealbase-groonga = pkgs.callPackage ./nix/tealbase-groonga.nix { }; + mecab-naist-jdic = pkgs.callPackage ./nix/ext/mecab-naist-jdic/default.nix { }; + # Our list of PostgreSQL extensions which come from upstream Nixpkgs. + # These are maintained upstream and can easily be used here just by + # listing their name. Anytime the version of nixpkgs is upgraded, these + # may also bring in new versions of the extensions. 
+ psqlExtensions = [ + /* pljava */ + /*"postgis"*/ + ]; + + #FIXME for now, timescaledb is not included in the orioledb version of tealbase extensions, as there is an issue + # with building timescaledb with the orioledb patched version of postgresql + orioledbPsqlExtensions = [ + /* pljava */ + /*"timescaledb"*/ + ]; + + # Custom extensions that exist in our repository. These aren't upstream + # either because nobody has done the work, maintaining them here is + # easier and more expedient, or because they may not be suitable, or are + # too niche/one-off. + # + # Ideally, most of these should have copies upstream for third party + # use, but even if they did, keeping our own copies means that we can + # rollout new versions of these critical things easier without having to + # go through the upstream release engineering process. + ourExtensions = [ + ./nix/ext/rum.nix + ./nix/ext/timescaledb.nix + ./nix/ext/timescaledb-2.9.1.nix + ./nix/ext/pgroonga.nix + ./nix/ext/index_advisor.nix + ./nix/ext/wal2json.nix + ./nix/ext/pgmq.nix + ./nix/ext/pg_repack.nix + ./nix/ext/pg-safeupdate.nix + ./nix/ext/plpgsql-check.nix + ./nix/ext/pgjwt.nix + ./nix/ext/pgaudit.nix + ./nix/ext/postgis.nix + ./nix/ext/pgrouting.nix + ./nix/ext/pgtap.nix + ./nix/ext/pg_backtrace.nix + ./nix/ext/pg_cron.nix + ./nix/ext/pgsql-http.nix + ./nix/ext/pg_plan_filter.nix + ./nix/ext/pg_net.nix + ./nix/ext/pg_hashids.nix + ./nix/ext/pgsodium.nix + ./nix/ext/pg_graphql.nix + ./nix/ext/pg_stat_monitor.nix + ./nix/ext/pg_jsonschema.nix + ./nix/ext/pgvector.nix + ./nix/ext/vault.nix + ./nix/ext/hypopg.nix + ./nix/ext/pg_tle.nix + ./nix/ext/wrappers/default.nix + ./nix/ext/supautils.nix + ./nix/ext/plv8.nix + ]; + + #Where we import and build the orioledb extension, we add on our custom extensions + # plus the orioledb option + orioledbExtension = ourExtensions ++ [ ./nix/ext/orioledb.nix ]; + + #this var is a convenience setting to import the orioledb patched version of postgresql + 
postgresql_orioledb_16 = oriole_pkgs.postgresql_orioledb_16; + #postgis_override = pkgs.postgis_override; + getPostgresqlPackage = version: + pkgs.postgresql."postgresql_${version}"; + #we will add supported versions to this list in the future + supportedVersions = [ "15" ]; + # Create a 'receipt' file for a given postgresql package. This is a way + # of adding a bit of metadata to the package, which can be used by other + # tools to inspect what the contents of the install are: the PSQL + # version, the installed extensions, et cetera. + # + # This takes three arguments: + # - pgbin: the postgresql package we are building on top of + # - upstreamExts: the list of extensions from upstream nixpkgs. This is + # not a list of packages, but an attrset containing extension names + # mapped to versions. + # - ourExts: the list of extensions from upstream nixpkgs. This is not + # a list of packages, but an attrset containing extension names + # mapped to versions. + # + # The output is a package containing the receipt.json file, which can be + # merged with the PostgreSQL installation using 'symlinkJoin'. + makeReceipt = pgbin: upstreamExts: ourExts: pkgs.writeTextFile { + name = "receipt"; + destination = "/receipt.json"; + text = builtins.toJSON { + revision = gitRev; + psql-version = pgbin.version; + nixpkgs = { + revision = nixpkgs.rev; + extensions = upstreamExts; + }; + extensions = ourExts; + + # NOTE (aseipp): this field can be used to do cache busting (e.g. 
+ # force a rebuild of the psql packages) but also to helpfully inform + # tools what version of the schema is being used, for forwards and + # backwards compatibility + receipt-version = "1"; + }; + }; + + makeOurOrioleDbPostgresPkgs = version: patchedPostgres: + let postgresql = patchedPostgres; + in map (path: pkgs.callPackage path { inherit postgresql; }) orioledbExtension; + + makeOurPostgresPkgs = version: + let postgresql = getPostgresqlPackage version; + in map (path: pkgs.callPackage path { inherit postgresql; }) ourExtensions; + + # Create an attrset that contains all the extensions included in a server for the orioledb version of postgresql + extension. + makeOurOrioleDbPostgresPkgsSet = version: patchedPostgres: + (builtins.listToAttrs (map + (drv: + { name = drv.pname; value = drv; } + ) + (makeOurOrioleDbPostgresPkgs version patchedPostgres))) + // { recurseForDerivations = true; }; + + # Create an attrset that contains all the extensions included in a server. + makeOurPostgresPkgsSet = version: + (builtins.listToAttrs (map + (drv: + { name = drv.pname; value = drv; } + ) + (makeOurPostgresPkgs version))) + // { recurseForDerivations = true; }; + + + # Create a binary distribution of PostgreSQL, given a version. + # + # NOTE: The version here does NOT refer to the exact PostgreSQL version; + # it refers to the *major number only*, which is used to select the + # correct version of the package from nixpkgs. This is because we want + # to be able to do so in an open ended way. As an example, the version + # "15" passed in will use the nixpkgs package "postgresql_15" as the + # basis for building extensions, etc. 
+ makePostgresBin = version: + let + postgresql = getPostgresqlPackage version; + upstreamExts = map + (ext: { + name = postgresql.pkgs."${ext}".pname; + version = postgresql.pkgs."${ext}".version; + }) + psqlExtensions; + ourExts = map (ext: { name = ext.pname; version = ext.version; }) (makeOurPostgresPkgs version); + + pgbin = postgresql.withPackages (ps: + (map (ext: ps."${ext}") psqlExtensions) ++ (makeOurPostgresPkgs version) + ); + in + pkgs.symlinkJoin { + inherit (pgbin) name version; + paths = [ pgbin (makeReceipt pgbin upstreamExts ourExts) ]; + }; + + makeOrioleDbPostgresBin = version: patchedPostgres: + let + postgresql = patchedPostgres; + upstreamExts = map + (ext: { + name = postgresql.pkgs."${ext}".pname; + version = postgresql.pkgs."${ext}".version; + }) + orioledbPsqlExtensions; + ourExts = map (ext: { name = ext.pname; version = ext.version; }) (makeOurOrioleDbPostgresPkgs version postgresql); + + pgbin = postgresql.withPackages (ps: + (map (ext: ps."${ext}") orioledbPsqlExtensions) ++ (makeOurOrioleDbPostgresPkgs version postgresql) + ); + in + pkgs.symlinkJoin { + inherit (pgbin) name version; + paths = [ pgbin (makeReceipt pgbin upstreamExts ourExts) ]; + }; + + + # Create an attribute set, containing all the relevant packages for a + # PostgreSQL install, wrapped up with a bow on top. There are three + # packages: + # + # - bin: the postgresql package itself, with all the extensions + # installed, and a receipt.json file containing metadata about the + # install. + # - exts: an attrset containing all the extensions, mapped to their + # package names. 
+ makePostgres = version: rec { + bin = makePostgresBin version; + exts = makeOurPostgresPkgsSet version; + recurseForDerivations = true; + }; + makeOrioleDbPostgres = version: patchedPostgres: rec { + bin = makeOrioleDbPostgresBin version patchedPostgres; + exts = makeOurOrioleDbPostgresPkgsSet version patchedPostgres; + recurseForDerivations = true; + }; + + # The base set of packages that we export from this Nix Flake, that can + # be used with 'nix build'. Don't use the names listed below; check the + # name in 'nix flake show' in order to make sure exactly what name you + # want. + basePackages = { + tealbase-groonga = tealbase-groonga; + # PostgreSQL versions. + psql_15 = makePostgres "15"; + #psql_16 = makePostgres "16"; + #psql_orioledb_16 = makeOrioleDbPostgres "16_23" postgresql_orioledb_16; + sfcgal = sfcgal; + pg_regress = pg_regress; + pg_prove = pkgs.perlPackages.TAPParserSourceHandlerpgTAP; + postgresql_15 = pkgs.postgresql_15; + postgresql_15_debug = if pkgs.stdenv.isLinux then postgresql_15.debug else null; + postgresql_15_src = pkgs.stdenv.mkDerivation { + pname = "postgresql-15-src"; + version = pkgs.postgresql_15.version; + + src = pkgs.postgresql_15.src; + + nativeBuildInputs = [ pkgs.bzip2 ]; + + phases = [ "unpackPhase" "installPhase" ]; + + installPhase = '' + mkdir -p $out + cp -r . $out + ''; + + meta = with pkgs.lib; { + description = "PostgreSQL 15 source files"; + homepage = "https://www.postgresql.org/"; + license = licenses.postgresql; + platforms = platforms.all; + }; + }; + mecab_naist_jdic = mecab-naist-jdic; + tealbase_groonga = tealbase-groonga; + # Start a version of the server. 
+ start-server = + let + pgconfigFile = builtins.path { + name = "postgresql.conf"; + path = ./ansible/files/postgresql_config/postgresql.conf.j2; + }; + supautilsConfigFile = builtins.path { + name = "supautils.conf"; + path = ./ansible/files/postgresql_config/supautils.conf.j2; + }; + loggingConfigFile = builtins.path { + name = "logging.conf"; + path = ./ansible/files/postgresql_config/postgresql-csvlog.conf; + }; + readReplicaConfigFile = builtins.path { + name = "readreplica.conf"; + path = ./ansible/files/postgresql_config/custom_read_replica.conf.j2; + }; + pgHbaConfigFile = builtins.path { + name = "pg_hba.conf"; + path = ./ansible/files/postgresql_config/pg_hba.conf.j2; + }; + pgIdentConfigFile = builtins.path { + name = "pg_ident.conf"; + path = ./ansible/files/postgresql_config/pg_ident.conf.j2; + }; + postgresqlExtensionCustomScriptsPath = builtins.path { + name = "extension-custom-scripts"; + path = ./ansible/files/postgresql_extension_custom_scripts; + }; + getkeyScript = ./nix/tests/util/pgsodium_getkey.sh; + localeArchive = if pkgs.stdenv.isDarwin + then "${pkgs.darwin.locale}/share/locale" + else "${pkgs.glibcLocales}/lib/locale/locale-archive"; + in + pkgs.runCommand "start-postgres-server" { } '' + mkdir -p $out/bin $out/etc/postgresql-custom $out/etc/postgresql $out/extension-custom-scripts + cp ${supautilsConfigFile} $out/etc/postgresql-custom/supautils.conf || { echo "Failed to copy supautils.conf"; exit 1; } + cp ${pgconfigFile} $out/etc/postgresql/postgresql.conf || { echo "Failed to copy postgresql.conf"; exit 1; } + cp ${loggingConfigFile} $out/etc/postgresql-custom/logging.conf || { echo "Failed to copy logging.conf"; exit 1; } + cp ${readReplicaConfigFile} $out/etc/postgresql-custom/read-replica.conf || { echo "Failed to copy read-replica.conf"; exit 1; } + cp ${pgHbaConfigFile} $out/etc/postgresql/pg_hba.conf || { echo "Failed to copy pg_hba.conf"; exit 1; } + cp ${pgIdentConfigFile} $out/etc/postgresql/pg_ident.conf || { echo "Failed 
to copy pg_ident.conf"; exit 1; } + cp -r ${postgresqlExtensionCustomScriptsPath}/* $out/extension-custom-scripts/ || { echo "Failed to copy custom scripts"; exit 1; } + echo "Copy operation completed" + chmod 644 $out/etc/postgresql-custom/supautils.conf + chmod 644 $out/etc/postgresql/postgresql.conf + chmod 644 $out/etc/postgresql-custom/logging.conf + chmod 644 $out/etc/postgresql/pg_hba.conf + substitute ${./nix/tools/run-server.sh.in} $out/bin/start-postgres-server \ + --subst-var-by 'PGSQL_DEFAULT_PORT' '${pgsqlDefaultPort}' \ + --subst-var-by 'PGSQL_SUPERUSER' '${pgsqlSuperuser}' \ + --subst-var-by 'PSQL15_BINDIR' '${basePackages.psql_15.bin}' \ + --subst-var-by 'PSQL_CONF_FILE' $out/etc/postgresql/postgresql.conf \ + --subst-var-by 'PGSODIUM_GETKEY' '${getkeyScript}' \ + --subst-var-by 'READREPL_CONF_FILE' "$out/etc/postgresql-custom/read-replica.conf" \ + --subst-var-by 'LOGGING_CONF_FILE' "$out/etc/postgresql-custom/logging.conf" \ + --subst-var-by 'SUPAUTILS_CONF_FILE' "$out/etc/postgresql-custom/supautils.conf" \ + --subst-var-by 'PG_HBA' "$out/etc/postgresql/pg_hba.conf" \ + --subst-var-by 'PG_IDENT' "$out/etc/postgresql/pg_ident.conf" \ + --subst-var-by 'LOCALES' '${localeArchive}' \ + --subst-var-by 'EXTENSION_CUSTOM_SCRIPTS_DIR' "$out/extension-custom-scripts" \ + --subst-var-by 'MECAB_LIB' '${basePackages.psql_15.exts.pgroonga}/lib/groonga/plugins/tokenizers/tokenizer_mecab.so' \ + --subst-var-by 'GROONGA_DIR' '${tealbase-groonga}' + + chmod +x $out/bin/start-postgres-server + ''; + + # Start a version of the client and runs migrations script on server. 
+ start-client = + let + migrationsDir = ./migrations/db; + postgresqlSchemaSql = ./nix/tools/postgresql_schema.sql; + pgbouncerAuthSchemaSql = ./ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql; + statExtensionSql = ./ansible/files/stat_extension.sql; + in + pkgs.runCommand "start-postgres-client" { } '' + mkdir -p $out/bin + substitute ${./nix/tools/run-client.sh.in} $out/bin/start-postgres-client \ + --subst-var-by 'PGSQL_DEFAULT_PORT' '${pgsqlDefaultPort}' \ + --subst-var-by 'PGSQL_SUPERUSER' '${pgsqlSuperuser}' \ + --subst-var-by 'PSQL15_BINDIR' '${basePackages.psql_15.bin}' \ + --subst-var-by 'MIGRATIONS_DIR' '${migrationsDir}' \ + --subst-var-by 'POSTGRESQL_SCHEMA_SQL' '${postgresqlSchemaSql}' \ + --subst-var-by 'PGBOUNCER_AUTH_SCHEMA_SQL' '${pgbouncerAuthSchemaSql}' \ + --subst-var-by 'STAT_EXTENSION_SQL' '${statExtensionSql}' + chmod +x $out/bin/start-postgres-client + ''; + + # Migrate between two data directories. + migrate-tool = + let + configFile = ./nix/tests/postgresql.conf.in; + getkeyScript = ./nix/tests/util/pgsodium_getkey.sh; + primingScript = ./nix/tests/prime.sql; + migrationData = ./nix/tests/migrations/data.sql; + in + pkgs.runCommand "migrate-postgres" { } '' + mkdir -p $out/bin + substitute ${./nix/tools/migrate-tool.sh.in} $out/bin/migrate-postgres \ + --subst-var-by 'PSQL15_BINDIR' '${basePackages.psql_15.bin}' \ + --subst-var-by 'PSQL_CONF_FILE' '${configFile}' \ + --subst-var-by 'PGSODIUM_GETKEY' '${getkeyScript}' \ + --subst-var-by 'PRIMING_SCRIPT' '${primingScript}' \ + --subst-var-by 'MIGRATION_DATA' '${migrationData}' + + chmod +x $out/bin/migrate-postgres + ''; + + start-replica = pkgs.runCommand "start-postgres-replica" { } '' + mkdir -p $out/bin + substitute ${./nix/tools/run-replica.sh.in} $out/bin/start-postgres-replica \ + --subst-var-by 'PGSQL_SUPERUSER' '${pgsqlSuperuser}' \ + --subst-var-by 'PSQL15_BINDIR' '${basePackages.psql_15.bin}' + chmod +x $out/bin/start-postgres-replica + ''; + sync-exts-versions = 
pkgs.runCommand "sync-exts-versions" { } '' + mkdir -p $out/bin + substitute ${./nix/tools/sync-exts-versions.sh.in} $out/bin/sync-exts-versions \ + --subst-var-by 'YQ' '${pkgs.yq}/bin/yq' \ + --subst-var-by 'JQ' '${pkgs.jq}/bin/jq' \ + --subst-var-by 'NIX_EDITOR' '${nix-editor.packages.${system}.nix-editor}/bin/nix-editor' \ + --subst-var-by 'NIXPREFETCHURL' '${pkgs.nixVersions.nix_2_20}/bin/nix-prefetch-url' \ + --subst-var-by 'NIX' '${pkgs.nixVersions.nix_2_20}/bin/nix' + chmod +x $out/bin/sync-exts-versions + ''; + }; + + # Create a testing harness for a PostgreSQL package. This is used for + # 'nix flake check', and works with any PostgreSQL package you hand it. + makeCheckHarness = pgpkg: + let + sqlTests = ./nix/tests/smoke; + pg_prove = pkgs.perlPackages.TAPParserSourceHandlerpgTAP; + tealbase-groonga = pkgs.callPackage ./nix/tealbase-groonga.nix { }; + in + pkgs.runCommand "postgres-${pgpkg.version}-check-harness" + { + nativeBuildInputs = with pkgs; [ coreutils bash pgpkg pg_prove pg_regress procps tealbase-groonga ]; + } '' + TMPDIR=$(mktemp -d) + if [ $? 
-ne 0 ]; then + echo "Failed to create temp directory" >&2 + exit 1 + fi + + # Ensure the temporary directory is removed on exit + trap 'rm -rf "$TMPDIR"' EXIT + + export PGDATA="$TMPDIR/pgdata" + export PGSODIUM_DIR="$TMPDIR/pgsodium" + + mkdir -p $PGDATA + mkdir -p $TMPDIR/logfile + # Generate a random key and store it in an environment variable + export PGSODIUM_KEY=$(head -c 32 /dev/urandom | od -A n -t x1 | tr -d ' \n') + export GRN_PLUGINS_DIR=${tealbase-groonga}/lib/groonga/plugins + # Create a simple script to echo the key + echo '#!/bin/sh' > $TMPDIR/getkey.sh + echo 'echo $PGSODIUM_KEY' >> $TMPDIR/getkey.sh + chmod +x $TMPDIR/getkey.sh + initdb --locale=C --username=tealbase_admin + substitute ${./nix/tests/postgresql.conf.in} $PGDATA/postgresql.conf \ + --subst-var-by PGSODIUM_GETKEY_SCRIPT "$TMPDIR/getkey.sh" + echo "listen_addresses = '*'" >> $PGDATA/postgresql.conf + echo "port = 5432" >> $PGDATA/postgresql.conf + echo "host all all 127.0.0.1/32 trust" >> $PGDATA/pg_hba.conf + #postgres -D "$PGDATA" -k "$TMPDIR" -h localhost -p 5432 >$TMPDIR/logfile/postgresql.log 2>&1 & + pg_ctl -D "$PGDATA" -l $TMPDIR/logfile/postgresql.log -o "-k $TMPDIR -p 5432" start + for i in {1..60}; do + if pg_isready -h localhost -p 5432; then + echo "PostgreSQL is ready" + break + fi + sleep 1 + if [ $i -eq 60 ]; then + echo "PostgreSQL is not ready after 60 seconds" + echo "PostgreSQL status:" + pg_ctl -D "$PGDATA" status + echo "PostgreSQL log content:" + cat $TMPDIR/logfile/postgresql.log + exit 1 + fi + done + createdb -p 5432 -h localhost --username=tealbase_admin testing + if ! psql -p 5432 -h localhost --username=tealbase_admin -d testing -v ON_ERROR_STOP=1 -Xaf ${./nix/tests/prime.sql}; then + echo "Error executing SQL file. 
PostgreSQL log content:" + cat $TMPDIR/logfile/postgresql.log + pg_ctl -D "$PGDATA" stop + exit 1 + fi + pg_prove -p 5432 -h localhost --username=tealbase_admin -d testing ${sqlTests}/*.sql + + mkdir -p $out/regression_output + pg_regress \ + --use-existing \ + --dbname=testing \ + --inputdir=${./nix/tests} \ + --outputdir=$out/regression_output \ + --host=localhost \ + --port=5432 \ + --user=tealbase_admin \ + $(ls ${./nix/tests/sql} | sed -e 's/\..*$//' | sort ) + + pg_ctl -D "$PGDATA" stop + mv $TMPDIR/logfile/postgresql.log $out + echo ${pgpkg} + ''; + in + rec { + # The list of all packages that can be built with 'nix build'. The list + # of names that can be used can be shown with 'nix flake show' + packages = flake-utils.lib.flattenTree basePackages // { + # Any extra packages we might want to include in our package + # set can go here. + inherit (pkgs) + # NOTE: comes from our cargo-pgrx-0-11-3.nix overlay + cargo-pgrx_0_11_3; + + }; + + # The list of exported 'checks' that are run with every run of 'nix + # flake check'. This is run in the CI system, as well. + checks = { + psql_15 = makeCheckHarness basePackages.psql_15.bin; + #psql_16 = makeCheckHarness basePackages.psql_16.bin; + #psql_orioledb_16 = makeCheckHarness basePackages.psql_orioledb_16.bin; + }; + + # Apps is a list of names of things that can be executed with 'nix run'; + # these are distinct from the things that can be built with 'nix build', + # so they need to be listed here too. 
+ apps = + let + mkApp = attrName: binName: { + type = "app"; + program = "${basePackages."${attrName}"}/bin/${binName}"; + }; + in + { + start-server = mkApp "start-server" "start-postgres-server"; + start-client = mkApp "start-client" "start-postgres-client"; + start-replica = mkApp "start-replica" "start-postgres-replica"; + migration-test = mkApp "migrate-tool" "migrate-postgres"; + sync-exts-versions = mkApp "sync-exts-versions" "sync-exts-versions"; + }; + + # 'devShells.default' lists the set of packages that are included in the + # ambient $PATH environment when you run 'nix develop'. This is useful + # for development and puts many convenient devtools instantly within + # reach. + devShells.default = pkgs.mkShell { + packages = with pkgs; [ + coreutils + just + nix-update + #pg_prove + shellcheck + ansible + ansible-lint + (packer.overrideAttrs (oldAttrs: { + version = "1.7.8"; + })) + + basePackages.start-server + basePackages.start-client + basePackages.start-replica + basePackages.migrate-tool + basePackages.sync-exts-versions + ]; + shellHook = '' + export HISTFILE=.history + ''; + }; + } + ); +} diff --git a/migrations/README.md b/migrations/README.md index a90f03d..df08efa 100644 --- a/migrations/README.md +++ b/migrations/README.md @@ -27,6 +27,7 @@ Additionally, [tealbase/postgres](https://github.com/tealbase/postgres/blob/deve ## Guidelines - Migrations are append only. Never edit existing migrations once they are on master. +- Migrations in `migrations/db/migrations` have to be idempotent. - Self contained components (gotrue, storage, realtime) may contain their own migrations. - Self hosted tealbase users should update role passwords separately after running all migrations. - Prod release is done by publishing a new GitHub release on master branch. 
diff --git a/migrations/db/init-scripts/00000000000000-initial-schema.sql b/migrations/db/init-scripts/00000000000000-initial-schema.sql index 35a9069..6abe2c3 100644 --- a/migrations/db/init-scripts/00000000000000-initial-schema.sql +++ b/migrations/db/init-scripts/00000000000000-initial-schema.sql @@ -5,12 +5,15 @@ create publication tealbase_realtime; -- tealbase super admin -create user tealbase_admin; alter user tealbase_admin with superuser createdb createrole replication bypassrls; -- tealbase replication user create user tealbase_replication_admin with login replication; +-- tealbase read-only user +create role tealbase_read_only_user with login bypassrls; +grant pg_read_all_data to tealbase_read_only_user; + -- Extension namespacing create schema if not exists extensions; create extension if not exists "uuid-ossp" with schema extensions; diff --git a/migrations/db/migrate.sh b/migrations/db/migrate.sh index dd740ce..2ed9fc0 100755 --- a/migrations/db/migrate.sh +++ b/migrations/db/migrate.sh @@ -28,6 +28,16 @@ fi db=$( cd -- "$( dirname -- "$0" )" > /dev/null 2>&1 && pwd ) if [ -z "${USE_DBMATE:-}" ]; then + psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U tealbase_admin <.nix` and edit `flake.nix` and add it to the `ourExtensions` list. +2. `git add .` as nix uses git to track changes +3. In your package file, temporarily empty the `hash = "sha256<...>=";` to `hash = "";` and save and `git add .` +4. Run `nix build .#psql_15/exts/` to try to trigger a build, nix will print the calculated sha256 value that you can add back the the `hash` variable, save the file again, and re-run `nix build .#psql_15/exts/`. +5. Add any needed migrations into the `tealbase/postgres` migrations directory. +6. You can then run tests locally to verify that the update of the package succeeded. +7. Now it's ready for PR review! 
+ +## Extensions written in Rust that use `buildPgrxExtension` builder + +Extensions like: + +* https://github.com/tealbase/postgres/blob/develop/nix/ext/wrappers/default.nix +* https://github.com/tealbase/postgres/blob/develop/nix/ext/pg_graphql.nix +* https://github.com/tealbase/postgres/blob/develop/nix/ext/pg_jsonschema.nix + +Are written in Rust, built with `cargo`, and need to use https://github.com/pgcentralfoundation/pgrx to build the extension. + +We in turn have a special nix package `builder` which is sourced from `nixpkgs` and called `buildPgrxExtension` + +A simple example is found in `pg_jsonschema` + + +``` +{ lib, stdenv, fetchFromGitHub, postgresql, buildPgrxExtension_0_11_3, cargo }: + +buildPgrxExtension_0_11_3 rec { + pname = "pg_jsonschema"; + version = "0.3.1"; + inherit postgresql; + + src = fetchFromGitHub { + owner = "tealbase"; + repo = pname; + rev = "v${version}"; + hash = "sha256-YdKpOEiDIz60xE7C+EzpYjBcH0HabnDbtZl23CYls6g="; + }; + + nativeBuildInputs = [ cargo ]; + buildInputs = [ postgresql ]; + # update the following array when the pg_jsonschema version is updated + # required to ensure that extensions update scripts from previous versions are generated + + previousVersions = ["0.3.0" "0.2.0" "0.1.4" "0.1.4" "0.1.2" "0.1.1" "0.1.0"]; + CARGO="${cargo}/bin/cargo"; + env = lib.optionalAttrs stdenv.isDarwin { + POSTGRES_LIB = "${postgresql}/lib"; + RUSTFLAGS = "-C link-arg=-undefined -C link-arg=dynamic_lookup"; + }; + cargoHash = "sha256-VcS+efMDppofuFW2zNrhhsbC28By3lYekDFquHPta2g="; + + # FIXME (aseipp): testsuite tries to write files into /nix/store; we'll have + # to fix this a bit later. + doCheck = false; + + preBuild = '' + echo "Processing git tags..." + echo '${builtins.concatStringsSep "," previousVersions}' | sed 's/,/\n/g' > git_tags.txt + ''; + + postInstall = '' + echo "Creating SQL files for previous versions..." 
+    current_version="${version}"
+    sql_file="$out/share/postgresql/extension/pg_jsonschema--$current_version.sql"
+
+    if [ -f "$sql_file" ]; then
+      while read -r previous_version; do
+        if [ "$(printf '%s\n' "$previous_version" "$current_version" | sort -V | head -n1)" = "$previous_version" ] && [ "$previous_version" != "$current_version" ]; then
+          new_file="$out/share/postgresql/extension/pg_jsonschema--$previous_version--$current_version.sql"
+          echo "Creating $new_file"
+          cp "$sql_file" "$new_file"
+        fi
+      done < git_tags.txt
+    else
+      echo "Warning: $sql_file not found"
+    fi
+    rm git_tags.txt
+  '';
+
+
+  meta = with lib; {
+    description = "JSON Schema Validation for PostgreSQL";
+    homepage = "https://github.com/tealbase/${pname}";
+    maintainers = with maintainers; [ samrose ];
+    platforms = postgresql.meta.platforms;
+    license = licenses.postgresql;
+  };
+}
+```
+
+Here we have built support in our overlay to specify and pin the version of `buildPgrxExtension` to a specific version (in this case `buildPgrxExtension_0_11_3`). This is currently the only version we can support, but this can be extended in our overlay https://github.com/tealbase/postgres/blob/develop/nix/overlays/cargo-pgrx-0-11-3.nix to support other versions.
+
+A few things about `buildPgrxExtension_x`:
+
+* It doesn't support `buildPhase`, `installPhase` and those are implemented directly in the builder already
+* It mostly just allows `cargo build` to do its thing, but you may need to set env vars for the build process as seen above
+* It calculates a special `cargoHash` that will be generated after the hash in `src` is generated, when running `nix build .#psql_15/exts/<extension>` to build the extension
+
+
+## Post Nix derivation release steps
+
+
+1. You can add and run tests as described in https://github.com/tealbase/postgres/blob/develop/nix/docs/adding-tests.md
+2. You may need to add tests to our test.yml gh action workflow as well.
+3. 
You can add the package and name and version to `ansible/vars.yml`; it is not necessary to add the sha256 hash here, as the package is already built and cached in our release process before these vars are ever run.
+4. to check that all your files will land in the overall build correctly, you can run `nix profile install .#psql_15/bin` on your machine, and check in `~/.nix-profile/bin, ~/.nix-profile/lib, ~/.nix-profile/share/postgresql/*` and you should see your lib, .control and sql files there.
+5. You can also run `nix run .#start-server 15` and in a new terminal window run `nix run .#start-client-and-migrate 15` and try to `CREATE EXTENSION ` and work with it there
+6. Check that your extension works with the `pg_upgrade` process (TODO documentation forthcoming)
+7. Now you are ready to PR the extension
+8. From here, the release process should typically take care of the rest.
\ No newline at end of file
diff --git a/nix/docs/adding-tests.md b/nix/docs/adding-tests.md
new file mode 100644
index 0000000..126ed4a
--- /dev/null
+++ b/nix/docs/adding-tests.md
@@ -0,0 +1,100 @@
+There are basically three types of tests you can add:
+
+- pgTAP based tests, and
+- pg\_regress tests
+- Migration tests.
+
+In all cases, a number of extensions may be installed into the database for
+use; you can see those in both [postgresql.conf.in](../tests/postgresql.conf.in)
+and [prime.sql](../tests/prime.sql) (extensions may be enabled in either place.)
+
+## pg\_regress tests
+
+pg\_regress tests are in [tests/sql](./../tests/sql/) with output in [tests/expected](./../tests/expected/). 
+To create a new test, create a new SQL file in [tests/sql](./../tests/sql/) and then run: + +``` +nix flake check -L +``` + +Next, review the logs to identify where the test output was written + +``` +postgres> CREATE EXTENSION IF NOT EXISTS index_advisor; +postgres> CREATE EXTENSION +postgres> (using postmaster on localhost, port 5432) +postgres> ============== running regression test queries ============== +postgres> test new_test ... diff: /nix/store/5gk419ddz7mzzwhc9j6yj5i8lkw67pdl-tests/expected/new_test.out: No such file or directory +postgres> diff command failed with status 512: diff "/nix/store/5gk419ddz7mzzwhc9j6yj5i8lkw67pdl-tests/expected/new_test.out" "/nix/store/2fbrvnnr7iz6yigyf0rb0vxnyqvrgxzp-postgres-15.6-check-harness/regression_output/results/new_test.out" > "/nix/store/2fbrvnnr7iz6yigyf0rb0vxnyqvrgxzp-postgres-15.6-check-harness/regression_output/results/new_test.out.diff +``` + +and copy the `regression_output` directory to where you can review + +``` +cp -r /nix/store/2fbrvnnr7iz6yigyf0rb0vxnyqvrgxzp-postgres-15.6-check-harness/regression_output . +``` + +Then you can review the contents of `regression_output/results/new_test.out` to see if it matches what you expected. + +If it does match your expectations, copy the file to [tests/expected](./../tests/expected/) and the test will pass on the next run. + +If the output does not match your expectations, update the `.sql` file, re-run with `nix flake check -L` and try again + + +## pgTAP tests + +These are super easy: simply add `.sql` files to the +[tests/smoke](./../tests/smoke/) directory, then: + +``` +nix flake check -L +``` + +(`-L` prints logs to stderrr, for more details see `man nix`) + +These files are run using `pg_prove`; they pretty much behave exactly like how +you expect; you can read +[the pgTAP documentation](https://pgtap.org/documentation.html) for more. 
+ +For a good example of a pgTAP test as a pull request, check out +[pull request #4](https://github.com/tealbase/nix-postgres/pull/4/files). + +## Re-running tests + +`nix flake check` gets its results cached, so if you do it again the tests won't rerun. If you change a file then it will run again. + + + +Limitation: currently there's no way to rerun all the tests, so you have to specify the check attribute. + +To get the correct attribute (`#checks.x86_64-linux.psql_15` above), you can do `nix flake show`. This will show a tree with all the output attributes. + +## Migration tests + +> **NOTE**: Currently, migration tests _do not happen in CI_. They can only be +> run manually. + +Migration tests are pretty simple in the sense they follow a very simple +principle: + +- You put data in the database +- Run the migration procedure +- It should probably not fail + +Step 1 and 2 are easy, and for various reasons (e.g. mistakes from upstream +extension authors), step 3 isn't guaranteed, so that's what the whole idea is +designed to test. + +To add data into the database, modify the +[data.sql](../nix/tests/migrations/data.sql) script and add whatever you want into +it. This script gets loaded into the old version of the database at startup, and +it's expected that the new version of the database can handle it. + +To run the `migration-test` tool, check out the documentation on +[migration-tests](./migration-tests.md). diff --git a/nix/docs/build-postgres.md b/nix/docs/build-postgres.md new file mode 100644 index 0000000..072886e --- /dev/null +++ b/nix/docs/build-postgres.md @@ -0,0 +1,124 @@ +# 01 — Using tealbase nix + +Let's clone this repo: + +```bash +git clone https://github.com/tealbase/postgres $HOME/tealbase-postgres +cd $HOME/tealbase-postgres +``` + +## Hashes for everyone + +But how do we build stuff within it? With `nix build`, of course! 
For example, +the following command will, when completed, create a symlink named `result` that +points to a path which contains an entire PostgreSQL 15 installation — +extensions and all: + +``` +nix build .#psql_15/bin +``` + +``` +$ readlink result +/nix/store/ybf48481x033649mgdzk5dyaqv9dppzx-postgresql-and-plugins-15.3 +``` + +``` +$ ls result +bin include lib share +``` + +``` +$ ll result/bin/ +total 9928 +dr-xr-xr-x 2 root root 4096 Dec 31 1969 ./ +dr-xr-xr-x 5 root root 4096 Dec 31 1969 ../ +lrwxrwxrwx 1 root root 79 Dec 31 1969 .initdb-wrapped -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/.initdb-wrapped* +-r-xr-xr-x 1 root root 9829624 Dec 31 1969 .postgres-wrapped* +lrwxrwxrwx 1 root root 73 Dec 31 1969 clusterdb -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/clusterdb* +lrwxrwxrwx 1 root root 72 Dec 31 1969 createdb -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/createdb* +lrwxrwxrwx 1 root root 74 Dec 31 1969 createuser -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/createuser* +lrwxrwxrwx 1 root root 70 Dec 31 1969 dropdb -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/dropdb* +lrwxrwxrwx 1 root root 72 Dec 31 1969 dropuser -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/dropuser* +lrwxrwxrwx 1 root root 68 Dec 31 1969 ecpg -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/ecpg* +lrwxrwxrwx 1 root root 70 Dec 31 1969 initdb -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/initdb* +lrwxrwxrwx 1 root root 72 Dec 31 1969 oid2name -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/oid2name* +lrwxrwxrwx 1 root root 74 Dec 31 1969 pg_amcheck -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_amcheck* +lrwxrwxrwx 1 root root 81 Dec 31 1969 pg_archivecleanup -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_archivecleanup* +lrwxrwxrwx 1 root root 77 Dec 
31 1969 pg_basebackup -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_basebackup* +lrwxrwxrwx 1 root root 76 Dec 31 1969 pg_checksums -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_checksums* +-r-xr-xr-x 1 root root 53432 Dec 31 1969 pg_config* +lrwxrwxrwx 1 root root 78 Dec 31 1969 pg_controldata -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_controldata* +-r-xr-xr-x 1 root root 82712 Dec 31 1969 pg_ctl* +lrwxrwxrwx 1 root root 71 Dec 31 1969 pg_dump -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_dump* +lrwxrwxrwx 1 root root 74 Dec 31 1969 pg_dumpall -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_dumpall* +lrwxrwxrwx 1 root root 74 Dec 31 1969 pg_isready -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_isready* +lrwxrwxrwx 1 root root 77 Dec 31 1969 pg_receivewal -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_receivewal* +lrwxrwxrwx 1 root root 78 Dec 31 1969 pg_recvlogical -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_recvlogical* +lrwxrwxrwx 1 root root 73 Dec 31 1969 pg_repack -> /nix/store/bi9i5ns4cqxk235qz3srs9p4x1qfxfna-pg_repack-1.4.8/bin/pg_repack* +lrwxrwxrwx 1 root root 75 Dec 31 1969 pg_resetwal -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_resetwal* +lrwxrwxrwx 1 root root 74 Dec 31 1969 pg_restore -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_restore* +lrwxrwxrwx 1 root root 73 Dec 31 1969 pg_rewind -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_rewind* +lrwxrwxrwx 1 root root 77 Dec 31 1969 pg_test_fsync -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_test_fsync* +lrwxrwxrwx 1 root root 78 Dec 31 1969 pg_test_timing -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_test_timing* +lrwxrwxrwx 1 root root 74 Dec 31 1969 pg_upgrade -> 
/nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_upgrade* +lrwxrwxrwx 1 root root 79 Dec 31 1969 pg_verifybackup -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_verifybackup* +lrwxrwxrwx 1 root root 74 Dec 31 1969 pg_waldump -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_waldump* +lrwxrwxrwx 1 root root 71 Dec 31 1969 pgbench -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pgbench* +lrwxrwxrwx 1 root root 71 Dec 31 1969 pgsql2shp -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/pgsql2shp* +lrwxrwxrwx 1 root root 77 Dec 31 1969 pgsql2shp-3.3.3 -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/pgsql2shp-3.3.3* +lrwxrwxrwx 1 root root 75 Dec 31 1969 pgtopo_export -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/pgtopo_export* +lrwxrwxrwx 1 root root 81 Dec 31 1969 pgtopo_export-3.3.3 -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/pgtopo_export-3.3.3* +lrwxrwxrwx 1 root root 75 Dec 31 1969 pgtopo_import -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/pgtopo_import* +lrwxrwxrwx 1 root root 81 Dec 31 1969 pgtopo_import-3.3.3 -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/pgtopo_import-3.3.3* +-r-xr-xr-x 1 root root 286 Dec 31 1969 postgres* +lrwxrwxrwx 1 root root 74 Dec 31 1969 postmaster -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/postmaster* +lrwxrwxrwx 1 root root 68 Dec 31 1969 psql -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/psql* +lrwxrwxrwx 1 root root 74 Dec 31 1969 raster2pgsql -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/raster2pgsql* +lrwxrwxrwx 1 root root 80 Dec 31 1969 raster2pgsql-3.3.3 -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/raster2pgsql-3.3.3* +lrwxrwxrwx 1 root root 73 Dec 31 1969 reindexdb -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/reindexdb* +lrwxrwxrwx 1 
root root 71 Dec 31 1969 shp2pgsql -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/shp2pgsql* +lrwxrwxrwx 1 root root 77 Dec 31 1969 shp2pgsql-3.3.3 -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/shp2pgsql-3.3.3* +lrwxrwxrwx 1 root root 72 Dec 31 1969 vacuumdb -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/vacuumdb* +lrwxrwxrwx 1 root root 72 Dec 31 1969 vacuumlo -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/vacuumlo* +``` + +As we can see, these files all point to paths under `/nix/store`. We're actually +looking at a "farm" of symlinks to various paths, but collectively they form an +entire installation directory we can reuse as much as we want. + +The path +`/nix/store/ybf48481x033649mgdzk5dyaqv9dppzx-postgresql-and-plugins-15.3` +ultimately is a cryptographically hashed, unique name for our installation of +PostgreSQL with those plugins. This hash includes _everything_ used to build it, +so even a single change anywhere to any extension or version would result in a +_new_ hash. + +The ability to refer to a piece of data by its hash, by some notion of +_content_, is a very powerful primitive, as we'll see later. + +## Build a different version: v16 + +What if we wanted PostgreSQL 16 and plugins? Just replace `_15` with `_16`: + +``` +nix build .#psql_16/bin +``` + +You're done: + +``` +$ readlink result +/nix/store/p7ziflx0000s28bfb213jsghrczknkc4-postgresql-and-plugins-14.8 +``` + + +## Using `nix develop` + + +`nix develop .` will just drop you in a subshell with +tools you need _ready to go instantly_. That's all you need to do! And once that +shell goes away, nix installed tools will be removed from your `$PATH` as well. + +There's an even easier way to do this +[that is completely transparent to you, as well](./use-direnv.md). 
diff --git a/nix/docs/docker.md b/nix/docs/docker.md
new file mode 100644
index 0000000..ebd60cd
--- /dev/null
+++ b/nix/docs/docker.md
@@ -0,0 +1,14 @@
+Docker images are pushed to `ghcr.io` on every commit. Try the following:
+
+```
+docker run --rm -it ghcr.io/tealbase/nix-postgres-15:latest
+```
+
+Every Docker image that is built on every push is given a tag that exactly
+corresponds to a Git commit in the repository — for example commit
+[d3e0c39d34e1bb4d37e058175a7bc376620f6868](https://github.com/tealbase/nix-postgres/commit/d3e0c39d34e1bb4d37e058175a7bc376620f6868)
+in this repository has a tag in the container registry which can be used to pull
+exactly that version.
+
+This just starts the server. Client container images are not provided; you can
+use `nix run` for that, as outlined [here](./start-client-server.md).
diff --git a/nix/docs/migration-tests.md b/nix/docs/migration-tests.md
new file mode 100644
index 0000000..879e150
--- /dev/null
+++ b/nix/docs/migration-tests.md
@@ -0,0 +1,50 @@
+Migration tests are run similar to running the client and server; see
+[more on that here](./start-client-server.md).
+
+Instead, you use the following format to specify the upgrade:
+
+```
+nix run .#migration-test <from-version> <to-version> [pg_dumpall|pg_upgrade]
+```
+
+The arguments are:
+
+- The version to upgrade from
+- The version to upgrade to
+- The upgrade mechanism: either `pg_dumpall` or `pg_upgrade`
+
+## Specifying the version
+
+The versions for upgrading can be one of two forms:
+
+- A major version number, e.g. `14` or `15`
+- A path to `/nix/store`, which points to _any_ version of PostgreSQL, as long
+  as it has the "expected" layout and is a postgresql install.
+
+## Always use the latest version of the migration tool
+
+Unlike the method for starting the client or server, you probably always want to
+use the latest version of the `migration-test` tool from the repository. This is
+because it can ensure forwards and backwards compatibility if necessary. 
+ +## Upgrading between arbitrary `/nix/store` versions + +If you want to test migrations from arbitrary versions built by the repository, +you can combine `nix build` and `nix run` to do so. You can use the syntax from +the runbook on [running the server & client](./start-client-server.md) to refer +to arbitrary git revisions. + +For example, if you updated an extension in this repository, and you want to +test a migration from PostgreSQL 14 to PostgreSQL 14 + (updated extension), +using `pg_upgrade` — simply record the two git commits you want to +compare, and you could do something like the following: + +``` +OLD_GIT_VERSION=... +NEW_GIT_VERSION=... + +nix run github:tealbase/nix-postgres#migration-test \ + $(nix build "github:tealbase/nix-postgres/$OLD_GIT_VERSION#psql_14/bin") \ + $(nix build "github:tealbase/nix-postgres/$NEW_GIT_VERSION#psql_14/bin") \ + pg_upgrade +``` diff --git a/nix/docs/new-major-postgres.md b/nix/docs/new-major-postgres.md new file mode 100644 index 0000000..ea07ede --- /dev/null +++ b/nix/docs/new-major-postgres.md @@ -0,0 +1,34 @@ +PostgreSQL versions are managed in upstream nixpkgs. + +See this example PR to add a new version of PostgreSQL; this version is for 16 +beta3, but any version is roughly the same. In short, you need to: + +- Add a new version and hash +- Possibly patch the source code for minor refactorings + - In this example, an old patch had to be rewritten because a function was + split into two different functions; the patch is functionally equivalent but + textually different +- Add the changes to `all-packages.nix` +- Integrate inside the CI and get code review +- Run `nix flake update` to get a new version, once it's ready + +https://github.com/NixOS/nixpkgs/pull/249030 + +## Adding the major version to this repository + +It isn't well abstracted, unfortunately. In short: look for the strings `14` and +`15` under `flake.nix` and `nix/tools/`. 
More specifically: + +- Add `psql_XX` to `basePackages` in `flake.nix` +- Ditto with `checks` in `flake.nix` +- Modify the tools under `tools/` to understand the new major version +- Make sure the CI is integrated under the GitHub Actions. + +The third step and fourth steps are the most annoying, really. The first two are +easy and by that point you can run `nix flake check` in order to test the build, +at least. + +## Other notes + +See also issue [#6](https://github.com/tealbase/nix-postgres/issues/6), which +would make it possible to define PostgreSQL versions inside this repository. diff --git a/nix/docs/nix-overlays.md b/nix/docs/nix-overlays.md new file mode 100644 index 0000000..b877b42 --- /dev/null +++ b/nix/docs/nix-overlays.md @@ -0,0 +1,36 @@ +Overlays are a feature of Nixpkgs that allow you to: + +- Add new packages with new names to the namespace _without_ modifying upstream + - For example, if there is a package `foobar`, you might add `foobar-1_2_3` to + add a specific version for backwards compatibility +- Globally override _existing_ package names, in terms of other packages. + - For example, if you want to globally override a package to enable a + disabled-by-default feature. + +First, you need to define a file for the overlay under +[overlays/](../overlays/), and then import it in `flake.nix`. There is an +example pull request in +[#14](https://github.com/tealbase/nix-postgres/issues/14) for this; an overlay +typically looks like this: + +``` +final: prev: { + gdal = prev.gdalMinimal; +} +``` + +This says "globally override `gdal` with a different version, named +`gdalMinimal`". In this case `gdalMinimal` is a build with less features +enabled. + +The most important part is that there is an equation of the form `lhs = rhs;` +— if the `lhs` refers to an existing name, it's overwritten. If it refers +to a new name, it's introduced. 
Overwriting an existing name acts as if you +changed the files upstream: so the above example _globally_ overrides GDAL for +anything that depends on it. + +The names `final` and `prev` are used to refer to packages in terms of other +overlays. For more information about this, see the +[NixOS Wiki Page for Overlays](https://nixos.wiki/wiki/Overlays). + +We also use an overlay to override the default build recipe for `postgresql_16`, and instead feed it the specially patched postgres for use with orioledb extension. This experimental variant can be built with `nix build .#psql_orioledb_16/bin`. This will build this patched version of postgres, along with all extensions and wrappers that currently are known to work with orioledb. diff --git a/nix/docs/receipt-files.md b/nix/docs/receipt-files.md new file mode 100644 index 0000000..3cbd2c2 --- /dev/null +++ b/nix/docs/receipt-files.md @@ -0,0 +1,155 @@ +Every time you run `nix build` on this repository to build PostgreSQL, the +installation directory comes with a _receipt_ file that tells you what's inside +of it. Primarily, this tells you: + +- The version of PostgreSQL, +- The installed extensions, and +- The version of nixpkgs. + +The intent of the receipt file is to provide a mechanism for tooling to +understand installation directories and provide things like upgrade paths or +upgrade mechanisms. + +## Example receipt + +For example: + +``` +nix build .#psql_15/bin +``` + +``` +austin@GANON:~/work/nix-postgres$ nix build .#psql_15/bin +austin@GANON:~/work/nix-postgres$ ls result +bin include lib receipt.json share +``` + +The receipt is in JSON format, under `receipt.json`. 
Here's an example of what +it would look like: + +```json +{ + "extensions": [ + { + "name": "pgsql-http", + "version": "1.5.0" + }, + { + "name": "pg_plan_filter", + "version": "unstable-2021-09-23" + }, + { + "name": "pg_net", + "version": "0.7.2" + }, + { + "name": "pg_hashids", + "version": "unstable-2022-09-17" + }, + { + "name": "pgsodium", + "version": "3.1.8" + }, + { + "name": "pg_graphql", + "version": "unstable-2023-08-01" + }, + { + "name": "pg_stat_monitor", + "version": "1.0.1" + }, + { + "name": "pg_jsonschema", + "version": "unstable-2023-07-23" + }, + { + "name": "vault", + "version": "0.2.9" + }, + { + "name": "hypopg", + "version": "1.3.1" + }, + { + "name": "pg_tle", + "version": "1.0.4" + }, + { + "name": "tealbase-wrappers", + "version": "unstable-2023-07-31" + }, + { + "name": "supautils", + "version": "1.7.3" + } + ], + "nixpkgs": { + "extensions": [ + { + "name": "postgis", + "version": "3.3.3" + }, + { + "name": "pgrouting", + "version": "3.5.0" + }, + { + "name": "pgtap", + "version": "1.2.0" + }, + { + "name": "pg_cron", + "version": "1.5.2" + }, + { + "name": "pgaudit", + "version": "1.7.0" + }, + { + "name": "pgjwt", + "version": "unstable-2021-11-13" + }, + { + "name": "plpgsql_check", + "version": "2.3.4" + }, + { + "name": "pg-safeupdate", + "version": "1.4" + }, + { + "name": "timescaledb", + "version": "2.11.1" + }, + { + "name": "wal2json", + "version": "2.5" + }, + { + "name": "plv8", + "version": "3.1.5" + }, + { + "name": "rum", + "version": "1.3.13" + }, + { + "name": "pgvector", + "version": "0.4.4" + }, + { + "name": "pg_repack", + "version": "1.4.8" + }, + { + "name": "pgroonga", + "version": "3.0.8" + } + ], + "revision": "750fc50bfd132a44972aa15bb21937ae26303bc4" + }, + "psql-version": "15.3", + "receipt-version": "1", + "revision": "vcs=d250647+20230814" +} +``` diff --git a/nix/docs/references.md b/nix/docs/references.md new file mode 100644 index 0000000..ee155db --- /dev/null +++ b/nix/docs/references.md @@ -0,0 
+1,31 @@ +Nix references and other useful tools: + +- **Zero to Nix**: Start here to get your feet wet with how Nix works, and how + to use Nixpkgs: https://zero-to-nix.com/ +- `nix-installer`: My recommended way to install Nix + - https://github.com/DeterminateSystems/nix-installer +- Nix manual https://nixos.org/manual/nix/stable/ + - Useful primarily for option and command references +- Flake schema reference https://nixos.wiki/wiki/Flakes + - Useful to know what `flake.nix` is referring to +- Example pull requests for this repo: + - Adding smoke tests for an extension: + https://github.com/tealbase/nix-postgres/pull/2 + - Extension smoke tests, part 2: + https://github.com/tealbase/nix-postgres/pull/3 + - Adding an extension and a smoke test at once: + https://github.com/tealbase/nix-postgres/pull/4/files + - Updating an extension to trunk: + https://github.com/tealbase/nix-postgres/pull/7 + - Updating an extension to the latest release: + https://github.com/tealbase/nix-postgres/pull/9 +- Contributing to [nixpkgs](https://github.com/nixos/nixpkgs) + - Adding a PGRX-powered extension: + https://github.com/NixOS/nixpkgs/pull/246803 + - Adding a normal extension: https://github.com/NixOS/nixpkgs/pull/249000 + - Adding new PostgreSQL versions: https://github.com/NixOS/nixpkgs/pull/249030 +- NixOS Discourse: https://discourse.nixos.org/ + - Useful for community feedback, guidance, and help +- `nix-update`: https://github.com/Mic92/nix-update + - Used in this repository to help update extensions +- pgTAP for testing: https://pgtap.org/documentation.html diff --git a/nix/docs/start-client-server.md b/nix/docs/start-client-server.md new file mode 100644 index 0000000..20f51f3 --- /dev/null +++ b/nix/docs/start-client-server.md @@ -0,0 +1,93 @@ +## Running the server + +If you want to run a postgres server, just do this from the root of the +repository: + +``` +nix run .#start-server 15 +``` + +Replace the `15` with a `16`, and you'll be using a different version. 
Optionally you can specify a second argument for the port.
+
+You likely have a running postgres, so to not cause a conflict, this uses port 5435 by default.
+
+Actually, you don't even need the repository. You can do this from arbitrary
+directories, if the left-hand side of the hash character (`.` in this case) is a
+valid "flake reference":
+
+```
+# from any arbitrary directory
+nix run github:tealbase/postgres#start-server 15
+```
+
+### Arbitrary versions at arbitrary git revisions
+
+Let's say you want to use a PostgreSQL build from a specific version of the
+repository. You can change the syntax of the above to use _any_ version of the
+repository, at any time, by adding the commit hash after the repository name:
+
+```
+# use postgresql 15 build at commit <commit-hash>
+nix run github:tealbase/postgres/<commit-hash>#start-server 15
+```
+
+## Running the client
+
+All of the same rules apply, but try using `start-client` on the right-hand side
+of the hash character, instead. For example:
+
+```
+nix run github:tealbase/postgres#start-server 15 &
+sleep 5
+nix run github:tealbase/postgres#start-client 16
+```
+
+## Running a server replica
+
+To start a replica you can use the `start-postgres-replica` command. 
+ +- first argument: the master version +- second argument: the master port +- third argument: the replica server port + +First start a server and a couple of replicas: + +``` +$ start-postgres-server 15 5435 + +$ start-postgres-replica 15 5439 + +$ start-postgres-replica 15 5440 +``` + +Now check the master server: + +``` +$ start-postgres-client 15 5435 +``` + +```sql +SELECT client_addr, state +FROM pg_stat_replication; + client_addr | state +-------------+----------- + ::1 | streaming + ::1 | streaming +(2 rows) + +create table items as select x::int from generate_series(1,100) x; +``` + +And a replica: + +``` +$ start-postgres-client 15 5439 +``` + +```sql +select count(*) from items; + count +------- + 100 +(1 row) +``` diff --git a/nix/docs/start-here.md b/nix/docs/start-here.md new file mode 100644 index 0000000..acc3158 --- /dev/null +++ b/nix/docs/start-here.md @@ -0,0 +1,70 @@ +Let's go ahead and install Nix. To do that, we'll use the +**[nix-installer tool]** by Determinate Systems. This works on many platforms, +but most importantly it works on **aarch64 Linux** and **x86_64 Linux**. Use the +following command in your shell, **it should work on any Linux distro of your +choice**: + +[nix-installer tool]: https://github.com/DeterminateSystems/nix-installer + +```bash +curl \ + --proto '=https' --tlsv1.2 \ + -sSf -L https://install.determinate.systems/nix \ +| sh -s -- install +``` + +After you do this, **you must log in and log back out of your desktop +environment** to get a new login session. This is so that your shell can have +the Nix tools installed on `$PATH` and so that your user shell can see some +extra settings. 
+ +You should now be able to do something like the following; try running these +same commands on your machine: + +``` +$ nix --version +nix (Nix) 2.16.1 +``` + +``` +$ nix run nixpkgs#nix-info -- -m + - system: `"x86_64-linux"` + - host os: `Linux 5.15.90.1-microsoft-standard-WSL2, Ubuntu, 22.04.2 LTS (Jammy Jellyfish), nobuild` + - multi-user?: `yes` + - sandbox: `yes` + - version: `nix-env (Nix) 2.16.1` + - channels(root): `"nixpkgs"` + - nixpkgs: `/nix/var/nix/profiles/per-user/root/channels/nixpkgs` +``` + +If the above worked, you're now cooking with gas! + +> _**NOTE**_: While there is an upstream tool to install Nix, written in Bash, +> we use the Determinate Systems installer — which will hopefully replace the +> original — because it's faster, and takes care of several extra edge cases +> that the original one couldn't handle, and makes several changes to the +> default installed configuration to make things more user friendly. Determinate +> Systems is staffed by many long-time Nix contributors and the creator of Nix, +> and is trustworthy. + +## Do some fun stuff + +One of the best things about Nix that requires _very little_ knowledge of it is +that it lets you install the latest and greatest versions of many tools _on any +Linux distribution_. We'll explain more about that later on. But just as a few +examples: + +- **Q**: I want the latest version of Deno. Can we get that? +- **A**: `nix profile install nixpkgs#deno`, and you're done! + + + +- **Q**: What about HTTPie? A nice Python application? +- **A**: Same idea: `nix profile install nixpkgs#httpie` + + + +- **Q**: What about my favorite Rust applications, like ripgrep and bat? +- **A.1**: `nix profile install nixpkgs#ripgrep` +- **A.2**: `nix profile install nixpkgs#bat` +- **A.3**: And yes, you also have exa, fd, hyperfine, and more! 
diff --git a/nix/docs/update-extension.md b/nix/docs/update-extension.md
new file mode 100644
index 0000000..febe61b
--- /dev/null
+++ b/nix/docs/update-extension.md
@@ -0,0 +1,17 @@
+
+# Update an existing nix extension
+
+
+1. Create a branch off of `develop`
+2. For instance, if we were updating https://github.com/tealbase/postgres/blob/develop/nix/ext/supautils.nix we would:
+   1. change the `version = "2.2.1";` to whatever our git tag release version is that we want to update to
+   2. temporarily empty the `hash = "sha256-wSUEG0at00TPAoHv6+NMzuUE8mfW6fnHH0MNxvBdUiE=";` to `hash = "";` and save `supautils.nix` and `git add .`
+   3. run `nix build .#psql_15/exts/supautils` or the name of the extension to update, nix will print the calculated sha256 value that you can add back to the `hash` variable, save the file again, and re-run `nix build .#psql_15/exts/supautils`.
+   4. NOTE: This step is only necessary for `buildPgrxExtension` packages, which includes tealbase-wrappers, pg_jsonschema, and pg_graphql. Otherwise you can skip this step. For our packages that are built with `buildPgrxExtension` you will need to prepend the previous version to the `previousVersions` variable before updating the version in the package (for instance if you are updating `tealbase-wrappers` extension from `0.4.1` to `0.4.2` then you would prepend `0.4.1` to this line https://github.com/tealbase/postgres/blob/develop/nix/ext/wrappers/default.nix#L18 ).
+   5. Add any needed migrations into the `tealbase/postgres` migrations directory
+   6. update the version in `ansible/vars.yml` as usual
+   7. You can then run the `nix flake check -L` tests locally to verify that the update of the package succeeded.
+   8. Now it's ready for PR review.
+   9. Once the PR is approved, if you want the change to go out in a release, update the common-nix.vars.yml file with the new version prior to merging.
+ + diff --git a/nix/docs/use-direnv.md b/nix/docs/use-direnv.md new file mode 100644 index 0000000..cf34a23 --- /dev/null +++ b/nix/docs/use-direnv.md @@ -0,0 +1,102 @@ +Have you ever used a tool like `pip`'s `bin/activate` script, or `rbenv`? These +tools populate your shell environment with the right tools and scripts and +dependencies (e.g. `PYTHONPATH`) to run your software. + +What if I told you there was a magical tool that worked like that, and could do +it for arbitrary languages and tools? + +That tool is called **[direnv](https://direnv.net)**. + +## Install direnv and use it in your shell + +First, install `direnv`: + +``` +$ nix profile install nixpkgs#direnv +``` + +``` +$ which direnv +/home/austin/.nix-profile/bin/direnv +``` + +Now, you need to activate it in your shell by hooking into it. If you're using +**Bash**, try putting this in your `.bashrc` and starting up a new interactive +shell: + +``` +eval "$(direnv hook bash)" +``` + +Not using bash? Check the +[direnv hook documentation](https://direnv.net/docs/hook.html) for more. + +## Set up `nix-postgres` + +Let's go back to the `nix-postgres` source code. + +``` +cd $HOME/tmp-nix-postgres +``` + +Now, normally, direnv is going to look for a file called `.envrc` and load that +if it exists. But to be polite, we don't do that by default; we keep a file +named `.envrc.recommended` in the repository instead, and encourage people to do +this: + +``` +echo "source_env .envrc.recommended" >> .envrc +``` + +All this says is "Load the code from `.envrc.recommended` directly", just like a +normal bash script using `source`. The idea of this pattern is to allow users to +have their own customized `.envrc` and piggyback on the committed code for +utility — and `.envrc` is `.gitignore`'d, so you can put e.g. secret +tokens inside without fear of committing them. + +Run the above command, and then... + +## What just happened? + +Oops, a big red error appeared? 
+ +``` +$ echo "source_env .envrc.recommended" >> .envrc +direnv: error /home/austin/work/nix-postgres/.envrc is blocked. Run `direnv allow` to approve its content +``` + +What happened? By default, as a security measure, `direnv` _does not_ load or +execute any code from an `.envrc` file, and instead it MUST be allowed +explicitly. + +## `direnv allow` + +Our `.envrc.recommended` file will integrate with Nix directly. So run +`direnv allow`, and you'll suddenly see the following: + +``` +$ direnv allow +direnv: loading ~/work/nix-postgres/.envrc +direnv: loading ~/work/nix-postgres/.envrc.recommended +direnv: loading https://raw.githubusercontent.com/nix-community/nix-direnv/2.3.0/direnvrc (sha256-Dmd+j63L84wuzgyjITIfSxSD57Tx7v51DMxVZOsiUD8=) +direnv: using flake +direnv: nix-direnv: renewed cache +direnv: export +AR +AS +CC +CONFIG_SHELL +CXX +DETERMINISTIC_BUILD +HOST_PATH +IN_NIX_SHELL +LD +NIX_BINTOOLS +NIX_BINTOOLS_WRAPPER_TARGET_HOST_x86_64_unknown_linux_gnu +NIX_BUILD_CORES +NIX_CC +NIX_CC_WRAPPER_TARGET_HOST_x86_64_unknown_linux_gnu +NIX_CFLAGS_COMPILE +NIX_ENFORCE_NO_NATIVE +NIX_HARDENING_ENABLE +NIX_LDFLAGS +NIX_STORE +NM +OBJCOPY +OBJDUMP +PYTHONHASHSEED +PYTHONNOUSERSITE +PYTHONPATH +RANLIB +READELF +SIZE +SOURCE_DATE_EPOCH +STRINGS +STRIP +_PYTHON_HOST_PLATFORM +_PYTHON_SYSCONFIGDATA_NAME +__structuredAttrs +buildInputs +buildPhase +builder +cmakeFlags +configureFlags +depsBuildBuild +depsBuildBuildPropagated +depsBuildTarget +depsBuildTargetPropagated +depsHostHost +depsHostHostPropagated +depsTargetTarget +depsTargetTargetPropagated +doCheck +doInstallCheck +dontAddDisableDepTrack +mesonFlags +name +nativeBuildInputs +out +outputs +patches +phases +preferLocalBuild +propagatedBuildInputs +propagatedNativeBuildInputs +shell +shellHook +stdenv +strictDeps +system ~PATH ~XDG_DATA_DIRS +``` + +What just happened is that we populated the ambient shell environment with tools +specified inside of `flake.nix` — we'll cover Flakes later. 
But for now, +your tools are provisioned! + + +## The power of `direnv` + +`direnv` with Nix is a frighteningly good development combination for many +purposes. This is its main power: you can use it to create on-demand developer +shells for any language, tool, or environment, and all you need to do is `cd` to +the right directory. + +This is the power of `direnv`: your projects always, on demand, will have the +right tools configured and available, no matter if you last worked on them a day +ago or a year ago, or it was done by your teammate, or you have a brand new +computer that you've never programmed on. diff --git a/nix/ext/0001-build-Allow-using-V8-from-system.patch b/nix/ext/0001-build-Allow-using-V8-from-system.patch new file mode 100644 index 0000000..ab2c6f0 --- /dev/null +++ b/nix/ext/0001-build-Allow-using-V8-from-system.patch @@ -0,0 +1,46 @@ +diff --git a/Makefile b/Makefile +index 38879cc..6e78eeb 100644 +--- a/Makefile ++++ b/Makefile +@@ -20,6 +20,7 @@ OBJS = $(SRCS:.cc=.o) + MODULE_big = plv8-$(PLV8_VERSION) + EXTENSION = plv8 + PLV8_DATA = plv8.control plv8--$(PLV8_VERSION).sql ++USE_SYSTEM_V8 = 0 + + + # Platform detection +@@ -41,6 +42,7 @@ PGXS := $(shell $(PG_CONFIG) --pgxs) + PG_VERSION_NUM := $(shell cat `$(PG_CONFIG) --includedir-server`/pg_config*.h \ + | perl -ne 'print $$1 and exit if /PG_VERSION_NUM\s+(\d+)/') + ++ifeq ($(USE_SYSTEM_V8),0) + AUTOV8_DIR = build/v8 + AUTOV8_OUT = build/v8/out.gn/obj + AUTOV8_STATIC_LIBS = -lv8_libplatform -lv8_libbase +@@ -66,6 +68,7 @@ v8: + make -f Makefiles/Makefile.macos v8 + endif + endif ++endif + + # enable direct jsonb conversion by default + CCFLAGS += -DJSONB_DIRECT_CONVERSION +@@ -83,6 +86,7 @@ ifdef BIGINT_GRACEFUL + endif + + ++ifeq ($(USE_SYSTEM_V8),0) + # We're gonna build static link. 
Rip it out after include Makefile + SHLIB_LINK := $(filter-out -lv8, $(SHLIB_LINK)) + +@@ -101,6 +105,7 @@ else + SHLIB_LINK += -lrt -std=c++14 + endif + endif ++endif + + DATA = $(PLV8_DATA) + ifndef DISABLE_DIALECT +-- +2.37.3 diff --git a/nix/ext/hypopg.nix b/nix/ext/hypopg.nix new file mode 100644 index 0000000..4fc00a8 --- /dev/null +++ b/nix/ext/hypopg.nix @@ -0,0 +1,31 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "hypopg"; + version = "1.4.1"; + + buildInputs = [ postgresql ]; + + src = fetchFromGitHub { + owner = "HypoPG"; + repo = pname; + rev = "refs/tags/${version}"; + hash = "sha256-88uKPSnITRZ2VkelI56jZ9GWazG/Rn39QlyHKJKSKMM="; + }; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp *.so $out/lib + cp *.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Hypothetical Indexes for PostgreSQL"; + homepage = "https://github.com/HypoPG/${pname}"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/nix/ext/index_advisor.nix b/nix/ext/index_advisor.nix new file mode 100644 index 0000000..3ed5a5f --- /dev/null +++ b/nix/ext/index_advisor.nix @@ -0,0 +1,30 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "index_advisor"; + version = "0.2.0"; + + buildInputs = [ postgresql ]; + + src = fetchFromGitHub { + owner = "olirice"; + repo = pname; + rev = "v${version}"; + hash = "sha256-G0eQk2bY5CNPMeokN/nb05g03CuiplRf902YXFVQFbs="; + }; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp *.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Recommend indexes to improve query performance in PostgreSQL"; + homepage = "https://github.com/olirice/index_advisor"; + maintainers = with maintainers; [ 
samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/nix/ext/mecab-naist-jdic/default.nix b/nix/ext/mecab-naist-jdic/default.nix new file mode 100644 index 0000000..e4f3962 --- /dev/null +++ b/nix/ext/mecab-naist-jdic/default.nix @@ -0,0 +1,41 @@ +{ lib, stdenv, fetchurl, mecab }: + +stdenv.mkDerivation rec { + pname = "mecab-naist-jdic"; + version = "0.6.3b-20111013"; + + src = fetchurl { + url = "https://github.com/tealbase/mecab-naist-jdic/raw/main/mecab-naist-jdic-${version}.tar.gz"; + sha256 = "sha256-yzdwDcmne5U/K/OxW0nP7NZ4SFMKLPirywm1lMpWKMw="; + }; + + buildInputs = [ mecab ]; + + configureFlags = [ + "--with-charset=utf8" + ]; + + buildPhase = '' + runHook preBuild + make + ${mecab}/libexec/mecab/mecab-dict-index -d . -o . -f UTF-8 -t utf-8 + runHook postBuild + ''; + + installPhase = '' + runHook preInstall + + mkdir -p $out/lib/mecab/dic/naist-jdic + cp *.dic *.bin *.def $out/lib/mecab/dic/naist-jdic/ + + runHook postInstall + ''; + + meta = with lib; { + description = "Naist Japanese Dictionary for MeCab"; + homepage = "https://taku910.github.io/mecab/"; + license = licenses.gpl2; + platforms = platforms.unix; + maintainers = with maintainers; [ samrose ]; + }; +} \ No newline at end of file diff --git a/nix/ext/orioledb.nix b/nix/ext/orioledb.nix new file mode 100644 index 0000000..4d8c51b --- /dev/null +++ b/nix/ext/orioledb.nix @@ -0,0 +1,32 @@ +{ lib, stdenv, fetchFromGitHub, curl, libkrb5, postgresql, python3, openssl }: + +stdenv.mkDerivation rec { + pname = "orioledb"; + name = pname; + src = fetchFromGitHub { + owner = "orioledb"; + repo = "orioledb"; + rev = "main"; + sha256 = "sha256-QbDp9S8JXO66sfaHZIQ3wFCVRxsAaaNSRgC6hvL3EKY="; + }; + version = "patches16_23"; + buildInputs = [ curl libkrb5 postgresql python3 openssl ]; + buildPhase = "make USE_PGXS=1 ORIOLEDB_PATCHSET_VERSION=23"; + installPhase = '' + runHook preInstall + mkdir -p $out/{lib,share/postgresql/extension} + + cp *.so 
$out/lib + cp *.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + + runHook postInstall + ''; + doCheck = true; + meta = with lib; { + description = "orioledb"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/nix/ext/pg-safeupdate.nix b/nix/ext/pg-safeupdate.nix new file mode 100644 index 0000000..d24fab5 --- /dev/null +++ b/nix/ext/pg-safeupdate.nix @@ -0,0 +1,29 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "pg-safeupdate"; + version = "1.4"; + + buildInputs = [ postgresql ]; + + src = fetchFromGitHub { + owner = "eradman"; + repo = pname; + rev = version; + hash = "sha256-1cyvVEC9MQGMr7Tg6EUbsVBrMc8ahdFS3+CmDkmAq4Y="; + }; + + installPhase = '' + install -D safeupdate${postgresql.dlSuffix} -t $out/lib + ''; + + meta = with lib; { + description = "A simple extension to PostgreSQL that requires criteria for UPDATE and DELETE"; + homepage = "https://github.com/eradman/pg-safeupdate"; + changelog = "https://github.com/eradman/pg-safeupdate/raw/${src.rev}/NEWS"; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + broken = versionOlder postgresql.version "14"; + maintainers = with maintainers; [ samrose ]; + }; +} diff --git a/nix/ext/pg_backtrace.nix b/nix/ext/pg_backtrace.nix new file mode 100644 index 0000000..b016912 --- /dev/null +++ b/nix/ext/pg_backtrace.nix @@ -0,0 +1,33 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "pg_backtrace"; + version = "1.1"; + + buildInputs = [ postgresql ]; + + src = fetchFromGitHub { + owner = "pashkinelfe"; + repo = pname; + rev = "d100bac815a7365e199263f5b3741baf71b14c70"; + hash = "sha256-IVCL4r4oj1Ams03D8y+XCFkckPFER/W9tQ68GkWQQMY="; + }; + + makeFlags = [ "USE_PGXS=1" ]; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp *.so $out/lib + cp *.sql 
$out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Updated fork of pg_backtrace"; + homepage = "https://github.com/pashkinelfe/pg_backtrace"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/nix/ext/pg_cron.nix b/nix/ext/pg_cron.nix new file mode 100644 index 0000000..5c546c7 --- /dev/null +++ b/nix/ext/pg_cron.nix @@ -0,0 +1,32 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "pg_cron"; + version = "1.6.4"; + + buildInputs = [ postgresql ]; + + src = fetchFromGitHub { + owner = "citusdata"; + repo = pname; + rev = "v${version}"; + hash = "sha256-t1DpFkPiSfdoGG2NgNT7g1lkvSooZoRoUrix6cBID40="; + }; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp *.so $out/lib + cp *.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Run Cron jobs through PostgreSQL"; + homepage = "https://github.com/citusdata/pg_cron"; + changelog = "https://github.com/citusdata/pg_cron/raw/v${version}/CHANGELOG.md"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/nix/ext/pg_graphql.nix b/nix/ext/pg_graphql.nix new file mode 100644 index 0000000..d7129e6 --- /dev/null +++ b/nix/ext/pg_graphql.nix @@ -0,0 +1,39 @@ +{ lib, stdenv, fetchFromGitHub, postgresql, buildPgrxExtension_0_11_3, cargo }: + +buildPgrxExtension_0_11_3 rec { + pname = "pg_graphql"; + version = "1.5.7"; + inherit postgresql; + + src = fetchFromGitHub { + owner = "tealbase"; + repo = pname; + rev = "v${version}"; + hash = "sha256-Q6XfcTKVOjo5pGy8QACc4QCHolKxEGU8e0TTC6Zg8go="; + }; + + nativeBuildInputs = [ cargo ]; + buildInputs = [ postgresql ]; + + CARGO="${cargo}/bin/cargo"; + #darwin env needs PGPORT to be unique for build to 
not clash with other pgrx extensions + env = lib.optionalAttrs stdenv.isDarwin { + POSTGRES_LIB = "${postgresql}/lib"; + RUSTFLAGS = "-C link-arg=-undefined -C link-arg=dynamic_lookup"; + PGPORT = "5434"; + }; + cargoHash = "sha256-WkHufMw8OvinMRYd06ZJACnVvY9OLi069nCgq3LSmMY="; + + # FIXME (aseipp): disable the tests since they try to install .control + # files into the wrong spot, aside from that the one main test seems + # to work, though + doCheck = false; + + meta = with lib; { + description = "GraphQL support for PostreSQL"; + homepage = "https://github.com/tealbase/${pname}"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/nix/ext/pg_hashids.nix b/nix/ext/pg_hashids.nix new file mode 100644 index 0000000..41c3ba6 --- /dev/null +++ b/nix/ext/pg_hashids.nix @@ -0,0 +1,31 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "pg_hashids"; + version = "cd0e1b31d52b394a0df64079406a14a4f7387cd6"; + + buildInputs = [ postgresql ]; + + src = fetchFromGitHub { + owner = "iCyberon"; + repo = pname; + rev = "${version}"; + hash = "sha256-Nmb7XLqQflYZfqj0yrewfb1Hl5YgEB5wfjBunPwIuOU="; + }; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp *.so $out/lib + cp *.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Generate short unique IDs in PostgreSQL"; + homepage = "https://github.com/iCyberon/pg_hashids"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/nix/ext/pg_jsonschema.nix b/nix/ext/pg_jsonschema.nix new file mode 100644 index 0000000..5105e6b --- /dev/null +++ b/nix/ext/pg_jsonschema.nix @@ -0,0 +1,66 @@ +{ lib, stdenv, fetchFromGitHub, postgresql, buildPgrxExtension_0_11_3, cargo }: + +buildPgrxExtension_0_11_3 rec { + pname = 
"pg_jsonschema"; + version = "0.3.1"; + inherit postgresql; + + src = fetchFromGitHub { + owner = "tealbase"; + repo = pname; + rev = "v${version}"; + hash = "sha256-YdKpOEiDIz60xE7C+EzpYjBcH0HabnDbtZl23CYls6g="; + }; + + nativeBuildInputs = [ cargo ]; + buildInputs = [ postgresql ]; + # update the following array when the pg_jsonschema version is updated + # required to ensure that extensions update scripts from previous versions are generated + + previousVersions = ["0.3.0" "0.2.0" "0.1.4" "0.1.4" "0.1.2" "0.1.1" "0.1.0"]; + CARGO="${cargo}/bin/cargo"; + #darwin env needs PGPORT to be unique for build to not clash with other pgrx extensions + env = lib.optionalAttrs stdenv.isDarwin { + POSTGRES_LIB = "${postgresql}/lib"; + RUSTFLAGS = "-C link-arg=-undefined -C link-arg=dynamic_lookup"; + PGPORT = "5433"; + }; + cargoHash = "sha256-VcS+efMDppofuFW2zNrhhsbC28By3lYekDFquHPta2g="; + + # FIXME (aseipp): testsuite tries to write files into /nix/store; we'll have + # to fix this a bit later. + doCheck = false; + + preBuild = '' + echo "Processing git tags..." + echo '${builtins.concatStringsSep "," previousVersions}' | sed 's/,/\n/g' > git_tags.txt + ''; + + postInstall = '' + echo "Creating SQL files for previous versions..." 
+ current_version="${version}" + sql_file="$out/share/postgresql/extension/pg_jsonschema--$current_version.sql" + + if [ -f "$sql_file" ]; then + while read -r previous_version; do + if [ "$(printf '%s\n' "$previous_version" "$current_version" | sort -V | head -n1)" = "$previous_version" ] && [ "$previous_version" != "$current_version" ]; then + new_file="$out/share/postgresql/extension/pg_jsonschema--$previous_version--$current_version.sql" + echo "Creating $new_file" + cp "$sql_file" "$new_file" + fi + done < git_tags.txt + else + echo "Warning: $sql_file not found" + fi + rm git_tags.txt + ''; + + + meta = with lib; { + description = "JSON Schema Validation for PostgreSQL"; + homepage = "https://github.com/tealbase/${pname}"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} \ No newline at end of file diff --git a/nix/ext/pg_net.nix b/nix/ext/pg_net.nix new file mode 100644 index 0000000..992e2c9 --- /dev/null +++ b/nix/ext/pg_net.nix @@ -0,0 +1,33 @@ +{ lib, stdenv, fetchFromGitHub, curl, postgresql }: + +stdenv.mkDerivation rec { + pname = "pg_net"; + version = "0.13.0"; + + buildInputs = [ curl postgresql ]; + + src = fetchFromGitHub { + owner = "tealbase"; + repo = pname; + rev = "refs/tags/v${version}"; + hash = "sha256-FRaTZPCJQPYAFmsJg22hYJJ0+gH1tMdDQoCQgiqEnaA="; + }; + + env.NIX_CFLAGS_COMPILE = "-Wno-error"; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp *${postgresql.dlSuffix} $out/lib + cp sql/*.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Async networking for Postgres"; + homepage = "https://github.com/tealbase/pg_net"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/nix/ext/pg_plan_filter.nix b/nix/ext/pg_plan_filter.nix new file mode 100644 index 0000000..2d7d224 
--- /dev/null +++ b/nix/ext/pg_plan_filter.nix @@ -0,0 +1,30 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "pg_plan_filter"; + version = "5081a7b5cb890876e67d8e7486b6a64c38c9a492"; + + buildInputs = [ postgresql ]; + + src = fetchFromGitHub { + owner = "pgexperts"; + repo = pname; + rev = "${version}"; + hash = "sha256-YNeIfmccT/DtOrwDmpYFCuV2/P6k3Zj23VWBDkOh6sw="; + }; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp *.so $out/lib + cp *.sql $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Filter PostgreSQL statements by execution plans"; + homepage = "https://github.com/pgexperts/${pname}"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/nix/ext/pg_regress.nix b/nix/ext/pg_regress.nix new file mode 100644 index 0000000..6e581c4 --- /dev/null +++ b/nix/ext/pg_regress.nix @@ -0,0 +1,24 @@ +{ lib +, stdenv +, postgresql +}: + +stdenv.mkDerivation { + pname = "pg_regress"; + version = postgresql.version; + + phases = [ "installPhase" ]; + + installPhase = '' + mkdir -p $out/bin + cp ${postgresql}/lib/pgxs/src/test/regress/pg_regress $out/bin/ + ''; + + meta = with lib; { + description = "Regression testing tool for PostgreSQL"; + homepage = "https://www.postgresql.org/"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} \ No newline at end of file diff --git a/nix/ext/pg_repack.nix b/nix/ext/pg_repack.nix new file mode 100644 index 0000000..f324737 --- /dev/null +++ b/nix/ext/pg_repack.nix @@ -0,0 +1,66 @@ +{ lib +, stdenv +, fetchFromGitHub +, openssl +, postgresql +, postgresqlTestHook +, readline +, testers +, zlib +}: + +stdenv.mkDerivation (finalAttrs: { + pname = "pg_repack"; + version = "1.5.0"; + + buildInputs = postgresql.buildInputs ++ [ postgresql ]; + + src = fetchFromGitHub { + owner = 
"reorg"; + repo = "pg_repack"; + rev = "ver_${finalAttrs.version}"; + hash = "sha256-do80phyMxwcRIkYyUt9z02z7byNQhK+pbSaCUmzG+4c="; + }; + + installPhase = '' + install -D bin/pg_repack -t $out/bin/ + install -D lib/pg_repack${postgresql.dlSuffix} -t $out/lib/ + install -D lib/{pg_repack--${finalAttrs.version}.sql,pg_repack.control} -t $out/share/postgresql/extension + ''; + + passthru.tests = { + version = testers.testVersion { + package = finalAttrs.finalPackage; + }; + extension = stdenv.mkDerivation { + name = "plpgsql-check-test"; + dontUnpack = true; + doCheck = true; + buildInputs = [ postgresqlTestHook ]; + nativeCheckInputs = [ (postgresql.withPackages (ps: [ ps.pg_repack ])) ]; + postgresqlTestUserOptions = "LOGIN SUPERUSER"; + failureHook = "postgresqlStop"; + checkPhase = '' + runHook preCheck + psql -a -v ON_ERROR_STOP=1 -c "CREATE EXTENSION pg_repack;" + runHook postCheck + ''; + installPhase = "touch $out"; + }; + }; + + meta = with lib; { + description = "Reorganize tables in PostgreSQL databases with minimal locks"; + longDescription = '' + pg_repack is a PostgreSQL extension which lets you remove bloat from tables and indexes, and optionally restore + the physical order of clustered indexes. Unlike CLUSTER and VACUUM FULL it works online, without holding an + exclusive lock on the processed tables during processing. pg_repack is efficient to boot, + with performance comparable to using CLUSTER directly. 
+ ''; + homepage = "https://github.com/reorg/pg_repack"; + license = licenses.bsd3; + maintainers = with maintainers; [ samrose ]; + inherit (postgresql.meta) platforms; + mainProgram = "pg_repack"; + }; +}) diff --git a/nix/ext/pg_stat_monitor.nix b/nix/ext/pg_stat_monitor.nix new file mode 100644 index 0000000..8784067 --- /dev/null +++ b/nix/ext/pg_stat_monitor.nix @@ -0,0 +1,49 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +let + # NOTE (aseipp): the 1.x series of pg_stat_monitor has some non-standard and + # weird build logic (Percona projects in general seem to have their own + # strange build harness) where it will try to pick the right .sql file to + # install into the extension dir based on the postgresql major version. for + # our purposes, we only need to support v13 and v14+, so just replicate this + # logic from the makefile and pick the right file here. + # + # this seems to all be cleaned up in version 2.0 of the extension, so ideally + # we could upgrade to it later on and nuke this. 
+ # DEPRECATED sqlFilename = if lib.versionOlder postgresql.version "14" + # then "pg_stat_monitor--1.0.13.sql.in" + # else "pg_stat_monitor--1.0.14.sql.in"; + +in +stdenv.mkDerivation rec { + pname = "pg_stat_monitor"; + version = "2.1.0"; + + buildInputs = [ postgresql ]; + + src = fetchFromGitHub { + owner = "percona"; + repo = pname; + rev = "refs/tags/${version}"; + hash = "sha256-STJVvvrLVLe1JevNu6u6EftzAWv+X+J8lu66su7Or2s="; + }; + + makeFlags = [ "USE_PGXS=1" ]; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp *.so $out/lib + cp *.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Query Performance Monitoring Tool for PostgreSQL"; + homepage = "https://github.com/percona/${pname}"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + broken = lib.versionOlder postgresql.version "15"; + }; +} diff --git a/nix/ext/pg_tle.nix b/nix/ext/pg_tle.nix new file mode 100644 index 0000000..6b1c7b1 --- /dev/null +++ b/nix/ext/pg_tle.nix @@ -0,0 +1,36 @@ +{ lib, stdenv, fetchFromGitHub, postgresql, flex, openssl, libkrb5 }: + +stdenv.mkDerivation rec { + pname = "pg_tle"; + version = "1.4.0"; + + nativeBuildInputs = [ flex ]; + buildInputs = [ openssl postgresql libkrb5 ]; + + src = fetchFromGitHub { + owner = "aws"; + repo = pname; + rev = "refs/tags/v${version}"; + hash = "sha256-crxj5R9jblIv0h8lpqddAoYe2UqgUlnvbOajKTzVces="; + }; + + + makeFlags = [ "FLEX=flex" ]; + + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp *.so $out/lib + cp *.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Framework for 'Trusted Language Extensions' in PostgreSQL"; + homepage = "https://github.com/aws/${pname}"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = 
licenses.postgresql; + }; +} diff --git a/nix/ext/pgaudit.nix b/nix/ext/pgaudit.nix new file mode 100644 index 0000000..f6c5d8b --- /dev/null +++ b/nix/ext/pgaudit.nix @@ -0,0 +1,44 @@ +{ lib, stdenv, fetchFromGitHub, libkrb5, openssl, postgresql }: +#adapted from https://github.com/NixOS/nixpkgs/blob/master/pkgs/servers/sql/postgresql/ext/pgaudit.nix +let + source = { + "16" = { + version = "16.0"; + hash = "sha256-8+tGOl1U5y9Zgu+9O5UDDE4bec4B0JC/BQ6GLhHzQzc="; + }; + "15" = { + version = "1.7.0"; + hash = "sha256-8pShPr4HJaJQPjW1iPJIpj3CutTx8Tgr+rOqoXtgCcw="; + }; + }.${lib.versions.major postgresql.version} or (throw "Source for pgaudit is not available for ${postgresql.version}"); +in +stdenv.mkDerivation { + pname = "pgaudit"; + inherit (source) version; + + src = fetchFromGitHub { + owner = "pgaudit"; + repo = "pgaudit"; + rev = source.version; + hash = source.hash; + }; + + buildInputs = [ libkrb5 openssl postgresql ]; + + makeFlags = [ "USE_PGXS=1" ]; + + installPhase = '' + install -D -t $out/lib pgaudit${postgresql.dlSuffix} + install -D -t $out/share/postgresql/extension *.sql + install -D -t $out/share/postgresql/extension *.control + ''; + + meta = with lib; { + description = "Open Source PostgreSQL Audit Logging"; + homepage = "https://github.com/pgaudit/pgaudit"; + changelog = "https://github.com/pgaudit/pgaudit/releases/tag/${source.version}"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/nix/ext/pgjwt.nix b/nix/ext/pgjwt.nix new file mode 100644 index 0000000..2eb60f7 --- /dev/null +++ b/nix/ext/pgjwt.nix @@ -0,0 +1,31 @@ +{ lib, stdenv, fetchFromGitHub, postgresql, unstableGitUpdater }: + +stdenv.mkDerivation rec { + pname = "pgjwt"; + version = "9742dab1b2f297ad3811120db7b21451bca2d3c9"; + + src = fetchFromGitHub { + owner = "michelp"; + repo = "pgjwt"; + rev = "${version}"; + hash = "sha256-Hw3R9bMGDmh+dMzjmqZSy/rT4mX8cPU969OJiARFg10="; + }; + + 
dontBuild = true; + installPhase = '' + mkdir -p $out/share/postgresql/extension + cp pg*sql *.control $out/share/postgresql/extension + ''; + + passthru.updateScript = unstableGitUpdater { }; + + meta = with lib; { + description = "PostgreSQL implementation of JSON Web Tokens"; + longDescription = '' + sign() and verify() functions to create and verify JSON Web Tokens. + ''; + license = licenses.mit; + platforms = postgresql.meta.platforms; + maintainers = with maintainers; [samrose]; + }; +} diff --git a/nix/ext/pgmq.nix b/nix/ext/pgmq.nix new file mode 100644 index 0000000..97a3c27 --- /dev/null +++ b/nix/ext/pgmq.nix @@ -0,0 +1,33 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "pgmq"; + version = "1.4.4"; + buildInputs = [ postgresql ]; + src = fetchFromGitHub { + owner = "tembo-io"; + repo = pname; + rev = "v${version}"; + hash = "sha256-z+8/BqIlHwlMnuIzMz6eylmYbSmhtsNt7TJf/CxbdVw="; + }; + + buildPhase = '' + cd pgmq-extension + ''; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + mv sql/pgmq.sql $out/share/postgresql/extension/pgmq--${version}.sql + cp sql/*.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "A lightweight message queue. 
Like AWS SQS and RSMQ but on Postgres."; + homepage = "https://github.com/tembo-io/pgmq"; + maintainers = with maintainers; [ olirice ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/nix/ext/pgroonga.nix b/nix/ext/pgroonga.nix new file mode 100644 index 0000000..78cc064 --- /dev/null +++ b/nix/ext/pgroonga.nix @@ -0,0 +1,61 @@ +{ lib, stdenv, fetchurl, pkg-config, postgresql, msgpack-c, callPackage, mecab, makeWrapper }: +let + tealbase-groonga = callPackage ../tealbase-groonga.nix { }; +in +stdenv.mkDerivation rec { + pname = "pgroonga"; + version = "3.0.7"; + src = fetchurl { + url = "https://packages.groonga.org/source/${pname}/${pname}-${version}.tar.gz"; + sha256 = "sha256-iF/zh4zDDpAw5fxW1WG8i2bfPt4VYsnYArwOoE/lwgM="; + }; + nativeBuildInputs = [ pkg-config makeWrapper ]; + buildInputs = [ postgresql msgpack-c tealbase-groonga mecab ]; + propagatedBuildInputs = [ tealbase-groonga ]; + configureFlags = [ + "--with-mecab=${mecab}" + "--enable-mecab" + "--with-groonga=${tealbase-groonga}" + "--with-groonga-plugin-dir=${tealbase-groonga}/lib/groonga/plugins" + ]; + + makeFlags = [ + "HAVE_MSGPACK=1" + "MSGPACK_PACKAGE_NAME=msgpack-c" + "HAVE_MECAB=1" + ]; + + preConfigure = '' + export GROONGA_LIBS="-L${tealbase-groonga}/lib -lgroonga" + export GROONGA_CFLAGS="-I${tealbase-groonga}/include" + export MECAB_CONFIG="${mecab}/bin/mecab-config" + ''; + + installPhase = '' + mkdir -p $out/lib $out/share/postgresql/extension $out/bin + install -D pgroonga${postgresql.dlSuffix} -t $out/lib/ + install -D pgroonga.control -t $out/share/postgresql/extension + install -D data/pgroonga-*.sql -t $out/share/postgresql/extension + install -D pgroonga_database${postgresql.dlSuffix} -t $out/lib/ + install -D pgroonga_database.control -t $out/share/postgresql/extension + install -D data/pgroonga_database-*.sql -t $out/share/postgresql/extension + + echo "Debug: Groonga plugins directory contents:" + ls -l 
${tealbase-groonga}/lib/groonga/plugins/tokenizers/ + ''; + + meta = with lib; { + description = "A PostgreSQL extension to use Groonga as the index"; + longDescription = '' + PGroonga is a PostgreSQL extension to use Groonga as the index. + PostgreSQL supports full text search against languages that use only alphabet and digit. + It means that PostgreSQL doesn't support full text search against Japanese, Chinese and so on. + You can use super fast full text search feature against all languages by installing PGroonga into your PostgreSQL. + ''; + homepage = "https://pgroonga.github.io/"; + changelog = "https://github.com/pgroonga/pgroonga/releases/tag/${version}"; + license = licenses.postgresql; + platforms = postgresql.meta.platforms; + maintainers = with maintainers; [ samrose ]; + }; +} \ No newline at end of file diff --git a/nix/ext/pgrouting.nix b/nix/ext/pgrouting.nix new file mode 100644 index 0000000..36edf34 --- /dev/null +++ b/nix/ext/pgrouting.nix @@ -0,0 +1,31 @@ +{ lib, stdenv, fetchFromGitHub, postgresql, perl, cmake, boost }: + +stdenv.mkDerivation rec { + pname = "pgrouting"; + version = "3.4.1"; + + nativeBuildInputs = [ cmake perl ]; + buildInputs = [ postgresql boost ]; + + src = fetchFromGitHub { + owner = "pgRouting"; + repo = pname; + rev = "v${version}"; + hash = "sha256-QC77AnPGpPQGEWi6JtJdiNsB2su5+aV2pKg5ImR2B0k="; + }; + + installPhase = '' + install -D lib/*.so -t $out/lib + install -D sql/pgrouting--${version}.sql -t $out/share/postgresql/extension + install -D sql/common/pgrouting.control -t $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "A PostgreSQL/PostGIS extension that provides geospatial routing functionality"; + homepage = "https://pgrouting.org/"; + changelog = "https://github.com/pgRouting/pgrouting/releases/tag/v${version}"; + maintainers = with maintainers; [ steve-chavez samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.gpl2Plus; + }; +} diff --git a/nix/ext/pgsodium.nix 
b/nix/ext/pgsodium.nix new file mode 100644 index 0000000..e3b0da2 --- /dev/null +++ b/nix/ext/pgsodium.nix @@ -0,0 +1,31 @@ +{ lib, stdenv, fetchFromGitHub, libsodium, postgresql }: + +stdenv.mkDerivation rec { + pname = "pgsodium"; + version = "3.1.8"; + + buildInputs = [ libsodium postgresql ]; + + src = fetchFromGitHub { + owner = "michelp"; + repo = pname; + rev = "refs/tags/v${version}"; + hash = "sha256-j5F1PPdwfQRbV8XJ8Mloi8FvZF0MTl4eyIJcBYQy1E4="; + }; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp *.so $out/lib + cp sql/*.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Modern cryptography for PostgreSQL"; + homepage = "https://github.com/michelp/${pname}"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/nix/ext/pgsql-http.nix b/nix/ext/pgsql-http.nix new file mode 100644 index 0000000..2edaa9d --- /dev/null +++ b/nix/ext/pgsql-http.nix @@ -0,0 +1,31 @@ +{ lib, stdenv, fetchFromGitHub, curl, postgresql }: + +stdenv.mkDerivation rec { + pname = "pgsql-http"; + version = "1.6.0"; + + buildInputs = [ curl postgresql ]; + + src = fetchFromGitHub { + owner = "pramsey"; + repo = pname; + rev = "refs/tags/v${version}"; + hash = "sha256-CPHfx7vhWfxkXsoKTzyFuTt47BPMvzi/pi1leGcuD60="; + }; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp *.so $out/lib + cp *.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "HTTP client for Postgres"; + homepage = "https://github.com/pramsey/${pname}"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/nix/ext/pgtap.nix b/nix/ext/pgtap.nix new file mode 100644 index 0000000..c5a17c9 --- /dev/null +++ b/nix/ext/pgtap.nix @@ -0,0 +1,33 @@ 
+{ lib, stdenv, fetchFromGitHub, postgresql, perl, perlPackages, which }: + +stdenv.mkDerivation rec { + pname = "pgtap"; + version = "1.2.0"; + + src = fetchFromGitHub { + owner = "theory"; + repo = "pgtap"; + rev = "v${version}"; + hash = "sha256-lb0PRffwo6J5a6Hqw1ggvn0cW7gPZ02OEcLPi9ineI8="; + }; + + nativeBuildInputs = [ postgresql perl perlPackages.TAPParserSourceHandlerpgTAP which ]; + + installPhase = '' + install -D {sql/pgtap--${version}.sql,pgtap.control} -t $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "A unit testing framework for PostgreSQL"; + longDescription = '' + pgTAP is a unit testing framework for PostgreSQL written in PL/pgSQL and PL/SQL. + It includes a comprehensive collection of TAP-emitting assertion functions, + as well as the ability to integrate with other TAP-emitting test frameworks. + It can also be used in the xUnit testing style. + ''; + maintainers = with maintainers; [ samrose ]; + homepage = "https://pgtap.org"; + inherit (postgresql.meta) platforms; + license = licenses.mit; + }; +} diff --git a/nix/ext/pgvector.nix b/nix/ext/pgvector.nix new file mode 100644 index 0000000..965be36 --- /dev/null +++ b/nix/ext/pgvector.nix @@ -0,0 +1,31 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "pgvector"; + version = "0.8.0"; + + buildInputs = [ postgresql ]; + + src = fetchFromGitHub { + owner = "pgvector"; + repo = pname; + rev = "refs/tags/v${version}"; + hash = "sha256-JsZV+I4eRMypXTjGmjCtMBXDVpqTIPHQa28ogXncE/Q="; + }; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp *.so $out/lib + cp sql/*.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Open-source vector similarity search for Postgres"; + homepage = "https://github.com/${src.owner}/${src.repo}"; + maintainers = with maintainers; [ olirice ]; + platforms = postgresql.meta.platforms; + license = 
licenses.postgresql; + }; +} diff --git a/nix/ext/pljava.nix b/nix/ext/pljava.nix new file mode 100644 index 0000000..16f8a59 --- /dev/null +++ b/nix/ext/pljava.nix @@ -0,0 +1,51 @@ +{ stdenv, lib, fetchFromGitHub, openssl, openjdk, maven, postgresql, libkrb5, makeWrapper, gcc, pkg-config, which }: + +maven.buildMavenPackage rec { + pname = "pljava"; + + version = "1.6.7"; + + src = fetchFromGitHub { + owner = "tada"; + repo = "pljava"; + rev = "V1_6_7"; + sha256 = "sha256-M17adSLsw47KZ2BoUwxyWkXKRD8TcexDAy61Yfw4fNU="; + + }; + + mvnParameters = "clean install -Dmaven.test.skip -DskipTests -Dmaven.javadoc.skip=true"; + mvnHash = "sha256-lcxRduh/nKcPL6YQIVTsNH0L4ga0LgJpQKgX5IPkRzs="; + + nativeBuildInputs = [ makeWrapper maven openjdk postgresql openssl postgresql gcc libkrb5 pkg-config ]; + buildInputs = [ stdenv.cc.cc.lib which]; + buildPhase = '' + export PATH=$(lib.makeBinPath [ postgresql ]):$PATH + + ''; + buildOffline = true; + + installPhase = '' + mkdir -p $out/pljavabuild + cp -r * $out/pljavabuild + mkdir -p $out/share/postgresql/extension/pljava + mkdir -p $out/share/postgresql/pljava + mkdir -p $out/lib + mkdir -p $out/etc + java -Dpgconfig=${postgresql}/bin/pg_config \ + -Dpgconfig.sharedir=$out/share \ + -Dpgconfig.sysconfdir=$out/etc/pljava.policy \ + -Dpgconfig.pkglibdir=$out/lib \ + -jar $out/pljavabuild/pljava-packaging/target/pljava-pg15.jar + cp $out/share/pljava/* $out/share/postgresql/extension/pljava + cp $out/share/pljava/* $out/share/postgresql/pljava + cp $out/share/extension/*.control $out/share/postgresql/extension + rm -r $out/pljavabuild + ''; + + meta = with lib; { + description = "PL/Java extension for PostgreSQL"; + homepage = https://github.com/tada/pljava; + license = licenses.bsd3; + maintainers = [ maintainers.samrose ]; # Update with actual maintainer info + }; +} diff --git a/nix/ext/plpgsql-check.nix b/nix/ext/plpgsql-check.nix new file mode 100644 index 0000000..7be2aac --- /dev/null +++ b/nix/ext/plpgsql-check.nix @@ -0,0 
+1,46 @@ +{ lib, stdenv, fetchFromGitHub, postgresql, postgresqlTestHook }: + +stdenv.mkDerivation rec { + pname = "plpgsql-check"; + version = "2.7.11"; + + src = fetchFromGitHub { + owner = "okbob"; + repo = "plpgsql_check"; + rev = "v${version}"; + hash = "sha256-vR3MvfmUP2QEAtXFpq0NCCKck3wZPD+H3QleHtyVQJs="; + }; + + buildInputs = [ postgresql ]; + + installPhase = '' + install -D -t $out/lib *${postgresql.dlSuffix} + install -D -t $out/share/postgresql/extension *.sql + install -D -t $out/share/postgresql/extension *.control + ''; + + passthru.tests.extension = stdenv.mkDerivation { + name = "plpgsql-check-test"; + dontUnpack = true; + doCheck = true; + buildInputs = [ postgresqlTestHook ]; + nativeCheckInputs = [ (postgresql.withPackages (ps: [ ps.plpgsql_check ])) ]; + postgresqlTestUserOptions = "LOGIN SUPERUSER"; + failureHook = "postgresqlStop"; + checkPhase = '' + runHook preCheck + psql -a -v ON_ERROR_STOP=1 -c "CREATE EXTENSION plpgsql_check;" + runHook postCheck + ''; + installPhase = "touch $out"; + }; + + meta = with lib; { + description = "Linter tool for language PL/pgSQL"; + homepage = "https://github.com/okbob/plpgsql_check"; + changelog = "https://github.com/okbob/plpgsql_check/releases/tag/v${version}"; + platforms = postgresql.meta.platforms; + license = licenses.mit; + maintainers = [ maintainers.marsam ]; + }; +} diff --git a/nix/ext/plv8.nix b/nix/ext/plv8.nix new file mode 100644 index 0000000..338bba6 --- /dev/null +++ b/nix/ext/plv8.nix @@ -0,0 +1,194 @@ +{ stdenv +, lib +, fetchFromGitHub +, v8 +, perl +, postgresql +# For passthru test on various systems, and local development on macos +# not we are not currently using passthru tests but retaining for possible contrib +# to nixpkgs +, runCommand +, coreutils +, gnugrep +, clang +, xcbuild +, darwin +, patchelf +}: + +stdenv.mkDerivation (finalAttrs: { + pname = "plv8"; + version = "3.1.10"; + + src = fetchFromGitHub { + owner = "plv8"; + repo = "plv8"; + rev = 
"v${finalAttrs.version}"; + hash = "sha256-g1A/XPC0dX2360Gzvmo9/FSQnM6Wt2K4eR0pH0p9fz4="; + }; + + patches = [ + # Allow building with system v8. + # https://github.com/plv8/plv8/pull/505 (rejected) + ./0001-build-Allow-using-V8-from-system.patch + ]; + + nativeBuildInputs = [ + perl + ] ++ lib.optionals stdenv.isDarwin [ + clang + xcbuild + ]; + + buildInputs = [ + v8 + postgresql + ] ++ lib.optionals stdenv.isDarwin [ + darwin.apple_sdk.frameworks.CoreFoundation + darwin.apple_sdk.frameworks.Kerberos + ]; + + buildFlags = [ "all" ]; + + makeFlags = [ + # Nixpkgs build a v8 monolith instead of separate v8_libplatform. + "USE_SYSTEM_V8=1" + "V8_OUTDIR=${v8}/lib" + "PG_CONFIG=${postgresql}/bin/pg_config" + ] ++ lib.optionals stdenv.isDarwin [ + "CC=${clang}/bin/clang" + "CXX=${clang}/bin/clang++" + "SHLIB_LINK=-L${v8}/lib -lv8_monolith -Wl,-rpath,${v8}/lib" + ] ++ lib.optionals (!stdenv.isDarwin) [ + "SHLIB_LINK=-lv8" + ]; + + NIX_LDFLAGS = (lib.optionals stdenv.isDarwin [ + "-L${postgresql}/lib" + "-L${v8}/lib" + "-lv8_monolith" + "-lpq" + "-lpgcommon" + "-lpgport" + "-F${darwin.apple_sdk.frameworks.CoreFoundation}/Library/Frameworks" + "-framework" "CoreFoundation" + "-F${darwin.apple_sdk.frameworks.Kerberos}/Library/Frameworks" + "-framework" "Kerberos" + "-undefined" "dynamic_lookup" + "-flat_namespace" + ]); + + installFlags = [ + # PGXS only supports installing to postgresql prefix so we need to redirect this + "DESTDIR=${placeholder "out"}" + ]; + + # No configure script. + dontConfigure = true; + + postPatch = '' + patchShebangs ./generate_upgrade.sh + substituteInPlace generate_upgrade.sh \ + --replace " 2.3.10 " " 2.3.10 2.3.11 2.3.12 2.3.13 2.3.14 2.3.15 " + + ${lib.optionalString stdenv.isDarwin '' + # Replace g++ with clang++ in Makefile + sed -i 's/g++/clang++/g' Makefile + ''} + ''; + + postInstall = '' + # Move the redirected to proper directory. 
+ # There appear to be no references to the install directories + # so changing them does not cause issues. + mv "$out/nix/store"/*/* "$out" + rmdir "$out/nix/store"/* "$out/nix/store" "$out/nix" + mv "$out/lib/plv8-${finalAttrs.version}.so" "$out/lib/plv8.so" + ln -s "$out/lib/plv8.so" "$out/lib/plv8-${finalAttrs.version}.so" + sed -i 's|module_pathname = '"'"'$libdir/plv8-[0-9.]*'"'"'|module_pathname = '"'"'$libdir/plv8'"'"'|' "$out/share/postgresql/extension/plv8.control" + sed -i 's|module_pathname = '"'"'$libdir/plv8-[0-9.]*'"'"'|module_pathname = '"'"'$libdir/plv8'"'"'|' "$out/share/postgresql/extension/plcoffee.control" + sed -i 's|module_pathname = '"'"'$libdir/plv8-[0-9.]*'"'"'|module_pathname = '"'"'$libdir/plv8'"'"'|' "$out/share/postgresql/extension/plls.control" + ${lib.optionalString stdenv.isDarwin '' + install_name_tool -add_rpath "${v8}/lib" $out/lib/plv8.so + install_name_tool -add_rpath "${postgresql}/lib" $out/lib/plv8.so + install_name_tool -add_rpath "${stdenv.cc.cc.lib}/lib" $out/lib/plv8.so + install_name_tool -change @rpath/libv8_monolith.dylib ${v8}/lib/libv8_monolith.dylib $out/lib/plv8.so + ''} + + ${lib.optionalString (!stdenv.isDarwin) '' + ${patchelf}/bin/patchelf --set-rpath "${v8}/lib:${postgresql}/lib:${stdenv.cc.cc.lib}/lib" $out/lib/plv8.so + ''} + ''; + + passthru = { + tests = + let + postgresqlWithSelf = postgresql.withPackages (_: [ + finalAttrs.finalPackage + ]); + in { + smoke = runCommand "plv8-smoke-test" {} '' + export PATH=${lib.makeBinPath [ + postgresqlWithSelf + coreutils + gnugrep + ]} + db="$PWD/testdb" + initdb "$db" + postgres -k "$db" -D "$db" & + pid="$!" + + for i in $(seq 1 100); do + if psql -h "$db" -d postgres -c "" 2>/dev/null; then + break + elif ! 
kill -0 "$pid"; then + exit 1 + else + sleep 0.1 + fi + done + + psql -h "$db" -d postgres -c 'CREATE EXTENSION plv8; DO $$ plv8.elog(NOTICE, plv8.version); $$ LANGUAGE plv8;' 2> "$out" + grep -q "${finalAttrs.version}" "$out" + kill -0 "$pid" + ''; + + regression = stdenv.mkDerivation { + name = "plv8-regression"; + inherit (finalAttrs) src patches nativeBuildInputs buildInputs dontConfigure; + + buildPhase = '' + runHook preBuild + + # The regression tests need to be run in the order specified in the Makefile. + echo -e "include Makefile\nprint_regress_files:\n\t@echo \$(REGRESS)" > Makefile.regress + REGRESS_TESTS=$(make -f Makefile.regress print_regress_files) + + ${postgresql}/lib/pgxs/src/test/regress/pg_regress \ + --bindir='${postgresqlWithSelf}/bin' \ + --temp-instance=regress-instance \ + --dbname=contrib_regression \ + $REGRESS_TESTS + + runHook postBuild + ''; + + installPhase = '' + runHook preInstall + + touch "$out" + + runHook postInstall + ''; + }; + }; + }; + + meta = with lib; { + description = "V8 Engine Javascript Procedural Language add-on for PostgreSQL"; + homepage = "https://plv8.github.io/"; + maintainers = with maintainers; [ samrose ]; + platforms = [ "x86_64-linux" "aarch64-linux" "aarch64-darwin" ]; + license = licenses.postgresql; + }; +}) \ No newline at end of file diff --git a/nix/ext/postgis.nix b/nix/ext/postgis.nix new file mode 100644 index 0000000..e0b6dfb --- /dev/null +++ b/nix/ext/postgis.nix @@ -0,0 +1,87 @@ +{ fetchurl +, lib, stdenv +, perl +, libxml2 +, postgresql +, geos +, proj +, gdalMinimal +, json_c +, pkg-config +, file +, protobufc +, libiconv +, pcre2 +, nixosTests +, callPackage +}: + +let + gdal = gdalMinimal; + sfcgal = callPackage ./sfcgal/sfcgal.nix { }; +in +stdenv.mkDerivation rec { + pname = "postgis"; + version = "3.3.2"; + + outputs = [ "out" "doc" ]; + + src = fetchurl { + url = "https://download.osgeo.org/postgis/source/postgis-${version}.tar.gz"; + sha256 = 
"sha256-miohnaAFoXMKOdGVmhx87GGbHvsAm2W+gP/CW60pkGg="; + }; + + buildInputs = [ libxml2 postgresql geos proj gdal json_c protobufc pcre2.dev sfcgal ] + ++ lib.optional stdenv.isDarwin libiconv; + nativeBuildInputs = [ perl pkg-config ]; + dontDisableStatic = true; + + # postgis config directory assumes /include /lib from the same root for json-c library + env.NIX_LDFLAGS = "-L${lib.getLib json_c}/lib"; + + + preConfigure = '' + sed -i 's@/usr/bin/file@${file}/bin/file@' configure + configureFlags="--datadir=$out/share/postgresql --datarootdir=$out/share/postgresql --bindir=$out/bin --docdir=$doc/share/doc/${pname} --with-gdalconfig=${gdal}/bin/gdal-config --with-jsondir=${json_c.dev} --disable-extension-upgrades-install --with-sfcgal" + + makeFlags="PERL=${perl}/bin/perl datadir=$out/share/postgresql pkglibdir=$out/lib bindir=$out/bin docdir=$doc/share/doc/${pname}" + ''; + postConfigure = '' + sed -i "s|@mkdir -p \$(DESTDIR)\$(PGSQL_BINDIR)||g ; + s|\$(DESTDIR)\$(PGSQL_BINDIR)|$prefix/bin|g + " \ + "raster/loader/Makefile"; + sed -i "s|\$(DESTDIR)\$(PGSQL_BINDIR)|$prefix/bin|g + " \ + "raster/scripts/python/Makefile"; + mkdir -p $out/bin + + # postgis' build system assumes it is being installed to the same place as postgresql, and looks + # for the postgres binary relative to $PREFIX. We gently support this system using an illusion. + ln -s ${postgresql}/bin/postgres $out/bin/postgres + ''; + + # create aliases for all commands adding version information + postInstall = '' + # Teardown the illusory postgres used for building; see postConfigure. 
+ rm $out/bin/postgres + + for prog in $out/bin/*; do # */ + ln -s $prog $prog-${version} + done + + mkdir -p $doc/share/doc/postgis + mv doc/* $doc/share/doc/postgis/ + ''; + + passthru.tests.postgis = nixosTests.postgis; + + meta = with lib; { + description = "Geographic Objects for PostgreSQL"; + homepage = "https://postgis.net/"; + changelog = "https://git.osgeo.org/gitea/postgis/postgis/raw/tag/${version}/NEWS"; + license = licenses.gpl2; + maintainers = with maintainers; [ samrose ]; + inherit (postgresql.meta) platforms; + }; +} diff --git a/nix/ext/rum.nix b/nix/ext/rum.nix new file mode 100644 index 0000000..16bf106 --- /dev/null +++ b/nix/ext/rum.nix @@ -0,0 +1,31 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "rum"; + version = "1.3.13"; + + src = fetchFromGitHub { + owner = "postgrespro"; + repo = "rum"; + rev = version; + hash = "sha256-yy2xeDnk3fENN+En0st4mv60nZlqPafIzwf68jwJ5fE="; + }; + + buildInputs = [ postgresql ]; + + makeFlags = [ "USE_PGXS=1" ]; + + installPhase = '' + install -D -t $out/lib *${postgresql.dlSuffix} + install -D -t $out/share/postgresql/extension *.control + install -D -t $out/share/postgresql/extension *.sql + ''; + + meta = with lib; { + description = "Full text search index method for PostgreSQL"; + homepage = "https://github.com/postgrespro/rum"; + license = licenses.postgresql; + platforms = postgresql.meta.platforms; + maintainers = with maintainers; [ samrose ]; + }; +} diff --git a/nix/ext/sfcgal/sfcgal.nix b/nix/ext/sfcgal/sfcgal.nix new file mode 100644 index 0000000..54d7b52 --- /dev/null +++ b/nix/ext/sfcgal/sfcgal.nix @@ -0,0 +1,31 @@ +{ lib, stdenv, fetchFromGitLab, cgal, cmake, pkg-config, gmp, mpfr, boost }: + +stdenv.mkDerivation rec { + pname = "sfcgal"; + version = "61f3b08ade49493b56c6bafa98c7c1f84addbc10"; + + src = fetchFromGitLab { + owner = "sfcgal"; + repo = "SFCGAL"; + rev = "${version}"; + hash = "sha256-nKSqiFyMkZAYptIeShb1zFg9lYSny3kcGJfxdeTFqxw="; + }; + + 
nativeBuildInputs = [ cmake pkg-config cgal gmp mpfr boost ]; + + cmakeFlags = [ "-DCGAL_DIR=${cgal}" "-DCMAKE_PREFIX_PATH=${cgal}" ]; + + + postPatch = '' + substituteInPlace sfcgal.pc.in \ + --replace '$'{prefix}/@CMAKE_INSTALL_LIBDIR@ @CMAKE_INSTALL_FULL_LIBDIR@ + ''; + + meta = with lib; { + description = "A wrapper around CGAL that intents to implement 2D and 3D operations on OGC standards models"; + homepage = "https://sfcgal.gitlab.io/SFCGAL/"; + license = with licenses; [ gpl3Plus lgpl3Plus]; + platforms = platforms.all; + maintainers = with maintainers; [ samrose ]; + }; +} diff --git a/nix/ext/supautils.nix b/nix/ext/supautils.nix new file mode 100644 index 0000000..baf806d --- /dev/null +++ b/nix/ext/supautils.nix @@ -0,0 +1,29 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "supautils"; + version = "2.5.0"; + + buildInputs = [ postgresql ]; + + src = fetchFromGitHub { + owner = "tealbase"; + repo = pname; + rev = "refs/tags/v${version}"; + hash = "sha256-NyAk+QpQEdibmFY4yceO/FzMOhRYhKXf4XUw9XJ5rOY="; + }; + + installPhase = '' + mkdir -p $out/lib + + install -D supautils${postgresql.dlSuffix} -t $out/lib + ''; + + meta = with lib; { + description = "PostgreSQL extension for enhanced security"; + homepage = "https://github.com/tealbase/${pname}"; + maintainers = with maintainers; [ steve-chavez ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/nix/ext/timescaledb-2.9.1.nix b/nix/ext/timescaledb-2.9.1.nix new file mode 100644 index 0000000..ad955e8 --- /dev/null +++ b/nix/ext/timescaledb-2.9.1.nix @@ -0,0 +1,51 @@ +{ lib, stdenv, fetchFromGitHub, cmake, postgresql, openssl, libkrb5 }: + +stdenv.mkDerivation rec { + pname = "timescaledb-apache"; + version = "2.9.1"; + + nativeBuildInputs = [ cmake ]; + buildInputs = [ postgresql openssl libkrb5 ]; + + src = fetchFromGitHub { + owner = "timescale"; + repo = "timescaledb"; + rev = version; + hash = 
"sha256-fvVSxDiGZAewyuQ2vZDb0I6tmlDXl6trjZp8+qDBtb8="; + }; + + cmakeFlags = [ "-DSEND_TELEMETRY_DEFAULT=OFF" "-DREGRESS_CHECKS=OFF" "-DTAP_CHECKS=OFF" "-DAPACHE_ONLY=1" ] + ++ lib.optionals stdenv.isDarwin [ "-DLINTER=OFF" ]; + + # Fix the install phase which tries to install into the pgsql extension dir, + # and cannot be manually overridden. This is rather fragile but works OK. + postPatch = '' + for x in CMakeLists.txt sql/CMakeLists.txt; do + substituteInPlace "$x" \ + --replace 'DESTINATION "''${PG_SHAREDIR}/extension"' "DESTINATION \"$out/share/postgresql/extension\"" + done + + for x in src/CMakeLists.txt src/loader/CMakeLists.txt tsl/src/CMakeLists.txt; do + substituteInPlace "$x" \ + --replace 'DESTINATION ''${PG_PKGLIBDIR}' "DESTINATION \"$out/lib\"" + done + ''; + + + # timescaledb-2.9.1.so already exists in the lib directory + # we have no need for the timescaledb.so or control file + postInstall = '' + rm $out/lib/timescaledb.so + rm $out/share/postgresql/extension/timescaledb.control + ''; + + meta = with lib; { + description = "Scales PostgreSQL for time-series data via automatic partitioning across time and space"; + homepage = "https://www.timescale.com/"; + changelog = "https://github.com/timescale/timescaledb/blob/${version}/CHANGELOG.md"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.asl20; + broken = versionOlder postgresql.version "13"; + }; +} diff --git a/nix/ext/timescaledb.nix b/nix/ext/timescaledb.nix new file mode 100644 index 0000000..1c87916 --- /dev/null +++ b/nix/ext/timescaledb.nix @@ -0,0 +1,43 @@ +{ lib, stdenv, fetchFromGitHub, cmake, postgresql, openssl, libkrb5 }: + +stdenv.mkDerivation rec { + pname = "timescaledb-apache"; + version = "2.16.1"; + + nativeBuildInputs = [ cmake ]; + buildInputs = [ postgresql openssl libkrb5 ]; + + src = fetchFromGitHub { + owner = "timescale"; + repo = "timescaledb"; + rev = version; + hash = 
"sha256-sLxWdBmih9mgiO51zLLxn9uwJVYc5JVHJjSWoADoJ+w="; + }; + + cmakeFlags = [ "-DSEND_TELEMETRY_DEFAULT=OFF" "-DREGRESS_CHECKS=OFF" "-DTAP_CHECKS=OFF" "-DAPACHE_ONLY=1" ] + ++ lib.optionals stdenv.isDarwin [ "-DLINTER=OFF" ]; + + # Fix the install phase which tries to install into the pgsql extension dir, + # and cannot be manually overridden. This is rather fragile but works OK. + postPatch = '' + for x in CMakeLists.txt sql/CMakeLists.txt; do + substituteInPlace "$x" \ + --replace 'DESTINATION "''${PG_SHAREDIR}/extension"' "DESTINATION \"$out/share/postgresql/extension\"" + done + + for x in src/CMakeLists.txt src/loader/CMakeLists.txt tsl/src/CMakeLists.txt; do + substituteInPlace "$x" \ + --replace 'DESTINATION ''${PG_PKGLIBDIR}' "DESTINATION \"$out/lib\"" + done + ''; + + meta = with lib; { + description = "Scales PostgreSQL for time-series data via automatic partitioning across time and space"; + homepage = "https://www.timescale.com/"; + changelog = "https://github.com/timescale/timescaledb/blob/${version}/CHANGELOG.md"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.asl20; + broken = versionOlder postgresql.version "13"; + }; +} diff --git a/nix/ext/use-system-groonga.patch b/nix/ext/use-system-groonga.patch new file mode 100644 index 0000000..6d3042b --- /dev/null +++ b/nix/ext/use-system-groonga.patch @@ -0,0 +1,21 @@ +diff --git a/CMakeLists.txt b/CMakeLists.txt +index 33b34477..f4ffefe5 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -12,7 +12,6 @@ if(MSVC_VERSION LESS 1800) + message(FATAL_ERROR "PGroonga supports only MSVC 2013 or later") + endif() + +-add_subdirectory(vendor/groonga) + + set(PGRN_POSTGRESQL_DIR "${CMAKE_INSTALL_PREFIX}" + CACHE PATH "PostgreSQL binary directory") +@@ -52,8 +51,6 @@ string(REGEX REPLACE "([0-9]+)\\.([0-9]+)\\.([0-9]+)" "\\3" + string(REGEX REPLACE ".*comment = '([^']+)'.*" "\\1" + PGRN_DESCRIPTION "${PGRN_CONTROL}") + +-file(READ 
"${CMAKE_CURRENT_SOURCE_DIR}/vendor/groonga/bundled_message_pack_version" +- PGRN_BUNDLED_MESSAGE_PACK_VERSION) + string(STRIP + "${PGRN_BUNDLED_MESSAGE_PACK_VERSION}" + PGRN_BUNDLED_MESSAGE_PACK_VERSION) \ No newline at end of file diff --git a/nix/ext/vault.nix b/nix/ext/vault.nix new file mode 100644 index 0000000..b840771 --- /dev/null +++ b/nix/ext/vault.nix @@ -0,0 +1,30 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "vault"; + version = "0.2.9"; + + buildInputs = [ postgresql ]; + + src = fetchFromGitHub { + owner = "tealbase"; + repo = pname; + rev = "refs/tags/v${version}"; + hash = "sha256-kXTngBW4K6FkZM8HvJG2Jha6OQqbejhnk7tchxy031I="; + }; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp sql/*.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Store encrypted secrets in PostgreSQL"; + homepage = "https://github.com/tealbase/${pname}"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/nix/ext/wal2json.nix b/nix/ext/wal2json.nix new file mode 100644 index 0000000..751eb64 --- /dev/null +++ b/nix/ext/wal2json.nix @@ -0,0 +1,31 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "wal2json"; + version = "2_5"; + + src = fetchFromGitHub { + owner = "eulerto"; + repo = "wal2json"; + rev = "wal2json_${builtins.replaceStrings ["."] ["_"] version}"; + hash = "sha256-Gpc9uDKrs/dmVSFgdgHM453+TaEnhRh9t0gDbSn8FUI="; + }; + + buildInputs = [ postgresql ]; + + makeFlags = [ "USE_PGXS=1" ]; + + installPhase = '' + install -D -t $out/lib *${postgresql.dlSuffix} + install -D -t $out/share/postgresql/extension sql/*.sql + ''; + + meta = with lib; { + description = "PostgreSQL JSON output plugin for changeset extraction"; + homepage = "https://github.com/eulerto/wal2json"; + changelog = 
"https://github.com/eulerto/wal2json/releases/tag/wal2json_${version}"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.bsd3; + }; +} diff --git a/nix/ext/wrappers/default.nix b/nix/ext/wrappers/default.nix new file mode 100644 index 0000000..c9eacbb --- /dev/null +++ b/nix/ext/wrappers/default.nix @@ -0,0 +1,121 @@ +{ lib +, stdenv +, fetchFromGitHub +, openssl +, pkg-config +, postgresql +, buildPgrxExtension_0_11_3 +, cargo +, darwin +, jq +}: + +buildPgrxExtension_0_11_3 rec { + pname = "tealbase-wrappers"; + version = "0.4.2"; + # update the following array when the wrappers version is updated + # required to ensure that extensions update scripts from previous versions are generated + previousVersions = ["0.4.1" "0.4.0" "0.3.1" "0.3.0" "0.2.0" "0.1.19" "0.1.18" "0.1.17" "0.1.16" "0.1.15" "0.1.14" "0.1.12" "0.1.11" "0.1.10" "0.1.9" "0.1.8" "0.1.7" "0.1.6" "0.1.5" "0.1.4" "0.1.1" "0.1.0"]; + inherit postgresql; + src = fetchFromGitHub { + owner = "tealbase"; + repo = "wrappers"; + rev = "v${version}"; + hash = "sha256-ut3IQED6ANXgabiHoEUdfSrwkuuYYSpRoeWdtBvSe64="; + }; + nativeBuildInputs = [ pkg-config cargo ]; + buildInputs = [ openssl ] ++ lib.optionals (stdenv.isDarwin) [ + darwin.apple_sdk.frameworks.CoreFoundation + darwin.apple_sdk.frameworks.Security + darwin.apple_sdk.frameworks.SystemConfiguration + ]; + OPENSSL_NO_VENDOR = 1; + #need to set this to 2 to avoid cpu starvation + CARGO_BUILD_JOBS = "2"; + CARGO="${cargo}/bin/cargo"; + cargoLock = { + lockFile = "${src}/Cargo.lock"; + outputHashes = { + "clickhouse-rs-1.0.0-alpha.1" = "sha256-0zmoUo/GLyCKDLkpBsnLAyGs1xz6cubJhn+eVqMEMaw="; + }; + }; + postPatch = "cp ${cargoLock.lockFile} Cargo.lock"; + buildAndTestSubdir = "wrappers"; + buildFeatures = [ + "helloworld_fdw" + "bigquery_fdw" + "clickhouse_fdw" + "stripe_fdw" + "firebase_fdw" + "s3_fdw" + "airtable_fdw" + "logflare_fdw" + "auth0_fdw" + "mssql_fdw" + "redis_fdw" + "cognito_fdw" + 
"wasm_fdw" + ]; + doCheck = false; + + preBuild = '' + echo "Processing git tags..." + echo '${builtins.concatStringsSep "," previousVersions}' | sed 's/,/\n/g' > git_tags.txt + ''; + + postInstall = '' + echo "Modifying main SQL file to use unversioned library name..." + current_version="${version}" + main_sql_file="$out/share/postgresql/extension/wrappers--$current_version.sql" + if [ -f "$main_sql_file" ]; then + sed -i 's|$libdir/wrappers-[0-9.]*|$libdir/wrappers|g' "$main_sql_file" + echo "Modified $main_sql_file" + else + echo "Warning: $main_sql_file not found" + fi + echo "Creating and modifying SQL files for previous versions..." + + if [ -f "$main_sql_file" ]; then + while read -r previous_version; do + if [ "$(printf '%s\n' "$previous_version" "$current_version" | sort -V | head -n1)" = "$previous_version" ] && [ "$previous_version" != "$current_version" ]; then + new_file="$out/share/postgresql/extension/wrappers--$previous_version--$current_version.sql" + echo "Creating $new_file" + cp "$main_sql_file" "$new_file" + sed -i 's|$libdir/wrappers-[0-9.]*|$libdir/wrappers|g' "$new_file" + echo "Modified $new_file" + fi + done < git_tags.txt + else + echo "Warning: $main_sql_file not found" + fi + mv $out/lib/wrappers-${version}.so $out/lib/wrappers.so + ln -s $out/lib/wrappers.so $out/lib/wrappers-${version}.so + + echo "Creating wrappers.so symlinks to support pg_upgrade..." 
+ if [ -f "$out/lib/wrappers.so" ]; then + while read -r previous_version; do + if [ "$(printf '%s\n' "$previous_version" "$current_version" | sort -V | head -n1)" = "$previous_version" ] && [ "$previous_version" != "$current_version" ]; then + new_file="$out/lib/wrappers-$previous_version.so" + echo "Creating $new_file" + ln -s "$out/lib/wrappers.so" "$new_file" + fi + done < git_tags.txt + else + echo "Warning: $out/lib/wrappers.so not found" + fi + + rm git_tags.txt + echo "Contents of updated wrappers.control:" + cat "$out/share/postgresql/extension/wrappers.control" + echo "List of generated SQL files:" + ls -l $out/share/postgresql/extension/wrappers--*.sql + ''; + + meta = with lib; { + description = "Various Foreign Data Wrappers (FDWs) for PostreSQL"; + homepage = "https://github.com/tealbase/wrappers"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/nix/fix-cmake-install-path.patch b/nix/fix-cmake-install-path.patch new file mode 100644 index 0000000..1fe317b --- /dev/null +++ b/nix/fix-cmake-install-path.patch @@ -0,0 +1,21 @@ +Fix CMake install path + +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -1141,11 +1141,11 @@ + + set(prefix "${CMAKE_INSTALL_PREFIX}") + set(exec_prefix "\${prefix}") +-set(bindir "\${exec_prefix}/${CMAKE_INSTALL_BINDIR}") +-set(sbindir "\${exec_prefix}/${CMAKE_INSTALL_SBINDIR}") +-set(libdir "\${prefix}/${CMAKE_INSTALL_LIBDIR}") +-set(includedir "\${prefix}/${CMAKE_INSTALL_INCLUDEDIR}") +-set(datarootdir "\${prefix}/${CMAKE_INSTALL_DATAROOTDIR}") ++set(bindir "${CMAKE_INSTALL_FULL_BINDIR}") ++set(sbindir "${CMAKE_INSTALL_FULL_SBINDIR}") ++set(libdir "${CMAKE_INSTALL_FULL_LIBDIR}") ++set(includedir "${CMAKE_INSTALL_FULL_INCLUDEDIR}") ++set(datarootdir "${CMAKE_INSTALL_FULL_DATAROOTDIR}") + set(datadir "\${datarootdir}") + set(expanded_pluginsdir "${GRN_PLUGINS_DIR}") + set(GRN_EXPANDED_DEFAULT_DOCUMENT_ROOT 
"${GRN_DEFAULT_DOCUMENT_ROOT}") \ No newline at end of file diff --git a/nix/init.sh b/nix/init.sh new file mode 100755 index 0000000..3e872e4 --- /dev/null +++ b/nix/init.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# shellcheck shell=bash + +export PGUSER=tealbase_admin +export PGDATA=$PWD/postgres_data +export PGHOST=$PWD/postgres +export PGPORT=5432 +export PGPASS=postgres +export LOG_PATH=$PGHOST/LOG +export PGDATABASE=testdb +export DATABASE_URL="postgresql:///$PGDATABASE?host=$PGHOST&port=$PGPORT" +mkdir -p $PGHOST +if [ ! -d $PGDATA ]; then + echo 'Initializing postgresql database...' + initdb $PGDATA --locale=C --username $PGUSER -A md5 --pwfile=<(echo $PGPASS) --auth=trust + echo "listen_addresses='*'" >> $PGDATA/postgresql.conf + echo "unix_socket_directories='$PGHOST'" >> $PGDATA/postgresql.conf + echo "unix_socket_permissions=0700" >> $PGDATA/postgresql.conf +fi +chmod o-rwx $PGDATA diff --git a/nix/overlays/cargo-pgrx-0-11-3.nix b/nix/overlays/cargo-pgrx-0-11-3.nix new file mode 100644 index 0000000..41ba97d --- /dev/null +++ b/nix/overlays/cargo-pgrx-0-11-3.nix @@ -0,0 +1,7 @@ +final: prev: { + #cargo-pgrx_0_11_3 = cargo-pgrx.cargo-pgrx_0_11_3; + + buildPgrxExtension_0_11_3 = prev.buildPgrxExtension.override { + cargo-pgrx = final.cargo-pgrx_0_11_3; + }; +} diff --git a/nix/overlays/gdal-small.nix b/nix/overlays/gdal-small.nix new file mode 100644 index 0000000..18be8a5 --- /dev/null +++ b/nix/overlays/gdal-small.nix @@ -0,0 +1,14 @@ +final: prev: { + # override the version of gdal used with postgis with the small version. + # significantly reduces overall closure size + gdal = prev.gdalMinimal.override { + /* other features can be enabled, reference: + https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/libraries/gdal/default.nix + */ + + # useHDF = true; + # useArrow = true; + # useLibHEIF = true; + # ... 
+ }; +} diff --git a/nix/overlays/psql_16-oriole.nix b/nix/overlays/psql_16-oriole.nix new file mode 100644 index 0000000..d55af10 --- /dev/null +++ b/nix/overlays/psql_16-oriole.nix @@ -0,0 +1,21 @@ +final: prev: { + postgresql_16 = prev.postgresql_16.overrideAttrs (old: { + pname = "postgresql_16"; + version = "16_23"; + src = prev.fetchurl { + url = "https://github.com/orioledb/postgres/archive/refs/tags/patches16_23.tar.gz"; + sha256 = "sha256-xWmcqn3DYyBG0FsBNqPWTFzUidSJZgoPWI6Rt0N9oJ4="; + }; + buildInputs = old.buildInputs ++ [ + prev.bison + prev.docbook5 + prev.docbook_xsl + prev.docbook_xsl_ns + prev.docbook_xml_dtd_45 + prev.flex + prev.libxslt + prev.perl + ]; + }); + postgresql_orioledb_16 = final.postgresql_16; +} diff --git a/nix/postgresql/15.nix b/nix/postgresql/15.nix new file mode 100644 index 0000000..00dfc0c --- /dev/null +++ b/nix/postgresql/15.nix @@ -0,0 +1,4 @@ +import ./generic.nix { + version = "15.6"; + hash = "sha256-hFUUbtnGnJOlfelUrq0DAsr60DXCskIXXWqh4X68svs="; +} diff --git a/nix/postgresql/default.nix b/nix/postgresql/default.nix new file mode 100644 index 0000000..6ee0452 --- /dev/null +++ b/nix/postgresql/default.nix @@ -0,0 +1,20 @@ +self: +let + #adapted from the postgresql nixpkgs package + versions = { + postgresql_15 = ./15.nix; + }; + + mkAttributes = jitSupport: + self.lib.mapAttrs' (version: path: + let + attrName = if jitSupport then "${version}_jit" else version; + in + self.lib.nameValuePair attrName (import path { + inherit jitSupport self; + }) + ) versions; + +in +# variations without and with JIT +(mkAttributes false) // (mkAttributes true) diff --git a/nix/postgresql/generic.nix b/nix/postgresql/generic.nix new file mode 100644 index 0000000..323b651 --- /dev/null +++ b/nix/postgresql/generic.nix @@ -0,0 +1,309 @@ +let + + generic = + # adapted from the nixpkgs postgresql package + # dependencies + { stdenv, lib, fetchurl, fetchpatch, makeWrapper + , glibc, zlib, readline, openssl, icu, lz4, zstd, systemd, 
libossp_uuid + , pkg-config, libxml2, tzdata, libkrb5, substituteAll, darwin + , linux-pam + + # This is important to obtain a version of `libpq` that does not depend on systemd. + , systemdSupport ? lib.meta.availableOn stdenv.hostPlatform systemd && !stdenv.hostPlatform.isStatic + , enableSystemd ? null + , gssSupport ? with stdenv.hostPlatform; !isWindows && !isStatic + + # for postgresql.pkgs + , self, newScope, buildEnv + + # source specification + , version, hash, muslPatches ? {} + + # for tests + , testers + + # JIT + , jitSupport + , nukeReferences, patchelf, llvmPackages + + # PL/Python + , pythonSupport ? false + , python3 + + # detection of crypt fails when using llvm stdenv, so we add it manually + # for <13 (where it got removed: https://github.com/postgres/postgres/commit/c45643d618e35ec2fe91438df15abd4f3c0d85ca) + , libxcrypt + } @args: + let + atLeast = lib.versionAtLeast version; + olderThan = lib.versionOlder version; + lz4Enabled = atLeast "14"; + zstdEnabled = atLeast "15"; + + systemdSupport' = if enableSystemd == null then systemdSupport else (lib.warn "postgresql: argument enableSystemd is deprecated, please use systemdSupport instead." 
enableSystemd); + + pname = "postgresql"; + + stdenv' = if jitSupport then llvmPackages.stdenv else stdenv; + in stdenv'.mkDerivation (finalAttrs: { + inherit version; + pname = pname + lib.optionalString jitSupport "-jit"; + + src = fetchurl { + url = "mirror://postgresql/source/v${version}/${pname}-${version}.tar.bz2"; + inherit hash; + }; + + hardeningEnable = lib.optionals (!stdenv'.cc.isClang) [ "pie" ]; + + outputs = [ "out" "lib" "doc" "man" ]; + setOutputFlags = false; # $out retains configureFlags :-/ + + buildInputs = [ + zlib + readline + openssl + (libxml2.override {python = python3;}) + icu + ] + ++ lib.optionals (olderThan "13") [ libxcrypt ] + ++ lib.optionals jitSupport [ llvmPackages.llvm ] + ++ lib.optionals lz4Enabled [ lz4 ] + ++ lib.optionals zstdEnabled [ zstd ] + ++ lib.optionals systemdSupport' [ systemd ] + ++ lib.optionals pythonSupport [ python3 ] + ++ lib.optionals gssSupport [ libkrb5 ] + ++ lib.optionals stdenv'.isLinux [ linux-pam ] + ++ lib.optionals (!stdenv'.isDarwin) [ libossp_uuid ]; + + nativeBuildInputs = [ + makeWrapper + pkg-config + ] + ++ lib.optionals jitSupport [ llvmPackages.llvm.dev nukeReferences patchelf ]; + + enableParallelBuilding = true; + + separateDebugInfo = true; + + buildFlags = [ "world" ]; + + # Makes cross-compiling work when xml2-config can't be executed on the host. 
+ # Fixed upstream in https://github.com/postgres/postgres/commit/0bc8cebdb889368abdf224aeac8bc197fe4c9ae6 + env.NIX_CFLAGS_COMPILE = lib.optionalString (olderThan "13") "-I${libxml2.dev}/include/libxml2"; + + configureFlags = [ + "--with-openssl" + "--with-libxml" + "--with-icu" + "--sysconfdir=/etc" + "--libdir=$(lib)/lib" + "--with-system-tzdata=${tzdata}/share/zoneinfo" + "--enable-debug" + (lib.optionalString systemdSupport' "--with-systemd") + (if stdenv'.isDarwin then "--with-uuid=e2fs" else "--with-ossp-uuid") + ] ++ lib.optionals lz4Enabled [ "--with-lz4" ] + ++ lib.optionals zstdEnabled [ "--with-zstd" ] + ++ lib.optionals gssSupport [ "--with-gssapi" ] + ++ lib.optionals pythonSupport [ "--with-python" ] + ++ lib.optionals jitSupport [ "--with-llvm" ] + ++ lib.optionals stdenv'.isLinux [ "--with-pam" ]; + + patches = [ + (if atLeast "16" then ./patches/relative-to-symlinks-16+.patch else ./patches/relative-to-symlinks.patch) + ./patches/less-is-more.patch + ./patches/paths-for-split-outputs.patch + ./patches/specify_pkglibdir_at_runtime.patch + ./patches/paths-with-postgresql-suffix.patch + + (substituteAll { + src = ./patches/locale-binary-path.patch; + locale = "${if stdenv.isDarwin then darwin.adv_cmds else lib.getBin stdenv.cc.libc}/bin/locale"; + }) + ] ++ lib.optionals stdenv'.hostPlatform.isMusl ( + # Using fetchurl instead of fetchpatch on purpose: https://github.com/NixOS/nixpkgs/issues/240141 + map fetchurl (lib.attrValues muslPatches) + ) ++ lib.optionals stdenv'.isLinux [ + (if atLeast "13" then ./patches/socketdir-in-run-13+.patch else ./patches/socketdir-in-run.patch) + ]; + + installTargets = [ "install-world" ]; + + postPatch = '' + # Hardcode the path to pgxs so pg_config returns the path in $out + substituteInPlace "src/common/config_info.c" --subst-var out + '' + lib.optionalString jitSupport '' + # Force lookup of jit stuff in $out instead of $lib + substituteInPlace src/backend/jit/jit.c --replace pkglib_path \"$out/lib\" + 
substituteInPlace src/backend/jit/llvm/llvmjit.c --replace pkglib_path \"$out/lib\" + substituteInPlace src/backend/jit/llvm/llvmjit_inline.cpp --replace pkglib_path \"$out/lib\" + ''; + + postInstall = + '' + moveToOutput "lib/pgxs" "$out" # looks strange, but not deleting it + moveToOutput "lib/libpgcommon*.a" "$out" + moveToOutput "lib/libpgport*.a" "$out" + moveToOutput "lib/libecpg*" "$out" + + # Prevent a retained dependency on gcc-wrapper. + substituteInPlace "$out/lib/pgxs/src/Makefile.global" --replace ${stdenv'.cc}/bin/ld ld + + if [ -z "''${dontDisableStatic:-}" ]; then + # Remove static libraries in case dynamic are available. + for i in $out/lib/*.a $lib/lib/*.a; do + name="$(basename "$i")" + ext="${stdenv'.hostPlatform.extensions.sharedLibrary}" + if [ -e "$lib/lib/''${name%.a}$ext" ] || [ -e "''${i%.a}$ext" ]; then + rm "$i" + fi + done + fi + '' + lib.optionalString jitSupport '' + # Move the bitcode and libllvmjit.so library out of $lib; otherwise, every client that + # depends on libpq.so will also have libLLVM.so in its closure too, bloating it + moveToOutput "lib/bitcode" "$out" + moveToOutput "lib/llvmjit*" "$out" + + # In the case of JIT support, prevent a retained dependency on clang-wrapper + substituteInPlace "$out/lib/pgxs/src/Makefile.global" --replace ${stdenv'.cc}/bin/clang clang + nuke-refs $out/lib/llvmjit_types.bc $(find $out/lib/bitcode -type f) + + # Stop out depending on the default output of llvm + substituteInPlace $out/lib/pgxs/src/Makefile.global \ + --replace ${llvmPackages.llvm.out}/bin "" \ + --replace '$(LLVM_BINPATH)/' "" + + # Stop out depending on the -dev output of llvm + substituteInPlace $out/lib/pgxs/src/Makefile.global \ + --replace ${llvmPackages.llvm.dev}/bin/llvm-config llvm-config \ + --replace -I${llvmPackages.llvm.dev}/include "" + + ${lib.optionalString (!stdenv'.isDarwin) '' + # Stop lib depending on the -dev output of llvm + rpath=$(patchelf --print-rpath $out/lib/llvmjit.so) + nuke-refs -e $out 
$out/lib/llvmjit.so + # Restore the correct rpath + patchelf $out/lib/llvmjit.so --set-rpath "$rpath" + ''} + ''; + + postFixup = lib.optionalString (!stdenv'.isDarwin && stdenv'.hostPlatform.libc == "glibc") + '' + # initdb needs access to "locale" command from glibc. + wrapProgram $out/bin/initdb --prefix PATH ":" ${glibc.bin}/bin + ''; + + doCheck = !stdenv'.isDarwin; + # autodetection doesn't seem to able to find this, but it's there. + checkTarget = "check"; + + disallowedReferences = [ stdenv'.cc ]; + + passthru = let + this = self.callPackage generic args; + jitToggle = this.override { + jitSupport = !jitSupport; + }; + in + { + psqlSchema = lib.versions.major version; + + withJIT = if jitSupport then this else jitToggle; + withoutJIT = if jitSupport then jitToggle else this; + + dlSuffix = if olderThan "16" then ".so" else stdenv.hostPlatform.extensions.sharedLibrary; + + pkgs = let + scope = { + inherit jitSupport; + inherit (llvmPackages) llvm; + postgresql = this; + stdenv = stdenv'; + }; + newSelf = self // scope; + newSuper = { callPackage = newScope (scope // this.pkgs); }; + in import ./ext newSelf newSuper; + + withPackages = postgresqlWithPackages { + inherit makeWrapper buildEnv; + postgresql = this; + } + this.pkgs; + + tests = { + postgresql-wal-receiver = import ../../../../nixos/tests/postgresql-wal-receiver.nix { + inherit (stdenv) system; + pkgs = self; + package = this; + }; + pkg-config = testers.testMetaPkgConfig finalAttrs.finalPackage; + } // lib.optionalAttrs jitSupport { + postgresql-jit = import ../../../../nixos/tests/postgresql-jit.nix { + inherit (stdenv) system; + pkgs = self; + package = this; + }; + }; + }; + + meta = with lib; { + homepage = "https://www.postgresql.org"; + description = "Powerful, open source object-relational database system"; + license = licenses.postgresql; + changelog = "https://www.postgresql.org/docs/release/${finalAttrs.version}/"; + maintainers = with maintainers; [ thoughtpolice danbst globin ivan 
ma27 wolfgangwalther ]; + pkgConfigModules = [ "libecpg" "libecpg_compat" "libpgtypes" "libpq" ]; + platforms = platforms.unix; + + # JIT support doesn't work with cross-compilation. It is attempted to build LLVM-bytecode + # (`%.bc` is the corresponding `make(1)`-rule) for each sub-directory in `backend/` for + # the JIT apparently, but with a $(CLANG) that can produce binaries for the build, not the + # host-platform. + # + # I managed to get a cross-build with JIT support working with + # `depsBuildBuild = [ llvmPackages.clang ] ++ buildInputs`, but considering that the + # resulting LLVM IR isn't platform-independent this doesn't give you much. + # In fact, I tried to test the result in a VM-test, but as soon as JIT was used to optimize + # a query, postgres would coredump with `Illegal instruction`. + broken = (jitSupport && stdenv.hostPlatform != stdenv.buildPlatform) + # Allmost all tests fail FATAL errors for v12 and v13 + || (jitSupport && stdenv.hostPlatform.isMusl && olderThan "14"); + }; + }); + + postgresqlWithPackages = { postgresql, makeWrapper, buildEnv }: pkgs: f: buildEnv { + name = "postgresql-and-plugins-${postgresql.version}"; + paths = f pkgs ++ [ + postgresql + postgresql.lib + postgresql.man # in case user installs this into environment + ]; + nativeBuildInputs = [ makeWrapper ]; + + + # We include /bin to ensure the $out/bin directory is created, which is + # needed because we'll be removing the files from that directory in postBuild + # below. See #22653 + pathsToLink = ["/" "/bin"]; + + # Note: the duplication of executables is about 4MB size. + # So a nicer solution was patching postgresql to allow setting the + # libdir explicitly. 
+ postBuild = '' + mkdir -p $out/bin + rm $out/bin/{pg_config,postgres,pg_ctl} + cp --target-directory=$out/bin ${postgresql}/bin/{postgres,pg_config,pg_ctl} + wrapProgram $out/bin/postgres --set NIX_PGLIBDIR $out/lib + ''; + + passthru.version = postgresql.version; + passthru.psqlSchema = postgresql.psqlSchema; + }; + +in +# passed by .nix +versionArgs: +# passed by default.nix +{ self, ... } @defaultArgs: +self.callPackage generic (defaultArgs // versionArgs) diff --git a/nix/postgresql/patches/less-is-more.patch b/nix/postgresql/patches/less-is-more.patch new file mode 100644 index 0000000..a72d1a2 --- /dev/null +++ b/nix/postgresql/patches/less-is-more.patch @@ -0,0 +1,11 @@ +--- a/src/include/fe_utils/print.h ++++ b/src/include/fe_utils/print.h +@@ -18,7 +18,7 @@ + + /* This is not a particularly great place for this ... */ + #ifndef __CYGWIN__ +-#define DEFAULT_PAGER "more" ++#define DEFAULT_PAGER "less" + #else + #define DEFAULT_PAGER "less" + #endif diff --git a/nix/postgresql/patches/locale-binary-path.patch b/nix/postgresql/patches/locale-binary-path.patch new file mode 100644 index 0000000..8068683 --- /dev/null +++ b/nix/postgresql/patches/locale-binary-path.patch @@ -0,0 +1,11 @@ +--- a/src/backend/commands/collationcmds.c ++++ b/src/backend/commands/collationcmds.c +@@ -611,7 +611,7 @@ pg_import_system_collations(PG_FUNCTION_ARGS) + aliases = (CollAliasData *) palloc(maxaliases * sizeof(CollAliasData)); + naliases = 0; + +- locale_a_handle = OpenPipeStream("locale -a", "r"); ++ locale_a_handle = OpenPipeStream("@locale@ -a", "r"); + if (locale_a_handle == NULL) + ereport(ERROR, + (errcode_for_file_access(), diff --git a/nix/postgresql/patches/paths-for-split-outputs.patch b/nix/postgresql/patches/paths-for-split-outputs.patch new file mode 100644 index 0000000..2134f7e --- /dev/null +++ b/nix/postgresql/patches/paths-for-split-outputs.patch @@ -0,0 +1,11 @@ +--- a/src/common/config_info.c ++++ b/src/common/config_info.c +@@ -118,7 +118,7 @@ + i++; + + 
configdata[i].name = pstrdup("PGXS"); ++ strlcpy(path, "@out@/lib", sizeof(path)); +- get_pkglib_path(my_exec_path, path); + strlcat(path, "/pgxs/src/makefiles/pgxs.mk", sizeof(path)); + cleanup_path(path); + configdata[i].setting = pstrdup(path); diff --git a/nix/postgresql/patches/paths-with-postgresql-suffix.patch b/nix/postgresql/patches/paths-with-postgresql-suffix.patch new file mode 100644 index 0000000..04d2f55 --- /dev/null +++ b/nix/postgresql/patches/paths-with-postgresql-suffix.patch @@ -0,0 +1,41 @@ +Nix outputs put the `name' in each store path like +/nix/store/...-. This was confusing the Postgres make script +because it thought its data directory already had postgresql in its +directory. This lead to Postgres installing all of its fils in +$out/share. To fix this, we just look for postgres or psql in the part +after the / using make's notdir. + +--- +--- a/src/Makefile.global.in ++++ b/src/Makefile.global.in +@@ -102,15 +102,15 @@ datarootdir := @datarootdir@ + bindir := @bindir@ + + datadir := @datadir@ +-ifeq "$(findstring pgsql, $(datadir))" "" +-ifeq "$(findstring postgres, $(datadir))" "" ++ifeq "$(findstring pgsql, $(notdir $(datadir)))" "" ++ifeq "$(findstring postgres, $(notdir $(datadir)))" "" + override datadir := $(datadir)/postgresql + endif + endif + + sysconfdir := @sysconfdir@ +-ifeq "$(findstring pgsql, $(sysconfdir))" "" +-ifeq "$(findstring postgres, $(sysconfdir))" "" ++ifeq "$(findstring pgsql, $(notdir $(sysconfdir)))" "" ++ifeq "$(findstring postgres, $(notdir $(sysconfdir)))" "" + override sysconfdir := $(sysconfdir)/postgresql + endif + endif +@@ -136,8 +136,8 @@ endif + mandir := @mandir@ + + docdir := @docdir@ +-ifeq "$(findstring pgsql, $(docdir))" "" +-ifeq "$(findstring postgres, $(docdir))" "" ++ifeq "$(findstring pgsql, $(notdir $(docdir)))" "" ++ifeq "$(findstring postgres, $(notdir $(docdir)))" "" + override docdir := $(docdir)/postgresql + endif + endif diff --git 
a/nix/postgresql/patches/relative-to-symlinks-16+.patch b/nix/postgresql/patches/relative-to-symlinks-16+.patch new file mode 100644 index 0000000..996072e --- /dev/null +++ b/nix/postgresql/patches/relative-to-symlinks-16+.patch @@ -0,0 +1,13 @@ +On NixOS we *want* stuff relative to symlinks. +--- +--- a/src/common/exec.c ++++ b/src/common/exec.c +@@ -238,6 +238,8 @@ + static int + normalize_exec_path(char *path) + { ++ return 0; ++ + /* + * We used to do a lot of work ourselves here, but now we just let + * realpath(3) do all the heavy lifting. diff --git a/nix/postgresql/patches/relative-to-symlinks.patch b/nix/postgresql/patches/relative-to-symlinks.patch new file mode 100644 index 0000000..c9b199b --- /dev/null +++ b/nix/postgresql/patches/relative-to-symlinks.patch @@ -0,0 +1,13 @@ +On NixOS we *want* stuff relative to symlinks. +--- +--- a/src/common/exec.c ++++ b/src/common/exec.c +@@ -218,6 +218,8 @@ + static int + resolve_symlinks(char *path) + { ++ return 0; ++ + #ifdef HAVE_READLINK + struct stat buf; + char orig_wd[MAXPGPATH], diff --git a/nix/postgresql/patches/socketdir-in-run-13+.patch b/nix/postgresql/patches/socketdir-in-run-13+.patch new file mode 100644 index 0000000..fd808b6 --- /dev/null +++ b/nix/postgresql/patches/socketdir-in-run-13+.patch @@ -0,0 +1,11 @@ +--- a/src/include/pg_config_manual.h ++++ b/src/include/pg_config_manual.h +@@ -201,7 +201,7 @@ + * support them yet. + */ + #ifndef WIN32 +-#define DEFAULT_PGSOCKET_DIR "/tmp" ++#define DEFAULT_PGSOCKET_DIR "/run/postgresql" + #else + #define DEFAULT_PGSOCKET_DIR "" + #endif diff --git a/nix/postgresql/patches/socketdir-in-run.patch b/nix/postgresql/patches/socketdir-in-run.patch new file mode 100644 index 0000000..4932ef6 --- /dev/null +++ b/nix/postgresql/patches/socketdir-in-run.patch @@ -0,0 +1,11 @@ +--- a/src/include/pg_config_manual.h ++++ b/src/include/pg_config_manual.h +@@ -179,7 +179,7 @@ + * here's where to twiddle it. 
You can also override this at runtime + * with the postmaster's -k switch. + */ +-#define DEFAULT_PGSOCKET_DIR "/tmp" ++#define DEFAULT_PGSOCKET_DIR "/run/postgresql" + + /* + * This is the default event source for Windows event log. diff --git a/nix/postgresql/patches/specify_pkglibdir_at_runtime.patch b/nix/postgresql/patches/specify_pkglibdir_at_runtime.patch new file mode 100644 index 0000000..b94fc9e --- /dev/null +++ b/nix/postgresql/patches/specify_pkglibdir_at_runtime.patch @@ -0,0 +1,28 @@ +--- a/src/port/path.c ++++ b/src/port/path.c +@@ -714,7 +714,11 @@ + void + get_lib_path(const char *my_exec_path, char *ret_path) + { +- make_relative_path(ret_path, LIBDIR, PGBINDIR, my_exec_path); ++ char const * const nix_pglibdir = getenv("NIX_PGLIBDIR"); ++ if(nix_pglibdir == NULL) ++ make_relative_path(ret_path, LIBDIR, PGBINDIR, my_exec_path); ++ else ++ make_relative_path(ret_path, nix_pglibdir, PGBINDIR, my_exec_path); + } + + /* +@@ -723,7 +727,11 @@ + void + get_pkglib_path(const char *my_exec_path, char *ret_path) + { +- make_relative_path(ret_path, PKGLIBDIR, PGBINDIR, my_exec_path); ++ char const * const nix_pglibdir = getenv("NIX_PGLIBDIR"); ++ if(nix_pglibdir == NULL) ++ make_relative_path(ret_path, PKGLIBDIR, PGBINDIR, my_exec_path); ++ else ++ make_relative_path(ret_path, nix_pglibdir, PGBINDIR, my_exec_path); + } + + /* diff --git a/nix/tealbase-groonga.nix b/nix/tealbase-groonga.nix new file mode 100644 index 0000000..7b50b8e --- /dev/null +++ b/nix/tealbase-groonga.nix @@ -0,0 +1,75 @@ +{ lib, stdenv, cmake, fetchurl, kytea, msgpack-c, mecab, pkg-config, rapidjson +, testers, xxHash, zstd, postgresqlPackages, makeWrapper, suggestSupport ? false +, zeromq, libevent, openssl, lz4Support ? false, lz4, zlibSupport ? 
true, zlib +, writeShellScriptBin, callPackage }: +let mecab-naist-jdic = callPackage ./ext/mecab-naist-jdic { }; +in stdenv.mkDerivation (finalAttrs: { + pname = "tealbase-groonga"; + version = "14.0.5"; + src = fetchurl { + url = + "https://packages.groonga.org/source/groonga/groonga-${finalAttrs.version}.tar.gz"; + hash = "sha256-y4UGnv8kK0z+br8wXpPf57NMXkdEJHcLCuTvYiubnIc="; + }; + patches = + [ ./fix-cmake-install-path.patch ./do-not-use-vendored-libraries.patch ]; + nativeBuildInputs = [ cmake pkg-config makeWrapper ]; + buildInputs = [ rapidjson xxHash zstd mecab kytea msgpack-c ] + ++ lib.optionals lz4Support [ lz4 ] ++ lib.optional zlibSupport [ zlib ] + ++ lib.optionals suggestSupport [ zeromq libevent ]; + cmakeFlags = [ + "-DWITH_MECAB=ON" + "-DMECAB_DICDIR=${mecab-naist-jdic}/lib/mecab/dic/naist-jdic" + "-DMECAB_CONFIG=${mecab}/bin/mecab-config" + "-DENABLE_MECAB_TOKENIZER=ON" + "-DMECAB_INCLUDE_DIR=${mecab}/include" + "-DMECAB_LIBRARY=${mecab}/lib/libmecab.so" + "-DGROONGA_ENABLE_TOKENIZER_MECAB=YES" + "-DGRN_WITH_MECAB=YES" + ]; + preConfigure = '' + export MECAB_DICDIR=${mecab-naist-jdic}/lib/mecab/dic/naist-jdic + echo "MeCab dictionary directory is: $MECAB_DICDIR" + ''; + buildPhase = '' + cmake --build . 
-- VERBOSE=1 + grep -i mecab CMakeCache.txt || (echo "MeCab not detected in CMake cache" && exit 1) + echo "CMake cache contents related to MeCab:" + grep -i mecab CMakeCache.txt + ''; + + # installPhase = '' + # mkdir -p $out/bin $out/lib/groonga/plugins + # cp -r lib/groonga/plugins/* $out/lib/groonga/plugins + # cp -r bin/* $out/bin + # echo "Installed Groonga plugins:" + # ls -l $out/lib/groonga/plugins + # ''; + + postInstall = '' + echo "Searching for MeCab-related files:" + find $out -name "*mecab*" + + echo "Checking Groonga plugins directory:" + ls -l $out/lib/groonga/plugins + + echo "Wrapping Groonga binary:" + wrapProgram $out/bin/groonga \ + --set GRN_PLUGINS_DIR $out/lib/groonga/plugins + + ''; + env.NIX_CFLAGS_COMPILE = + lib.optionalString zlibSupport "-I${zlib.dev}/include"; + + meta = with lib; { + homepage = "https://groonga.org/"; + description = "Open-source fulltext search engine and column store"; + license = licenses.lgpl21; + maintainers = [ maintainers.samrose ]; + platforms = platforms.all; + longDescription = '' + Groonga is an open-source fulltext search engine and column store. + It lets you write high-performance applications that requires fulltext search. + ''; + }; +}) diff --git a/nix/tests/expected/extensions_sql_interface.out b/nix/tests/expected/extensions_sql_interface.out new file mode 100644 index 0000000..5ab593d --- /dev/null +++ b/nix/tests/expected/extensions_sql_interface.out @@ -0,0 +1,6312 @@ +/* + +The purpose of this test is to monitor the SQL interface exposed +by Postgres extensions so we have to manually review/approve any difference +that emerge as versions change. 
+ +*/ +/* + +List all extensions that are not enabled +If a new entry shows up in this list, that means a new extension has been +added and you should `create extension ...` to enable it in ./nix/tests/prime + +*/ +select + name +from + pg_available_extensions +where + installed_version is null +order by + name asc; + name +--------- + pg_cron +(1 row) + +/* + +Monitor relocatability and config of each extension +- lesson learned from pg_cron + +*/ +select + extname as extension_name, + extrelocatable as is_relocatable +from + pg_extension +order by + extname asc; + extension_name | is_relocatable +------------------------------+---------------- + address_standardizer | t + address_standardizer_data_us | t + adminpack | f + amcheck | t + autoinc | t + bloom | t + btree_gin | t + btree_gist | t + citext | t + cube | t + dblink | t + dict_int | t + dict_xsyn | t + earthdistance | t + file_fdw | t + fuzzystrmatch | t + hstore | t + http | f + hypopg | t + index_advisor | t + insert_username | t + intagg | t + intarray | t + isn | t + lo | t + ltree | t + moddatetime | t + old_snapshot | t + pageinspect | t + pg_backtrace | t + pg_buffercache | t + pg_freespacemap | t + pg_graphql | f + pg_hashids | t + pg_jsonschema | f + pg_net | f + pg_prewarm | t + pg_repack | f + pg_stat_monitor | t + pg_stat_statements | t + pg_surgery | t + pg_tle | f + pg_trgm | t + pg_visibility | t + pg_walinspect | t + pgaudit | t + pgcrypto | t + pgjwt | f + pgmq | f + pgroonga | f + pgroonga_database | f + pgrouting | t + pgrowlocks | t + pgsodium | f + pgstattuple | t + pgtap | t + plcoffee | f + plls | f + plpgsql | f + plpgsql_check | f + plv8 | f + postgis | f + postgis_raster | f + postgis_sfcgal | t + postgis_tiger_geocoder | f + postgis_topology | f + postgres_fdw | t + refint | t + rum | t + seg | t + sslinfo | t + tealbase_vault | f + tablefunc | t + tcn | t + timescaledb | f + tsm_system_rows | t + tsm_system_time | t + unaccent | t + uuid-ossp | t + vector | t + wrappers | f + 
xml2 | f +(82 rows) + +/* + +Monitor extension public function interface + +*/ +select + e.extname as extension_name, + n.nspname as schema_name, + p.proname as function_name, + pg_catalog.pg_get_function_identity_arguments(p.oid) as argument_types, + pg_catalog.pg_get_function_result(p.oid) as return_type +from + pg_catalog.pg_proc p + join pg_catalog.pg_namespace n + on n.oid = p.pronamespace + join pg_catalog.pg_depend d + on d.objid = p.oid + join pg_catalog.pg_extension e + on e.oid = d.refobjid +where + d.deptype = 'e' +order by + e.extname, + n.nspname, + p.proname, + pg_catalog.pg_get_function_identity_arguments(p.oid); + extension_name | schema_name | function_name | argument_types | return_type +------------------------+--------------------------+--------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + address_standardizer | public | parse_address | text, OUT num text, OUT street text, OUT street2 text, OUT address1 text, OUT city text, OUT state text, OUT zip text, OUT zipplus text, OUT country text | record + address_standardizer | public | standardize_address | lextab text, gaztab text, rultab text, address text | stdaddr + address_standardizer | public | standardize_address | lextab text, gaztab text, rultab text, micro text, macro text | stdaddr + adminpack | pg_catalog | pg_file_rename | text, text | boolean + adminpack | pg_catalog | pg_file_rename | text, text, text | boolean + adminpack | pg_catalog | pg_file_sync | text | void + adminpack | pg_catalog | pg_file_unlink | 
text | boolean + adminpack | pg_catalog | pg_file_write | text, text, boolean | bigint + adminpack | pg_catalog | pg_logdir_ls | | SETOF record + amcheck | public | bt_index_check | index regclass | void + amcheck | public | bt_index_check | index regclass, heapallindexed boolean | void + amcheck | public | bt_index_parent_check | index regclass | void + amcheck | public | bt_index_parent_check | index regclass, heapallindexed boolean | void + amcheck | public | bt_index_parent_check | index regclass, heapallindexed boolean, rootdescend boolean | void + amcheck | public | verify_heapam | relation regclass, on_error_stop boolean, check_toast boolean, skip text, startblock bigint, endblock bigint, OUT blkno bigint, OUT offnum integer, OUT attnum integer, OUT msg text | SETOF record + autoinc | public | autoinc | | trigger + bloom | public | blhandler | internal | index_am_handler + btree_gin | public | gin_btree_consistent | internal, smallint, anyelement, integer, internal, internal | boolean + btree_gin | public | gin_compare_prefix_anyenum | anyenum, anyenum, smallint, internal | integer + btree_gin | public | gin_compare_prefix_bit | bit, bit, smallint, internal | integer + btree_gin | public | gin_compare_prefix_bool | boolean, boolean, smallint, internal | integer + btree_gin | public | gin_compare_prefix_bpchar | character, character, smallint, internal | integer + btree_gin | public | gin_compare_prefix_bytea | bytea, bytea, smallint, internal | integer + btree_gin | public | gin_compare_prefix_char | "char", "char", smallint, internal | integer + btree_gin | public | gin_compare_prefix_cidr | cidr, cidr, smallint, internal | integer + btree_gin | public | gin_compare_prefix_date | date, date, smallint, internal | integer + btree_gin | public | gin_compare_prefix_float4 | real, real, smallint, internal | integer + btree_gin | public | gin_compare_prefix_float8 | double precision, double precision, smallint, internal | integer + btree_gin | public | 
gin_compare_prefix_inet | inet, inet, smallint, internal | integer + btree_gin | public | gin_compare_prefix_int2 | smallint, smallint, smallint, internal | integer + btree_gin | public | gin_compare_prefix_int4 | integer, integer, smallint, internal | integer + btree_gin | public | gin_compare_prefix_int8 | bigint, bigint, smallint, internal | integer + btree_gin | public | gin_compare_prefix_interval | interval, interval, smallint, internal | integer + btree_gin | public | gin_compare_prefix_macaddr | macaddr, macaddr, smallint, internal | integer + btree_gin | public | gin_compare_prefix_macaddr8 | macaddr8, macaddr8, smallint, internal | integer + btree_gin | public | gin_compare_prefix_money | money, money, smallint, internal | integer + btree_gin | public | gin_compare_prefix_name | name, name, smallint, internal | integer + btree_gin | public | gin_compare_prefix_numeric | numeric, numeric, smallint, internal | integer + btree_gin | public | gin_compare_prefix_oid | oid, oid, smallint, internal | integer + btree_gin | public | gin_compare_prefix_text | text, text, smallint, internal | integer + btree_gin | public | gin_compare_prefix_time | time without time zone, time without time zone, smallint, internal | integer + btree_gin | public | gin_compare_prefix_timestamp | timestamp without time zone, timestamp without time zone, smallint, internal | integer + btree_gin | public | gin_compare_prefix_timestamptz | timestamp with time zone, timestamp with time zone, smallint, internal | integer + btree_gin | public | gin_compare_prefix_timetz | time with time zone, time with time zone, smallint, internal | integer + btree_gin | public | gin_compare_prefix_uuid | uuid, uuid, smallint, internal | integer + btree_gin | public | gin_compare_prefix_varbit | bit varying, bit varying, smallint, internal | integer + btree_gin | public | gin_enum_cmp | anyenum, anyenum | integer + btree_gin | public | gin_extract_query_anyenum | anyenum, internal, smallint, internal, 
internal | internal + btree_gin | public | gin_extract_query_bit | bit, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_bool | boolean, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_bpchar | character, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_bytea | bytea, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_char | "char", internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_cidr | cidr, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_date | date, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_float4 | real, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_float8 | double precision, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_inet | inet, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_int2 | smallint, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_int4 | integer, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_int8 | bigint, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_interval | interval, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_macaddr | macaddr, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_macaddr8 | macaddr8, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_money | money, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_name | name, internal, smallint, internal, internal | internal + btree_gin | public | 
gin_extract_query_numeric | numeric, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_oid | oid, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_text | text, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_time | time without time zone, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_timestamp | timestamp without time zone, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_timestamptz | timestamp with time zone, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_timetz | time with time zone, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_uuid | uuid, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_varbit | bit varying, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_value_anyenum | anyenum, internal | internal + btree_gin | public | gin_extract_value_bit | bit, internal | internal + btree_gin | public | gin_extract_value_bool | boolean, internal | internal + btree_gin | public | gin_extract_value_bpchar | character, internal | internal + btree_gin | public | gin_extract_value_bytea | bytea, internal | internal + btree_gin | public | gin_extract_value_char | "char", internal | internal + btree_gin | public | gin_extract_value_cidr | cidr, internal | internal + btree_gin | public | gin_extract_value_date | date, internal | internal + btree_gin | public | gin_extract_value_float4 | real, internal | internal + btree_gin | public | gin_extract_value_float8 | double precision, internal | internal + btree_gin | public | gin_extract_value_inet | inet, internal | internal + btree_gin | public | gin_extract_value_int2 | smallint, internal | internal + btree_gin | public | gin_extract_value_int4 | integer, 
internal | internal + btree_gin | public | gin_extract_value_int8 | bigint, internal | internal + btree_gin | public | gin_extract_value_interval | interval, internal | internal + btree_gin | public | gin_extract_value_macaddr | macaddr, internal | internal + btree_gin | public | gin_extract_value_macaddr8 | macaddr8, internal | internal + btree_gin | public | gin_extract_value_money | money, internal | internal + btree_gin | public | gin_extract_value_name | name, internal | internal + btree_gin | public | gin_extract_value_numeric | numeric, internal | internal + btree_gin | public | gin_extract_value_oid | oid, internal | internal + btree_gin | public | gin_extract_value_text | text, internal | internal + btree_gin | public | gin_extract_value_time | time without time zone, internal | internal + btree_gin | public | gin_extract_value_timestamp | timestamp without time zone, internal | internal + btree_gin | public | gin_extract_value_timestamptz | timestamp with time zone, internal | internal + btree_gin | public | gin_extract_value_timetz | time with time zone, internal | internal + btree_gin | public | gin_extract_value_uuid | uuid, internal | internal + btree_gin | public | gin_extract_value_varbit | bit varying, internal | internal + btree_gin | public | gin_numeric_cmp | numeric, numeric | integer + btree_gist | public | cash_dist | money, money | money + btree_gist | public | date_dist | date, date | integer + btree_gist | public | float4_dist | real, real | real + btree_gist | public | float8_dist | double precision, double precision | double precision + btree_gist | public | gbt_bit_compress | internal | internal + btree_gist | public | gbt_bit_consistent | internal, bit, smallint, oid, internal | boolean + btree_gist | public | gbt_bit_penalty | internal, internal, internal | internal + btree_gist | public | gbt_bit_picksplit | internal, internal | internal + btree_gist | public | gbt_bit_same | gbtreekey_var, gbtreekey_var, internal | internal + 
btree_gist | public | gbt_bit_union | internal, internal | gbtreekey_var + btree_gist | public | gbt_bool_compress | internal | internal + btree_gist | public | gbt_bool_consistent | internal, boolean, smallint, oid, internal | boolean + btree_gist | public | gbt_bool_fetch | internal | internal + btree_gist | public | gbt_bool_penalty | internal, internal, internal | internal + btree_gist | public | gbt_bool_picksplit | internal, internal | internal + btree_gist | public | gbt_bool_same | gbtreekey2, gbtreekey2, internal | internal + btree_gist | public | gbt_bool_union | internal, internal | gbtreekey2 + btree_gist | public | gbt_bpchar_compress | internal | internal + btree_gist | public | gbt_bpchar_consistent | internal, character, smallint, oid, internal | boolean + btree_gist | public | gbt_bytea_compress | internal | internal + btree_gist | public | gbt_bytea_consistent | internal, bytea, smallint, oid, internal | boolean + btree_gist | public | gbt_bytea_penalty | internal, internal, internal | internal + btree_gist | public | gbt_bytea_picksplit | internal, internal | internal + btree_gist | public | gbt_bytea_same | gbtreekey_var, gbtreekey_var, internal | internal + btree_gist | public | gbt_bytea_union | internal, internal | gbtreekey_var + btree_gist | public | gbt_cash_compress | internal | internal + btree_gist | public | gbt_cash_consistent | internal, money, smallint, oid, internal | boolean + btree_gist | public | gbt_cash_distance | internal, money, smallint, oid, internal | double precision + btree_gist | public | gbt_cash_fetch | internal | internal + btree_gist | public | gbt_cash_penalty | internal, internal, internal | internal + btree_gist | public | gbt_cash_picksplit | internal, internal | internal + btree_gist | public | gbt_cash_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_cash_union | internal, internal | gbtreekey16 + btree_gist | public | gbt_date_compress | internal | internal + btree_gist | 
public | gbt_date_consistent | internal, date, smallint, oid, internal | boolean + btree_gist | public | gbt_date_distance | internal, date, smallint, oid, internal | double precision + btree_gist | public | gbt_date_fetch | internal | internal + btree_gist | public | gbt_date_penalty | internal, internal, internal | internal + btree_gist | public | gbt_date_picksplit | internal, internal | internal + btree_gist | public | gbt_date_same | gbtreekey8, gbtreekey8, internal | internal + btree_gist | public | gbt_date_union | internal, internal | gbtreekey8 + btree_gist | public | gbt_decompress | internal | internal + btree_gist | public | gbt_enum_compress | internal | internal + btree_gist | public | gbt_enum_consistent | internal, anyenum, smallint, oid, internal | boolean + btree_gist | public | gbt_enum_fetch | internal | internal + btree_gist | public | gbt_enum_penalty | internal, internal, internal | internal + btree_gist | public | gbt_enum_picksplit | internal, internal | internal + btree_gist | public | gbt_enum_same | gbtreekey8, gbtreekey8, internal | internal + btree_gist | public | gbt_enum_union | internal, internal | gbtreekey8 + btree_gist | public | gbt_float4_compress | internal | internal + btree_gist | public | gbt_float4_consistent | internal, real, smallint, oid, internal | boolean + btree_gist | public | gbt_float4_distance | internal, real, smallint, oid, internal | double precision + btree_gist | public | gbt_float4_fetch | internal | internal + btree_gist | public | gbt_float4_penalty | internal, internal, internal | internal + btree_gist | public | gbt_float4_picksplit | internal, internal | internal + btree_gist | public | gbt_float4_same | gbtreekey8, gbtreekey8, internal | internal + btree_gist | public | gbt_float4_union | internal, internal | gbtreekey8 + btree_gist | public | gbt_float8_compress | internal | internal + btree_gist | public | gbt_float8_consistent | internal, double precision, smallint, oid, internal | boolean + 
btree_gist | public | gbt_float8_distance | internal, double precision, smallint, oid, internal | double precision + btree_gist | public | gbt_float8_fetch | internal | internal + btree_gist | public | gbt_float8_penalty | internal, internal, internal | internal + btree_gist | public | gbt_float8_picksplit | internal, internal | internal + btree_gist | public | gbt_float8_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_float8_union | internal, internal | gbtreekey16 + btree_gist | public | gbt_inet_compress | internal | internal + btree_gist | public | gbt_inet_consistent | internal, inet, smallint, oid, internal | boolean + btree_gist | public | gbt_inet_penalty | internal, internal, internal | internal + btree_gist | public | gbt_inet_picksplit | internal, internal | internal + btree_gist | public | gbt_inet_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_inet_union | internal, internal | gbtreekey16 + btree_gist | public | gbt_int2_compress | internal | internal + btree_gist | public | gbt_int2_consistent | internal, smallint, smallint, oid, internal | boolean + btree_gist | public | gbt_int2_distance | internal, smallint, smallint, oid, internal | double precision + btree_gist | public | gbt_int2_fetch | internal | internal + btree_gist | public | gbt_int2_penalty | internal, internal, internal | internal + btree_gist | public | gbt_int2_picksplit | internal, internal | internal + btree_gist | public | gbt_int2_same | gbtreekey4, gbtreekey4, internal | internal + btree_gist | public | gbt_int2_union | internal, internal | gbtreekey4 + btree_gist | public | gbt_int4_compress | internal | internal + btree_gist | public | gbt_int4_consistent | internal, integer, smallint, oid, internal | boolean + btree_gist | public | gbt_int4_distance | internal, integer, smallint, oid, internal | double precision + btree_gist | public | gbt_int4_fetch | internal | internal + btree_gist | public | gbt_int4_penalty 
| internal, internal, internal | internal + btree_gist | public | gbt_int4_picksplit | internal, internal | internal + btree_gist | public | gbt_int4_same | gbtreekey8, gbtreekey8, internal | internal + btree_gist | public | gbt_int4_union | internal, internal | gbtreekey8 + btree_gist | public | gbt_int8_compress | internal | internal + btree_gist | public | gbt_int8_consistent | internal, bigint, smallint, oid, internal | boolean + btree_gist | public | gbt_int8_distance | internal, bigint, smallint, oid, internal | double precision + btree_gist | public | gbt_int8_fetch | internal | internal + btree_gist | public | gbt_int8_penalty | internal, internal, internal | internal + btree_gist | public | gbt_int8_picksplit | internal, internal | internal + btree_gist | public | gbt_int8_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_int8_union | internal, internal | gbtreekey16 + btree_gist | public | gbt_intv_compress | internal | internal + btree_gist | public | gbt_intv_consistent | internal, interval, smallint, oid, internal | boolean + btree_gist | public | gbt_intv_decompress | internal | internal + btree_gist | public | gbt_intv_distance | internal, interval, smallint, oid, internal | double precision + btree_gist | public | gbt_intv_fetch | internal | internal + btree_gist | public | gbt_intv_penalty | internal, internal, internal | internal + btree_gist | public | gbt_intv_picksplit | internal, internal | internal + btree_gist | public | gbt_intv_same | gbtreekey32, gbtreekey32, internal | internal + btree_gist | public | gbt_intv_union | internal, internal | gbtreekey32 + btree_gist | public | gbt_macad8_compress | internal | internal + btree_gist | public | gbt_macad8_consistent | internal, macaddr8, smallint, oid, internal | boolean + btree_gist | public | gbt_macad8_fetch | internal | internal + btree_gist | public | gbt_macad8_penalty | internal, internal, internal | internal + btree_gist | public | gbt_macad8_picksplit | 
internal, internal | internal + btree_gist | public | gbt_macad8_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_macad8_union | internal, internal | gbtreekey16 + btree_gist | public | gbt_macad_compress | internal | internal + btree_gist | public | gbt_macad_consistent | internal, macaddr, smallint, oid, internal | boolean + btree_gist | public | gbt_macad_fetch | internal | internal + btree_gist | public | gbt_macad_penalty | internal, internal, internal | internal + btree_gist | public | gbt_macad_picksplit | internal, internal | internal + btree_gist | public | gbt_macad_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_macad_union | internal, internal | gbtreekey16 + btree_gist | public | gbt_numeric_compress | internal | internal + btree_gist | public | gbt_numeric_consistent | internal, numeric, smallint, oid, internal | boolean + btree_gist | public | gbt_numeric_penalty | internal, internal, internal | internal + btree_gist | public | gbt_numeric_picksplit | internal, internal | internal + btree_gist | public | gbt_numeric_same | gbtreekey_var, gbtreekey_var, internal | internal + btree_gist | public | gbt_numeric_union | internal, internal | gbtreekey_var + btree_gist | public | gbt_oid_compress | internal | internal + btree_gist | public | gbt_oid_consistent | internal, oid, smallint, oid, internal | boolean + btree_gist | public | gbt_oid_distance | internal, oid, smallint, oid, internal | double precision + btree_gist | public | gbt_oid_fetch | internal | internal + btree_gist | public | gbt_oid_penalty | internal, internal, internal | internal + btree_gist | public | gbt_oid_picksplit | internal, internal | internal + btree_gist | public | gbt_oid_same | gbtreekey8, gbtreekey8, internal | internal + btree_gist | public | gbt_oid_union | internal, internal | gbtreekey8 + btree_gist | public | gbt_text_compress | internal | internal + btree_gist | public | gbt_text_consistent | internal, 
text, smallint, oid, internal | boolean + btree_gist | public | gbt_text_penalty | internal, internal, internal | internal + btree_gist | public | gbt_text_picksplit | internal, internal | internal + btree_gist | public | gbt_text_same | gbtreekey_var, gbtreekey_var, internal | internal + btree_gist | public | gbt_text_union | internal, internal | gbtreekey_var + btree_gist | public | gbt_time_compress | internal | internal + btree_gist | public | gbt_time_consistent | internal, time without time zone, smallint, oid, internal | boolean + btree_gist | public | gbt_time_distance | internal, time without time zone, smallint, oid, internal | double precision + btree_gist | public | gbt_time_fetch | internal | internal + btree_gist | public | gbt_time_penalty | internal, internal, internal | internal + btree_gist | public | gbt_time_picksplit | internal, internal | internal + btree_gist | public | gbt_time_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_time_union | internal, internal | gbtreekey16 + btree_gist | public | gbt_timetz_compress | internal | internal + btree_gist | public | gbt_timetz_consistent | internal, time with time zone, smallint, oid, internal | boolean + btree_gist | public | gbt_ts_compress | internal | internal + btree_gist | public | gbt_ts_consistent | internal, timestamp without time zone, smallint, oid, internal | boolean + btree_gist | public | gbt_ts_distance | internal, timestamp without time zone, smallint, oid, internal | double precision + btree_gist | public | gbt_ts_fetch | internal | internal + btree_gist | public | gbt_ts_penalty | internal, internal, internal | internal + btree_gist | public | gbt_ts_picksplit | internal, internal | internal + btree_gist | public | gbt_ts_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_ts_union | internal, internal | gbtreekey16 + btree_gist | public | gbt_tstz_compress | internal | internal + btree_gist | public | 
gbt_tstz_consistent | internal, timestamp with time zone, smallint, oid, internal | boolean + btree_gist | public | gbt_tstz_distance | internal, timestamp with time zone, smallint, oid, internal | double precision + btree_gist | public | gbt_uuid_compress | internal | internal + btree_gist | public | gbt_uuid_consistent | internal, uuid, smallint, oid, internal | boolean + btree_gist | public | gbt_uuid_fetch | internal | internal + btree_gist | public | gbt_uuid_penalty | internal, internal, internal | internal + btree_gist | public | gbt_uuid_picksplit | internal, internal | internal + btree_gist | public | gbt_uuid_same | gbtreekey32, gbtreekey32, internal | internal + btree_gist | public | gbt_uuid_union | internal, internal | gbtreekey32 + btree_gist | public | gbt_var_decompress | internal | internal + btree_gist | public | gbt_var_fetch | internal | internal + btree_gist | public | gbtreekey16_in | cstring | gbtreekey16 + btree_gist | public | gbtreekey16_out | gbtreekey16 | cstring + btree_gist | public | gbtreekey2_in | cstring | gbtreekey2 + btree_gist | public | gbtreekey2_out | gbtreekey2 | cstring + btree_gist | public | gbtreekey32_in | cstring | gbtreekey32 + btree_gist | public | gbtreekey32_out | gbtreekey32 | cstring + btree_gist | public | gbtreekey4_in | cstring | gbtreekey4 + btree_gist | public | gbtreekey4_out | gbtreekey4 | cstring + btree_gist | public | gbtreekey8_in | cstring | gbtreekey8 + btree_gist | public | gbtreekey8_out | gbtreekey8 | cstring + btree_gist | public | gbtreekey_var_in | cstring | gbtreekey_var + btree_gist | public | gbtreekey_var_out | gbtreekey_var | cstring + btree_gist | public | int2_dist | smallint, smallint | smallint + btree_gist | public | int4_dist | integer, integer | integer + btree_gist | public | int8_dist | bigint, bigint | bigint + btree_gist | public | interval_dist | interval, interval | interval + btree_gist | public | oid_dist | oid, oid | oid + btree_gist | public | time_dist | time without time 
zone, time without time zone | interval + btree_gist | public | ts_dist | timestamp without time zone, timestamp without time zone | interval + btree_gist | public | tstz_dist | timestamp with time zone, timestamp with time zone | interval + citext | public | citext | boolean | citext + citext | public | citext | character | citext + citext | public | citext | inet | citext + citext | public | citext_cmp | citext, citext | integer + citext | public | citext_eq | citext, citext | boolean + citext | public | citext_ge | citext, citext | boolean + citext | public | citext_gt | citext, citext | boolean + citext | public | citext_hash | citext | integer + citext | public | citext_hash_extended | citext, bigint | bigint + citext | public | citext_larger | citext, citext | citext + citext | public | citext_le | citext, citext | boolean + citext | public | citext_lt | citext, citext | boolean + citext | public | citext_ne | citext, citext | boolean + citext | public | citext_pattern_cmp | citext, citext | integer + citext | public | citext_pattern_ge | citext, citext | boolean + citext | public | citext_pattern_gt | citext, citext | boolean + citext | public | citext_pattern_le | citext, citext | boolean + citext | public | citext_pattern_lt | citext, citext | boolean + citext | public | citext_smaller | citext, citext | citext + citext | public | citextin | cstring | citext + citext | public | citextout | citext | cstring + citext | public | citextrecv | internal | citext + citext | public | citextsend | citext | bytea + citext | public | max | citext | citext + citext | public | min | citext | citext + citext | public | regexp_match | citext, citext | text[] + citext | public | regexp_match | citext, citext, text | text[] + citext | public | regexp_matches | citext, citext | SETOF text[] + citext | public | regexp_matches | citext, citext, text | SETOF text[] + citext | public | regexp_replace | citext, citext, text | text + citext | public | regexp_replace | citext, 
citext, text, text | text + citext | public | regexp_split_to_array | citext, citext | text[] + citext | public | regexp_split_to_array | citext, citext, text | text[] + citext | public | regexp_split_to_table | citext, citext | SETOF text + citext | public | regexp_split_to_table | citext, citext, text | SETOF text + citext | public | replace | citext, citext, citext | text + citext | public | split_part | citext, citext, integer | text + citext | public | strpos | citext, citext | integer + citext | public | texticlike | citext, citext | boolean + citext | public | texticlike | citext, text | boolean + citext | public | texticnlike | citext, citext | boolean + citext | public | texticnlike | citext, text | boolean + citext | public | texticregexeq | citext, citext | boolean + citext | public | texticregexeq | citext, text | boolean + citext | public | texticregexne | citext, citext | boolean + citext | public | texticregexne | citext, text | boolean + citext | public | translate | citext, citext, text | text + cube | public | cube | cube, double precision | cube + cube | public | cube | cube, double precision, double precision | cube + cube | public | cube | double precision | cube + cube | public | cube | double precision, double precision | cube + cube | public | cube | double precision[] | cube + cube | public | cube | double precision[], double precision[] | cube + cube | public | cube_cmp | cube, cube | integer + cube | public | cube_contained | cube, cube | boolean + cube | public | cube_contains | cube, cube | boolean + cube | public | cube_coord | cube, integer | double precision + cube | public | cube_coord_llur | cube, integer | double precision + cube | public | cube_dim | cube | integer + cube | public | cube_distance | cube, cube | double precision + cube | public | cube_enlarge | cube, double precision, integer | cube + cube | public | cube_eq | cube, cube | boolean + cube | public | cube_ge | cube, cube | boolean + cube | public | cube_gt | cube, 
cube | boolean + cube | public | cube_in | cstring | cube + cube | public | cube_inter | cube, cube | cube + cube | public | cube_is_point | cube | boolean + cube | public | cube_le | cube, cube | boolean + cube | public | cube_ll_coord | cube, integer | double precision + cube | public | cube_lt | cube, cube | boolean + cube | public | cube_ne | cube, cube | boolean + cube | public | cube_out | cube | cstring + cube | public | cube_overlap | cube, cube | boolean + cube | public | cube_recv | internal | cube + cube | public | cube_send | cube | bytea + cube | public | cube_size | cube | double precision + cube | public | cube_subset | cube, integer[] | cube + cube | public | cube_union | cube, cube | cube + cube | public | cube_ur_coord | cube, integer | double precision + cube | public | distance_chebyshev | cube, cube | double precision + cube | public | distance_taxicab | cube, cube | double precision + cube | public | g_cube_consistent | internal, cube, smallint, oid, internal | boolean + cube | public | g_cube_distance | internal, cube, smallint, oid, internal | double precision + cube | public | g_cube_penalty | internal, internal, internal | internal + cube | public | g_cube_picksplit | internal, internal | internal + cube | public | g_cube_same | cube, cube, internal | internal + cube | public | g_cube_union | internal, internal | cube + dblink | public | dblink | text | SETOF record + dblink | public | dblink | text, boolean | SETOF record + dblink | public | dblink | text, text | SETOF record + dblink | public | dblink | text, text, boolean | SETOF record + dblink | public | dblink_build_sql_delete | text, int2vector, integer, text[] | text + dblink | public | dblink_build_sql_insert | text, int2vector, integer, text[], text[] | text + dblink | public | dblink_build_sql_update | text, int2vector, integer, text[], text[] | text + dblink | public | dblink_cancel_query | text | text + dblink | public | dblink_close | text | text + dblink | public | 
dblink_close | text, boolean | text + dblink | public | dblink_close | text, text | text + dblink | public | dblink_close | text, text, boolean | text + dblink | public | dblink_connect | text | text + dblink | public | dblink_connect | text, text | text + dblink | public | dblink_connect_u | text | text + dblink | public | dblink_connect_u | text, text | text + dblink | public | dblink_current_query | | text + dblink | public | dblink_disconnect | | text + dblink | public | dblink_disconnect | text | text + dblink | public | dblink_error_message | text | text + dblink | public | dblink_exec | text | text + dblink | public | dblink_exec | text, boolean | text + dblink | public | dblink_exec | text, text | text + dblink | public | dblink_exec | text, text, boolean | text + dblink | public | dblink_fdw_validator | options text[], catalog oid | void + dblink | public | dblink_fetch | text, integer | SETOF record + dblink | public | dblink_fetch | text, integer, boolean | SETOF record + dblink | public | dblink_fetch | text, text, integer | SETOF record + dblink | public | dblink_fetch | text, text, integer, boolean | SETOF record + dblink | public | dblink_get_connections | | text[] + dblink | public | dblink_get_notify | OUT notify_name text, OUT be_pid integer, OUT extra text | SETOF record + dblink | public | dblink_get_notify | conname text, OUT notify_name text, OUT be_pid integer, OUT extra text | SETOF record + dblink | public | dblink_get_pkey | text | SETOF dblink_pkey_results + dblink | public | dblink_get_result | text | SETOF record + dblink | public | dblink_get_result | text, boolean | SETOF record + dblink | public | dblink_is_busy | text | integer + dblink | public | dblink_open | text, text | text + dblink | public | dblink_open | text, text, boolean | text + dblink | public | dblink_open | text, text, text | text + dblink | public | dblink_open | text, text, text, boolean | text + dblink | public | dblink_send_query | text, text | integer + dict_int 
| public | dintdict_init | internal | internal + dict_int | public | dintdict_lexize | internal, internal, internal, internal | internal + dict_xsyn | public | dxsyn_init | internal | internal + dict_xsyn | public | dxsyn_lexize | internal, internal, internal, internal | internal + earthdistance | public | earth | | double precision + earthdistance | public | earth_box | earth, double precision | cube + earthdistance | public | earth_distance | earth, earth | double precision + earthdistance | public | gc_to_sec | double precision | double precision + earthdistance | public | geo_distance | point, point | double precision + earthdistance | public | latitude | earth | double precision + earthdistance | public | ll_to_earth | double precision, double precision | earth + earthdistance | public | longitude | earth | double precision + earthdistance | public | sec_to_gc | double precision | double precision + file_fdw | public | file_fdw_handler | | fdw_handler + file_fdw | public | file_fdw_validator | text[], oid | void + fuzzystrmatch | public | difference | text, text | integer + fuzzystrmatch | public | dmetaphone | text | text + fuzzystrmatch | public | dmetaphone_alt | text | text + fuzzystrmatch | public | levenshtein | text, text | integer + fuzzystrmatch | public | levenshtein | text, text, integer, integer, integer | integer + fuzzystrmatch | public | levenshtein_less_equal | text, text, integer | integer + fuzzystrmatch | public | levenshtein_less_equal | text, text, integer, integer, integer, integer | integer + fuzzystrmatch | public | metaphone | text, integer | text + fuzzystrmatch | public | soundex | text | text + fuzzystrmatch | public | text_soundex | text | text + hstore | public | akeys | hstore | text[] + hstore | public | avals | hstore | text[] + hstore | public | defined | hstore, text | boolean + hstore | public | delete | hstore, hstore | hstore + hstore | public | delete | hstore, text | hstore + hstore | public | delete | hstore, text[] | 
hstore + hstore | public | each | hs hstore, OUT key text, OUT value text | SETOF record + hstore | public | exist | hstore, text | boolean + hstore | public | exists_all | hstore, text[] | boolean + hstore | public | exists_any | hstore, text[] | boolean + hstore | public | fetchval | hstore, text | text + hstore | public | ghstore_compress | internal | internal + hstore | public | ghstore_consistent | internal, hstore, smallint, oid, internal | boolean + hstore | public | ghstore_decompress | internal | internal + hstore | public | ghstore_in | cstring | ghstore + hstore | public | ghstore_options | internal | void + hstore | public | ghstore_out | ghstore | cstring + hstore | public | ghstore_penalty | internal, internal, internal | internal + hstore | public | ghstore_picksplit | internal, internal | internal + hstore | public | ghstore_same | ghstore, ghstore, internal | internal + hstore | public | ghstore_union | internal, internal | ghstore + hstore | public | gin_consistent_hstore | internal, smallint, hstore, integer, internal, internal | boolean + hstore | public | gin_extract_hstore | hstore, internal | internal + hstore | public | gin_extract_hstore_query | hstore, internal, smallint, internal, internal | internal + hstore | public | hs_concat | hstore, hstore | hstore + hstore | public | hs_contained | hstore, hstore | boolean + hstore | public | hs_contains | hstore, hstore | boolean + hstore | public | hstore | record | hstore + hstore | public | hstore | text, text | hstore + hstore | public | hstore | text[] | hstore + hstore | public | hstore | text[], text[] | hstore + hstore | public | hstore_cmp | hstore, hstore | integer + hstore | public | hstore_eq | hstore, hstore | boolean + hstore | public | hstore_ge | hstore, hstore | boolean + hstore | public | hstore_gt | hstore, hstore | boolean + hstore | public | hstore_hash | hstore | integer + hstore | public | hstore_hash_extended | hstore, bigint | bigint + hstore | public | hstore_in | 
cstring | hstore + hstore | public | hstore_le | hstore, hstore | boolean + hstore | public | hstore_lt | hstore, hstore | boolean + hstore | public | hstore_ne | hstore, hstore | boolean + hstore | public | hstore_out | hstore | cstring + hstore | public | hstore_recv | internal | hstore + hstore | public | hstore_send | hstore | bytea + hstore | public | hstore_subscript_handler | internal | internal + hstore | public | hstore_to_array | hstore | text[] + hstore | public | hstore_to_json | hstore | json + hstore | public | hstore_to_json_loose | hstore | json + hstore | public | hstore_to_jsonb | hstore | jsonb + hstore | public | hstore_to_jsonb_loose | hstore | jsonb + hstore | public | hstore_to_matrix | hstore | text[] + hstore | public | hstore_version_diag | hstore | integer + hstore | public | isdefined | hstore, text | boolean + hstore | public | isexists | hstore, text | boolean + hstore | public | populate_record | anyelement, hstore | anyelement + hstore | public | skeys | hstore | SETOF text + hstore | public | slice | hstore, text[] | hstore + hstore | public | slice_array | hstore, text[] | text[] + hstore | public | svals | hstore | SETOF text + hstore | public | tconvert | text, text | hstore + http | public | bytea_to_text | data bytea | text + http | public | http | request http_request | http_response + http | public | http_delete | uri character varying | http_response + http | public | http_delete | uri character varying, content character varying, content_type character varying | http_response + http | public | http_get | uri character varying | http_response + http | public | http_get | uri character varying, data jsonb | http_response + http | public | http_head | uri character varying | http_response + http | public | http_header | field character varying, value character varying | http_header + http | public | http_list_curlopt | | TABLE(curlopt text, value text) + http | public | http_patch | uri character varying, content character 
varying, content_type character varying | http_response + http | public | http_post | uri character varying, content character varying, content_type character varying | http_response + http | public | http_post | uri character varying, data jsonb | http_response + http | public | http_put | uri character varying, content character varying, content_type character varying | http_response + http | public | http_reset_curlopt | | boolean + http | public | http_set_curlopt | curlopt character varying, value character varying | boolean + http | public | text_to_bytea | data text | bytea + http | public | urlencode | data jsonb | text + http | public | urlencode | string bytea | text + http | public | urlencode | string character varying | text + hypopg | public | hypopg | OUT indexname text, OUT indexrelid oid, OUT indrelid oid, OUT innatts integer, OUT indisunique boolean, OUT indkey int2vector, OUT indcollation oidvector, OUT indclass oidvector, OUT indoption oidvector, OUT indexprs pg_node_tree, OUT indpred pg_node_tree, OUT amid oid | SETOF record + hypopg | public | hypopg_create_index | sql_order text, OUT indexrelid oid, OUT indexname text | SETOF record + hypopg | public | hypopg_drop_index | indexid oid | boolean + hypopg | public | hypopg_get_indexdef | indexid oid | text + hypopg | public | hypopg_hidden_indexes | | TABLE(indexid oid) + hypopg | public | hypopg_hide_index | indexid oid | boolean + hypopg | public | hypopg_relation_size | indexid oid | bigint + hypopg | public | hypopg_reset | | void + hypopg | public | hypopg_reset_index | | void + hypopg | public | hypopg_unhide_all_indexes | | void + hypopg | public | hypopg_unhide_index | indexid oid | boolean + index_advisor | public | index_advisor | query text | TABLE(startup_cost_before jsonb, startup_cost_after jsonb, total_cost_before jsonb, total_cost_after jsonb, index_statements text[], errors text[]) + insert_username | public | insert_username | | trigger + intagg | public | int_agg_final_array | 
internal | integer[] + intagg | public | int_agg_state | internal, integer | internal + intagg | public | int_array_aggregate | integer | integer[] + intagg | public | int_array_enum | integer[] | SETOF integer + intarray | public | _int_contained | integer[], integer[] | boolean + intarray | public | _int_contained_joinsel | internal, oid, internal, smallint, internal | double precision + intarray | public | _int_contained_sel | internal, oid, internal, integer | double precision + intarray | public | _int_contains | integer[], integer[] | boolean + intarray | public | _int_contains_joinsel | internal, oid, internal, smallint, internal | double precision + intarray | public | _int_contains_sel | internal, oid, internal, integer | double precision + intarray | public | _int_different | integer[], integer[] | boolean + intarray | public | _int_inter | integer[], integer[] | integer[] + intarray | public | _int_matchsel | internal, oid, internal, integer | double precision + intarray | public | _int_overlap | integer[], integer[] | boolean + intarray | public | _int_overlap_joinsel | internal, oid, internal, smallint, internal | double precision + intarray | public | _int_overlap_sel | internal, oid, internal, integer | double precision + intarray | public | _int_same | integer[], integer[] | boolean + intarray | public | _int_union | integer[], integer[] | integer[] + intarray | public | _intbig_in | cstring | intbig_gkey + intarray | public | _intbig_out | intbig_gkey | cstring + intarray | public | boolop | integer[], query_int | boolean + intarray | public | bqarr_in | cstring | query_int + intarray | public | bqarr_out | query_int | cstring + intarray | public | g_int_compress | internal | internal + intarray | public | g_int_consistent | internal, integer[], smallint, oid, internal | boolean + intarray | public | g_int_decompress | internal | internal + intarray | public | g_int_options | internal | void + intarray | public | g_int_penalty | internal, internal, 
internal | internal + intarray | public | g_int_picksplit | internal, internal | internal + intarray | public | g_int_same | integer[], integer[], internal | internal + intarray | public | g_int_union | internal, internal | integer[] + intarray | public | g_intbig_compress | internal | internal + intarray | public | g_intbig_consistent | internal, integer[], smallint, oid, internal | boolean + intarray | public | g_intbig_decompress | internal | internal + intarray | public | g_intbig_options | internal | void + intarray | public | g_intbig_penalty | internal, internal, internal | internal + intarray | public | g_intbig_picksplit | internal, internal | internal + intarray | public | g_intbig_same | intbig_gkey, intbig_gkey, internal | internal + intarray | public | g_intbig_union | internal, internal | intbig_gkey + intarray | public | ginint4_consistent | internal, smallint, integer[], integer, internal, internal, internal, internal | boolean + intarray | public | ginint4_queryextract | integer[], internal, smallint, internal, internal, internal, internal | internal + intarray | public | icount | integer[] | integer + intarray | public | idx | integer[], integer | integer + intarray | public | intarray_del_elem | integer[], integer | integer[] + intarray | public | intarray_push_array | integer[], integer[] | integer[] + intarray | public | intarray_push_elem | integer[], integer | integer[] + intarray | public | intset | integer | integer[] + intarray | public | intset_subtract | integer[], integer[] | integer[] + intarray | public | intset_union_elem | integer[], integer | integer[] + intarray | public | querytree | query_int | text + intarray | public | rboolop | query_int, integer[] | boolean + intarray | public | sort | integer[] | integer[] + intarray | public | sort | integer[], text | integer[] + intarray | public | sort_asc | integer[] | integer[] + intarray | public | sort_desc | integer[] | integer[] + intarray | public | subarray | integer[], integer | 
integer[] + intarray | public | subarray | integer[], integer, integer | integer[] + intarray | public | uniq | integer[] | integer[] + isn | public | btean13cmp | ean13, ean13 | integer + isn | public | btean13cmp | ean13, isbn | integer + isn | public | btean13cmp | ean13, isbn13 | integer + isn | public | btean13cmp | ean13, ismn | integer + isn | public | btean13cmp | ean13, ismn13 | integer + isn | public | btean13cmp | ean13, issn | integer + isn | public | btean13cmp | ean13, issn13 | integer + isn | public | btean13cmp | ean13, upc | integer + isn | public | btisbn13cmp | isbn13, ean13 | integer + isn | public | btisbn13cmp | isbn13, isbn | integer + isn | public | btisbn13cmp | isbn13, isbn13 | integer + isn | public | btisbncmp | isbn, ean13 | integer + isn | public | btisbncmp | isbn, isbn | integer + isn | public | btisbncmp | isbn, isbn13 | integer + isn | public | btismn13cmp | ismn13, ean13 | integer + isn | public | btismn13cmp | ismn13, ismn | integer + isn | public | btismn13cmp | ismn13, ismn13 | integer + isn | public | btismncmp | ismn, ean13 | integer + isn | public | btismncmp | ismn, ismn | integer + isn | public | btismncmp | ismn, ismn13 | integer + isn | public | btissn13cmp | issn13, ean13 | integer + isn | public | btissn13cmp | issn13, issn | integer + isn | public | btissn13cmp | issn13, issn13 | integer + isn | public | btissncmp | issn, ean13 | integer + isn | public | btissncmp | issn, issn | integer + isn | public | btissncmp | issn, issn13 | integer + isn | public | btupccmp | upc, ean13 | integer + isn | public | btupccmp | upc, upc | integer + isn | public | ean13_in | cstring | ean13 + isn | public | ean13_out | ean13 | cstring + isn | public | ean13_out | isbn13 | cstring + isn | public | ean13_out | ismn13 | cstring + isn | public | ean13_out | issn13 | cstring + isn | public | hashean13 | ean13 | integer + isn | public | hashisbn | isbn | integer + isn | public | hashisbn13 | isbn13 | integer + isn | public | hashismn | 
ismn | integer + isn | public | hashismn13 | ismn13 | integer + isn | public | hashissn | issn | integer + isn | public | hashissn13 | issn13 | integer + isn | public | hashupc | upc | integer + isn | public | is_valid | ean13 | boolean + isn | public | is_valid | isbn | boolean + isn | public | is_valid | isbn13 | boolean + isn | public | is_valid | ismn | boolean + isn | public | is_valid | ismn13 | boolean + isn | public | is_valid | issn | boolean + isn | public | is_valid | issn13 | boolean + isn | public | is_valid | upc | boolean + isn | public | isbn | ean13 | isbn + isn | public | isbn13 | ean13 | isbn13 + isn | public | isbn13_in | cstring | isbn13 + isn | public | isbn_in | cstring | isbn + isn | public | ismn | ean13 | ismn + isn | public | ismn13 | ean13 | ismn13 + isn | public | ismn13_in | cstring | ismn13 + isn | public | ismn_in | cstring | ismn + isn | public | isn_out | isbn | cstring + isn | public | isn_out | ismn | cstring + isn | public | isn_out | issn | cstring + isn | public | isn_out | upc | cstring + isn | public | isn_weak | | boolean + isn | public | isn_weak | boolean | boolean + isn | public | isneq | ean13, ean13 | boolean + isn | public | isneq | ean13, isbn | boolean + isn | public | isneq | ean13, isbn13 | boolean + isn | public | isneq | ean13, ismn | boolean + isn | public | isneq | ean13, ismn13 | boolean + isn | public | isneq | ean13, issn | boolean + isn | public | isneq | ean13, issn13 | boolean + isn | public | isneq | ean13, upc | boolean + isn | public | isneq | isbn, ean13 | boolean + isn | public | isneq | isbn, isbn | boolean + isn | public | isneq | isbn, isbn13 | boolean + isn | public | isneq | isbn13, ean13 | boolean + isn | public | isneq | isbn13, isbn | boolean + isn | public | isneq | isbn13, isbn13 | boolean + isn | public | isneq | ismn, ean13 | boolean + isn | public | isneq | ismn, ismn | boolean + isn | public | isneq | ismn, ismn13 | boolean + isn | public | isneq | ismn13, ean13 | boolean + isn | 
public | isneq | ismn13, ismn | boolean + isn | public | isneq | ismn13, ismn13 | boolean + isn | public | isneq | issn, ean13 | boolean + isn | public | isneq | issn, issn | boolean + isn | public | isneq | issn, issn13 | boolean + isn | public | isneq | issn13, ean13 | boolean + isn | public | isneq | issn13, issn | boolean + isn | public | isneq | issn13, issn13 | boolean + isn | public | isneq | upc, ean13 | boolean + isn | public | isneq | upc, upc | boolean + isn | public | isnge | ean13, ean13 | boolean + isn | public | isnge | ean13, isbn | boolean + isn | public | isnge | ean13, isbn13 | boolean + isn | public | isnge | ean13, ismn | boolean + isn | public | isnge | ean13, ismn13 | boolean + isn | public | isnge | ean13, issn | boolean + isn | public | isnge | ean13, issn13 | boolean + isn | public | isnge | ean13, upc | boolean + isn | public | isnge | isbn, ean13 | boolean + isn | public | isnge | isbn, isbn | boolean + isn | public | isnge | isbn, isbn13 | boolean + isn | public | isnge | isbn13, ean13 | boolean + isn | public | isnge | isbn13, isbn | boolean + isn | public | isnge | isbn13, isbn13 | boolean + isn | public | isnge | ismn, ean13 | boolean + isn | public | isnge | ismn, ismn | boolean + isn | public | isnge | ismn, ismn13 | boolean + isn | public | isnge | ismn13, ean13 | boolean + isn | public | isnge | ismn13, ismn | boolean + isn | public | isnge | ismn13, ismn13 | boolean + isn | public | isnge | issn, ean13 | boolean + isn | public | isnge | issn, issn | boolean + isn | public | isnge | issn, issn13 | boolean + isn | public | isnge | issn13, ean13 | boolean + isn | public | isnge | issn13, issn | boolean + isn | public | isnge | issn13, issn13 | boolean + isn | public | isnge | upc, ean13 | boolean + isn | public | isnge | upc, upc | boolean + isn | public | isngt | ean13, ean13 | boolean + isn | public | isngt | ean13, isbn | boolean + isn | public | isngt | ean13, isbn13 | boolean + isn | public | isngt | ean13, ismn | boolean + 
isn | public | isngt | ean13, ismn13 | boolean + isn | public | isngt | ean13, issn | boolean + isn | public | isngt | ean13, issn13 | boolean + isn | public | isngt | ean13, upc | boolean + isn | public | isngt | isbn, ean13 | boolean + isn | public | isngt | isbn, isbn | boolean + isn | public | isngt | isbn, isbn13 | boolean + isn | public | isngt | isbn13, ean13 | boolean + isn | public | isngt | isbn13, isbn | boolean + isn | public | isngt | isbn13, isbn13 | boolean + isn | public | isngt | ismn, ean13 | boolean + isn | public | isngt | ismn, ismn | boolean + isn | public | isngt | ismn, ismn13 | boolean + isn | public | isngt | ismn13, ean13 | boolean + isn | public | isngt | ismn13, ismn | boolean + isn | public | isngt | ismn13, ismn13 | boolean + isn | public | isngt | issn, ean13 | boolean + isn | public | isngt | issn, issn | boolean + isn | public | isngt | issn, issn13 | boolean + isn | public | isngt | issn13, ean13 | boolean + isn | public | isngt | issn13, issn | boolean + isn | public | isngt | issn13, issn13 | boolean + isn | public | isngt | upc, ean13 | boolean + isn | public | isngt | upc, upc | boolean + isn | public | isnle | ean13, ean13 | boolean + isn | public | isnle | ean13, isbn | boolean + isn | public | isnle | ean13, isbn13 | boolean + isn | public | isnle | ean13, ismn | boolean + isn | public | isnle | ean13, ismn13 | boolean + isn | public | isnle | ean13, issn | boolean + isn | public | isnle | ean13, issn13 | boolean + isn | public | isnle | ean13, upc | boolean + isn | public | isnle | isbn, ean13 | boolean + isn | public | isnle | isbn, isbn | boolean + isn | public | isnle | isbn, isbn13 | boolean + isn | public | isnle | isbn13, ean13 | boolean + isn | public | isnle | isbn13, isbn | boolean + isn | public | isnle | isbn13, isbn13 | boolean + isn | public | isnle | ismn, ean13 | boolean + isn | public | isnle | ismn, ismn | boolean + isn | public | isnle | ismn, ismn13 | boolean + isn | public | isnle | ismn13, ean13 | 
boolean + isn | public | isnle | ismn13, ismn | boolean + isn | public | isnle | ismn13, ismn13 | boolean + isn | public | isnle | issn, ean13 | boolean + isn | public | isnle | issn, issn | boolean + isn | public | isnle | issn, issn13 | boolean + isn | public | isnle | issn13, ean13 | boolean + isn | public | isnle | issn13, issn | boolean + isn | public | isnle | issn13, issn13 | boolean + isn | public | isnle | upc, ean13 | boolean + isn | public | isnle | upc, upc | boolean + isn | public | isnlt | ean13, ean13 | boolean + isn | public | isnlt | ean13, isbn | boolean + isn | public | isnlt | ean13, isbn13 | boolean + isn | public | isnlt | ean13, ismn | boolean + isn | public | isnlt | ean13, ismn13 | boolean + isn | public | isnlt | ean13, issn | boolean + isn | public | isnlt | ean13, issn13 | boolean + isn | public | isnlt | ean13, upc | boolean + isn | public | isnlt | isbn, ean13 | boolean + isn | public | isnlt | isbn, isbn | boolean + isn | public | isnlt | isbn, isbn13 | boolean + isn | public | isnlt | isbn13, ean13 | boolean + isn | public | isnlt | isbn13, isbn | boolean + isn | public | isnlt | isbn13, isbn13 | boolean + isn | public | isnlt | ismn, ean13 | boolean + isn | public | isnlt | ismn, ismn | boolean + isn | public | isnlt | ismn, ismn13 | boolean + isn | public | isnlt | ismn13, ean13 | boolean + isn | public | isnlt | ismn13, ismn | boolean + isn | public | isnlt | ismn13, ismn13 | boolean + isn | public | isnlt | issn, ean13 | boolean + isn | public | isnlt | issn, issn | boolean + isn | public | isnlt | issn, issn13 | boolean + isn | public | isnlt | issn13, ean13 | boolean + isn | public | isnlt | issn13, issn | boolean + isn | public | isnlt | issn13, issn13 | boolean + isn | public | isnlt | upc, ean13 | boolean + isn | public | isnlt | upc, upc | boolean + isn | public | isnne | ean13, ean13 | boolean + isn | public | isnne | ean13, isbn | boolean + isn | public | isnne | ean13, isbn13 | boolean + isn | public | isnne | ean13, 
ismn | boolean + isn | public | isnne | ean13, ismn13 | boolean + isn | public | isnne | ean13, issn | boolean + isn | public | isnne | ean13, issn13 | boolean + isn | public | isnne | ean13, upc | boolean + isn | public | isnne | isbn, ean13 | boolean + isn | public | isnne | isbn, isbn | boolean + isn | public | isnne | isbn, isbn13 | boolean + isn | public | isnne | isbn13, ean13 | boolean + isn | public | isnne | isbn13, isbn | boolean + isn | public | isnne | isbn13, isbn13 | boolean + isn | public | isnne | ismn, ean13 | boolean + isn | public | isnne | ismn, ismn | boolean + isn | public | isnne | ismn, ismn13 | boolean + isn | public | isnne | ismn13, ean13 | boolean + isn | public | isnne | ismn13, ismn | boolean + isn | public | isnne | ismn13, ismn13 | boolean + isn | public | isnne | issn, ean13 | boolean + isn | public | isnne | issn, issn | boolean + isn | public | isnne | issn, issn13 | boolean + isn | public | isnne | issn13, ean13 | boolean + isn | public | isnne | issn13, issn | boolean + isn | public | isnne | issn13, issn13 | boolean + isn | public | isnne | upc, ean13 | boolean + isn | public | isnne | upc, upc | boolean + isn | public | issn | ean13 | issn + isn | public | issn13 | ean13 | issn13 + isn | public | issn13_in | cstring | issn13 + isn | public | issn_in | cstring | issn + isn | public | make_valid | ean13 | ean13 + isn | public | make_valid | isbn | isbn + isn | public | make_valid | isbn13 | isbn13 + isn | public | make_valid | ismn | ismn + isn | public | make_valid | ismn13 | ismn13 + isn | public | make_valid | issn | issn + isn | public | make_valid | issn13 | issn13 + isn | public | make_valid | upc | upc + isn | public | upc | ean13 | upc + isn | public | upc_in | cstring | upc + lo | public | lo_manage | | trigger + lo | public | lo_oid | lo | oid + ltree | public | _lt_q_regex | ltree[], lquery[] | boolean + ltree | public | _lt_q_rregex | lquery[], ltree[] | boolean + ltree | public | _ltq_extract_regex | ltree[], lquery 
| ltree + ltree | public | _ltq_regex | ltree[], lquery | boolean + ltree | public | _ltq_rregex | lquery, ltree[] | boolean + ltree | public | _ltree_compress | internal | internal + ltree | public | _ltree_consistent | internal, ltree[], smallint, oid, internal | boolean + ltree | public | _ltree_extract_isparent | ltree[], ltree | ltree + ltree | public | _ltree_extract_risparent | ltree[], ltree | ltree + ltree | public | _ltree_gist_options | internal | void + ltree | public | _ltree_isparent | ltree[], ltree | boolean + ltree | public | _ltree_penalty | internal, internal, internal | internal + ltree | public | _ltree_picksplit | internal, internal | internal + ltree | public | _ltree_r_isparent | ltree, ltree[] | boolean + ltree | public | _ltree_r_risparent | ltree, ltree[] | boolean + ltree | public | _ltree_risparent | ltree[], ltree | boolean + ltree | public | _ltree_same | ltree_gist, ltree_gist, internal | internal + ltree | public | _ltree_union | internal, internal | ltree_gist + ltree | public | _ltxtq_exec | ltree[], ltxtquery | boolean + ltree | public | _ltxtq_extract_exec | ltree[], ltxtquery | ltree + ltree | public | _ltxtq_rexec | ltxtquery, ltree[] | boolean + ltree | public | index | ltree, ltree | integer + ltree | public | index | ltree, ltree, integer | integer + ltree | public | lca | ltree, ltree | ltree + ltree | public | lca | ltree, ltree, ltree | ltree + ltree | public | lca | ltree, ltree, ltree, ltree | ltree + ltree | public | lca | ltree, ltree, ltree, ltree, ltree | ltree + ltree | public | lca | ltree, ltree, ltree, ltree, ltree, ltree | ltree + ltree | public | lca | ltree, ltree, ltree, ltree, ltree, ltree, ltree | ltree + ltree | public | lca | ltree, ltree, ltree, ltree, ltree, ltree, ltree, ltree | ltree + ltree | public | lca | ltree[] | ltree + ltree | public | lquery_in | cstring | lquery + ltree | public | lquery_out | lquery | cstring + ltree | public | lquery_recv | internal | lquery + ltree | public | lquery_send 
| lquery | bytea + ltree | public | lt_q_regex | ltree, lquery[] | boolean + ltree | public | lt_q_rregex | lquery[], ltree | boolean + ltree | public | ltq_regex | ltree, lquery | boolean + ltree | public | ltq_rregex | lquery, ltree | boolean + ltree | public | ltree2text | ltree | text + ltree | public | ltree_addltree | ltree, ltree | ltree + ltree | public | ltree_addtext | ltree, text | ltree + ltree | public | ltree_cmp | ltree, ltree | integer + ltree | public | ltree_compress | internal | internal + ltree | public | ltree_consistent | internal, ltree, smallint, oid, internal | boolean + ltree | public | ltree_decompress | internal | internal + ltree | public | ltree_eq | ltree, ltree | boolean + ltree | public | ltree_ge | ltree, ltree | boolean + ltree | public | ltree_gist_in | cstring | ltree_gist + ltree | public | ltree_gist_options | internal | void + ltree | public | ltree_gist_out | ltree_gist | cstring + ltree | public | ltree_gt | ltree, ltree | boolean + ltree | public | ltree_in | cstring | ltree + ltree | public | ltree_isparent | ltree, ltree | boolean + ltree | public | ltree_le | ltree, ltree | boolean + ltree | public | ltree_lt | ltree, ltree | boolean + ltree | public | ltree_ne | ltree, ltree | boolean + ltree | public | ltree_out | ltree | cstring + ltree | public | ltree_penalty | internal, internal, internal | internal + ltree | public | ltree_picksplit | internal, internal | internal + ltree | public | ltree_recv | internal | ltree + ltree | public | ltree_risparent | ltree, ltree | boolean + ltree | public | ltree_same | ltree_gist, ltree_gist, internal | internal + ltree | public | ltree_send | ltree | bytea + ltree | public | ltree_textadd | text, ltree | ltree + ltree | public | ltree_union | internal, internal | ltree_gist + ltree | public | ltreeparentsel | internal, oid, internal, integer | double precision + ltree | public | ltxtq_exec | ltree, ltxtquery | boolean + ltree | public | ltxtq_in | cstring | ltxtquery + ltree | 
public | ltxtq_out | ltxtquery | cstring + ltree | public | ltxtq_recv | internal | ltxtquery + ltree | public | ltxtq_rexec | ltxtquery, ltree | boolean + ltree | public | ltxtq_send | ltxtquery | bytea + ltree | public | nlevel | ltree | integer + ltree | public | subltree | ltree, integer, integer | ltree + ltree | public | subpath | ltree, integer | ltree + ltree | public | subpath | ltree, integer, integer | ltree + ltree | public | text2ltree | text | ltree + moddatetime | public | moddatetime | | trigger + old_snapshot | public | pg_old_snapshot_time_mapping | OUT array_offset integer, OUT end_timestamp timestamp with time zone, OUT newest_xmin xid | SETOF record + pageinspect | public | brin_metapage_info | page bytea, OUT magic text, OUT version integer, OUT pagesperrange integer, OUT lastrevmappage bigint | record + pageinspect | public | brin_page_items | page bytea, index_oid regclass, OUT itemoffset integer, OUT blknum bigint, OUT attnum integer, OUT allnulls boolean, OUT hasnulls boolean, OUT placeholder boolean, OUT value text | SETOF record + pageinspect | public | brin_page_type | page bytea | text + pageinspect | public | brin_revmap_data | page bytea, OUT pages tid | SETOF tid + pageinspect | public | bt_metap | relname text, OUT magic integer, OUT version integer, OUT root bigint, OUT level bigint, OUT fastroot bigint, OUT fastlevel bigint, OUT last_cleanup_num_delpages bigint, OUT last_cleanup_num_tuples double precision, OUT allequalimage boolean | record + pageinspect | public | bt_page_items | page bytea, OUT itemoffset smallint, OUT ctid tid, OUT itemlen smallint, OUT nulls boolean, OUT vars boolean, OUT data text, OUT dead boolean, OUT htid tid, OUT tids tid[] | SETOF record + pageinspect | public | bt_page_items | relname text, blkno bigint, OUT itemoffset smallint, OUT ctid tid, OUT itemlen smallint, OUT nulls boolean, OUT vars boolean, OUT data text, OUT dead boolean, OUT htid tid, OUT tids tid[] | SETOF record + pageinspect | public | 
bt_page_stats | relname text, blkno bigint, OUT blkno bigint, OUT type "char", OUT live_items integer, OUT dead_items integer, OUT avg_item_size integer, OUT page_size integer, OUT free_size integer, OUT btpo_prev bigint, OUT btpo_next bigint, OUT btpo_level bigint, OUT btpo_flags integer | record + pageinspect | public | fsm_page_contents | page bytea | text + pageinspect | public | get_raw_page | text, bigint | bytea + pageinspect | public | get_raw_page | text, text, bigint | bytea + pageinspect | public | gin_leafpage_items | page bytea, OUT first_tid tid, OUT nbytes smallint, OUT tids tid[] | SETOF record + pageinspect | public | gin_metapage_info | page bytea, OUT pending_head bigint, OUT pending_tail bigint, OUT tail_free_size integer, OUT n_pending_pages bigint, OUT n_pending_tuples bigint, OUT n_total_pages bigint, OUT n_entry_pages bigint, OUT n_data_pages bigint, OUT n_entries bigint, OUT version integer | record + pageinspect | public | gin_page_opaque_info | page bytea, OUT rightlink bigint, OUT maxoff integer, OUT flags text[] | record + pageinspect | public | gist_page_items | page bytea, index_oid regclass, OUT itemoffset smallint, OUT ctid tid, OUT itemlen smallint, OUT dead boolean, OUT keys text | SETOF record + pageinspect | public | gist_page_items_bytea | page bytea, OUT itemoffset smallint, OUT ctid tid, OUT itemlen smallint, OUT dead boolean, OUT key_data bytea | SETOF record + pageinspect | public | gist_page_opaque_info | page bytea, OUT lsn pg_lsn, OUT nsn pg_lsn, OUT rightlink bigint, OUT flags text[] | record + pageinspect | public | hash_bitmap_info | index_oid regclass, blkno bigint, OUT bitmapblkno bigint, OUT bitmapbit integer, OUT bitstatus boolean | SETOF record + pageinspect | public | hash_metapage_info | page bytea, OUT magic bigint, OUT version bigint, OUT ntuples double precision, OUT ffactor integer, OUT bsize integer, OUT bmsize integer, OUT bmshift integer, OUT maxbucket bigint, OUT highmask bigint, OUT lowmask bigint, OUT 
ovflpoint bigint, OUT firstfree bigint, OUT nmaps bigint, OUT procid oid, OUT spares bigint[], OUT mapp bigint[] | record + pageinspect | public | hash_page_items | page bytea, OUT itemoffset integer, OUT ctid tid, OUT data bigint | SETOF record + pageinspect | public | hash_page_stats | page bytea, OUT live_items integer, OUT dead_items integer, OUT page_size integer, OUT free_size integer, OUT hasho_prevblkno bigint, OUT hasho_nextblkno bigint, OUT hasho_bucket bigint, OUT hasho_flag integer, OUT hasho_page_id integer | record + pageinspect | public | hash_page_type | page bytea | text + pageinspect | public | heap_page_item_attrs | page bytea, rel_oid regclass, OUT lp smallint, OUT lp_off smallint, OUT lp_flags smallint, OUT lp_len smallint, OUT t_xmin xid, OUT t_xmax xid, OUT t_field3 integer, OUT t_ctid tid, OUT t_infomask2 integer, OUT t_infomask integer, OUT t_hoff smallint, OUT t_bits text, OUT t_oid oid, OUT t_attrs bytea[] | SETOF record + pageinspect | public | heap_page_item_attrs | page bytea, rel_oid regclass, do_detoast boolean, OUT lp smallint, OUT lp_off smallint, OUT lp_flags smallint, OUT lp_len smallint, OUT t_xmin xid, OUT t_xmax xid, OUT t_field3 integer, OUT t_ctid tid, OUT t_infomask2 integer, OUT t_infomask integer, OUT t_hoff smallint, OUT t_bits text, OUT t_oid oid, OUT t_attrs bytea[] | SETOF record + pageinspect | public | heap_page_items | page bytea, OUT lp smallint, OUT lp_off smallint, OUT lp_flags smallint, OUT lp_len smallint, OUT t_xmin xid, OUT t_xmax xid, OUT t_field3 integer, OUT t_ctid tid, OUT t_infomask2 integer, OUT t_infomask integer, OUT t_hoff smallint, OUT t_bits text, OUT t_oid oid, OUT t_data bytea | SETOF record + pageinspect | public | heap_tuple_infomask_flags | t_infomask integer, t_infomask2 integer, OUT raw_flags text[], OUT combined_flags text[] | record + pageinspect | public | page_checksum | page bytea, blkno bigint | smallint + pageinspect | public | page_header | page bytea, OUT lsn pg_lsn, OUT checksum 
smallint, OUT flags smallint, OUT lower integer, OUT upper integer, OUT special integer, OUT pagesize integer, OUT version smallint, OUT prune_xid xid | record + pageinspect | public | tuple_data_split | rel_oid oid, t_data bytea, t_infomask integer, t_infomask2 integer, t_bits text | bytea[] + pageinspect | public | tuple_data_split | rel_oid oid, t_data bytea, t_infomask integer, t_infomask2 integer, t_bits text, do_detoast boolean | bytea[] + pg_backtrace | public | pg_backtrace_init | | void + pg_buffercache | public | pg_buffercache_pages | | SETOF record + pg_freespacemap | public | pg_freespace | regclass, bigint | smallint + pg_freespacemap | public | pg_freespace | rel regclass, OUT blkno bigint, OUT avail smallint | SETOF record + pg_graphql | graphql | _internal_resolve | query text, variables jsonb, "operationName" text, extensions jsonb | jsonb + pg_graphql | graphql | comment_directive | comment_ text | jsonb + pg_graphql | graphql | exception | message text | text + pg_graphql | graphql | get_schema_version | | integer + pg_graphql | graphql | increment_schema_version | | event_trigger + pg_graphql | graphql | resolve | query text, variables jsonb, "operationName" text, extensions jsonb | jsonb + pg_hashids | public | hash_decode | text, text, integer | integer + pg_hashids | public | hash_encode | bigint | text + pg_hashids | public | hash_encode | bigint, text | text + pg_hashids | public | hash_encode | bigint, text, integer | text + pg_hashids | public | id_decode | text | bigint[] + pg_hashids | public | id_decode | text, text | bigint[] + pg_hashids | public | id_decode | text, text, integer | bigint[] + pg_hashids | public | id_decode | text, text, integer, text | bigint[] + pg_hashids | public | id_decode_once | text | bigint + pg_hashids | public | id_decode_once | text, text | bigint + pg_hashids | public | id_decode_once | text, text, integer | bigint + pg_hashids | public | id_decode_once | text, text, integer, text | bigint + pg_hashids 
| public | id_encode | bigint | text + pg_hashids | public | id_encode | bigint, text | text + pg_hashids | public | id_encode | bigint, text, integer | text + pg_hashids | public | id_encode | bigint, text, integer, text | text + pg_hashids | public | id_encode | bigint[] | text + pg_hashids | public | id_encode | bigint[], text | text + pg_hashids | public | id_encode | bigint[], text, integer | text + pg_hashids | public | id_encode | bigint[], text, integer, text | text + pg_jsonschema | public | json_matches_schema | schema json, instance json | boolean + pg_jsonschema | public | jsonb_matches_schema | schema json, instance jsonb | boolean + pg_jsonschema | public | jsonschema_is_valid | schema json | boolean + pg_jsonschema | public | jsonschema_validation_errors | schema json, instance json | text[] + pg_net | net | _await_response | request_id bigint | boolean + pg_net | net | _encode_url_with_params_array | url text, params_array text[] | text + pg_net | net | _http_collect_response | request_id bigint, async boolean | net.http_response_result + pg_net | net | _urlencode_string | string character varying | text + pg_net | net | check_worker_is_up | | void + pg_net | net | http_collect_response | request_id bigint, async boolean | net.http_response_result + pg_net | net | http_delete | url text, params jsonb, headers jsonb, timeout_milliseconds integer | bigint + pg_net | net | http_get | url text, params jsonb, headers jsonb, timeout_milliseconds integer | bigint + pg_net | net | http_post | url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer | bigint + pg_net | net | worker_restart | | boolean + pg_prewarm | public | autoprewarm_dump_now | | bigint + pg_prewarm | public | autoprewarm_start_worker | | void + pg_prewarm | public | pg_prewarm | regclass, mode text, fork text, first_block bigint, last_block bigint | bigint + pg_repack | repack | conflicted_triggers | oid | SETOF name + pg_repack | repack | create_index_type | oid, 
oid | void + pg_repack | repack | create_log_table | oid | void + pg_repack | repack | create_table | oid, name | void + pg_repack | repack | disable_autovacuum | regclass | void + pg_repack | repack | get_alter_col_storage | oid | text + pg_repack | repack | get_assign | oid, text | text + pg_repack | repack | get_columns_for_create_as | oid | text + pg_repack | repack | get_compare_pkey | oid, text | text + pg_repack | repack | get_create_index_type | oid, name | text + pg_repack | repack | get_create_trigger | relid oid, pkid oid | text + pg_repack | repack | get_drop_columns | oid, text | text + pg_repack | repack | get_enable_trigger | relid oid | text + pg_repack | repack | get_index_columns | oid | text + pg_repack | repack | get_order_by | oid, oid | text + pg_repack | repack | get_storage_param | oid | text + pg_repack | repack | get_table_and_inheritors | regclass | regclass[] + pg_repack | repack | oid2text | oid | text + pg_repack | repack | repack_apply | sql_peek cstring, sql_insert cstring, sql_delete cstring, sql_update cstring, sql_pop cstring, count integer | integer + pg_repack | repack | repack_drop | oid, integer | void + pg_repack | repack | repack_index_swap | oid | void + pg_repack | repack | repack_indexdef | oid, oid, name, boolean | text + pg_repack | repack | repack_swap | oid | void + pg_repack | repack | repack_trigger | | trigger + pg_repack | repack | version | | text + pg_repack | repack | version_sql | | text + pg_stat_monitor | public | decode_error_level | elevel integer | text + pg_stat_monitor | public | get_cmd_type | cmd_type integer | text + pg_stat_monitor | public | get_histogram_timings | | text + pg_stat_monitor | public | histogram | _bucket integer, _quryid bigint | SETOF record + pg_stat_monitor | public | pg_stat_monitor_internal | showtext boolean, OUT bucket bigint, OUT userid oid, OUT username text, OUT dbid oid, OUT datname text, OUT client_ip bigint, OUT queryid bigint, OUT planid bigint, OUT query text, OUT 
query_plan text, OUT pgsm_query_id bigint, OUT top_queryid bigint, OUT top_query text, OUT application_name text, OUT relations text, OUT cmd_type integer, OUT elevel integer, OUT sqlcode text, OUT message text, OUT bucket_start_time timestamp with time zone, OUT calls bigint, OUT total_exec_time double precision, OUT min_exec_time double precision, OUT max_exec_time double precision, OUT mean_exec_time double precision, OUT stddev_exec_time double precision, OUT rows bigint, OUT plans bigint, OUT total_plan_time double precision, OUT min_plan_time double precision, OUT max_plan_time double precision, OUT mean_plan_time double precision, OUT stddev_plan_time double precision, OUT shared_blks_hit bigint, OUT shared_blks_read bigint, OUT shared_blks_dirtied bigint, OUT shared_blks_written bigint, OUT local_blks_hit bigint, OUT local_blks_read bigint, OUT local_blks_dirtied bigint, OUT local_blks_written bigint, OUT temp_blks_read bigint, OUT temp_blks_written bigint, OUT shared_blk_read_time double precision, OUT shared_blk_write_time double precision, OUT local_blk_read_time double precision, OUT local_blk_write_time double precision, OUT temp_blk_read_time double precision, OUT temp_blk_write_time double precision, OUT resp_calls text, OUT cpu_user_time double precision, OUT cpu_sys_time double precision, OUT wal_records bigint, OUT wal_fpi bigint, OUT wal_bytes numeric, OUT comments text, OUT jit_functions bigint, OUT jit_generation_time double precision, OUT jit_inlining_count bigint, OUT jit_inlining_time double precision, OUT jit_optimization_count bigint, OUT jit_optimization_time double precision, OUT jit_emission_count bigint, OUT jit_emission_time double precision, OUT jit_deform_count bigint, OUT jit_deform_time double precision, OUT stats_since timestamp with time zone, OUT minmax_stats_since timestamp with time zone, OUT toplevel boolean, OUT bucket_done boolean | SETOF record + pg_stat_monitor | public | pg_stat_monitor_reset | | void + pg_stat_monitor 
| public | pg_stat_monitor_version | | text + pg_stat_monitor | public | pgsm_create_11_view | | integer + pg_stat_monitor | public | pgsm_create_13_view | | integer + pg_stat_monitor | public | pgsm_create_14_view | | integer + pg_stat_monitor | public | pgsm_create_15_view | | integer + pg_stat_monitor | public | pgsm_create_17_view | | integer + pg_stat_monitor | public | pgsm_create_view | | integer + pg_stat_monitor | public | range | | text[] + pg_stat_statements | public | pg_stat_statements | showtext boolean, OUT userid oid, OUT dbid oid, OUT toplevel boolean, OUT queryid bigint, OUT query text, OUT plans bigint, OUT total_plan_time double precision, OUT min_plan_time double precision, OUT max_plan_time double precision, OUT mean_plan_time double precision, OUT stddev_plan_time double precision, OUT calls bigint, OUT total_exec_time double precision, OUT min_exec_time double precision, OUT max_exec_time double precision, OUT mean_exec_time double precision, OUT stddev_exec_time double precision, OUT rows bigint, OUT shared_blks_hit bigint, OUT shared_blks_read bigint, OUT shared_blks_dirtied bigint, OUT shared_blks_written bigint, OUT local_blks_hit bigint, OUT local_blks_read bigint, OUT local_blks_dirtied bigint, OUT local_blks_written bigint, OUT temp_blks_read bigint, OUT temp_blks_written bigint, OUT blk_read_time double precision, OUT blk_write_time double precision, OUT temp_blk_read_time double precision, OUT temp_blk_write_time double precision, OUT wal_records bigint, OUT wal_fpi bigint, OUT wal_bytes numeric, OUT jit_functions bigint, OUT jit_generation_time double precision, OUT jit_inlining_count bigint, OUT jit_inlining_time double precision, OUT jit_optimization_count bigint, OUT jit_optimization_time double precision, OUT jit_emission_count bigint, OUT jit_emission_time double precision | SETOF record + pg_stat_statements | public | pg_stat_statements_info | OUT dealloc bigint, OUT stats_reset timestamp with time zone | record + 
pg_stat_statements | public | pg_stat_statements_reset | userid oid, dbid oid, queryid bigint | void + pg_surgery | public | heap_force_freeze | reloid regclass, tids tid[] | void + pg_surgery | public | heap_force_kill | reloid regclass, tids tid[] | void + pg_tle | pgtle | available_extension_versions | OUT name name, OUT version text, OUT superuser boolean, OUT trusted boolean, OUT relocatable boolean, OUT schema name, OUT requires name[], OUT comment text | SETOF record + pg_tle | pgtle | available_extensions | OUT name name, OUT default_version text, OUT comment text | SETOF record + pg_tle | pgtle | create_base_type | typenamespace regnamespace, typename name, infunc regprocedure, outfunc regprocedure, internallength integer, alignment text, storage text | void + pg_tle | pgtle | create_base_type_if_not_exists | typenamespace regnamespace, typename name, infunc regprocedure, outfunc regprocedure, internallength integer, alignment text, storage text | boolean + pg_tle | pgtle | create_operator_func | typenamespace regnamespace, typename name, opfunc regprocedure | void + pg_tle | pgtle | create_operator_func_if_not_exists | typenamespace regnamespace, typename name, opfunc regprocedure | boolean + pg_tle | pgtle | create_shell_type | typenamespace regnamespace, typename name | void + pg_tle | pgtle | create_shell_type_if_not_exists | typenamespace regnamespace, typename name | boolean + pg_tle | pgtle | extension_update_paths | name name, OUT source text, OUT target text, OUT path text | SETOF record + pg_tle | pgtle | install_extension | name text, version text, description text, ext text, requires text[] | boolean + pg_tle | pgtle | install_extension_version_sql | name text, version text, ext text | boolean + pg_tle | pgtle | install_update_path | name text, fromvers text, tovers text, ext text | boolean + pg_tle | pgtle | pg_tle_feature_info_sql_drop | | event_trigger + pg_tle | pgtle | register_feature | proc regproc, feature pgtle.pg_tle_features | void + 
pg_tle | pgtle | register_feature_if_not_exists | proc regproc, feature pgtle.pg_tle_features | boolean + pg_tle | pgtle | set_default_version | name text, version text | boolean + pg_tle | pgtle | uninstall_extension | extname text | boolean + pg_tle | pgtle | uninstall_extension | extname text, version text | boolean + pg_tle | pgtle | uninstall_extension_if_exists | extname text | boolean + pg_tle | pgtle | uninstall_update_path | extname text, fromvers text, tovers text | boolean + pg_tle | pgtle | uninstall_update_path_if_exists | extname text, fromvers text, tovers text | boolean + pg_tle | pgtle | unregister_feature | proc regproc, feature pgtle.pg_tle_features | void + pg_tle | pgtle | unregister_feature_if_exists | proc regproc, feature pgtle.pg_tle_features | boolean + pg_trgm | public | gin_extract_query_trgm | text, internal, smallint, internal, internal, internal, internal | internal + pg_trgm | public | gin_extract_value_trgm | text, internal | internal + pg_trgm | public | gin_trgm_consistent | internal, smallint, text, integer, internal, internal, internal, internal | boolean + pg_trgm | public | gin_trgm_triconsistent | internal, smallint, text, integer, internal, internal, internal | "char" + pg_trgm | public | gtrgm_compress | internal | internal + pg_trgm | public | gtrgm_consistent | internal, text, smallint, oid, internal | boolean + pg_trgm | public | gtrgm_decompress | internal | internal + pg_trgm | public | gtrgm_distance | internal, text, smallint, oid, internal | double precision + pg_trgm | public | gtrgm_in | cstring | gtrgm + pg_trgm | public | gtrgm_options | internal | void + pg_trgm | public | gtrgm_out | gtrgm | cstring + pg_trgm | public | gtrgm_penalty | internal, internal, internal | internal + pg_trgm | public | gtrgm_picksplit | internal, internal | internal + pg_trgm | public | gtrgm_same | gtrgm, gtrgm, internal | internal + pg_trgm | public | gtrgm_union | internal, internal | gtrgm + pg_trgm | public | set_limit | real | 
real + pg_trgm | public | show_limit | | real + pg_trgm | public | show_trgm | text | text[] + pg_trgm | public | similarity | text, text | real + pg_trgm | public | similarity_dist | text, text | real + pg_trgm | public | similarity_op | text, text | boolean + pg_trgm | public | strict_word_similarity | text, text | real + pg_trgm | public | strict_word_similarity_commutator_op | text, text | boolean + pg_trgm | public | strict_word_similarity_dist_commutator_op | text, text | real + pg_trgm | public | strict_word_similarity_dist_op | text, text | real + pg_trgm | public | strict_word_similarity_op | text, text | boolean + pg_trgm | public | word_similarity | text, text | real + pg_trgm | public | word_similarity_commutator_op | text, text | boolean + pg_trgm | public | word_similarity_dist_commutator_op | text, text | real + pg_trgm | public | word_similarity_dist_op | text, text | real + pg_trgm | public | word_similarity_op | text, text | boolean + pg_visibility | public | pg_check_frozen | regclass, OUT t_ctid tid | SETOF tid + pg_visibility | public | pg_check_visible | regclass, OUT t_ctid tid | SETOF tid + pg_visibility | public | pg_truncate_visibility_map | regclass | void + pg_visibility | public | pg_visibility | regclass, OUT blkno bigint, OUT all_visible boolean, OUT all_frozen boolean, OUT pd_all_visible boolean | SETOF record + pg_visibility | public | pg_visibility | regclass, blkno bigint, OUT all_visible boolean, OUT all_frozen boolean, OUT pd_all_visible boolean | record + pg_visibility | public | pg_visibility_map | regclass, OUT blkno bigint, OUT all_visible boolean, OUT all_frozen boolean | SETOF record + pg_visibility | public | pg_visibility_map | regclass, blkno bigint, OUT all_visible boolean, OUT all_frozen boolean | record + pg_visibility | public | pg_visibility_map_summary | regclass, OUT all_visible bigint, OUT all_frozen bigint | record + pg_walinspect | public | pg_get_wal_record_info | in_lsn pg_lsn, OUT start_lsn pg_lsn, OUT 
end_lsn pg_lsn, OUT prev_lsn pg_lsn, OUT xid xid, OUT resource_manager text, OUT record_type text, OUT record_length integer, OUT main_data_length integer, OUT fpi_length integer, OUT description text, OUT block_ref text | record + pg_walinspect | public | pg_get_wal_records_info | start_lsn pg_lsn, end_lsn pg_lsn, OUT start_lsn pg_lsn, OUT end_lsn pg_lsn, OUT prev_lsn pg_lsn, OUT xid xid, OUT resource_manager text, OUT record_type text, OUT record_length integer, OUT main_data_length integer, OUT fpi_length integer, OUT description text, OUT block_ref text | SETOF record + pg_walinspect | public | pg_get_wal_records_info_till_end_of_wal | start_lsn pg_lsn, OUT start_lsn pg_lsn, OUT end_lsn pg_lsn, OUT prev_lsn pg_lsn, OUT xid xid, OUT resource_manager text, OUT record_type text, OUT record_length integer, OUT main_data_length integer, OUT fpi_length integer, OUT description text, OUT block_ref text | SETOF record + pg_walinspect | public | pg_get_wal_stats | start_lsn pg_lsn, end_lsn pg_lsn, per_record boolean, OUT "resource_manager/record_type" text, OUT count bigint, OUT count_percentage double precision, OUT record_size bigint, OUT record_size_percentage double precision, OUT fpi_size bigint, OUT fpi_size_percentage double precision, OUT combined_size bigint, OUT combined_size_percentage double precision | SETOF record + pg_walinspect | public | pg_get_wal_stats_till_end_of_wal | start_lsn pg_lsn, per_record boolean, OUT "resource_manager/record_type" text, OUT count bigint, OUT count_percentage double precision, OUT record_size bigint, OUT record_size_percentage double precision, OUT fpi_size bigint, OUT fpi_size_percentage double precision, OUT combined_size bigint, OUT combined_size_percentage double precision | SETOF record + pgaudit | public | pgaudit_ddl_command_end | | event_trigger + pgaudit | public | pgaudit_sql_drop | | event_trigger + pgcrypto | public | armor | bytea | text + pgcrypto | public | armor | bytea, text[], text[] | text + pgcrypto | 
public | crypt | text, text | text + pgcrypto | public | dearmor | text | bytea + pgcrypto | public | decrypt | bytea, bytea, text | bytea + pgcrypto | public | decrypt_iv | bytea, bytea, bytea, text | bytea + pgcrypto | public | digest | bytea, text | bytea + pgcrypto | public | digest | text, text | bytea + pgcrypto | public | encrypt | bytea, bytea, text | bytea + pgcrypto | public | encrypt_iv | bytea, bytea, bytea, text | bytea + pgcrypto | public | gen_random_bytes | integer | bytea + pgcrypto | public | gen_random_uuid | | uuid + pgcrypto | public | gen_salt | text | text + pgcrypto | public | gen_salt | text, integer | text + pgcrypto | public | hmac | bytea, bytea, text | bytea + pgcrypto | public | hmac | text, text, text | bytea + pgcrypto | public | pgp_armor_headers | text, OUT key text, OUT value text | SETOF record + pgcrypto | public | pgp_key_id | bytea | text + pgcrypto | public | pgp_pub_decrypt | bytea, bytea | text + pgcrypto | public | pgp_pub_decrypt | bytea, bytea, text | text + pgcrypto | public | pgp_pub_decrypt | bytea, bytea, text, text | text + pgcrypto | public | pgp_pub_decrypt_bytea | bytea, bytea | bytea + pgcrypto | public | pgp_pub_decrypt_bytea | bytea, bytea, text | bytea + pgcrypto | public | pgp_pub_decrypt_bytea | bytea, bytea, text, text | bytea + pgcrypto | public | pgp_pub_encrypt | text, bytea | bytea + pgcrypto | public | pgp_pub_encrypt | text, bytea, text | bytea + pgcrypto | public | pgp_pub_encrypt_bytea | bytea, bytea | bytea + pgcrypto | public | pgp_pub_encrypt_bytea | bytea, bytea, text | bytea + pgcrypto | public | pgp_sym_decrypt | bytea, text | text + pgcrypto | public | pgp_sym_decrypt | bytea, text, text | text + pgcrypto | public | pgp_sym_decrypt_bytea | bytea, text | bytea + pgcrypto | public | pgp_sym_decrypt_bytea | bytea, text, text | bytea + pgcrypto | public | pgp_sym_encrypt | text, text | bytea + pgcrypto | public | pgp_sym_encrypt | text, text, text | bytea + pgcrypto | public | 
pgp_sym_encrypt_bytea | bytea, text | bytea + pgcrypto | public | pgp_sym_encrypt_bytea | bytea, text, text | bytea + pgjwt | public | algorithm_sign | signables text, secret text, algorithm text | text + pgjwt | public | sign | payload json, secret text, algorithm text | text + pgjwt | public | try_cast_double | inp text | double precision + pgjwt | public | url_decode | data text | bytea + pgjwt | public | url_encode | data bytea | text + pgjwt | public | verify | token text, secret text, algorithm text | TABLE(header json, payload json, valid boolean) + pgmq | pgmq | _belongs_to_pgmq | table_name text | boolean + pgmq | pgmq | _ensure_pg_partman_installed | | void + pgmq | pgmq | _get_partition_col | partition_interval text | text + pgmq | pgmq | _get_pg_partman_major_version | | integer + pgmq | pgmq | _get_pg_partman_schema | | text + pgmq | pgmq | archive | queue_name text, msg_id bigint | boolean + pgmq | pgmq | archive | queue_name text, msg_ids bigint[] | SETOF bigint + pgmq | pgmq | convert_archive_partitioned | table_name text, partition_interval text, retention_interval text, leading_partition integer | void + pgmq | pgmq | create | queue_name text | void + pgmq | pgmq | create_non_partitioned | queue_name text | void + pgmq | pgmq | create_partitioned | queue_name text, partition_interval text, retention_interval text | void + pgmq | pgmq | create_unlogged | queue_name text | void + pgmq | pgmq | delete | queue_name text, msg_id bigint | boolean + pgmq | pgmq | delete | queue_name text, msg_ids bigint[] | SETOF bigint + pgmq | pgmq | detach_archive | queue_name text | void + pgmq | pgmq | drop_queue | queue_name text, partitioned boolean | boolean + pgmq | pgmq | format_table_name | queue_name text, prefix text | text + pgmq | pgmq | list_queues | | SETOF pgmq.queue_record + pgmq | pgmq | metrics | queue_name text | pgmq.metrics_result + pgmq | pgmq | metrics_all | | SETOF pgmq.metrics_result + pgmq | pgmq | pop | queue_name text | SETOF 
pgmq.message_record + pgmq | pgmq | purge_queue | queue_name text | bigint + pgmq | pgmq | read | queue_name text, vt integer, qty integer | SETOF pgmq.message_record + pgmq | pgmq | read_with_poll | queue_name text, vt integer, qty integer, max_poll_seconds integer, poll_interval_ms integer | SETOF pgmq.message_record + pgmq | pgmq | send | queue_name text, msg jsonb, delay integer | SETOF bigint + pgmq | pgmq | send_batch | queue_name text, msgs jsonb[], delay integer | SETOF bigint + pgmq | pgmq | set_vt | queue_name text, msg_id bigint, vt integer | SETOF pgmq.message_record + pgmq | pgmq | validate_queue_name | queue_name text | void + pgroonga | pgroonga | command | groongacommand text | text + pgroonga | pgroonga | command | groongacommand text, arguments text[] | text + pgroonga | pgroonga | command_escape_value | value text | text + pgroonga | pgroonga | contain_varchar_array | character varying[], character varying | boolean + pgroonga | pgroonga | escape | value bigint | text + pgroonga | pgroonga | escape | value boolean | text + pgroonga | pgroonga | escape | value double precision | text + pgroonga | pgroonga | escape | value integer | text + pgroonga | pgroonga | escape | value real | text + pgroonga | pgroonga | escape | value smallint | text + pgroonga | pgroonga | escape | value text | text + pgroonga | pgroonga | escape | value text, special_characters text | text + pgroonga | pgroonga | escape | value timestamp with time zone | text + pgroonga | pgroonga | escape | value timestamp without time zone | text + pgroonga | pgroonga | flush | indexname cstring | boolean + pgroonga | pgroonga | highlight_html | target text, keywords text[] | text + pgroonga | pgroonga | match_in_text | text, text[] | boolean + pgroonga | pgroonga | match_in_text_array | text[], text[] | boolean + pgroonga | pgroonga | match_in_varchar | character varying, character varying[] | boolean + pgroonga | pgroonga | match_jsonb | jsonb, text | boolean + pgroonga | pgroonga | 
match_positions_byte | target text, keywords text[] | integer[] + pgroonga | pgroonga | match_positions_character | target text, keywords text[] | integer[] + pgroonga | pgroonga | match_query | character varying, character varying | boolean + pgroonga | pgroonga | match_query | text, text | boolean + pgroonga | pgroonga | match_query | text[], text | boolean + pgroonga | pgroonga | match_regexp | character varying, character varying | boolean + pgroonga | pgroonga | match_regexp | text, text | boolean + pgroonga | pgroonga | match_script_jsonb | jsonb, text | boolean + pgroonga | pgroonga | match_term | target character varying, term character varying | boolean + pgroonga | pgroonga | match_term | target character varying[], term character varying | boolean + pgroonga | pgroonga | match_term | target text, term text | boolean + pgroonga | pgroonga | match_term | target text[], term text | boolean + pgroonga | pgroonga | match_text | text, text | boolean + pgroonga | pgroonga | match_text_array | text[], text | boolean + pgroonga | pgroonga | match_varchar | character varying, character varying | boolean + pgroonga | pgroonga | prefix_in_text | text, text[] | boolean + pgroonga | pgroonga | prefix_in_text_array | text[], text[] | boolean + pgroonga | pgroonga | prefix_rk_in_text | text, text[] | boolean + pgroonga | pgroonga | prefix_rk_in_text_array | text[], text[] | boolean + pgroonga | pgroonga | prefix_rk_text | text, text | boolean + pgroonga | pgroonga | prefix_rk_text_array | text[], text | boolean + pgroonga | pgroonga | prefix_text | text, text | boolean + pgroonga | pgroonga | prefix_text_array | text[], text | boolean + pgroonga | pgroonga | query_escape | query text | text + pgroonga | pgroonga | query_expand | tablename cstring, termcolumnname text, synonymscolumnname text, query text | text + pgroonga | pgroonga | query_extract_keywords | query text | text[] + pgroonga | pgroonga | query_in_text | text, text[] | boolean + pgroonga | pgroonga | 
query_in_text_array | text[], text[] | boolean + pgroonga | pgroonga | query_in_varchar | character varying, character varying[] | boolean + pgroonga | pgroonga | query_jsonb | jsonb, text | boolean + pgroonga | pgroonga | query_text | text, text | boolean + pgroonga | pgroonga | query_text_array | text[], text | boolean + pgroonga | pgroonga | query_varchar | character varying, character varying | boolean + pgroonga | pgroonga | regexp_text | text, text | boolean + pgroonga | pgroonga | regexp_varchar | character varying, character varying | boolean + pgroonga | pgroonga | score | "row" record | double precision + pgroonga | pgroonga | script_jsonb | jsonb, text | boolean + pgroonga | pgroonga | script_text | text, text | boolean + pgroonga | pgroonga | script_text_array | text[], text | boolean + pgroonga | pgroonga | script_varchar | character varying, character varying | boolean + pgroonga | pgroonga | similar_text | text, text | boolean + pgroonga | pgroonga | similar_text_array | text[], text | boolean + pgroonga | pgroonga | similar_varchar | character varying, character varying | boolean + pgroonga | pgroonga | snippet_html | target text, keywords text[], width integer | text[] + pgroonga | pgroonga | table_name | indexname cstring | text + pgroonga | public | pgroonga_command | groongacommand text | text + pgroonga | public | pgroonga_command | groongacommand text, arguments text[] | text + pgroonga | public | pgroonga_command_escape_value | value text | text + pgroonga | public | pgroonga_contain_varchar_array | character varying[], character varying | boolean + pgroonga | public | pgroonga_equal_query_text_array | targets text[], query text | boolean + pgroonga | public | pgroonga_equal_query_varchar_array | targets character varying[], query text | boolean + pgroonga | public | pgroonga_equal_text | target text, other text | boolean + pgroonga | public | pgroonga_equal_text_condition | target text, condition pgroonga_full_text_search_condition | boolean 
+ pgroonga | public | pgroonga_equal_varchar | target character varying, other character varying | boolean + pgroonga | public | pgroonga_equal_varchar_condition | target character varying, condition pgroonga_full_text_search_condition | boolean + pgroonga | public | pgroonga_escape | value bigint | text + pgroonga | public | pgroonga_escape | value boolean | text + pgroonga | public | pgroonga_escape | value double precision | text + pgroonga | public | pgroonga_escape | value integer | text + pgroonga | public | pgroonga_escape | value real | text + pgroonga | public | pgroonga_escape | value smallint | text + pgroonga | public | pgroonga_escape | value text | text + pgroonga | public | pgroonga_escape | value text, special_characters text | text + pgroonga | public | pgroonga_escape | value timestamp with time zone | text + pgroonga | public | pgroonga_escape | value timestamp without time zone | text + pgroonga | public | pgroonga_flush | indexname cstring | boolean + pgroonga | public | pgroonga_handler | internal | index_am_handler + pgroonga | public | pgroonga_highlight_html | target text, keywords text[] | text + pgroonga | public | pgroonga_highlight_html | target text, keywords text[], indexname cstring | text + pgroonga | public | pgroonga_highlight_html | targets text[], keywords text[] | text[] + pgroonga | public | pgroonga_highlight_html | targets text[], keywords text[], indexname cstring | text[] + pgroonga | public | pgroonga_index_column_name | indexname cstring, columnindex integer | text + pgroonga | public | pgroonga_index_column_name | indexname cstring, columnname text | text + pgroonga | public | pgroonga_is_writable | | boolean + pgroonga | public | pgroonga_match_in_text | text, text[] | boolean + pgroonga | public | pgroonga_match_in_text_array | text[], text[] | boolean + pgroonga | public | pgroonga_match_in_varchar | character varying, character varying[] | boolean + pgroonga | public | pgroonga_match_jsonb | jsonb, text | boolean + 
pgroonga | public | pgroonga_match_positions_byte | target text, keywords text[] | integer[] + pgroonga | public | pgroonga_match_positions_byte | target text, keywords text[], indexname cstring | integer[] + pgroonga | public | pgroonga_match_positions_character | target text, keywords text[] | integer[] + pgroonga | public | pgroonga_match_positions_character | target text, keywords text[], indexname cstring | integer[] + pgroonga | public | pgroonga_match_query | character varying, character varying | boolean + pgroonga | public | pgroonga_match_query | text, text | boolean + pgroonga | public | pgroonga_match_query | text[], text | boolean + pgroonga | public | pgroonga_match_regexp | character varying, character varying | boolean + pgroonga | public | pgroonga_match_regexp | text, text | boolean + pgroonga | public | pgroonga_match_script_jsonb | jsonb, text | boolean + pgroonga | public | pgroonga_match_term | target character varying, term character varying | boolean + pgroonga | public | pgroonga_match_term | target character varying[], term character varying | boolean + pgroonga | public | pgroonga_match_term | target text, term text | boolean + pgroonga | public | pgroonga_match_term | target text[], term text | boolean + pgroonga | public | pgroonga_match_text | text, text | boolean + pgroonga | public | pgroonga_match_text_array | text[], text | boolean + pgroonga | public | pgroonga_match_text_array_condition | target text[], condition pgroonga_full_text_search_condition | boolean + pgroonga | public | pgroonga_match_text_array_condition_with_scorers | target text[], condition pgroonga_full_text_search_condition_with_scorers | boolean + pgroonga | public | pgroonga_match_text_condition | target text, condition pgroonga_full_text_search_condition | boolean + pgroonga | public | pgroonga_match_text_condition_with_scorers | target text, condition pgroonga_full_text_search_condition_with_scorers | boolean + pgroonga | public | pgroonga_match_varchar | 
character varying, character varying | boolean + pgroonga | public | pgroonga_match_varchar_condition | target character varying, condition pgroonga_full_text_search_condition | boolean + pgroonga | public | pgroonga_match_varchar_condition_with_scorers | target character varying, condition pgroonga_full_text_search_condition_with_scorers | boolean + pgroonga | public | pgroonga_normalize | target text | text + pgroonga | public | pgroonga_normalize | target text, normalizername text | text + pgroonga | public | pgroonga_not_prefix_in_text | text, text[] | boolean + pgroonga | public | pgroonga_prefix_in_text | text, text[] | boolean + pgroonga | public | pgroonga_prefix_in_text_array | text[], text[] | boolean + pgroonga | public | pgroonga_prefix_in_varchar | character varying, character varying[] | boolean + pgroonga | public | pgroonga_prefix_in_varchar_array | character varying[], character varying[] | boolean + pgroonga | public | pgroonga_prefix_rk_in_text | text, text[] | boolean + pgroonga | public | pgroonga_prefix_rk_in_text_array | text[], text[] | boolean + pgroonga | public | pgroonga_prefix_rk_in_varchar | character varying, character varying[] | boolean + pgroonga | public | pgroonga_prefix_rk_in_varchar_array | character varying[], character varying[] | boolean + pgroonga | public | pgroonga_prefix_rk_text | text, text | boolean + pgroonga | public | pgroonga_prefix_rk_text_array | text[], text | boolean + pgroonga | public | pgroonga_prefix_rk_varchar | character varying, character varying | boolean + pgroonga | public | pgroonga_prefix_rk_varchar_array | character varying[], character varying | boolean + pgroonga | public | pgroonga_prefix_text | text, text | boolean + pgroonga | public | pgroonga_prefix_text_array | text[], text | boolean + pgroonga | public | pgroonga_prefix_text_condition | text, condition pgroonga_full_text_search_condition | boolean + pgroonga | public | pgroonga_prefix_varchar | character varying, character varying | 
boolean + pgroonga | public | pgroonga_prefix_varchar_array | character varying[], character varying | boolean + pgroonga | public | pgroonga_prefix_varchar_condition | target character varying, conditoin pgroonga_full_text_search_condition | boolean + pgroonga | public | pgroonga_query_escape | query text | text + pgroonga | public | pgroonga_query_expand | tablename cstring, termcolumnname text, synonymscolumnname text, query text | text + pgroonga | public | pgroonga_query_extract_keywords | query text, index_name text | text[] + pgroonga | public | pgroonga_query_in_text | text, text[] | boolean + pgroonga | public | pgroonga_query_in_text_array | text[], text[] | boolean + pgroonga | public | pgroonga_query_in_varchar | character varying, character varying[] | boolean + pgroonga | public | pgroonga_query_jsonb | jsonb, text | boolean + pgroonga | public | pgroonga_query_text | text, text | boolean + pgroonga | public | pgroonga_query_text_array | text[], text | boolean + pgroonga | public | pgroonga_query_text_array_condition | targets text[], condition pgroonga_full_text_search_condition | boolean + pgroonga | public | pgroonga_query_text_array_condition_with_scorers | targets text[], condition pgroonga_full_text_search_condition_with_scorers | boolean + pgroonga | public | pgroonga_query_text_condition | target text, condition pgroonga_full_text_search_condition | boolean + pgroonga | public | pgroonga_query_text_condition_with_scorers | target text, condition pgroonga_full_text_search_condition_with_scorers | boolean + pgroonga | public | pgroonga_query_varchar | character varying, character varying | boolean + pgroonga | public | pgroonga_query_varchar_condition | target character varying, condition pgroonga_full_text_search_condition | boolean + pgroonga | public | pgroonga_query_varchar_condition_with_scorers | target character varying, condition pgroonga_full_text_search_condition_with_scorers | boolean + pgroonga | public | pgroonga_regexp_in_text | 
text, text[] | boolean + pgroonga | public | pgroonga_regexp_in_varchar | character varying, character varying[] | boolean + pgroonga | public | pgroonga_regexp_text | text, text | boolean + pgroonga | public | pgroonga_regexp_varchar | character varying, character varying | boolean + pgroonga | public | pgroonga_result_to_jsonb_objects | result jsonb | jsonb + pgroonga | public | pgroonga_result_to_recordset | result jsonb | SETOF record + pgroonga | public | pgroonga_score | "row" record | double precision + pgroonga | public | pgroonga_score | tableoid oid, ctid tid | double precision + pgroonga | public | pgroonga_script_jsonb | jsonb, text | boolean + pgroonga | public | pgroonga_script_text | text, text | boolean + pgroonga | public | pgroonga_script_text_array | text[], text | boolean + pgroonga | public | pgroonga_script_varchar | character varying, character varying | boolean + pgroonga | public | pgroonga_set_writable | newwritable boolean | boolean + pgroonga | public | pgroonga_similar_text | text, text | boolean + pgroonga | public | pgroonga_similar_text_array | text[], text | boolean + pgroonga | public | pgroonga_similar_varchar | character varying, character varying | boolean + pgroonga | public | pgroonga_snippet_html | target text, keywords text[], width integer | text[] + pgroonga | public | pgroonga_table_name | indexname cstring | text + pgroonga | public | pgroonga_tokenize | target text, VARIADIC options text[] | json[] + pgroonga | public | pgroonga_vacuum | | boolean + pgroonga | public | pgroonga_wal_apply | | bigint + pgroonga | public | pgroonga_wal_apply | indexname cstring | bigint + pgroonga | public | pgroonga_wal_set_applied_position | | boolean + pgroonga | public | pgroonga_wal_set_applied_position | block bigint, "offset" bigint | boolean + pgroonga | public | pgroonga_wal_set_applied_position | indexname cstring | boolean + pgroonga | public | pgroonga_wal_set_applied_position | indexname cstring, block bigint, "offset" bigint 
| boolean + pgroonga | public | pgroonga_wal_status | | TABLE(name text, oid oid, current_block bigint, current_offset bigint, current_size bigint, last_block bigint, last_offset bigint, last_size bigint) + pgroonga | public | pgroonga_wal_truncate | | bigint + pgroonga | public | pgroonga_wal_truncate | indexname cstring | bigint + pgroonga_database | public | pgroonga_database_remove | | boolean + pgrouting | public | _pgr_alphashape | text, alpha double precision, OUT seq1 bigint, OUT textgeom text | SETOF record + pgrouting | public | _pgr_array_reverse | anyarray | anyarray + pgrouting | public | _pgr_articulationpoints | edges_sql text, OUT seq integer, OUT node bigint | SETOF record + pgrouting | public | _pgr_astar | edges_sql text, combinations_sql text, directed boolean, heuristic integer, factor double precision, epsilon double precision, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_astar | edges_sql text, start_vids anyarray, end_vids anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, only_cost boolean, normal boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_bdastar | text, anyarray, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_bdastar | text, text, directed boolean, heuristic integer, factor double precision, epsilon double precision, only_cost boolean, OUT seq 
integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_bddijkstra | text, anyarray, anyarray, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_bddijkstra | text, text, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_bellmanford | edges_sql text, combinations_sql text, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_bellmanford | edges_sql text, from_vids anyarray, to_vids anyarray, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_biconnectedcomponents | edges_sql text, OUT seq bigint, OUT component bigint, OUT edge bigint | SETOF record + pgrouting | public | _pgr_binarybreadthfirstsearch | edges_sql text, combinations_sql text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_binarybreadthfirstsearch | edges_sql text, from_vids anyarray, to_vids anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT 
end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_bipartite | edges_sql text, OUT node bigint, OUT color bigint | SETOF record + pgrouting | public | _pgr_boost_version | | text + pgrouting | public | _pgr_breadthfirstsearch | edges_sql text, from_vids anyarray, max_depth bigint, directed boolean, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_bridges | edges_sql text, OUT seq integer, OUT edge bigint | SETOF record + pgrouting | public | _pgr_build_type | | text + pgrouting | public | _pgr_checkcolumn | text, text, text, is_optional boolean, dryrun boolean | boolean + pgrouting | public | _pgr_checkquery | text | text + pgrouting | public | _pgr_checkverttab | vertname text, columnsarr text[], reporterrs integer, fnname text, OUT sname text, OUT vname text | record + pgrouting | public | _pgr_chinesepostman | edges_sql text, only_cost boolean, OUT seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_compilation_date | | text + pgrouting | public | _pgr_compiler_version | | text + pgrouting | public | _pgr_connectedcomponents | edges_sql text, OUT seq bigint, OUT component bigint, OUT node bigint | SETOF record + pgrouting | public | _pgr_contraction | edges_sql text, contraction_order bigint[], max_cycles integer, forbidden_vertices bigint[], directed boolean, OUT type text, OUT id bigint, OUT contracted_vertices bigint[], OUT source bigint, OUT target bigint, OUT cost double precision | SETOF record + pgrouting | public | _pgr_createindex | sname text, tname text, colname text, indext text, reporterrs integer, fnname text | void + pgrouting | public | _pgr_createindex | tabname text, colname text, indext text, reporterrs integer, 
fnname text | void + pgrouting | public | _pgr_cuthillmckeeordering | text, OUT seq bigint, OUT node bigint | SETOF record + pgrouting | public | _pgr_dagshortestpath | text, anyarray, anyarray, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_dagshortestpath | text, text, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_depthfirstsearch | edges_sql text, root_vids anyarray, directed boolean, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_dijkstra | edges_sql text, combinations_sql text, directed boolean, only_cost boolean, n_goals bigint, global boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_dijkstra | edges_sql text, combinations_sql text, directed boolean, only_cost boolean, normal boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_dijkstra | edges_sql text, start_vids anyarray, end_vids anyarray, directed boolean, only_cost boolean, normal boolean, n_goals bigint, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_dijkstra | edges_sql text, start_vids anyarray, end_vids anyarray, 
directed boolean, only_cost boolean, normal boolean, n_goals bigint, global boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_dijkstranear | text, anyarray, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_dijkstranear | text, anyarray, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_dijkstranear | text, bigint, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_dijkstravia | edges_sql text, via_vids anyarray, directed boolean, strict boolean, u_turn_on_edge boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record + pgrouting | public | _pgr_drivingdistance | edges_sql text, start_vids anyarray, distance double precision, directed boolean, equicost boolean, OUT seq integer, OUT from_v bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_edgecoloring | edges_sql text, OUT edge_id bigint, OUT color_id bigint | SETOF record + pgrouting | public | _pgr_edgedisjointpaths | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq 
integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_edgedisjointpaths | text, text, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_edwardmoore | edges_sql text, combinations_sql text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_edwardmoore | edges_sql text, from_vids anyarray, to_vids anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_endpoint | g geometry | geometry + pgrouting | public | _pgr_floydwarshall | edges_sql text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_get_statement | o_sql text | text + pgrouting | public | _pgr_getcolumnname | sname text, tname text, col text, reporterrs integer, fnname text | text + pgrouting | public | _pgr_getcolumnname | tab text, col text, reporterrs integer, fnname text | text + pgrouting | public | _pgr_getcolumntype | sname text, tname text, cname text, reporterrs integer, fnname text | text + pgrouting | public | _pgr_getcolumntype | tab text, col text, reporterrs integer, fnname text | text + pgrouting | public | _pgr_gettablename | tab text, reporterrs integer, fnname text, OUT sname text, OUT tname text | record + pgrouting | public | _pgr_git_hash | | text + pgrouting | public | _pgr_hawickcircuits | text, OUT seq integer, 
OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_iscolumnindexed | sname text, tname text, cname text, reporterrs integer, fnname text | boolean + pgrouting | public | _pgr_iscolumnindexed | tab text, col text, reporterrs integer, fnname text | boolean + pgrouting | public | _pgr_iscolumnintable | tab text, col text | boolean + pgrouting | public | _pgr_isplanar | text | boolean + pgrouting | public | _pgr_johnson | edges_sql text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_kruskal | text, anyarray, fn_suffix text, max_depth bigint, distance double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_ksp | edges_sql text, start_vid bigint, end_vid bigint, k integer, directed boolean, heap_paths boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_lengauertarjandominatortree | edges_sql text, root_vid bigint, OUT seq integer, OUT vid bigint, OUT idom bigint | SETOF record + pgrouting | public | _pgr_lib_version | | text + pgrouting | public | _pgr_linegraph | text, directed boolean, OUT seq integer, OUT source bigint, OUT target bigint, OUT cost double precision, OUT reverse_cost double precision | SETOF record + pgrouting | public | _pgr_linegraphfull | text, OUT seq integer, OUT source bigint, OUT target bigint, OUT cost double precision, OUT edge bigint | SETOF record + pgrouting | public | _pgr_makeconnected | text, OUT seq bigint, OUT start_vid bigint, OUT end_vid bigint | SETOF record + pgrouting | public | 
_pgr_maxcardinalitymatch | edges_sql text, directed boolean, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint | SETOF record + pgrouting | public | _pgr_maxflow | edges_sql text, combinations_sql text, algorithm integer, only_flow boolean, OUT seq integer, OUT edge_id bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | _pgr_maxflow | edges_sql text, sources anyarray, targets anyarray, algorithm integer, only_flow boolean, OUT seq integer, OUT edge_id bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | _pgr_maxflowmincost | edges_sql text, combinations_sql text, only_cost boolean, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_maxflowmincost | edges_sql text, sources anyarray, targets anyarray, only_cost boolean, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_msg | msgkind integer, fnname text, msg text | void + pgrouting | public | _pgr_onerror | errcond boolean, reporterrs integer, fnname text, msgerr text, hinto text, msgok text | void + pgrouting | public | _pgr_operating_system | | text + pgrouting | public | _pgr_parameter_check | fn text, sql text, big boolean | boolean + pgrouting | public | _pgr_pgsql_version | | text + pgrouting | public | _pgr_pickdeliver | text, text, text, factor double precision, max_cycles integer, initial_sol integer, OUT seq integer, OUT vehicle_seq integer, OUT vehicle_id bigint, OUT stop_seq integer, OUT stop_type integer, OUT stop_id bigint, OUT order_id bigint, OUT cargo double precision, OUT travel_time double 
precision, OUT arrival_time double precision, OUT wait_time double precision, OUT service_time double precision, OUT departure_time double precision | SETOF record + pgrouting | public | _pgr_pickdelivereuclidean | text, text, factor double precision, max_cycles integer, initial_sol integer, OUT seq integer, OUT vehicle_seq integer, OUT vehicle_id bigint, OUT stop_seq integer, OUT stop_type integer, OUT order_id bigint, OUT cargo double precision, OUT travel_time double precision, OUT arrival_time double precision, OUT wait_time double precision, OUT service_time double precision, OUT departure_time double precision | SETOF record + pgrouting | public | _pgr_pointtoid | point geometry, tolerance double precision, vertname text, srid integer | bigint + pgrouting | public | _pgr_prim | text, anyarray, order_by text, max_depth bigint, distance double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_quote_ident | idname text | text + pgrouting | public | _pgr_sequentialvertexcoloring | edges_sql text, OUT vertex_id bigint, OUT color_id bigint | SETOF record + pgrouting | public | _pgr_startpoint | g geometry | geometry + pgrouting | public | _pgr_stoerwagner | edges_sql text, OUT seq integer, OUT edge bigint, OUT cost double precision, OUT mincut double precision | SETOF record + pgrouting | public | _pgr_strongcomponents | edges_sql text, OUT seq bigint, OUT component bigint, OUT node bigint | SETOF record + pgrouting | public | _pgr_topologicalsort | edges_sql text, OUT seq integer, OUT sorted_v bigint | SETOF record + pgrouting | public | _pgr_transitiveclosure | edges_sql text, OUT seq integer, OUT vid bigint, OUT target_array bigint[] | SETOF record + pgrouting | public | _pgr_trsp | sql text, source_eid integer, source_pos double precision, target_eid integer, target_pos double precision, directed boolean, 
has_reverse_cost boolean, turn_restrict_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT cost double precision | SETOF record + pgrouting | public | _pgr_trsp | text, text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_trsp | text, text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_trsp | text, text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_trsp | text, text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_trsp_withpoints | text, text, text, anyarray, anyarray, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT departure bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_trsp_withpoints | text, text, text, text, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT departure bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_trspvia | text, text, anyarray, boolean, boolean, boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge 
bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record + pgrouting | public | _pgr_trspvia_withpoints | text, text, text, anyarray, boolean, boolean, boolean, character, boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record + pgrouting | public | _pgr_trspviavertices | sql text, vids integer[], directed boolean, has_rcost boolean, turn_restrict_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT id3 integer, OUT cost double precision | SETOF record + pgrouting | public | _pgr_tsp | matrix_row_sql text, start_id bigint, end_id bigint, max_processing_time double precision, tries_per_temperature integer, max_changes_per_temperature integer, max_consecutive_non_changes integer, initial_temperature double precision, final_temperature double precision, cooling_factor double precision, randomize boolean, OUT seq integer, OUT node bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_tspeuclidean | coordinates_sql text, start_id bigint, end_id bigint, max_processing_time double precision, tries_per_temperature integer, max_changes_per_temperature integer, max_consecutive_non_changes integer, initial_temperature double precision, final_temperature double precision, cooling_factor double precision, randomize boolean, OUT seq integer, OUT node bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_turnrestrictedpath | text, text, bigint, bigint, integer, directed boolean, heap_paths boolean, stop_on_first boolean, strict boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + 
pgrouting | public | _pgr_versionless | v1 text, v2 text | boolean + pgrouting | public | _pgr_vrponedepot | text, text, text, integer, OUT seq integer, OUT vehicle_seq integer, OUT vehicle_id bigint, OUT stop_seq integer, OUT stop_type integer, OUT stop_id bigint, OUT order_id bigint, OUT cargo double precision, OUT travel_time double precision, OUT arrival_time double precision, OUT wait_time double precision, OUT service_time double precision, OUT departure_time double precision | SETOF record + pgrouting | public | _pgr_withpoints | edges_sql text, points_sql text, combinations_sql text, directed boolean, driving_side character, details boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_pid bigint, OUT end_pid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_withpoints | edges_sql text, points_sql text, start_pids anyarray, end_pids anyarray, directed boolean, driving_side character, details boolean, only_cost boolean, normal boolean, OUT seq integer, OUT path_seq integer, OUT start_pid bigint, OUT end_pid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_withpointsdd | edges_sql text, points_sql text, start_pid anyarray, distance double precision, directed boolean, driving_side character, details boolean, equicost boolean, OUT seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_withpointsksp | edges_sql text, points_sql text, start_pid bigint, end_pid bigint, k integer, directed boolean, heap_paths boolean, driving_side character, details boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | 
_pgr_withpointsvia | sql text, via_edges bigint[], fraction double precision[], directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record + pgrouting | public | _pgr_withpointsvia | text, text, anyarray, boolean, boolean, boolean, character, boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record + pgrouting | public | _trsp | text, text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _v4trsp | text, text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _v4trsp | text, text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_alphashape | geometry, alpha double precision | geometry + pgrouting | public | pgr_analyzegraph | text, double precision, the_geom text, id text, source text, target text, rows_where text | character varying + pgrouting | public | pgr_analyzeoneway | text, text[], text[], text[], text[], two_way_if_null boolean, oneway text, source text, target text | text + pgrouting | public | pgr_articulationpoints | text, OUT node bigint | SETOF bigint + pgrouting | public | pgr_astar 
| text, anyarray, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_astar | text, anyarray, bigint, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_astar | text, bigint, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_astar | text, bigint, bigint, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_astar | text, text, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_astarcost | text, anyarray, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_astarcost | text, anyarray, bigint, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double 
precision | SETOF record + pgrouting | public | pgr_astarcost | text, bigint, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_astarcost | text, bigint, bigint, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_astarcost | text, text, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_astarcostmatrix | text, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastar | text, anyarray, anyarray, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastar | text, anyarray, bigint, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastar | text, bigint, anyarray, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastar | text, bigint, bigint, directed boolean, heuristic integer, factor numeric, epsilon 
numeric, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastar | text, text, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastarcost | text, anyarray, anyarray, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastarcost | text, anyarray, bigint, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastarcost | text, bigint, anyarray, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastarcost | text, bigint, bigint, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastarcost | text, text, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastarcostmatrix | text, anyarray, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstra | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double 
precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstra | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstra | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstra | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstra | text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstracost | text, anyarray, anyarray, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstracost | text, anyarray, bigint, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstracost | text, bigint, anyarray, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstracost | text, bigint, bigint, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstracost | text, text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstracostmatrix | text, anyarray, directed boolean, OUT start_vid bigint, OUT 
end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bellmanford | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bellmanford | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bellmanford | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bellmanford | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bellmanford | text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_biconnectedcomponents | text, OUT seq bigint, OUT component bigint, OUT edge bigint | SETOF record + pgrouting | public | pgr_binarybreadthfirstsearch | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_binarybreadthfirstsearch | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + 
pgrouting | public | pgr_binarybreadthfirstsearch | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_binarybreadthfirstsearch | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_binarybreadthfirstsearch | text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bipartite | text, OUT vertex_id bigint, OUT color_id bigint | SETOF record + pgrouting | public | pgr_boykovkolmogorov | text, anyarray, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_boykovkolmogorov | text, anyarray, bigint, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_boykovkolmogorov | text, bigint, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_boykovkolmogorov | text, bigint, bigint, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_boykovkolmogorov | text, text, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_breadthfirstsearch | text, anyarray, max_depth bigint, directed boolean, OUT 
seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_breadthfirstsearch | text, bigint, max_depth bigint, directed boolean, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bridges | text, OUT edge bigint | SETOF bigint + pgrouting | public | pgr_chinesepostman | text, OUT seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_chinesepostmancost | text | double precision + pgrouting | public | pgr_connectedcomponents | text, OUT seq bigint, OUT component bigint, OUT node bigint | SETOF record + pgrouting | public | pgr_contraction | text, bigint[], max_cycles integer, forbidden_vertices bigint[], directed boolean, OUT type text, OUT id bigint, OUT contracted_vertices bigint[], OUT source bigint, OUT target bigint, OUT cost double precision | SETOF record + pgrouting | public | pgr_createtopology | text, double precision, the_geom text, id text, source text, target text, rows_where text, clean boolean | character varying + pgrouting | public | pgr_createverticestable | text, the_geom text, source text, target text, rows_where text | text + pgrouting | public | pgr_cuthillmckeeordering | text, OUT seq bigint, OUT node bigint | SETOF record + pgrouting | public | pgr_dagshortestpath | text, anyarray, anyarray, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dagshortestpath | text, anyarray, bigint, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dagshortestpath | text, 
bigint, anyarray, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dagshortestpath | text, bigint, bigint, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dagshortestpath | text, text, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_degree | text, text, dryrun boolean, OUT node bigint, OUT degree bigint | SETOF record + pgrouting | public | pgr_depthfirstsearch | text, anyarray, directed boolean, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_depthfirstsearch | text, bigint, directed boolean, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstra | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstra | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstra | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | 
pgr_dijkstra | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstra | text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstracost | text, anyarray, anyarray, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstracost | text, anyarray, bigint, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstracost | text, bigint, anyarray, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstracost | text, bigint, bigint, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstracost | text, text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstracostmatrix | text, anyarray, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstranear | text, anyarray, anyarray, directed boolean, cap bigint, global boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstranear | text, anyarray, bigint, directed boolean, cap bigint, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double 
precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstranear | text, bigint, anyarray, directed boolean, cap bigint, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstranear | text, text, directed boolean, cap bigint, global boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstranearcost | text, anyarray, anyarray, directed boolean, cap bigint, global boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstranearcost | text, anyarray, bigint, directed boolean, cap bigint, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstranearcost | text, bigint, anyarray, directed boolean, cap bigint, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstranearcost | text, text, directed boolean, cap bigint, global boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstravia | text, anyarray, directed boolean, strict boolean, u_turn_on_edge boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record + pgrouting | public | pgr_drivingdistance | text, anyarray, double precision, directed boolean, equicost boolean, OUT seq integer, OUT from_v bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | 
SETOF record + pgrouting | public | pgr_drivingdistance | text, bigint, double precision, directed boolean, OUT seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_edgecoloring | text, OUT edge_id bigint, OUT color_id bigint | SETOF record + pgrouting | public | pgr_edgedisjointpaths | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_edgedisjointpaths | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_edgedisjointpaths | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_edgedisjointpaths | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_edgedisjointpaths | text, text, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_edmondskarp | text, anyarray, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_edmondskarp | text, anyarray, bigint, OUT seq 
integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_edmondskarp | text, bigint, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_edmondskarp | text, bigint, bigint, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_edmondskarp | text, text, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_edwardmoore | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_edwardmoore | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_edwardmoore | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_edwardmoore | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_edwardmoore | text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | 
pgr_extractvertices | text, dryrun boolean, OUT id bigint, OUT in_edges bigint[], OUT out_edges bigint[], OUT x double precision, OUT y double precision, OUT geom geometry | SETOF record + pgrouting | public | pgr_findcloseedges | text, geometry, double precision, cap integer, partial boolean, dryrun boolean, OUT edge_id bigint, OUT fraction double precision, OUT side character, OUT distance double precision, OUT geom geometry, OUT edge geometry | SETOF record + pgrouting | public | pgr_findcloseedges | text, geometry[], double precision, cap integer, partial boolean, dryrun boolean, OUT edge_id bigint, OUT fraction double precision, OUT side character, OUT distance double precision, OUT geom geometry, OUT edge geometry | SETOF record + pgrouting | public | pgr_floydwarshall | text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_full_version | OUT version text, OUT build_type text, OUT compile_date text, OUT library text, OUT system text, OUT postgresql text, OUT compiler text, OUT boost text, OUT hash text | record + pgrouting | public | pgr_hawickcircuits | text, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_isplanar | text | boolean + pgrouting | public | pgr_johnson | text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_kruskal | text, OUT edge bigint, OUT cost double precision | SETOF record + pgrouting | public | pgr_kruskalbfs | text, anyarray, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_kruskalbfs | text, bigint, max_depth bigint, OUT seq 
bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_kruskaldd | text, anyarray, double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_kruskaldd | text, anyarray, numeric, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_kruskaldd | text, bigint, double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_kruskaldd | text, bigint, numeric, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_kruskaldfs | text, anyarray, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_kruskaldfs | text, bigint, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_ksp | text, bigint, bigint, integer, directed boolean, heap_paths boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_lengauertarjandominatortree | text, bigint, OUT seq integer, OUT vertex_id bigint, OUT idom bigint | SETOF record + pgrouting | public | pgr_linegraph | text, 
directed boolean, OUT seq integer, OUT source bigint, OUT target bigint, OUT cost double precision, OUT reverse_cost double precision | SETOF record + pgrouting | public | pgr_linegraphfull | text, OUT seq integer, OUT source bigint, OUT target bigint, OUT cost double precision, OUT edge bigint | SETOF record + pgrouting | public | pgr_makeconnected | text, OUT seq bigint, OUT start_vid bigint, OUT end_vid bigint | SETOF record + pgrouting | public | pgr_maxcardinalitymatch | text, OUT edge bigint | SETOF bigint + pgrouting | public | pgr_maxcardinalitymatch | text, directed boolean, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint | SETOF record + pgrouting | public | pgr_maxflow | text, anyarray, anyarray | bigint + pgrouting | public | pgr_maxflow | text, anyarray, bigint | bigint + pgrouting | public | pgr_maxflow | text, bigint, anyarray | bigint + pgrouting | public | pgr_maxflow | text, bigint, bigint | bigint + pgrouting | public | pgr_maxflow | text, text | bigint + pgrouting | public | pgr_maxflowmincost | text, anyarray, anyarray, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_maxflowmincost | text, anyarray, bigint, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_maxflowmincost | text, bigint, anyarray, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_maxflowmincost | text, bigint, bigint, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double 
precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_maxflowmincost | text, text, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_maxflowmincost_cost | text, anyarray, anyarray | double precision + pgrouting | public | pgr_maxflowmincost_cost | text, anyarray, bigint | double precision + pgrouting | public | pgr_maxflowmincost_cost | text, bigint, anyarray | double precision + pgrouting | public | pgr_maxflowmincost_cost | text, bigint, bigint | double precision + pgrouting | public | pgr_maxflowmincost_cost | text, text | double precision + pgrouting | public | pgr_nodenetwork | text, double precision, id text, the_geom text, table_ending text, rows_where text, outall boolean | text + pgrouting | public | pgr_pickdeliver | text, text, text, factor double precision, max_cycles integer, initial_sol integer, OUT seq integer, OUT vehicle_seq integer, OUT vehicle_id bigint, OUT stop_seq integer, OUT stop_type integer, OUT stop_id bigint, OUT order_id bigint, OUT cargo double precision, OUT travel_time double precision, OUT arrival_time double precision, OUT wait_time double precision, OUT service_time double precision, OUT departure_time double precision | SETOF record + pgrouting | public | pgr_pickdelivereuclidean | text, text, factor double precision, max_cycles integer, initial_sol integer, OUT seq integer, OUT vehicle_seq integer, OUT vehicle_id bigint, OUT stop_seq integer, OUT stop_type integer, OUT order_id bigint, OUT cargo double precision, OUT travel_time double precision, OUT arrival_time double precision, OUT wait_time double precision, OUT service_time double precision, OUT departure_time double precision | SETOF record + pgrouting | public | pgr_prim | text, OUT edge bigint, OUT cost double precision | SETOF record + pgrouting | public | pgr_primbfs | text, 
anyarray, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_primbfs | text, bigint, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_primdd | text, anyarray, double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_primdd | text, anyarray, numeric, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_primdd | text, bigint, double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_primdd | text, bigint, numeric, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_primdfs | text, anyarray, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_primdfs | text, bigint, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_pushrelabel | text, anyarray, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + 
pgrouting | public | pgr_pushrelabel | text, anyarray, bigint, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_pushrelabel | text, bigint, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_pushrelabel | text, bigint, bigint, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_pushrelabel | text, text, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_sequentialvertexcoloring | text, OUT vertex_id bigint, OUT color_id bigint | SETOF record + pgrouting | public | pgr_stoerwagner | text, OUT seq integer, OUT edge bigint, OUT cost double precision, OUT mincut double precision | SETOF record + pgrouting | public | pgr_strongcomponents | text, OUT seq bigint, OUT component bigint, OUT node bigint | SETOF record + pgrouting | public | pgr_topologicalsort | text, OUT seq integer, OUT sorted_v bigint | SETOF record + pgrouting | public | pgr_transitiveclosure | text, OUT seq integer, OUT vid bigint, OUT target_array bigint[] | SETOF record + pgrouting | public | pgr_trsp | text, integer, double precision, integer, double precision, boolean, boolean, turn_restrict_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT cost double precision | SETOF record + pgrouting | public | pgr_trsp | text, integer, integer, boolean, boolean, restrictions_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT cost double precision | SETOF record + pgrouting | public | pgr_trsp | text, text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT 
node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_trsp | text, text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_trsp | text, text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_trsp | text, text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_trsp | text, text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_trsp_withpoints | text, text, text, anyarray, anyarray, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_trsp_withpoints | text, text, text, anyarray, bigint, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_trsp_withpoints | text, text, text, bigint, anyarray, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, 
OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_trsp_withpoints | text, text, text, bigint, bigint, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_trsp_withpoints | text, text, text, text, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_trspvia | text, text, anyarray, directed boolean, strict boolean, u_turn_on_edge boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record + pgrouting | public | pgr_trspvia_withpoints | text, text, text, anyarray, directed boolean, strict boolean, u_turn_on_edge boolean, driving_side character, details boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record + pgrouting | public | pgr_trspviaedges | text, integer[], double precision[], boolean, boolean, turn_restrict_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT id3 integer, OUT cost double precision | SETOF record + pgrouting | public | pgr_trspviavertices | text, anyarray, boolean, boolean, restrictions_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT id3 integer, OUT cost double precision | 
SETOF record + pgrouting | public | pgr_tsp | text, start_id bigint, end_id bigint, max_processing_time double precision, tries_per_temperature integer, max_changes_per_temperature integer, max_consecutive_non_changes integer, initial_temperature double precision, final_temperature double precision, cooling_factor double precision, randomize boolean, OUT seq integer, OUT node bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_tspeuclidean | text, start_id bigint, end_id bigint, max_processing_time double precision, tries_per_temperature integer, max_changes_per_temperature integer, max_consecutive_non_changes integer, initial_temperature double precision, final_temperature double precision, cooling_factor double precision, randomize boolean, OUT seq integer, OUT node bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_turnrestrictedpath | text, text, bigint, bigint, integer, directed boolean, heap_paths boolean, stop_on_first boolean, strict boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_version | | text + pgrouting | public | pgr_vrponedepot | text, text, text, integer, OUT oid integer, OUT opos integer, OUT vid integer, OUT tarrival integer, OUT tdepart integer | SETOF record + pgrouting | public | pgr_withpoints | text, text, anyarray, anyarray, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_pid bigint, OUT end_pid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpoints | text, text, anyarray, bigint, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_pid bigint, OUT node bigint, OUT edge 
bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpoints | text, text, bigint, anyarray, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT end_pid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpoints | text, text, bigint, bigint, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpoints | text, text, text, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_pid bigint, OUT end_pid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpointscost | text, text, anyarray, anyarray, directed boolean, driving_side character, OUT start_pid bigint, OUT end_pid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpointscost | text, text, anyarray, bigint, directed boolean, driving_side character, OUT start_pid bigint, OUT end_pid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpointscost | text, text, bigint, anyarray, directed boolean, driving_side character, OUT start_pid bigint, OUT end_pid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpointscost | text, text, bigint, bigint, directed boolean, driving_side character, OUT start_pid bigint, OUT end_pid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpointscost | text, text, text, directed boolean, driving_side character, OUT start_pid bigint, OUT end_pid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | 
pgr_withpointscostmatrix | text, text, anyarray, directed boolean, driving_side character, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpointsdd | text, text, anyarray, double precision, directed boolean, driving_side character, details boolean, equicost boolean, OUT seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpointsdd | text, text, bigint, double precision, directed boolean, driving_side character, details boolean, OUT seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpointsksp | text, text, bigint, bigint, integer, directed boolean, heap_paths boolean, driving_side character, details boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpointsvia | text, text, anyarray, directed boolean, strict boolean, u_turn_on_edge boolean, driving_side character, details boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record + pgrowlocks | public | pgrowlocks | relname text, OUT locked_row tid, OUT locker xid, OUT multi boolean, OUT xids xid[], OUT modes text[], OUT pids integer[] | SETOF record + pgsodium | pgsodium | create_key | key_type pgsodium.key_type, name text, raw_key bytea, raw_key_nonce bytea, parent_key uuid, key_context bytea, expires timestamp with time zone, associated_data text | pgsodium.valid_key + pgsodium | pgsodium | create_mask_view | relid oid, debug boolean | void + pgsodium | pgsodium | create_mask_view | relid 
oid, subid integer, debug boolean | void + pgsodium | pgsodium | crypto_aead_det_decrypt | ciphertext bytea, additional bytea, key bytea, nonce bytea | bytea + pgsodium | pgsodium | crypto_aead_det_decrypt | message bytea, additional bytea, key_id bigint, context bytea, nonce bytea | bytea + pgsodium | pgsodium | crypto_aead_det_decrypt | message bytea, additional bytea, key_uuid uuid | bytea + pgsodium | pgsodium | crypto_aead_det_decrypt | message bytea, additional bytea, key_uuid uuid, nonce bytea | bytea + pgsodium | pgsodium | crypto_aead_det_encrypt | message bytea, additional bytea, key bytea, nonce bytea | bytea + pgsodium | pgsodium | crypto_aead_det_encrypt | message bytea, additional bytea, key_id bigint, context bytea, nonce bytea | bytea + pgsodium | pgsodium | crypto_aead_det_encrypt | message bytea, additional bytea, key_uuid uuid | bytea + pgsodium | pgsodium | crypto_aead_det_encrypt | message bytea, additional bytea, key_uuid uuid, nonce bytea | bytea + pgsodium | pgsodium | crypto_aead_det_keygen | | bytea + pgsodium | pgsodium | crypto_aead_det_noncegen | | bytea + pgsodium | pgsodium | crypto_aead_ietf_decrypt | message bytea, additional bytea, nonce bytea, key bytea | bytea + pgsodium | pgsodium | crypto_aead_ietf_decrypt | message bytea, additional bytea, nonce bytea, key_id bigint, context bytea | bytea + pgsodium | pgsodium | crypto_aead_ietf_decrypt | message bytea, additional bytea, nonce bytea, key_uuid uuid | bytea + pgsodium | pgsodium | crypto_aead_ietf_encrypt | message bytea, additional bytea, nonce bytea, key bytea | bytea + pgsodium | pgsodium | crypto_aead_ietf_encrypt | message bytea, additional bytea, nonce bytea, key_id bigint, context bytea | bytea + pgsodium | pgsodium | crypto_aead_ietf_encrypt | message bytea, additional bytea, nonce bytea, key_uuid uuid | bytea + pgsodium | pgsodium | crypto_aead_ietf_keygen | | bytea + pgsodium | pgsodium | crypto_aead_ietf_noncegen | | bytea + pgsodium | pgsodium | crypto_auth | message 
bytea, key bytea | bytea + pgsodium | pgsodium | crypto_auth | message bytea, key_id bigint, context bytea | bytea + pgsodium | pgsodium | crypto_auth | message bytea, key_uuid uuid | bytea + pgsodium | pgsodium | crypto_auth_hmacsha256 | message bytea, key_id bigint, context bytea | bytea + pgsodium | pgsodium | crypto_auth_hmacsha256 | message bytea, key_uuid uuid | bytea + pgsodium | pgsodium | crypto_auth_hmacsha256 | message bytea, secret bytea | bytea + pgsodium | pgsodium | crypto_auth_hmacsha256_keygen | | bytea + pgsodium | pgsodium | crypto_auth_hmacsha256_verify | hash bytea, message bytea, key_id bigint, context bytea | boolean + pgsodium | pgsodium | crypto_auth_hmacsha256_verify | hash bytea, message bytea, secret bytea | boolean + pgsodium | pgsodium | crypto_auth_hmacsha256_verify | signature bytea, message bytea, key_uuid uuid | boolean + pgsodium | pgsodium | crypto_auth_hmacsha512 | message bytea, key_id bigint, context bytea | bytea + pgsodium | pgsodium | crypto_auth_hmacsha512 | message bytea, key_uuid uuid | bytea + pgsodium | pgsodium | crypto_auth_hmacsha512 | message bytea, secret bytea | bytea + pgsodium | pgsodium | crypto_auth_hmacsha512_keygen | | bytea + pgsodium | pgsodium | crypto_auth_hmacsha512_verify | hash bytea, message bytea, key_id bigint, context bytea | boolean + pgsodium | pgsodium | crypto_auth_hmacsha512_verify | hash bytea, message bytea, secret bytea | boolean + pgsodium | pgsodium | crypto_auth_hmacsha512_verify | signature bytea, message bytea, key_uuid uuid | boolean + pgsodium | pgsodium | crypto_auth_keygen | | bytea + pgsodium | pgsodium | crypto_auth_verify | mac bytea, message bytea, key bytea | boolean + pgsodium | pgsodium | crypto_auth_verify | mac bytea, message bytea, key_id bigint, context bytea | boolean + pgsodium | pgsodium | crypto_auth_verify | mac bytea, message bytea, key_uuid uuid | boolean + pgsodium | pgsodium | crypto_box | message bytea, nonce bytea, public bytea, secret bytea | bytea + 
pgsodium | pgsodium | crypto_box_new_keypair | | pgsodium.crypto_box_keypair + pgsodium | pgsodium | crypto_box_new_seed | | bytea + pgsodium | pgsodium | crypto_box_noncegen | | bytea + pgsodium | pgsodium | crypto_box_open | ciphertext bytea, nonce bytea, public bytea, secret bytea | bytea + pgsodium | pgsodium | crypto_box_seal | message bytea, public_key bytea | bytea + pgsodium | pgsodium | crypto_box_seal_open | ciphertext bytea, public_key bytea, secret_key bytea | bytea + pgsodium | pgsodium | crypto_box_seed_new_keypair | seed bytea | pgsodium.crypto_box_keypair + pgsodium | pgsodium | crypto_cmp | text, text | boolean + pgsodium | pgsodium | crypto_generichash | message bytea, key bigint, context bytea | bytea + pgsodium | pgsodium | crypto_generichash | message bytea, key bytea | bytea + pgsodium | pgsodium | crypto_generichash | message bytea, key_uuid uuid | bytea + pgsodium | pgsodium | crypto_generichash_keygen | | bytea + pgsodium | pgsodium | crypto_hash_sha256 | message bytea | bytea + pgsodium | pgsodium | crypto_hash_sha512 | message bytea | bytea + pgsodium | pgsodium | crypto_kdf_derive_from_key | subkey_size bigint, subkey_id bigint, context bytea, primary_key bytea | bytea + pgsodium | pgsodium | crypto_kdf_derive_from_key | subkey_size integer, subkey_id bigint, context bytea, primary_key uuid | bytea + pgsodium | pgsodium | crypto_kdf_keygen | | bytea + pgsodium | pgsodium | crypto_kx_client_session_keys | client_pk bytea, client_sk bytea, server_pk bytea | pgsodium.crypto_kx_session + pgsodium | pgsodium | crypto_kx_new_keypair | | pgsodium.crypto_kx_keypair + pgsodium | pgsodium | crypto_kx_new_seed | | bytea + pgsodium | pgsodium | crypto_kx_seed_new_keypair | seed bytea | pgsodium.crypto_kx_keypair + pgsodium | pgsodium | crypto_kx_server_session_keys | server_pk bytea, server_sk bytea, client_pk bytea | pgsodium.crypto_kx_session + pgsodium | pgsodium | crypto_pwhash | password bytea, salt bytea | bytea + pgsodium | pgsodium | 
crypto_pwhash_saltgen | | bytea + pgsodium | pgsodium | crypto_pwhash_str | password bytea | bytea + pgsodium | pgsodium | crypto_pwhash_str_verify | hashed_password bytea, password bytea | boolean + pgsodium | pgsodium | crypto_secretbox | message bytea, nonce bytea, key bytea | bytea + pgsodium | pgsodium | crypto_secretbox | message bytea, nonce bytea, key_id bigint, context bytea | bytea + pgsodium | pgsodium | crypto_secretbox | message bytea, nonce bytea, key_uuid uuid | bytea + pgsodium | pgsodium | crypto_secretbox_keygen | | bytea + pgsodium | pgsodium | crypto_secretbox_noncegen | | bytea + pgsodium | pgsodium | crypto_secretbox_open | ciphertext bytea, nonce bytea, key bytea | bytea + pgsodium | pgsodium | crypto_secretbox_open | message bytea, nonce bytea, key_id bigint, context bytea | bytea + pgsodium | pgsodium | crypto_secretbox_open | message bytea, nonce bytea, key_uuid uuid | bytea + pgsodium | pgsodium | crypto_secretstream_keygen | | bytea + pgsodium | pgsodium | crypto_shorthash | message bytea, key bigint, context bytea | bytea + pgsodium | pgsodium | crypto_shorthash | message bytea, key bytea | bytea + pgsodium | pgsodium | crypto_shorthash | message bytea, key_uuid uuid | bytea + pgsodium | pgsodium | crypto_shorthash_keygen | | bytea + pgsodium | pgsodium | crypto_sign | message bytea, key bytea | bytea + pgsodium | pgsodium | crypto_sign_detached | message bytea, key bytea | bytea + pgsodium | pgsodium | crypto_sign_final_create | state bytea, key bytea | bytea + pgsodium | pgsodium | crypto_sign_final_verify | state bytea, signature bytea, key bytea | boolean + pgsodium | pgsodium | crypto_sign_init | | bytea + pgsodium | pgsodium | crypto_sign_new_keypair | | pgsodium.crypto_sign_keypair + pgsodium | pgsodium | crypto_sign_new_seed | | bytea + pgsodium | pgsodium | crypto_sign_open | signed_message bytea, key bytea | bytea + pgsodium | pgsodium | crypto_sign_seed_new_keypair | seed bytea | pgsodium.crypto_sign_keypair + pgsodium | 
pgsodium | crypto_sign_update | state bytea, message bytea | bytea + pgsodium | pgsodium | crypto_sign_update_agg | message bytea | bytea + pgsodium | pgsodium | crypto_sign_update_agg | state bytea, message bytea | bytea + pgsodium | pgsodium | crypto_sign_update_agg1 | state bytea, message bytea | bytea + pgsodium | pgsodium | crypto_sign_update_agg2 | cur_state bytea, initial_state bytea, message bytea | bytea + pgsodium | pgsodium | crypto_sign_verify_detached | sig bytea, message bytea, key bytea | boolean + pgsodium | pgsodium | crypto_signcrypt_new_keypair | | pgsodium.crypto_signcrypt_keypair + pgsodium | pgsodium | crypto_signcrypt_sign_after | state bytea, sender_sk bytea, ciphertext bytea | bytea + pgsodium | pgsodium | crypto_signcrypt_sign_before | sender bytea, recipient bytea, sender_sk bytea, recipient_pk bytea, additional bytea | pgsodium.crypto_signcrypt_state_key + pgsodium | pgsodium | crypto_signcrypt_verify_after | state bytea, signature bytea, sender_pk bytea, ciphertext bytea | boolean + pgsodium | pgsodium | crypto_signcrypt_verify_before | signature bytea, sender bytea, recipient bytea, additional bytea, sender_pk bytea, recipient_sk bytea | pgsodium.crypto_signcrypt_state_key + pgsodium | pgsodium | crypto_signcrypt_verify_public | signature bytea, sender bytea, recipient bytea, additional bytea, sender_pk bytea, ciphertext bytea | boolean + pgsodium | pgsodium | crypto_stream_xchacha20 | bigint, bytea, bigint, context bytea | bytea + pgsodium | pgsodium | crypto_stream_xchacha20 | bigint, bytea, bytea | bytea + pgsodium | pgsodium | crypto_stream_xchacha20_keygen | | bytea + pgsodium | pgsodium | crypto_stream_xchacha20_noncegen | | bytea + pgsodium | pgsodium | crypto_stream_xchacha20_xor | bytea, bytea, bigint, context bytea | bytea + pgsodium | pgsodium | crypto_stream_xchacha20_xor | bytea, bytea, bytea | bytea + pgsodium | pgsodium | crypto_stream_xchacha20_xor_ic | bytea, bytea, bigint, bigint, context bytea | bytea + pgsodium | 
pgsodium | crypto_stream_xchacha20_xor_ic | bytea, bytea, bigint, bytea | bytea + pgsodium | pgsodium | decrypted_columns | relid oid | text + pgsodium | pgsodium | derive_key | key_id bigint, key_len integer, context bytea | bytea + pgsodium | pgsodium | disable_security_label_trigger | | void + pgsodium | pgsodium | enable_security_label_trigger | | void + pgsodium | pgsodium | encrypted_column | relid oid, m record | text + pgsodium | pgsodium | encrypted_columns | relid oid | text + pgsodium | pgsodium | get_key_by_id | uuid | pgsodium.valid_key + pgsodium | pgsodium | get_key_by_name | text | pgsodium.valid_key + pgsodium | pgsodium | get_named_keys | filter text | SETOF pgsodium.valid_key + pgsodium | pgsodium | has_mask | role regrole, source_name text | boolean + pgsodium | pgsodium | key_encrypt_secret_raw_key | | trigger + pgsodium | pgsodium | mask_columns | source_relid oid | TABLE(attname name, key_id text, key_id_column text, associated_column text, nonce_column text, format_type text) + pgsodium | pgsodium | mask_role | masked_role regrole, source_name text, view_name text | void + pgsodium | pgsodium | pgsodium_derive | key_id bigint, key_len integer, context bytea | bytea + pgsodium | pgsodium | quote_assoc | text, boolean | text + pgsodium | pgsodium | randombytes_buf | size integer | bytea + pgsodium | pgsodium | randombytes_buf_deterministic | size integer, seed bytea | bytea + pgsodium | pgsodium | randombytes_new_seed | | bytea + pgsodium | pgsodium | randombytes_random | | integer + pgsodium | pgsodium | randombytes_uniform | upper_bound integer | integer + pgsodium | pgsodium | sodium_base642bin | base64 text | bytea + pgsodium | pgsodium | sodium_bin2base64 | bin bytea | text + pgsodium | pgsodium | trg_mask_update | | event_trigger + pgsodium | pgsodium | update_mask | target oid, debug boolean | void + pgsodium | pgsodium | update_masks | debug boolean | void + pgsodium | pgsodium | version | | text + pgstattuple | public | pg_relpages | 
relname regclass | bigint + pgstattuple | public | pg_relpages | relname text | bigint + pgstattuple | public | pgstatginindex | relname regclass, OUT version integer, OUT pending_pages integer, OUT pending_tuples bigint | record + pgstattuple | public | pgstathashindex | relname regclass, OUT version integer, OUT bucket_pages bigint, OUT overflow_pages bigint, OUT bitmap_pages bigint, OUT unused_pages bigint, OUT live_items bigint, OUT dead_items bigint, OUT free_percent double precision | record + pgstattuple | public | pgstatindex | relname regclass, OUT version integer, OUT tree_level integer, OUT index_size bigint, OUT root_block_no bigint, OUT internal_pages bigint, OUT leaf_pages bigint, OUT empty_pages bigint, OUT deleted_pages bigint, OUT avg_leaf_density double precision, OUT leaf_fragmentation double precision | record + pgstattuple | public | pgstatindex | relname text, OUT version integer, OUT tree_level integer, OUT index_size bigint, OUT root_block_no bigint, OUT internal_pages bigint, OUT leaf_pages bigint, OUT empty_pages bigint, OUT deleted_pages bigint, OUT avg_leaf_density double precision, OUT leaf_fragmentation double precision | record + pgstattuple | public | pgstattuple | relname text, OUT table_len bigint, OUT tuple_count bigint, OUT tuple_len bigint, OUT tuple_percent double precision, OUT dead_tuple_count bigint, OUT dead_tuple_len bigint, OUT dead_tuple_percent double precision, OUT free_space bigint, OUT free_percent double precision | record + pgstattuple | public | pgstattuple | reloid regclass, OUT table_len bigint, OUT tuple_count bigint, OUT tuple_len bigint, OUT tuple_percent double precision, OUT dead_tuple_count bigint, OUT dead_tuple_len bigint, OUT dead_tuple_percent double precision, OUT free_space bigint, OUT free_percent double precision | record + pgstattuple | public | pgstattuple_approx | reloid regclass, OUT table_len bigint, OUT scanned_percent double precision, OUT approx_tuple_count bigint, OUT approx_tuple_len 
bigint, OUT approx_tuple_percent double precision, OUT dead_tuple_count bigint, OUT dead_tuple_len bigint, OUT dead_tuple_percent double precision, OUT approx_free_space bigint, OUT approx_free_percent double precision | record + pgtap | public | _add | text, integer | integer + pgtap | public | _add | text, integer, text | integer + pgtap | public | _alike | boolean, anyelement, text, text | text + pgtap | public | _ancestor_of | name, name, integer | boolean + pgtap | public | _ancestor_of | name, name, name, name, integer | boolean + pgtap | public | _are | text, name[], name[], text | text + pgtap | public | _areni | text, text[], text[], text | text + pgtap | public | _array_to_sorted_string | name[], text | text + pgtap | public | _assets_are | text, text[], text[], text | text + pgtap | public | _cast_exists | name, name | boolean + pgtap | public | _cast_exists | name, name, name | boolean + pgtap | public | _cast_exists | name, name, name, name | boolean + pgtap | public | _cdi | name, name, anyelement | text + pgtap | public | _cdi | name, name, anyelement, text | text + pgtap | public | _cdi | name, name, name, anyelement, text | text + pgtap | public | _cexists | name, name | boolean + pgtap | public | _cexists | name, name, name | boolean + pgtap | public | _ckeys | name, character | name[] + pgtap | public | _ckeys | name, name, character | name[] + pgtap | public | _cleanup | | boolean + pgtap | public | _cmp_types | oid, name | boolean + pgtap | public | _col_is_null | name, name, name, text, boolean | text + pgtap | public | _col_is_null | name, name, text, boolean | text + pgtap | public | _constraint | name, character, name[], text, text | text + pgtap | public | _constraint | name, name, character, name[], text, text | text + pgtap | public | _contract_on | text | "char" + pgtap | public | _currtest | | integer + pgtap | public | _db_privs | | name[] + pgtap | public | _def_is | text, text, anyelement, text | text + pgtap | public | _definer | 
name | boolean + pgtap | public | _definer | name, name | boolean + pgtap | public | _definer | name, name, name[] | boolean + pgtap | public | _definer | name, name[] | boolean + pgtap | public | _dexists | name | boolean + pgtap | public | _dexists | name, name | boolean + pgtap | public | _do_ne | text, text, text, text | text + pgtap | public | _docomp | text, text, text, text | text + pgtap | public | _error_diag | text, text, text, text, text, text, text, text, text, text | text + pgtap | public | _expand_context | character | text + pgtap | public | _expand_on | character | text + pgtap | public | _expand_vol | character | text + pgtap | public | _ext_exists | name | boolean + pgtap | public | _ext_exists | name, name | boolean + pgtap | public | _extensions | | SETOF name + pgtap | public | _extensions | name | SETOF name + pgtap | public | _extras | character, name, name[] | name[] + pgtap | public | _extras | character, name[] | name[] + pgtap | public | _extras | character[], name, name[] | name[] + pgtap | public | _extras | character[], name[] | name[] + pgtap | public | _finish | integer, integer, integer, boolean | SETOF text + pgtap | public | _fkexists | name, name, name[] | boolean + pgtap | public | _fkexists | name, name[] | boolean + pgtap | public | _fprivs_are | text, name, name[], text | text + pgtap | public | _func_compare | name, name, anyelement, anyelement, text | text + pgtap | public | _func_compare | name, name, boolean, text | text + pgtap | public | _func_compare | name, name, name[], anyelement, anyelement, text | text + pgtap | public | _func_compare | name, name, name[], boolean, text | text + pgtap | public | _funkargs | name[] | text + pgtap | public | _get | text | integer + pgtap | public | _get_ac_privs | name, text | text[] + pgtap | public | _get_col_ns_type | name, name, name | text + pgtap | public | _get_col_privs | name, text, name | text[] + pgtap | public | _get_col_type | name, name | text + pgtap | public | 
_get_col_type | name, name, name | text + pgtap | public | _get_context | name, name | "char" + pgtap | public | _get_db_owner | name | name + pgtap | public | _get_db_privs | name, text | text[] + pgtap | public | _get_dtype | name | text + pgtap | public | _get_dtype | name, text, boolean | text + pgtap | public | _get_fdw_privs | name, text | text[] + pgtap | public | _get_func_owner | name, name, name[] | name + pgtap | public | _get_func_owner | name, name[] | name + pgtap | public | _get_func_privs | text, text | text[] + pgtap | public | _get_index_owner | name, name | name + pgtap | public | _get_index_owner | name, name, name | name + pgtap | public | _get_lang_privs | name, text | text[] + pgtap | public | _get_language_owner | name | name + pgtap | public | _get_latest | text | integer[] + pgtap | public | _get_latest | text, integer | integer + pgtap | public | _get_note | integer | text + pgtap | public | _get_note | text | text + pgtap | public | _get_opclass_owner | name | name + pgtap | public | _get_opclass_owner | name, name | name + pgtap | public | _get_rel_owner | character, name | name + pgtap | public | _get_rel_owner | character, name, name | name + pgtap | public | _get_rel_owner | character[], name | name + pgtap | public | _get_rel_owner | character[], name, name | name + pgtap | public | _get_rel_owner | name | name + pgtap | public | _get_rel_owner | name, name | name + pgtap | public | _get_schema_owner | name | name + pgtap | public | _get_schema_privs | name, text | text[] + pgtap | public | _get_sequence_privs | name, text | text[] + pgtap | public | _get_server_privs | name, text | text[] + pgtap | public | _get_table_privs | name, text | text[] + pgtap | public | _get_tablespace_owner | name | name + pgtap | public | _get_tablespaceprivs | name, text | text[] + pgtap | public | _get_type_owner | name | name + pgtap | public | _get_type_owner | name, name | name + pgtap | public | _got_func | name | boolean + pgtap | public | 
_got_func | name, name | boolean + pgtap | public | _got_func | name, name, name[] | boolean + pgtap | public | _got_func | name, name[] | boolean + pgtap | public | _grolist | name | oid[] + pgtap | public | _has_def | name, name | boolean + pgtap | public | _has_def | name, name, name | boolean + pgtap | public | _has_group | name | boolean + pgtap | public | _has_role | name | boolean + pgtap | public | _has_type | name, character[] | boolean + pgtap | public | _has_type | name, name, character[] | boolean + pgtap | public | _has_user | name | boolean + pgtap | public | _hasc | name, character | boolean + pgtap | public | _hasc | name, name, character | boolean + pgtap | public | _have_index | name, name | boolean + pgtap | public | _have_index | name, name, name | boolean + pgtap | public | _ident_array_to_sorted_string | name[], text | text + pgtap | public | _ident_array_to_string | name[], text | text + pgtap | public | _ikeys | name, name | text[] + pgtap | public | _ikeys | name, name, name | text[] + pgtap | public | _inherited | name | boolean + pgtap | public | _inherited | name, name | boolean + pgtap | public | _is_indexed | name, name, text[] | boolean + pgtap | public | _is_instead | name, name | boolean + pgtap | public | _is_instead | name, name, name | boolean + pgtap | public | _is_schema | name | boolean + pgtap | public | _is_super | name | boolean + pgtap | public | _is_trusted | name | boolean + pgtap | public | _is_verbose | | boolean + pgtap | public | _keys | name, character | SETOF name[] + pgtap | public | _keys | name, name, character | SETOF name[] + pgtap | public | _lang | name | name + pgtap | public | _lang | name, name | name + pgtap | public | _lang | name, name, name[] | name + pgtap | public | _lang | name, name[] | name + pgtap | public | _missing | character, name, name[] | name[] + pgtap | public | _missing | character, name[] | name[] + pgtap | public | _missing | character[], name, name[] | name[] + pgtap | public | 
_missing | character[], name[] | name[] + pgtap | public | _nosuch | name, name, name[] | text + pgtap | public | _op_exists | name, name, name | boolean + pgtap | public | _op_exists | name, name, name, name | boolean + pgtap | public | _op_exists | name, name, name, name, name | boolean + pgtap | public | _opc_exists | name | boolean + pgtap | public | _opc_exists | name, name | boolean + pgtap | public | _partof | name, name | boolean + pgtap | public | _partof | name, name, name, name | boolean + pgtap | public | _parts | name | SETOF name + pgtap | public | _parts | name, name | SETOF name + pgtap | public | _pg_sv_column_array | oid, smallint[] | name[] + pgtap | public | _pg_sv_table_accessible | oid, oid | boolean + pgtap | public | _pg_sv_type_array | oid[] | name[] + pgtap | public | _prokind | p_oid oid | "char" + pgtap | public | _query | text | text + pgtap | public | _quote_ident_like | text, text | text + pgtap | public | _refine_vol | text | text + pgtap | public | _relcomp | text, anyarray, text, text | text + pgtap | public | _relcomp | text, text, text, text | text + pgtap | public | _relcomp | text, text, text, text, text | text + pgtap | public | _relexists | name | boolean + pgtap | public | _relexists | name, name | boolean + pgtap | public | _relne | text, anyarray, text, text | text + pgtap | public | _relne | text, text, text, text | text + pgtap | public | _returns | name | text + pgtap | public | _returns | name, name | text + pgtap | public | _returns | name, name, name[] | text + pgtap | public | _returns | name, name[] | text + pgtap | public | _rexists | character, name | boolean + pgtap | public | _rexists | character, name, name | boolean + pgtap | public | _rexists | character[], name | boolean + pgtap | public | _rexists | character[], name, name | boolean + pgtap | public | _rule_on | name, name | "char" + pgtap | public | _rule_on | name, name, name | "char" + pgtap | public | _runem | text[], boolean | SETOF text + pgtap | 
public | _runner | text[], text[], text[], text[], text[] | SETOF text + pgtap | public | _set | integer, integer | integer + pgtap | public | _set | text, integer | integer + pgtap | public | _set | text, integer, text | integer + pgtap | public | _strict | name | boolean + pgtap | public | _strict | name, name | boolean + pgtap | public | _strict | name, name, name[] | boolean + pgtap | public | _strict | name, name[] | boolean + pgtap | public | _table_privs | | name[] + pgtap | public | _temptable | anyarray, text | text + pgtap | public | _temptable | text, text | text + pgtap | public | _temptypes | text | text + pgtap | public | _time_trials | text, integer, numeric | SETOF _time_trial_type + pgtap | public | _tlike | boolean, text, text, text | text + pgtap | public | _todo | | text + pgtap | public | _trig | name, name | boolean + pgtap | public | _trig | name, name, name | boolean + pgtap | public | _type_func | "char", name | boolean + pgtap | public | _type_func | "char", name, name | boolean + pgtap | public | _type_func | "char", name, name, name[] | boolean + pgtap | public | _type_func | "char", name, name[] | boolean + pgtap | public | _types_are | name, name[], text, character[] | text + pgtap | public | _types_are | name[], text, character[] | text + pgtap | public | _unalike | boolean, anyelement, text, text | text + pgtap | public | _vol | name | text + pgtap | public | _vol | name, name | text + pgtap | public | _vol | name, name, name[] | text + pgtap | public | _vol | name, name[] | text + pgtap | public | add_result | boolean, boolean, text, text, text | integer + pgtap | public | alike | anyelement, text | text + pgtap | public | alike | anyelement, text, text | text + pgtap | public | any_column_privs_are | name, name, name, name[] | text + pgtap | public | any_column_privs_are | name, name, name, name[], text | text + pgtap | public | any_column_privs_are | name, name, name[] | text + pgtap | public | any_column_privs_are | name, name, 
name[], text | text + pgtap | public | bag_eq | text, anyarray | text + pgtap | public | bag_eq | text, anyarray, text | text + pgtap | public | bag_eq | text, text | text + pgtap | public | bag_eq | text, text, text | text + pgtap | public | bag_has | text, text | text + pgtap | public | bag_has | text, text, text | text + pgtap | public | bag_hasnt | text, text | text + pgtap | public | bag_hasnt | text, text, text | text + pgtap | public | bag_ne | text, anyarray | text + pgtap | public | bag_ne | text, anyarray, text | text + pgtap | public | bag_ne | text, text | text + pgtap | public | bag_ne | text, text, text | text + pgtap | public | can | name, name[] | text + pgtap | public | can | name, name[], text | text + pgtap | public | can | name[] | text + pgtap | public | can | name[], text | text + pgtap | public | cast_context_is | name, name, text | text + pgtap | public | cast_context_is | name, name, text, text | text + pgtap | public | casts_are | text[] | text + pgtap | public | casts_are | text[], text | text + pgtap | public | check_test | text, boolean | SETOF text + pgtap | public | check_test | text, boolean, text | SETOF text + pgtap | public | check_test | text, boolean, text, text | SETOF text + pgtap | public | check_test | text, boolean, text, text, text | SETOF text + pgtap | public | check_test | text, boolean, text, text, text, boolean | SETOF text + pgtap | public | cmp_ok | anyelement, text, anyelement | text + pgtap | public | cmp_ok | anyelement, text, anyelement, text | text + pgtap | public | col_default_is | name, name, anyelement | text + pgtap | public | col_default_is | name, name, anyelement, text | text + pgtap | public | col_default_is | name, name, name, anyelement, text | text + pgtap | public | col_default_is | name, name, name, text, text | text + pgtap | public | col_default_is | name, name, text | text + pgtap | public | col_default_is | name, name, text, text | text + pgtap | public | col_has_check | name, name | text + 
pgtap | public | col_has_check | name, name, name, text | text + pgtap | public | col_has_check | name, name, name[], text | text + pgtap | public | col_has_check | name, name, text | text + pgtap | public | col_has_check | name, name[] | text + pgtap | public | col_has_check | name, name[], text | text + pgtap | public | col_has_default | name, name | text + pgtap | public | col_has_default | name, name, name, text | text + pgtap | public | col_has_default | name, name, text | text + pgtap | public | col_hasnt_default | name, name | text + pgtap | public | col_hasnt_default | name, name, name, text | text + pgtap | public | col_hasnt_default | name, name, text | text + pgtap | public | col_is_fk | name, name | text + pgtap | public | col_is_fk | name, name, name, text | text + pgtap | public | col_is_fk | name, name, name[], text | text + pgtap | public | col_is_fk | name, name, text | text + pgtap | public | col_is_fk | name, name[] | text + pgtap | public | col_is_fk | name, name[], text | text + pgtap | public | col_is_null | schema_name name, table_name name, column_name name, description text | text + pgtap | public | col_is_null | table_name name, column_name name, description text | text + pgtap | public | col_is_pk | name, name | text + pgtap | public | col_is_pk | name, name, name, text | text + pgtap | public | col_is_pk | name, name, name[], text | text + pgtap | public | col_is_pk | name, name, text | text + pgtap | public | col_is_pk | name, name[] | text + pgtap | public | col_is_pk | name, name[], text | text + pgtap | public | col_is_unique | name, name | text + pgtap | public | col_is_unique | name, name, name | text + pgtap | public | col_is_unique | name, name, name, text | text + pgtap | public | col_is_unique | name, name, name[] | text + pgtap | public | col_is_unique | name, name, name[], text | text + pgtap | public | col_is_unique | name, name, text | text + pgtap | public | col_is_unique | name, name[] | text + pgtap | public | 
col_is_unique | name, name[], text | text + pgtap | public | col_isnt_fk | name, name | text + pgtap | public | col_isnt_fk | name, name, name, text | text + pgtap | public | col_isnt_fk | name, name, name[], text | text + pgtap | public | col_isnt_fk | name, name, text | text + pgtap | public | col_isnt_fk | name, name[] | text + pgtap | public | col_isnt_fk | name, name[], text | text + pgtap | public | col_isnt_pk | name, name | text + pgtap | public | col_isnt_pk | name, name, name, text | text + pgtap | public | col_isnt_pk | name, name, name[], text | text + pgtap | public | col_isnt_pk | name, name, text | text + pgtap | public | col_isnt_pk | name, name[] | text + pgtap | public | col_isnt_pk | name, name[], text | text + pgtap | public | col_not_null | schema_name name, table_name name, column_name name, description text | text + pgtap | public | col_not_null | table_name name, column_name name, description text | text + pgtap | public | col_type_is | name, name, name, name, text | text + pgtap | public | col_type_is | name, name, name, name, text, text | text + pgtap | public | col_type_is | name, name, name, text | text + pgtap | public | col_type_is | name, name, name, text, text | text + pgtap | public | col_type_is | name, name, text | text + pgtap | public | col_type_is | name, name, text, text | text + pgtap | public | collect_tap | VARIADIC text[] | text + pgtap | public | collect_tap | character varying[] | text + pgtap | public | column_privs_are | name, name, name, name, name[] | text + pgtap | public | column_privs_are | name, name, name, name, name[], text | text + pgtap | public | column_privs_are | name, name, name, name[] | text + pgtap | public | column_privs_are | name, name, name, name[], text | text + pgtap | public | columns_are | name, name, name[] | text + pgtap | public | columns_are | name, name, name[], text | text + pgtap | public | columns_are | name, name[] | text + pgtap | public | columns_are | name, name[], text | text + 
pgtap | public | composite_owner_is | name, name | text + pgtap | public | composite_owner_is | name, name, name | text + pgtap | public | composite_owner_is | name, name, name, text | text + pgtap | public | composite_owner_is | name, name, text | text + pgtap | public | database_privs_are | name, name, name[] | text + pgtap | public | database_privs_are | name, name, name[], text | text + pgtap | public | db_owner_is | name, name | text + pgtap | public | db_owner_is | name, name, text | text + pgtap | public | diag | VARIADIC anyarray | text + pgtap | public | diag | VARIADIC text[] | text + pgtap | public | diag | msg anyelement | text + pgtap | public | diag | msg text | text + pgtap | public | diag_test_name | text | text + pgtap | public | display_oper | name, oid | text + pgtap | public | do_tap | | SETOF text + pgtap | public | do_tap | name | SETOF text + pgtap | public | do_tap | name, text | SETOF text + pgtap | public | do_tap | text | SETOF text + pgtap | public | doesnt_imatch | anyelement, text | text + pgtap | public | doesnt_imatch | anyelement, text, text | text + pgtap | public | doesnt_match | anyelement, text | text + pgtap | public | doesnt_match | anyelement, text, text | text + pgtap | public | domain_type_is | name, text, name, text | text + pgtap | public | domain_type_is | name, text, name, text, text | text + pgtap | public | domain_type_is | name, text, text | text + pgtap | public | domain_type_is | name, text, text, text | text + pgtap | public | domain_type_is | text, text | text + pgtap | public | domain_type_is | text, text, text | text + pgtap | public | domain_type_isnt | name, text, name, text | text + pgtap | public | domain_type_isnt | name, text, name, text, text | text + pgtap | public | domain_type_isnt | name, text, text | text + pgtap | public | domain_type_isnt | name, text, text, text | text + pgtap | public | domain_type_isnt | text, text | text + pgtap | public | domain_type_isnt | text, text, text | text + pgtap | 
public | domains_are | name, name[] | text + pgtap | public | domains_are | name, name[], text | text + pgtap | public | domains_are | name[] | text + pgtap | public | domains_are | name[], text | text + pgtap | public | enum_has_labels | name, name, name[] | text + pgtap | public | enum_has_labels | name, name, name[], text | text + pgtap | public | enum_has_labels | name, name[] | text + pgtap | public | enum_has_labels | name, name[], text | text + pgtap | public | enums_are | name, name[] | text + pgtap | public | enums_are | name, name[], text | text + pgtap | public | enums_are | name[] | text + pgtap | public | enums_are | name[], text | text + pgtap | public | extensions_are | name, name[] | text + pgtap | public | extensions_are | name, name[], text | text + pgtap | public | extensions_are | name[] | text + pgtap | public | extensions_are | name[], text | text + pgtap | public | fail | | text + pgtap | public | fail | text | text + pgtap | public | fdw_privs_are | name, name, name[] | text + pgtap | public | fdw_privs_are | name, name, name[], text | text + pgtap | public | findfuncs | name, text | text[] + pgtap | public | findfuncs | name, text, text | text[] + pgtap | public | findfuncs | text | text[] + pgtap | public | findfuncs | text, text | text[] + pgtap | public | finish | exception_on_failure boolean | SETOF text + pgtap | public | fk_ok | name, name, name, name | text + pgtap | public | fk_ok | name, name, name, name, name, name, text | text + pgtap | public | fk_ok | name, name, name, name, name, text | text + pgtap | public | fk_ok | name, name, name, name, text | text + pgtap | public | fk_ok | name, name, name[], name, name, name[] | text + pgtap | public | fk_ok | name, name, name[], name, name, name[], text | text + pgtap | public | fk_ok | name, name[], name, name[] | text + pgtap | public | fk_ok | name, name[], name, name[], text | text + pgtap | public | foreign_table_owner_is | name, name | text + pgtap | public | 
foreign_table_owner_is | name, name, name | text + pgtap | public | foreign_table_owner_is | name, name, name, text | text + pgtap | public | foreign_table_owner_is | name, name, text | text + pgtap | public | foreign_tables_are | name, name[] | text + pgtap | public | foreign_tables_are | name, name[], text | text + pgtap | public | foreign_tables_are | name[] | text + pgtap | public | foreign_tables_are | name[], text | text + pgtap | public | function_lang_is | name, name | text + pgtap | public | function_lang_is | name, name, name | text + pgtap | public | function_lang_is | name, name, name, text | text + pgtap | public | function_lang_is | name, name, name[], name | text + pgtap | public | function_lang_is | name, name, name[], name, text | text + pgtap | public | function_lang_is | name, name, text | text + pgtap | public | function_lang_is | name, name[], name | text + pgtap | public | function_lang_is | name, name[], name, text | text + pgtap | public | function_owner_is | name, name, name[], name | text + pgtap | public | function_owner_is | name, name, name[], name, text | text + pgtap | public | function_owner_is | name, name[], name | text + pgtap | public | function_owner_is | name, name[], name, text | text + pgtap | public | function_privs_are | name, name, name[], name, name[] | text + pgtap | public | function_privs_are | name, name, name[], name, name[], text | text + pgtap | public | function_privs_are | name, name[], name, name[] | text + pgtap | public | function_privs_are | name, name[], name, name[], text | text + pgtap | public | function_returns | name, name, name[], text | text + pgtap | public | function_returns | name, name, name[], text, text | text + pgtap | public | function_returns | name, name, text | text + pgtap | public | function_returns | name, name, text, text | text + pgtap | public | function_returns | name, name[], text | text + pgtap | public | function_returns | name, name[], text, text | text + pgtap | public | 
function_returns | name, text | text + pgtap | public | function_returns | name, text, text | text + pgtap | public | functions_are | name, name[] | text + pgtap | public | functions_are | name, name[], text | text + pgtap | public | functions_are | name[] | text + pgtap | public | functions_are | name[], text | text + pgtap | public | groups_are | name[] | text + pgtap | public | groups_are | name[], text | text + pgtap | public | has_cast | name, name | text + pgtap | public | has_cast | name, name, name | text + pgtap | public | has_cast | name, name, name, name | text + pgtap | public | has_cast | name, name, name, name, text | text + pgtap | public | has_cast | name, name, name, text | text + pgtap | public | has_cast | name, name, text | text + pgtap | public | has_check | name | text + pgtap | public | has_check | name, name, text | text + pgtap | public | has_check | name, text | text + pgtap | public | has_column | name, name | text + pgtap | public | has_column | name, name, name, text | text + pgtap | public | has_column | name, name, text | text + pgtap | public | has_composite | name | text + pgtap | public | has_composite | name, name, text | text + pgtap | public | has_composite | name, text | text + pgtap | public | has_domain | name | text + pgtap | public | has_domain | name, name | text + pgtap | public | has_domain | name, name, text | text + pgtap | public | has_domain | name, text | text + pgtap | public | has_enum | name | text + pgtap | public | has_enum | name, name | text + pgtap | public | has_enum | name, name, text | text + pgtap | public | has_enum | name, text | text + pgtap | public | has_extension | name | text + pgtap | public | has_extension | name, name | text + pgtap | public | has_extension | name, name, text | text + pgtap | public | has_extension | name, text | text + pgtap | public | has_fk | name | text + pgtap | public | has_fk | name, name, text | text + pgtap | public | has_fk | name, text | text + pgtap | public | 
has_foreign_table | name | text + pgtap | public | has_foreign_table | name, name | text + pgtap | public | has_foreign_table | name, name, text | text + pgtap | public | has_foreign_table | name, text | text + pgtap | public | has_function | name | text + pgtap | public | has_function | name, name | text + pgtap | public | has_function | name, name, name[] | text + pgtap | public | has_function | name, name, name[], text | text + pgtap | public | has_function | name, name, text | text + pgtap | public | has_function | name, name[] | text + pgtap | public | has_function | name, name[], text | text + pgtap | public | has_function | name, text | text + pgtap | public | has_group | name | text + pgtap | public | has_group | name, text | text + pgtap | public | has_index | name, name | text + pgtap | public | has_index | name, name, name | text + pgtap | public | has_index | name, name, name, name | text + pgtap | public | has_index | name, name, name, name, text | text + pgtap | public | has_index | name, name, name, name[] | text + pgtap | public | has_index | name, name, name, name[], text | text + pgtap | public | has_index | name, name, name, text | text + pgtap | public | has_index | name, name, name[] | text + pgtap | public | has_index | name, name, name[], text | text + pgtap | public | has_index | name, name, text | text + pgtap | public | has_inherited_tables | name | text + pgtap | public | has_inherited_tables | name, name | text + pgtap | public | has_inherited_tables | name, name, text | text + pgtap | public | has_inherited_tables | name, text | text + pgtap | public | has_language | name | text + pgtap | public | has_language | name, text | text + pgtap | public | has_leftop | name, name | text + pgtap | public | has_leftop | name, name, name | text + pgtap | public | has_leftop | name, name, name, name | text + pgtap | public | has_leftop | name, name, name, name, text | text + pgtap | public | has_leftop | name, name, name, text | text + pgtap | 
public | has_leftop | name, name, text | text + pgtap | public | has_materialized_view | name | text + pgtap | public | has_materialized_view | name, name, text | text + pgtap | public | has_materialized_view | name, text | text + pgtap | public | has_opclass | name | text + pgtap | public | has_opclass | name, name | text + pgtap | public | has_opclass | name, name, text | text + pgtap | public | has_opclass | name, text | text + pgtap | public | has_operator | name, name, name | text + pgtap | public | has_operator | name, name, name, name | text + pgtap | public | has_operator | name, name, name, name, name | text + pgtap | public | has_operator | name, name, name, name, name, text | text + pgtap | public | has_operator | name, name, name, name, text | text + pgtap | public | has_operator | name, name, name, text | text + pgtap | public | has_pk | name | text + pgtap | public | has_pk | name, name, text | text + pgtap | public | has_pk | name, text | text + pgtap | public | has_relation | name | text + pgtap | public | has_relation | name, name, text | text + pgtap | public | has_relation | name, text | text + pgtap | public | has_rightop | name, name | text + pgtap | public | has_rightop | name, name, name | text + pgtap | public | has_rightop | name, name, name, name | text + pgtap | public | has_rightop | name, name, name, name, text | text + pgtap | public | has_rightop | name, name, name, text | text + pgtap | public | has_rightop | name, name, text | text + pgtap | public | has_role | name | text + pgtap | public | has_role | name, text | text + pgtap | public | has_rule | name, name | text + pgtap | public | has_rule | name, name, name | text + pgtap | public | has_rule | name, name, name, text | text + pgtap | public | has_rule | name, name, text | text + pgtap | public | has_schema | name | text + pgtap | public | has_schema | name, text | text + pgtap | public | has_sequence | name | text + pgtap | public | has_sequence | name, name | text + pgtap | 
public | has_sequence | name, name, text | text + pgtap | public | has_sequence | name, text | text + pgtap | public | has_table | name | text + pgtap | public | has_table | name, name | text + pgtap | public | has_table | name, name, text | text + pgtap | public | has_table | name, text | text + pgtap | public | has_tablespace | name | text + pgtap | public | has_tablespace | name, text | text + pgtap | public | has_tablespace | name, text, text | text + pgtap | public | has_trigger | name, name | text + pgtap | public | has_trigger | name, name, name | text + pgtap | public | has_trigger | name, name, name, text | text + pgtap | public | has_trigger | name, name, text | text + pgtap | public | has_type | name | text + pgtap | public | has_type | name, name | text + pgtap | public | has_type | name, name, text | text + pgtap | public | has_type | name, text | text + pgtap | public | has_unique | text | text + pgtap | public | has_unique | text, text | text + pgtap | public | has_unique | text, text, text | text + pgtap | public | has_user | name | text + pgtap | public | has_user | name, text | text + pgtap | public | has_view | name | text + pgtap | public | has_view | name, name | text + pgtap | public | has_view | name, name, text | text + pgtap | public | has_view | name, text | text + pgtap | public | hasnt_cast | name, name | text + pgtap | public | hasnt_cast | name, name, name | text + pgtap | public | hasnt_cast | name, name, name, name | text + pgtap | public | hasnt_cast | name, name, name, name, text | text + pgtap | public | hasnt_cast | name, name, name, text | text + pgtap | public | hasnt_cast | name, name, text | text + pgtap | public | hasnt_column | name, name | text + pgtap | public | hasnt_column | name, name, name, text | text + pgtap | public | hasnt_column | name, name, text | text + pgtap | public | hasnt_composite | name | text + pgtap | public | hasnt_composite | name, name, text | text + pgtap | public | hasnt_composite | name, text | 
text + pgtap | public | hasnt_domain | name | text + pgtap | public | hasnt_domain | name, name | text + pgtap | public | hasnt_domain | name, name, text | text + pgtap | public | hasnt_domain | name, text | text + pgtap | public | hasnt_enum | name | text + pgtap | public | hasnt_enum | name, name | text + pgtap | public | hasnt_enum | name, name, text | text + pgtap | public | hasnt_enum | name, text | text + pgtap | public | hasnt_extension | name | text + pgtap | public | hasnt_extension | name, name | text + pgtap | public | hasnt_extension | name, name, text | text + pgtap | public | hasnt_extension | name, text | text + pgtap | public | hasnt_fk | name | text + pgtap | public | hasnt_fk | name, name, text | text + pgtap | public | hasnt_fk | name, text | text + pgtap | public | hasnt_foreign_table | name | text + pgtap | public | hasnt_foreign_table | name, name | text + pgtap | public | hasnt_foreign_table | name, name, text | text + pgtap | public | hasnt_foreign_table | name, text | text + pgtap | public | hasnt_function | name | text + pgtap | public | hasnt_function | name, name | text + pgtap | public | hasnt_function | name, name, name[] | text + pgtap | public | hasnt_function | name, name, name[], text | text + pgtap | public | hasnt_function | name, name, text | text + pgtap | public | hasnt_function | name, name[] | text + pgtap | public | hasnt_function | name, name[], text | text + pgtap | public | hasnt_function | name, text | text + pgtap | public | hasnt_group | name | text + pgtap | public | hasnt_group | name, text | text + pgtap | public | hasnt_index | name, name | text + pgtap | public | hasnt_index | name, name, name | text + pgtap | public | hasnt_index | name, name, name, text | text + pgtap | public | hasnt_index | name, name, text | text + pgtap | public | hasnt_inherited_tables | name | text + pgtap | public | hasnt_inherited_tables | name, name | text + pgtap | public | hasnt_inherited_tables | name, name, text | text + pgtap | 
public | hasnt_inherited_tables | name, text | text + pgtap | public | hasnt_language | name | text + pgtap | public | hasnt_language | name, text | text + pgtap | public | hasnt_leftop | name, name | text + pgtap | public | hasnt_leftop | name, name, name | text + pgtap | public | hasnt_leftop | name, name, name, name | text + pgtap | public | hasnt_leftop | name, name, name, name, text | text + pgtap | public | hasnt_leftop | name, name, name, text | text + pgtap | public | hasnt_leftop | name, name, text | text + pgtap | public | hasnt_materialized_view | name | text + pgtap | public | hasnt_materialized_view | name, name, text | text + pgtap | public | hasnt_materialized_view | name, text | text + pgtap | public | hasnt_opclass | name | text + pgtap | public | hasnt_opclass | name, name | text + pgtap | public | hasnt_opclass | name, name, text | text + pgtap | public | hasnt_opclass | name, text | text + pgtap | public | hasnt_operator | name, name, name | text + pgtap | public | hasnt_operator | name, name, name, name | text + pgtap | public | hasnt_operator | name, name, name, name, name | text + pgtap | public | hasnt_operator | name, name, name, name, name, text | text + pgtap | public | hasnt_operator | name, name, name, name, text | text + pgtap | public | hasnt_operator | name, name, name, text | text + pgtap | public | hasnt_pk | name | text + pgtap | public | hasnt_pk | name, name, text | text + pgtap | public | hasnt_pk | name, text | text + pgtap | public | hasnt_relation | name | text + pgtap | public | hasnt_relation | name, name, text | text + pgtap | public | hasnt_relation | name, text | text + pgtap | public | hasnt_rightop | name, name | text + pgtap | public | hasnt_rightop | name, name, name | text + pgtap | public | hasnt_rightop | name, name, name, name | text + pgtap | public | hasnt_rightop | name, name, name, name, text | text + pgtap | public | hasnt_rightop | name, name, name, text | text + pgtap | public | hasnt_rightop | name, 
name, text | text + pgtap | public | hasnt_role | name | text + pgtap | public | hasnt_role | name, text | text + pgtap | public | hasnt_rule | name, name | text + pgtap | public | hasnt_rule | name, name, name | text + pgtap | public | hasnt_rule | name, name, name, text | text + pgtap | public | hasnt_rule | name, name, text | text + pgtap | public | hasnt_schema | name | text + pgtap | public | hasnt_schema | name, text | text + pgtap | public | hasnt_sequence | name | text + pgtap | public | hasnt_sequence | name, name, text | text + pgtap | public | hasnt_sequence | name, text | text + pgtap | public | hasnt_table | name | text + pgtap | public | hasnt_table | name, name | text + pgtap | public | hasnt_table | name, name, text | text + pgtap | public | hasnt_table | name, text | text + pgtap | public | hasnt_tablespace | name | text + pgtap | public | hasnt_tablespace | name, text | text + pgtap | public | hasnt_trigger | name, name | text + pgtap | public | hasnt_trigger | name, name, name | text + pgtap | public | hasnt_trigger | name, name, name, text | text + pgtap | public | hasnt_trigger | name, name, text | text + pgtap | public | hasnt_type | name | text + pgtap | public | hasnt_type | name, name | text + pgtap | public | hasnt_type | name, name, text | text + pgtap | public | hasnt_type | name, text | text + pgtap | public | hasnt_user | name | text + pgtap | public | hasnt_user | name, text | text + pgtap | public | hasnt_view | name | text + pgtap | public | hasnt_view | name, name | text + pgtap | public | hasnt_view | name, name, text | text + pgtap | public | hasnt_view | name, text | text + pgtap | public | ialike | anyelement, text | text + pgtap | public | ialike | anyelement, text, text | text + pgtap | public | imatches | anyelement, text | text + pgtap | public | imatches | anyelement, text, text | text + pgtap | public | in_todo | | boolean + pgtap | public | index_is_primary | name | text + pgtap | public | index_is_primary | name, name | 
text + pgtap | public | index_is_primary | name, name, name | text + pgtap | public | index_is_primary | name, name, name, text | text + pgtap | public | index_is_type | name, name | text + pgtap | public | index_is_type | name, name, name | text + pgtap | public | index_is_type | name, name, name, name | text + pgtap | public | index_is_type | name, name, name, name, text | text + pgtap | public | index_is_unique | name | text + pgtap | public | index_is_unique | name, name | text + pgtap | public | index_is_unique | name, name, name | text + pgtap | public | index_is_unique | name, name, name, text | text + pgtap | public | index_owner_is | name, name, name | text + pgtap | public | index_owner_is | name, name, name, name | text + pgtap | public | index_owner_is | name, name, name, name, text | text + pgtap | public | index_owner_is | name, name, name, text | text + pgtap | public | indexes_are | name, name, name[] | text + pgtap | public | indexes_are | name, name, name[], text | text + pgtap | public | indexes_are | name, name[] | text + pgtap | public | indexes_are | name, name[], text | text + pgtap | public | is | anyelement, anyelement | text + pgtap | public | is | anyelement, anyelement, text | text + pgtap | public | is_aggregate | name | text + pgtap | public | is_aggregate | name, name | text + pgtap | public | is_aggregate | name, name, name[] | text + pgtap | public | is_aggregate | name, name, name[], text | text + pgtap | public | is_aggregate | name, name, text | text + pgtap | public | is_aggregate | name, name[] | text + pgtap | public | is_aggregate | name, name[], text | text + pgtap | public | is_aggregate | name, text | text + pgtap | public | is_ancestor_of | name, name | text + pgtap | public | is_ancestor_of | name, name, integer | text + pgtap | public | is_ancestor_of | name, name, integer, text | text + pgtap | public | is_ancestor_of | name, name, name, name | text + pgtap | public | is_ancestor_of | name, name, name, name, integer | 
text + pgtap | public | is_ancestor_of | name, name, name, name, integer, text | text + pgtap | public | is_ancestor_of | name, name, name, name, text | text + pgtap | public | is_ancestor_of | name, name, text | text + pgtap | public | is_clustered | name | text + pgtap | public | is_clustered | name, name | text + pgtap | public | is_clustered | name, name, name | text + pgtap | public | is_clustered | name, name, name, text | text + pgtap | public | is_definer | name | text + pgtap | public | is_definer | name, name | text + pgtap | public | is_definer | name, name, name[] | text + pgtap | public | is_definer | name, name, name[], text | text + pgtap | public | is_definer | name, name, text | text + pgtap | public | is_definer | name, name[] | text + pgtap | public | is_definer | name, name[], text | text + pgtap | public | is_definer | name, text | text + pgtap | public | is_descendent_of | name, name | text + pgtap | public | is_descendent_of | name, name, integer | text + pgtap | public | is_descendent_of | name, name, integer, text | text + pgtap | public | is_descendent_of | name, name, name, name | text + pgtap | public | is_descendent_of | name, name, name, name, integer | text + pgtap | public | is_descendent_of | name, name, name, name, integer, text | text + pgtap | public | is_descendent_of | name, name, name, name, text | text + pgtap | public | is_descendent_of | name, name, text | text + pgtap | public | is_empty | text | text + pgtap | public | is_empty | text, text | text + pgtap | public | is_indexed | name, name | text + pgtap | public | is_indexed | name, name, name | text + pgtap | public | is_indexed | name, name, name, text | text + pgtap | public | is_indexed | name, name, name[] | text + pgtap | public | is_indexed | name, name, name[], text | text + pgtap | public | is_indexed | name, name[] | text + pgtap | public | is_indexed | name, name[], text | text + pgtap | public | is_member_of | name, name | text + pgtap | public | is_member_of 
| name, name, text | text + pgtap | public | is_member_of | name, name[] | text + pgtap | public | is_member_of | name, name[], text | text + pgtap | public | is_normal_function | name | text + pgtap | public | is_normal_function | name, name | text + pgtap | public | is_normal_function | name, name, name[] | text + pgtap | public | is_normal_function | name, name, name[], text | text + pgtap | public | is_normal_function | name, name, text | text + pgtap | public | is_normal_function | name, name[] | text + pgtap | public | is_normal_function | name, name[], text | text + pgtap | public | is_normal_function | name, text | text + pgtap | public | is_partition_of | name, name | text + pgtap | public | is_partition_of | name, name, name, name | text + pgtap | public | is_partition_of | name, name, name, name, text | text + pgtap | public | is_partition_of | name, name, text | text + pgtap | public | is_partitioned | name | text + pgtap | public | is_partitioned | name, name | text + pgtap | public | is_partitioned | name, name, text | text + pgtap | public | is_partitioned | name, text | text + pgtap | public | is_procedure | name | text + pgtap | public | is_procedure | name, name | text + pgtap | public | is_procedure | name, name, name[] | text + pgtap | public | is_procedure | name, name, name[], text | text + pgtap | public | is_procedure | name, name, text | text + pgtap | public | is_procedure | name, name[] | text + pgtap | public | is_procedure | name, name[], text | text + pgtap | public | is_procedure | name, text | text + pgtap | public | is_strict | name | text + pgtap | public | is_strict | name, name | text + pgtap | public | is_strict | name, name, name[] | text + pgtap | public | is_strict | name, name, name[], text | text + pgtap | public | is_strict | name, name, text | text + pgtap | public | is_strict | name, name[] | text + pgtap | public | is_strict | name, name[], text | text + pgtap | public | is_strict | name, text | text + pgtap | public | 
is_superuser | name | text + pgtap | public | is_superuser | name, text | text + pgtap | public | is_window | name | text + pgtap | public | is_window | name, name | text + pgtap | public | is_window | name, name, name[] | text + pgtap | public | is_window | name, name, name[], text | text + pgtap | public | is_window | name, name, text | text + pgtap | public | is_window | name, name[] | text + pgtap | public | is_window | name, name[], text | text + pgtap | public | is_window | name, text | text + pgtap | public | isa_ok | anyelement, regtype | text + pgtap | public | isa_ok | anyelement, regtype, text | text + pgtap | public | isnt | anyelement, anyelement | text + pgtap | public | isnt | anyelement, anyelement, text | text + pgtap | public | isnt_aggregate | name | text + pgtap | public | isnt_aggregate | name, name | text + pgtap | public | isnt_aggregate | name, name, name[] | text + pgtap | public | isnt_aggregate | name, name, name[], text | text + pgtap | public | isnt_aggregate | name, name, text | text + pgtap | public | isnt_aggregate | name, name[] | text + pgtap | public | isnt_aggregate | name, name[], text | text + pgtap | public | isnt_aggregate | name, text | text + pgtap | public | isnt_ancestor_of | name, name | text + pgtap | public | isnt_ancestor_of | name, name, integer | text + pgtap | public | isnt_ancestor_of | name, name, integer, text | text + pgtap | public | isnt_ancestor_of | name, name, name, name | text + pgtap | public | isnt_ancestor_of | name, name, name, name, integer | text + pgtap | public | isnt_ancestor_of | name, name, name, name, integer, text | text + pgtap | public | isnt_ancestor_of | name, name, name, name, text | text + pgtap | public | isnt_ancestor_of | name, name, text | text + pgtap | public | isnt_definer | name | text + pgtap | public | isnt_definer | name, name | text + pgtap | public | isnt_definer | name, name, name[] | text + pgtap | public | isnt_definer | name, name, name[], text | text + pgtap | public | 
isnt_definer | name, name, text | text + pgtap | public | isnt_definer | name, name[] | text + pgtap | public | isnt_definer | name, name[], text | text + pgtap | public | isnt_definer | name, text | text + pgtap | public | isnt_descendent_of | name, name | text + pgtap | public | isnt_descendent_of | name, name, integer | text + pgtap | public | isnt_descendent_of | name, name, integer, text | text + pgtap | public | isnt_descendent_of | name, name, name, name | text + pgtap | public | isnt_descendent_of | name, name, name, name, integer | text + pgtap | public | isnt_descendent_of | name, name, name, name, integer, text | text + pgtap | public | isnt_descendent_of | name, name, name, name, text | text + pgtap | public | isnt_descendent_of | name, name, text | text + pgtap | public | isnt_empty | text | text + pgtap | public | isnt_empty | text, text | text + pgtap | public | isnt_member_of | name, name | text + pgtap | public | isnt_member_of | name, name, text | text + pgtap | public | isnt_member_of | name, name[] | text + pgtap | public | isnt_member_of | name, name[], text | text + pgtap | public | isnt_normal_function | name | text + pgtap | public | isnt_normal_function | name, name | text + pgtap | public | isnt_normal_function | name, name, name[] | text + pgtap | public | isnt_normal_function | name, name, name[], text | text + pgtap | public | isnt_normal_function | name, name, text | text + pgtap | public | isnt_normal_function | name, name[] | text + pgtap | public | isnt_normal_function | name, name[], text | text + pgtap | public | isnt_normal_function | name, text | text + pgtap | public | isnt_partitioned | name | text + pgtap | public | isnt_partitioned | name, name | text + pgtap | public | isnt_partitioned | name, name, text | text + pgtap | public | isnt_partitioned | name, text | text + pgtap | public | isnt_procedure | name | text + pgtap | public | isnt_procedure | name, name | text + pgtap | public | isnt_procedure | name, name, name[] | 
text + pgtap | public | isnt_procedure | name, name, name[], text | text + pgtap | public | isnt_procedure | name, name, text | text + pgtap | public | isnt_procedure | name, name[] | text + pgtap | public | isnt_procedure | name, name[], text | text + pgtap | public | isnt_procedure | name, text | text + pgtap | public | isnt_strict | name | text + pgtap | public | isnt_strict | name, name | text + pgtap | public | isnt_strict | name, name, name[] | text + pgtap | public | isnt_strict | name, name, name[], text | text + pgtap | public | isnt_strict | name, name, text | text + pgtap | public | isnt_strict | name, name[] | text + pgtap | public | isnt_strict | name, name[], text | text + pgtap | public | isnt_strict | name, text | text + pgtap | public | isnt_superuser | name | text + pgtap | public | isnt_superuser | name, text | text + pgtap | public | isnt_window | name | text + pgtap | public | isnt_window | name, name | text + pgtap | public | isnt_window | name, name, name[] | text + pgtap | public | isnt_window | name, name, name[], text | text + pgtap | public | isnt_window | name, name, text | text + pgtap | public | isnt_window | name, name[] | text + pgtap | public | isnt_window | name, name[], text | text + pgtap | public | isnt_window | name, text | text + pgtap | public | language_is_trusted | name | text + pgtap | public | language_is_trusted | name, text | text + pgtap | public | language_owner_is | name, name | text + pgtap | public | language_owner_is | name, name, text | text + pgtap | public | language_privs_are | name, name, name[] | text + pgtap | public | language_privs_are | name, name, name[], text | text + pgtap | public | languages_are | name[] | text + pgtap | public | languages_are | name[], text | text + pgtap | public | lives_ok | text | text + pgtap | public | lives_ok | text, text | text + pgtap | public | matches | anyelement, text | text + pgtap | public | matches | anyelement, text, text | text + pgtap | public | 
materialized_view_owner_is | name, name | text + pgtap | public | materialized_view_owner_is | name, name, name | text + pgtap | public | materialized_view_owner_is | name, name, name, text | text + pgtap | public | materialized_view_owner_is | name, name, text | text + pgtap | public | materialized_views_are | name, name[] | text + pgtap | public | materialized_views_are | name, name[], text | text + pgtap | public | materialized_views_are | name[] | text + pgtap | public | materialized_views_are | name[], text | text + pgtap | public | no_plan | | SETOF boolean + pgtap | public | num_failed | | integer + pgtap | public | ok | boolean | text + pgtap | public | ok | boolean, text | text + pgtap | public | opclass_owner_is | name, name | text + pgtap | public | opclass_owner_is | name, name, name | text + pgtap | public | opclass_owner_is | name, name, name, text | text + pgtap | public | opclass_owner_is | name, name, text | text + pgtap | public | opclasses_are | name, name[] | text + pgtap | public | opclasses_are | name, name[], text | text + pgtap | public | opclasses_are | name[] | text + pgtap | public | opclasses_are | name[], text | text + pgtap | public | operators_are | name, text[] | text + pgtap | public | operators_are | name, text[], text | text + pgtap | public | operators_are | text[] | text + pgtap | public | operators_are | text[], text | text + pgtap | public | os_name | | text + pgtap | public | partitions_are | name, name, name[] | text + pgtap | public | partitions_are | name, name, name[], text | text + pgtap | public | partitions_are | name, name[] | text + pgtap | public | partitions_are | name, name[], text | text + pgtap | public | pass | | text + pgtap | public | pass | text | text + pgtap | public | performs_ok | text, numeric | text + pgtap | public | performs_ok | text, numeric, text | text + pgtap | public | performs_within | text, numeric, numeric | text + pgtap | public | performs_within | text, numeric, numeric, integer | text + 
pgtap | public | performs_within | text, numeric, numeric, integer, text | text + pgtap | public | performs_within | text, numeric, numeric, text | text + pgtap | public | pg_version | | text + pgtap | public | pg_version_num | | integer + pgtap | public | pgtap_version | | numeric + pgtap | public | plan | integer | text + pgtap | public | policies_are | name, name, name[] | text + pgtap | public | policies_are | name, name, name[], text | text + pgtap | public | policies_are | name, name[] | text + pgtap | public | policies_are | name, name[], text | text + pgtap | public | policy_cmd_is | name, name, name, text | text + pgtap | public | policy_cmd_is | name, name, name, text, text | text + pgtap | public | policy_cmd_is | name, name, text | text + pgtap | public | policy_cmd_is | name, name, text, text | text + pgtap | public | policy_roles_are | name, name, name, name[] | text + pgtap | public | policy_roles_are | name, name, name, name[], text | text + pgtap | public | policy_roles_are | name, name, name[] | text + pgtap | public | policy_roles_are | name, name, name[], text | text + pgtap | public | relation_owner_is | name, name | text + pgtap | public | relation_owner_is | name, name, name | text + pgtap | public | relation_owner_is | name, name, name, text | text + pgtap | public | relation_owner_is | name, name, text | text + pgtap | public | results_eq | refcursor, anyarray | text + pgtap | public | results_eq | refcursor, anyarray, text | text + pgtap | public | results_eq | refcursor, refcursor | text + pgtap | public | results_eq | refcursor, refcursor, text | text + pgtap | public | results_eq | refcursor, text | text + pgtap | public | results_eq | refcursor, text, text | text + pgtap | public | results_eq | text, anyarray | text + pgtap | public | results_eq | text, anyarray, text | text + pgtap | public | results_eq | text, refcursor | text + pgtap | public | results_eq | text, refcursor, text | text + pgtap | public | results_eq | text, text | 
text + pgtap | public | results_eq | text, text, text | text + pgtap | public | results_ne | refcursor, anyarray | text + pgtap | public | results_ne | refcursor, anyarray, text | text + pgtap | public | results_ne | refcursor, refcursor | text + pgtap | public | results_ne | refcursor, refcursor, text | text + pgtap | public | results_ne | refcursor, text | text + pgtap | public | results_ne | refcursor, text, text | text + pgtap | public | results_ne | text, anyarray | text + pgtap | public | results_ne | text, anyarray, text | text + pgtap | public | results_ne | text, refcursor | text + pgtap | public | results_ne | text, refcursor, text | text + pgtap | public | results_ne | text, text | text + pgtap | public | results_ne | text, text, text | text + pgtap | public | roles_are | name[] | text + pgtap | public | roles_are | name[], text | text + pgtap | public | row_eq | text, anyelement | text + pgtap | public | row_eq | text, anyelement, text | text + pgtap | public | rule_is_instead | name, name | text + pgtap | public | rule_is_instead | name, name, name | text + pgtap | public | rule_is_instead | name, name, name, text | text + pgtap | public | rule_is_instead | name, name, text | text + pgtap | public | rule_is_on | name, name, name, text | text + pgtap | public | rule_is_on | name, name, name, text, text | text + pgtap | public | rule_is_on | name, name, text | text + pgtap | public | rule_is_on | name, name, text, text | text + pgtap | public | rules_are | name, name, name[] | text + pgtap | public | rules_are | name, name, name[], text | text + pgtap | public | rules_are | name, name[] | text + pgtap | public | rules_are | name, name[], text | text + pgtap | public | runtests | | SETOF text + pgtap | public | runtests | name | SETOF text + pgtap | public | runtests | name, text | SETOF text + pgtap | public | runtests | text | SETOF text + pgtap | public | schema_owner_is | name, name | text + pgtap | public | schema_owner_is | name, name, text | text + 
pgtap | public | schema_privs_are | name, name, name[] | text + pgtap | public | schema_privs_are | name, name, name[], text | text + pgtap | public | schemas_are | name[] | text + pgtap | public | schemas_are | name[], text | text + pgtap | public | sequence_owner_is | name, name | text + pgtap | public | sequence_owner_is | name, name, name | text + pgtap | public | sequence_owner_is | name, name, name, text | text + pgtap | public | sequence_owner_is | name, name, text | text + pgtap | public | sequence_privs_are | name, name, name, name[] | text + pgtap | public | sequence_privs_are | name, name, name, name[], text | text + pgtap | public | sequence_privs_are | name, name, name[] | text + pgtap | public | sequence_privs_are | name, name, name[], text | text + pgtap | public | sequences_are | name, name[] | text + pgtap | public | sequences_are | name, name[], text | text + pgtap | public | sequences_are | name[] | text + pgtap | public | sequences_are | name[], text | text + pgtap | public | server_privs_are | name, name, name[] | text + pgtap | public | server_privs_are | name, name, name[], text | text + pgtap | public | set_eq | text, anyarray | text + pgtap | public | set_eq | text, anyarray, text | text + pgtap | public | set_eq | text, text | text + pgtap | public | set_eq | text, text, text | text + pgtap | public | set_has | text, text | text + pgtap | public | set_has | text, text, text | text + pgtap | public | set_hasnt | text, text | text + pgtap | public | set_hasnt | text, text, text | text + pgtap | public | set_ne | text, anyarray | text + pgtap | public | set_ne | text, anyarray, text | text + pgtap | public | set_ne | text, text | text + pgtap | public | set_ne | text, text, text | text + pgtap | public | skip | integer | text + pgtap | public | skip | integer, text | text + pgtap | public | skip | text | text + pgtap | public | skip | why text, how_many integer | text + pgtap | public | table_owner_is | name, name | text + pgtap | public | 
table_owner_is | name, name, name | text + pgtap | public | table_owner_is | name, name, name, text | text + pgtap | public | table_owner_is | name, name, text | text + pgtap | public | table_privs_are | name, name, name, name[] | text + pgtap | public | table_privs_are | name, name, name, name[], text | text + pgtap | public | table_privs_are | name, name, name[] | text + pgtap | public | table_privs_are | name, name, name[], text | text + pgtap | public | tables_are | name, name[] | text + pgtap | public | tables_are | name, name[], text | text + pgtap | public | tables_are | name[] | text + pgtap | public | tables_are | name[], text | text + pgtap | public | tablespace_owner_is | name, name | text + pgtap | public | tablespace_owner_is | name, name, text | text + pgtap | public | tablespace_privs_are | name, name, name[] | text + pgtap | public | tablespace_privs_are | name, name, name[], text | text + pgtap | public | tablespaces_are | name[] | text + pgtap | public | tablespaces_are | name[], text | text + pgtap | public | throws_ilike | text, text | text + pgtap | public | throws_ilike | text, text, text | text + pgtap | public | throws_imatching | text, text | text + pgtap | public | throws_imatching | text, text, text | text + pgtap | public | throws_like | text, text | text + pgtap | public | throws_like | text, text, text | text + pgtap | public | throws_matching | text, text | text + pgtap | public | throws_matching | text, text, text | text + pgtap | public | throws_ok | text | text + pgtap | public | throws_ok | text, character, text, text | text + pgtap | public | throws_ok | text, integer | text + pgtap | public | throws_ok | text, integer, text | text + pgtap | public | throws_ok | text, integer, text, text | text + pgtap | public | throws_ok | text, text | text + pgtap | public | throws_ok | text, text, text | text + pgtap | public | todo | how_many integer | SETOF boolean + pgtap | public | todo | how_many integer, why text | SETOF boolean + pgtap 
| public | todo | why text | SETOF boolean + pgtap | public | todo | why text, how_many integer | SETOF boolean + pgtap | public | todo_end | | SETOF boolean + pgtap | public | todo_start | | SETOF boolean + pgtap | public | todo_start | text | SETOF boolean + pgtap | public | trigger_is | name, name, name | text + pgtap | public | trigger_is | name, name, name, name, name | text + pgtap | public | trigger_is | name, name, name, name, name, text | text + pgtap | public | trigger_is | name, name, name, text | text + pgtap | public | triggers_are | name, name, name[] | text + pgtap | public | triggers_are | name, name, name[], text | text + pgtap | public | triggers_are | name, name[] | text + pgtap | public | triggers_are | name, name[], text | text + pgtap | public | type_owner_is | name, name | text + pgtap | public | type_owner_is | name, name, name | text + pgtap | public | type_owner_is | name, name, name, text | text + pgtap | public | type_owner_is | name, name, text | text + pgtap | public | types_are | name, name[] | text + pgtap | public | types_are | name, name[], text | text + pgtap | public | types_are | name[] | text + pgtap | public | types_are | name[], text | text + pgtap | public | unalike | anyelement, text | text + pgtap | public | unalike | anyelement, text, text | text + pgtap | public | unialike | anyelement, text | text + pgtap | public | unialike | anyelement, text, text | text + pgtap | public | users_are | name[] | text + pgtap | public | users_are | name[], text | text + pgtap | public | view_owner_is | name, name | text + pgtap | public | view_owner_is | name, name, name | text + pgtap | public | view_owner_is | name, name, name, text | text + pgtap | public | view_owner_is | name, name, text | text + pgtap | public | views_are | name, name[] | text + pgtap | public | views_are | name, name[], text | text + pgtap | public | views_are | name[] | text + pgtap | public | views_are | name[], text | text + pgtap | public | volatility_is | 
name, name, name[], text | text + pgtap | public | volatility_is | name, name, name[], text, text | text + pgtap | public | volatility_is | name, name, text | text + pgtap | public | volatility_is | name, name, text, text | text + pgtap | public | volatility_is | name, name[], text | text + pgtap | public | volatility_is | name, name[], text, text | text + pgtap | public | volatility_is | name, text | text + pgtap | public | volatility_is | name, text, text | text + plcoffee | pg_catalog | plcoffee_call_handler | | language_handler + plcoffee | pg_catalog | plcoffee_call_validator | oid | void + plcoffee | pg_catalog | plcoffee_inline_handler | internal | void + plls | pg_catalog | plls_call_handler | | language_handler + plls | pg_catalog | plls_call_validator | oid | void + plls | pg_catalog | plls_inline_handler | internal | void + plpgsql | pg_catalog | plpgsql_call_handler | | language_handler + plpgsql | pg_catalog | plpgsql_inline_handler | internal | void + plpgsql | pg_catalog | plpgsql_validator | oid | void + plpgsql_check | public | __plpgsql_show_dependency_tb | funcoid regprocedure, relid regclass, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype | TABLE(type text, oid oid, schema text, name text, params text) + plpgsql_check | public | __plpgsql_show_dependency_tb | name text, relid regclass, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype | TABLE(type text, oid oid, schema text, name text, params text) + plpgsql_check | public | plpgsql_check_function | funcoid regprocedure, relid regclass, format text, fatal_errors boolean, other_warnings boolean, performance_warnings boolean, extra_warnings boolean, security_warnings boolean, compatibility_warnings boolean, oldtable name, newtable name, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, 
anycompatiblerangetype regtype, without_warnings boolean, all_warnings boolean, use_incomment_options boolean, incomment_options_usage_warning boolean, constant_tracing boolean | SETOF text + plpgsql_check | public | plpgsql_check_function | name text, relid regclass, format text, fatal_errors boolean, other_warnings boolean, performance_warnings boolean, extra_warnings boolean, security_warnings boolean, compatibility_warnings boolean, oldtable name, newtable name, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype, without_warnings boolean, all_warnings boolean, use_incomment_options boolean, incomment_options_usage_warning boolean, constant_tracing boolean | SETOF text + plpgsql_check | public | plpgsql_check_function_tb | funcoid regprocedure, relid regclass, fatal_errors boolean, other_warnings boolean, performance_warnings boolean, extra_warnings boolean, security_warnings boolean, compatibility_warnings boolean, oldtable name, newtable name, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype, without_warnings boolean, all_warnings boolean, use_incomment_options boolean, incomment_options_usage_warning boolean, constant_tracing boolean | TABLE(functionid regproc, lineno integer, statement text, sqlstate text, message text, detail text, hint text, level text, "position" integer, query text, context text) + plpgsql_check | public | plpgsql_check_function_tb | name text, relid regclass, fatal_errors boolean, other_warnings boolean, performance_warnings boolean, extra_warnings boolean, security_warnings boolean, compatibility_warnings boolean, oldtable name, newtable name, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype, without_warnings boolean, all_warnings boolean, use_incomment_options boolean, incomment_options_usage_warning boolean, 
constant_tracing boolean | TABLE(functionid regproc, lineno integer, statement text, sqlstate text, message text, detail text, hint text, level text, "position" integer, query text, context text) + plpgsql_check | public | plpgsql_check_pragma | VARIADIC name text[] | integer + plpgsql_check | public | plpgsql_check_profiler | enable boolean | boolean + plpgsql_check | public | plpgsql_check_tracer | enable boolean, verbosity text | boolean + plpgsql_check | public | plpgsql_coverage_branches | funcoid regprocedure | double precision + plpgsql_check | public | plpgsql_coverage_branches | name text | double precision + plpgsql_check | public | plpgsql_coverage_statements | funcoid regprocedure | double precision + plpgsql_check | public | plpgsql_coverage_statements | name text | double precision + plpgsql_check | public | plpgsql_profiler_function_statements_tb | funcoid regprocedure | TABLE(stmtid integer, parent_stmtid integer, parent_note text, block_num integer, lineno integer, queryid bigint, exec_stmts bigint, exec_stmts_err bigint, total_time double precision, avg_time double precision, max_time double precision, processed_rows bigint, stmtname text) + plpgsql_check | public | plpgsql_profiler_function_statements_tb | name text | TABLE(stmtid integer, parent_stmtid integer, parent_note text, block_num integer, lineno integer, queryid bigint, exec_stmts bigint, exec_stmts_err bigint, total_time double precision, avg_time double precision, max_time double precision, processed_rows bigint, stmtname text) + plpgsql_check | public | plpgsql_profiler_function_tb | funcoid regprocedure | TABLE(lineno integer, stmt_lineno integer, queryids bigint[], cmds_on_row integer, exec_stmts bigint, exec_stmts_err bigint, total_time double precision, avg_time double precision, max_time double precision[], processed_rows bigint[], source text) + plpgsql_check | public | plpgsql_profiler_function_tb | name text | TABLE(lineno integer, stmt_lineno integer, queryids bigint[], 
cmds_on_row integer, exec_stmts bigint, exec_stmts_err bigint, total_time double precision, avg_time double precision, max_time double precision[], processed_rows bigint[], source text) + plpgsql_check | public | plpgsql_profiler_functions_all | | TABLE(funcoid regprocedure, exec_count bigint, exec_stmts_err bigint, total_time double precision, avg_time double precision, stddev_time double precision, min_time double precision, max_time double precision) + plpgsql_check | public | plpgsql_profiler_install_fake_queryid_hook | | void + plpgsql_check | public | plpgsql_profiler_remove_fake_queryid_hook | | void + plpgsql_check | public | plpgsql_profiler_reset | funcoid regprocedure | void + plpgsql_check | public | plpgsql_profiler_reset_all | | void + plpgsql_check | public | plpgsql_show_dependency_tb | fnname text, relid regclass, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype | TABLE(type text, oid oid, schema text, name text, params text) + plpgsql_check | public | plpgsql_show_dependency_tb | funcoid regprocedure, relid regclass, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype | TABLE(type text, oid oid, schema text, name text, params text) + plv8 | pg_catalog | plv8_call_handler | | language_handler + plv8 | pg_catalog | plv8_call_validator | oid | void + plv8 | pg_catalog | plv8_info | | json + plv8 | pg_catalog | plv8_inline_handler | internal | void + plv8 | pg_catalog | plv8_reset | | void + plv8 | pg_catalog | plv8_version | | text + postgis | public | _postgis_deprecate | oldname text, newname text, version text | void + postgis | public | _postgis_index_extent | tbl regclass, col text | box2d + postgis | public | _postgis_join_selectivity | regclass, text, regclass, text, text | double precision + postgis | public | _postgis_pgsql_version | | text + postgis | public | _postgis_scripts_pgsql_version | | 
text + postgis | public | _postgis_selectivity | tbl regclass, att_name text, geom geometry, mode text | double precision + postgis | public | _postgis_stats | tbl regclass, att_name text, text | text + postgis | public | _st_3ddfullywithin | geom1 geometry, geom2 geometry, double precision | boolean + postgis | public | _st_3ddwithin | geom1 geometry, geom2 geometry, double precision | boolean + postgis | public | _st_3dintersects | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_asgml | integer, geometry, integer, integer, text, text | text + postgis | public | _st_asx3d | integer, geometry, integer, integer, text | text + postgis | public | _st_bestsrid | geography | integer + postgis | public | _st_bestsrid | geography, geography | integer + postgis | public | _st_contains | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_containsproperly | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_coveredby | geog1 geography, geog2 geography | boolean + postgis | public | _st_coveredby | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_covers | geog1 geography, geog2 geography | boolean + postgis | public | _st_covers | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_crosses | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_dfullywithin | geom1 geometry, geom2 geometry, double precision | boolean + postgis | public | _st_distancetree | geography, geography | double precision + postgis | public | _st_distancetree | geography, geography, double precision, boolean | double precision + postgis | public | _st_distanceuncached | geography, geography | double precision + postgis | public | _st_distanceuncached | geography, geography, boolean | double precision + postgis | public | _st_distanceuncached | geography, geography, double precision, boolean | double precision + postgis | public | _st_dwithin | geog1 geography, geog2 geography, tolerance double precision, 
use_spheroid boolean | boolean + postgis | public | _st_dwithin | geom1 geometry, geom2 geometry, double precision | boolean + postgis | public | _st_dwithinuncached | geography, geography, double precision | boolean + postgis | public | _st_dwithinuncached | geography, geography, double precision, boolean | boolean + postgis | public | _st_equals | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_expand | geography, double precision | geography + postgis | public | _st_geomfromgml | text, integer | geometry + postgis | public | _st_intersects | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_linecrossingdirection | line1 geometry, line2 geometry | integer + postgis | public | _st_longestline | geom1 geometry, geom2 geometry | geometry + postgis | public | _st_maxdistance | geom1 geometry, geom2 geometry | double precision + postgis | public | _st_orderingequals | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_overlaps | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_pointoutside | geography | geography + postgis | public | _st_sortablehash | geom geometry | bigint + postgis | public | _st_touches | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_voronoi | g1 geometry, clip geometry, tolerance double precision, return_polygons boolean | geometry + postgis | public | _st_within | geom1 geometry, geom2 geometry | boolean + postgis | public | addauth | text | boolean + postgis | public | addgeometrycolumn | catalog_name character varying, schema_name character varying, table_name character varying, column_name character varying, new_srid_in integer, new_type character varying, new_dim integer, use_typmod boolean | text + postgis | public | addgeometrycolumn | schema_name character varying, table_name character varying, column_name character varying, new_srid integer, new_type character varying, new_dim integer, use_typmod boolean | text + postgis | public | addgeometrycolumn | 
table_name character varying, column_name character varying, new_srid integer, new_type character varying, new_dim integer, use_typmod boolean | text + postgis | public | box | box3d | box + postgis | public | box | geometry | box + postgis | public | box2d | box3d | box2d + postgis | public | box2d | geometry | box2d + postgis | public | box2d_in | cstring | box2d + postgis | public | box2d_out | box2d | cstring + postgis | public | box2df_in | cstring | box2df + postgis | public | box2df_out | box2df | cstring + postgis | public | box3d | box2d | box3d + postgis | public | box3d | geometry | box3d + postgis | public | box3d_in | cstring | box3d + postgis | public | box3d_out | box3d | cstring + postgis | public | box3dtobox | box3d | box + postgis | public | bytea | geography | bytea + postgis | public | bytea | geometry | bytea + postgis | public | checkauth | text, text | integer + postgis | public | checkauth | text, text, text | integer + postgis | public | checkauthtrigger | | trigger + postgis | public | contains_2d | box2df, box2df | boolean + postgis | public | contains_2d | box2df, geometry | boolean + postgis | public | contains_2d | geometry, box2df | boolean + postgis | public | disablelongtransactions | | text + postgis | public | dropgeometrycolumn | catalog_name character varying, schema_name character varying, table_name character varying, column_name character varying | text + postgis | public | dropgeometrycolumn | schema_name character varying, table_name character varying, column_name character varying | text + postgis | public | dropgeometrycolumn | table_name character varying, column_name character varying | text + postgis | public | dropgeometrytable | catalog_name character varying, schema_name character varying, table_name character varying | text + postgis | public | dropgeometrytable | schema_name character varying, table_name character varying | text + postgis | public | dropgeometrytable | table_name character varying | text + 
postgis | public | enablelongtransactions | | text + postgis | public | equals | geom1 geometry, geom2 geometry | boolean + postgis | public | find_srid | character varying, character varying, character varying | integer + postgis | public | geog_brin_inclusion_add_value | internal, internal, internal, internal | boolean + postgis | public | geography | bytea | geography + postgis | public | geography | geography, integer, boolean | geography + postgis | public | geography | geometry | geography + postgis | public | geography_analyze | internal | boolean + postgis | public | geography_cmp | geography, geography | integer + postgis | public | geography_distance_knn | geography, geography | double precision + postgis | public | geography_eq | geography, geography | boolean + postgis | public | geography_ge | geography, geography | boolean + postgis | public | geography_gist_compress | internal | internal + postgis | public | geography_gist_consistent | internal, geography, integer | boolean + postgis | public | geography_gist_decompress | internal | internal + postgis | public | geography_gist_distance | internal, geography, integer | double precision + postgis | public | geography_gist_penalty | internal, internal, internal | internal + postgis | public | geography_gist_picksplit | internal, internal | internal + postgis | public | geography_gist_same | box2d, box2d, internal | internal + postgis | public | geography_gist_union | bytea, internal | internal + postgis | public | geography_gt | geography, geography | boolean + postgis | public | geography_in | cstring, oid, integer | geography + postgis | public | geography_le | geography, geography | boolean + postgis | public | geography_lt | geography, geography | boolean + postgis | public | geography_out | geography | cstring + postgis | public | geography_overlaps | geography, geography | boolean + postgis | public | geography_recv | internal, oid, integer | geography + postgis | public | geography_send | 
geography | bytea + postgis | public | geography_spgist_choose_nd | internal, internal | void + postgis | public | geography_spgist_compress_nd | internal | internal + postgis | public | geography_spgist_config_nd | internal, internal | void + postgis | public | geography_spgist_inner_consistent_nd | internal, internal | void + postgis | public | geography_spgist_leaf_consistent_nd | internal, internal | boolean + postgis | public | geography_spgist_picksplit_nd | internal, internal | void + postgis | public | geography_typmod_in | cstring[] | integer + postgis | public | geography_typmod_out | integer | cstring + postgis | public | geom2d_brin_inclusion_add_value | internal, internal, internal, internal | boolean + postgis | public | geom3d_brin_inclusion_add_value | internal, internal, internal, internal | boolean + postgis | public | geom4d_brin_inclusion_add_value | internal, internal, internal, internal | boolean + postgis | public | geometry | box2d | geometry + postgis | public | geometry | box3d | geometry + postgis | public | geometry | bytea | geometry + postgis | public | geometry | geography | geometry + postgis | public | geometry | geometry, integer, boolean | geometry + postgis | public | geometry | path | geometry + postgis | public | geometry | point | geometry + postgis | public | geometry | polygon | geometry + postgis | public | geometry | text | geometry + postgis | public | geometry_above | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_analyze | internal | boolean + postgis | public | geometry_below | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_cmp | geom1 geometry, geom2 geometry | integer + postgis | public | geometry_contained_3d | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_contains | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_contains_3d | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_contains_nd | geometry, 
geometry | boolean + postgis | public | geometry_distance_box | geom1 geometry, geom2 geometry | double precision + postgis | public | geometry_distance_centroid | geom1 geometry, geom2 geometry | double precision + postgis | public | geometry_distance_centroid_nd | geometry, geometry | double precision + postgis | public | geometry_distance_cpa | geometry, geometry | double precision + postgis | public | geometry_eq | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_ge | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_gist_compress_2d | internal | internal + postgis | public | geometry_gist_compress_nd | internal | internal + postgis | public | geometry_gist_consistent_2d | internal, geometry, integer | boolean + postgis | public | geometry_gist_consistent_nd | internal, geometry, integer | boolean + postgis | public | geometry_gist_decompress_2d | internal | internal + postgis | public | geometry_gist_decompress_nd | internal | internal + postgis | public | geometry_gist_distance_2d | internal, geometry, integer | double precision + postgis | public | geometry_gist_distance_nd | internal, geometry, integer | double precision + postgis | public | geometry_gist_penalty_2d | internal, internal, internal | internal + postgis | public | geometry_gist_penalty_nd | internal, internal, internal | internal + postgis | public | geometry_gist_picksplit_2d | internal, internal | internal + postgis | public | geometry_gist_picksplit_nd | internal, internal | internal + postgis | public | geometry_gist_same_2d | geom1 geometry, geom2 geometry, internal | internal + postgis | public | geometry_gist_same_nd | geometry, geometry, internal | internal + postgis | public | geometry_gist_sortsupport_2d | internal | void + postgis | public | geometry_gist_union_2d | bytea, internal | internal + postgis | public | geometry_gist_union_nd | bytea, internal | internal + postgis | public | geometry_gt | geom1 geometry, geom2 geometry | boolean + 
postgis | public | geometry_hash | geometry | integer + postgis | public | geometry_in | cstring | geometry + postgis | public | geometry_le | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_left | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_lt | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_out | geometry | cstring + postgis | public | geometry_overabove | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_overbelow | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_overlaps | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_overlaps_3d | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_overlaps_nd | geometry, geometry | boolean + postgis | public | geometry_overleft | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_overright | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_recv | internal | geometry + postgis | public | geometry_right | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_same | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_same_3d | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_same_nd | geometry, geometry | boolean + postgis | public | geometry_send | geometry | bytea + postgis | public | geometry_sortsupport | internal | void + postgis | public | geometry_spgist_choose_2d | internal, internal | void + postgis | public | geometry_spgist_choose_3d | internal, internal | void + postgis | public | geometry_spgist_choose_nd | internal, internal | void + postgis | public | geometry_spgist_compress_2d | internal | internal + postgis | public | geometry_spgist_compress_3d | internal | internal + postgis | public | geometry_spgist_compress_nd | internal | internal + postgis | public | geometry_spgist_config_2d | internal, internal | void + postgis | public | 
geometry_spgist_config_3d | internal, internal | void + postgis | public | geometry_spgist_config_nd | internal, internal | void + postgis | public | geometry_spgist_inner_consistent_2d | internal, internal | void + postgis | public | geometry_spgist_inner_consistent_3d | internal, internal | void + postgis | public | geometry_spgist_inner_consistent_nd | internal, internal | void + postgis | public | geometry_spgist_leaf_consistent_2d | internal, internal | boolean + postgis | public | geometry_spgist_leaf_consistent_3d | internal, internal | boolean + postgis | public | geometry_spgist_leaf_consistent_nd | internal, internal | boolean + postgis | public | geometry_spgist_picksplit_2d | internal, internal | void + postgis | public | geometry_spgist_picksplit_3d | internal, internal | void + postgis | public | geometry_spgist_picksplit_nd | internal, internal | void + postgis | public | geometry_typmod_in | cstring[] | integer + postgis | public | geometry_typmod_out | integer | cstring + postgis | public | geometry_within | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_within_nd | geometry, geometry | boolean + postgis | public | geometrytype | geography | text + postgis | public | geometrytype | geometry | text + postgis | public | geomfromewkb | bytea | geometry + postgis | public | geomfromewkt | text | geometry + postgis | public | get_proj4_from_srid | integer | text + postgis | public | gettransactionid | | xid + postgis | public | gidx_in | cstring | gidx + postgis | public | gidx_out | gidx | cstring + postgis | public | gserialized_gist_joinsel_2d | internal, oid, internal, smallint | double precision + postgis | public | gserialized_gist_joinsel_nd | internal, oid, internal, smallint | double precision + postgis | public | gserialized_gist_sel_2d | internal, oid, internal, integer | double precision + postgis | public | gserialized_gist_sel_nd | internal, oid, internal, integer | double precision + postgis | public | 
is_contained_2d | box2df, box2df | boolean + postgis | public | is_contained_2d | box2df, geometry | boolean + postgis | public | is_contained_2d | geometry, box2df | boolean + postgis | public | json | geometry | json + postgis | public | jsonb | geometry | jsonb + postgis | public | lockrow | text, text, text | integer + postgis | public | lockrow | text, text, text, text | integer + postgis | public | lockrow | text, text, text, text, timestamp without time zone | integer + postgis | public | lockrow | text, text, text, timestamp without time zone | integer + postgis | public | longtransactionsenabled | | boolean + postgis | public | overlaps_2d | box2df, box2df | boolean + postgis | public | overlaps_2d | box2df, geometry | boolean + postgis | public | overlaps_2d | geometry, box2df | boolean + postgis | public | overlaps_geog | geography, gidx | boolean + postgis | public | overlaps_geog | gidx, geography | boolean + postgis | public | overlaps_geog | gidx, gidx | boolean + postgis | public | overlaps_nd | geometry, gidx | boolean + postgis | public | overlaps_nd | gidx, geometry | boolean + postgis | public | overlaps_nd | gidx, gidx | boolean + postgis | public | path | geometry | path + postgis | public | pgis_asflatgeobuf_finalfn | internal | bytea + postgis | public | pgis_asflatgeobuf_transfn | internal, anyelement | internal + postgis | public | pgis_asflatgeobuf_transfn | internal, anyelement, boolean | internal + postgis | public | pgis_asflatgeobuf_transfn | internal, anyelement, boolean, text | internal + postgis | public | pgis_asgeobuf_finalfn | internal | bytea + postgis | public | pgis_asgeobuf_transfn | internal, anyelement | internal + postgis | public | pgis_asgeobuf_transfn | internal, anyelement, text | internal + postgis | public | pgis_asmvt_combinefn | internal, internal | internal + postgis | public | pgis_asmvt_deserialfn | bytea, internal | internal + postgis | public | pgis_asmvt_finalfn | internal | bytea + postgis | public | 
pgis_asmvt_serialfn | internal | bytea + postgis | public | pgis_asmvt_transfn | internal, anyelement | internal + postgis | public | pgis_asmvt_transfn | internal, anyelement, text | internal + postgis | public | pgis_asmvt_transfn | internal, anyelement, text, integer | internal + postgis | public | pgis_asmvt_transfn | internal, anyelement, text, integer, text | internal + postgis | public | pgis_asmvt_transfn | internal, anyelement, text, integer, text, text | internal + postgis | public | pgis_geometry_accum_transfn | internal, geometry | internal + postgis | public | pgis_geometry_accum_transfn | internal, geometry, double precision | internal + postgis | public | pgis_geometry_accum_transfn | internal, geometry, double precision, integer | internal + postgis | public | pgis_geometry_clusterintersecting_finalfn | internal | geometry[] + postgis | public | pgis_geometry_clusterwithin_finalfn | internal | geometry[] + postgis | public | pgis_geometry_collect_finalfn | internal | geometry + postgis | public | pgis_geometry_makeline_finalfn | internal | geometry + postgis | public | pgis_geometry_polygonize_finalfn | internal | geometry + postgis | public | pgis_geometry_union_parallel_combinefn | internal, internal | internal + postgis | public | pgis_geometry_union_parallel_deserialfn | bytea, internal | internal + postgis | public | pgis_geometry_union_parallel_finalfn | internal | geometry + postgis | public | pgis_geometry_union_parallel_serialfn | internal | bytea + postgis | public | pgis_geometry_union_parallel_transfn | internal, geometry | internal + postgis | public | pgis_geometry_union_parallel_transfn | internal, geometry, double precision | internal + postgis | public | point | geometry | point + postgis | public | polygon | geometry | polygon + postgis | public | populate_geometry_columns | tbl_oid oid, use_typmod boolean | integer + postgis | public | populate_geometry_columns | use_typmod boolean | text + postgis | public | postgis_addbbox | 
geometry | geometry + postgis | public | postgis_cache_bbox | | trigger + postgis | public | postgis_constraint_dims | geomschema text, geomtable text, geomcolumn text | integer + postgis | public | postgis_constraint_srid | geomschema text, geomtable text, geomcolumn text | integer + postgis | public | postgis_constraint_type | geomschema text, geomtable text, geomcolumn text | character varying + postgis | public | postgis_dropbbox | geometry | geometry + postgis | public | postgis_extensions_upgrade | | text + postgis | public | postgis_full_version | | text + postgis | public | postgis_geos_noop | geometry | geometry + postgis | public | postgis_geos_version | | text + postgis | public | postgis_getbbox | geometry | box2d + postgis | public | postgis_hasbbox | geometry | boolean + postgis | public | postgis_index_supportfn | internal | internal + postgis | public | postgis_lib_build_date | | text + postgis | public | postgis_lib_revision | | text + postgis | public | postgis_lib_version | | text + postgis | public | postgis_libjson_version | | text + postgis | public | postgis_liblwgeom_version | | text + postgis | public | postgis_libprotobuf_version | | text + postgis | public | postgis_libxml_version | | text + postgis | public | postgis_noop | geometry | geometry + postgis | public | postgis_proj_version | | text + postgis | public | postgis_scripts_build_date | | text + postgis | public | postgis_scripts_installed | | text + postgis | public | postgis_scripts_released | | text + postgis | public | postgis_svn_version | | text + postgis | public | postgis_transform_geometry | geom geometry, text, text, integer | geometry + postgis | public | postgis_type_name | geomname character varying, coord_dimension integer, use_new_name boolean | character varying + postgis | public | postgis_typmod_dims | integer | integer + postgis | public | postgis_typmod_srid | integer | integer + postgis | public | postgis_typmod_type | integer | text + postgis | public | 
postgis_version | | text + postgis | public | postgis_wagyu_version | | text + postgis | public | spheroid_in | cstring | spheroid + postgis | public | spheroid_out | spheroid | cstring + postgis | public | st_3dclosestpoint | geom1 geometry, geom2 geometry | geometry + postgis | public | st_3ddfullywithin | geom1 geometry, geom2 geometry, double precision | boolean + postgis | public | st_3ddistance | geom1 geometry, geom2 geometry | double precision + postgis | public | st_3ddwithin | geom1 geometry, geom2 geometry, double precision | boolean + postgis | public | st_3dextent | geometry | box3d + postgis | public | st_3dintersects | geom1 geometry, geom2 geometry | boolean + postgis | public | st_3dlength | geometry | double precision + postgis | public | st_3dlineinterpolatepoint | geometry, double precision | geometry + postgis | public | st_3dlongestline | geom1 geometry, geom2 geometry | geometry + postgis | public | st_3dmakebox | geom1 geometry, geom2 geometry | box3d + postgis | public | st_3dmaxdistance | geom1 geometry, geom2 geometry | double precision + postgis | public | st_3dperimeter | geometry | double precision + postgis | public | st_3dshortestline | geom1 geometry, geom2 geometry | geometry + postgis | public | st_addmeasure | geometry, double precision, double precision | geometry + postgis | public | st_addpoint | geom1 geometry, geom2 geometry | geometry + postgis | public | st_addpoint | geom1 geometry, geom2 geometry, integer | geometry + postgis | public | st_affine | geometry, double precision, double precision, double precision, double precision, double precision, double precision | geometry + postgis | public | st_affine | geometry, double precision, double precision, double precision, double precision, double precision, double precision, double precision, double precision, double precision, double precision, double precision, double precision | geometry + postgis | public | st_angle | line1 geometry, line2 geometry | double precision + 
postgis | public | st_angle | pt1 geometry, pt2 geometry, pt3 geometry, pt4 geometry | double precision + postgis | public | st_area | geog geography, use_spheroid boolean | double precision + postgis | public | st_area | geometry | double precision + postgis | public | st_area | text | double precision + postgis | public | st_area2d | geometry | double precision + postgis | public | st_asbinary | geography | bytea + postgis | public | st_asbinary | geography, text | bytea + postgis | public | st_asbinary | geometry | bytea + postgis | public | st_asbinary | geometry, text | bytea + postgis | public | st_asencodedpolyline | geom geometry, nprecision integer | text + postgis | public | st_asewkb | geometry | bytea + postgis | public | st_asewkb | geometry, text | bytea + postgis | public | st_asewkt | geography | text + postgis | public | st_asewkt | geography, integer | text + postgis | public | st_asewkt | geometry | text + postgis | public | st_asewkt | geometry, integer | text + postgis | public | st_asewkt | text | text + postgis | public | st_asflatgeobuf | anyelement | bytea + postgis | public | st_asflatgeobuf | anyelement, boolean | bytea + postgis | public | st_asflatgeobuf | anyelement, boolean, text | bytea + postgis | public | st_asgeobuf | anyelement | bytea + postgis | public | st_asgeobuf | anyelement, text | bytea + postgis | public | st_asgeojson | geog geography, maxdecimaldigits integer, options integer | text + postgis | public | st_asgeojson | geom geometry, maxdecimaldigits integer, options integer | text + postgis | public | st_asgeojson | r record, geom_column text, maxdecimaldigits integer, pretty_bool boolean | text + postgis | public | st_asgeojson | text | text + postgis | public | st_asgml | geog geography, maxdecimaldigits integer, options integer, nprefix text, id text | text + postgis | public | st_asgml | geom geometry, maxdecimaldigits integer, options integer | text + postgis | public | st_asgml | text | text + postgis | public | 
st_asgml | version integer, geog geography, maxdecimaldigits integer, options integer, nprefix text, id text | text + postgis | public | st_asgml | version integer, geom geometry, maxdecimaldigits integer, options integer, nprefix text, id text | text + postgis | public | st_ashexewkb | geometry | text + postgis | public | st_ashexewkb | geometry, text | text + postgis | public | st_askml | geog geography, maxdecimaldigits integer, nprefix text | text + postgis | public | st_askml | geom geometry, maxdecimaldigits integer, nprefix text | text + postgis | public | st_askml | text | text + postgis | public | st_aslatlontext | geom geometry, tmpl text | text + postgis | public | st_asmarc21 | geom geometry, format text | text + postgis | public | st_asmvt | anyelement | bytea + postgis | public | st_asmvt | anyelement, text | bytea + postgis | public | st_asmvt | anyelement, text, integer | bytea + postgis | public | st_asmvt | anyelement, text, integer, text | bytea + postgis | public | st_asmvt | anyelement, text, integer, text, text | bytea + postgis | public | st_asmvtgeom | geom geometry, bounds box2d, extent integer, buffer integer, clip_geom boolean | geometry + postgis | public | st_assvg | geog geography, rel integer, maxdecimaldigits integer | text + postgis | public | st_assvg | geom geometry, rel integer, maxdecimaldigits integer | text + postgis | public | st_assvg | text | text + postgis | public | st_astext | geography | text + postgis | public | st_astext | geography, integer | text + postgis | public | st_astext | geometry | text + postgis | public | st_astext | geometry, integer | text + postgis | public | st_astext | text | text + postgis | public | st_astwkb | geom geometry, prec integer, prec_z integer, prec_m integer, with_sizes boolean, with_boxes boolean | bytea + postgis | public | st_astwkb | geom geometry[], ids bigint[], prec integer, prec_z integer, prec_m integer, with_sizes boolean, with_boxes boolean | bytea + postgis | public | 
st_asx3d | geom geometry, maxdecimaldigits integer, options integer | text + postgis | public | st_azimuth | geog1 geography, geog2 geography | double precision + postgis | public | st_azimuth | geom1 geometry, geom2 geometry | double precision + postgis | public | st_bdmpolyfromtext | text, integer | geometry + postgis | public | st_bdpolyfromtext | text, integer | geometry + postgis | public | st_boundary | geometry | geometry + postgis | public | st_boundingdiagonal | geom geometry, fits boolean | geometry + postgis | public | st_box2dfromgeohash | text, integer | box2d + postgis | public | st_buffer | geography, double precision | geography + postgis | public | st_buffer | geography, double precision, integer | geography + postgis | public | st_buffer | geography, double precision, text | geography + postgis | public | st_buffer | geom geometry, radius double precision, options text | geometry + postgis | public | st_buffer | geom geometry, radius double precision, quadsegs integer | geometry + postgis | public | st_buffer | text, double precision | geometry + postgis | public | st_buffer | text, double precision, integer | geometry + postgis | public | st_buffer | text, double precision, text | geometry + postgis | public | st_buildarea | geometry | geometry + postgis | public | st_centroid | geography, use_spheroid boolean | geography + postgis | public | st_centroid | geometry | geometry + postgis | public | st_centroid | text | geometry + postgis | public | st_chaikinsmoothing | geometry, integer, boolean | geometry + postgis | public | st_cleangeometry | geometry | geometry + postgis | public | st_clipbybox2d | geom geometry, box box2d | geometry + postgis | public | st_closestpoint | geom1 geometry, geom2 geometry | geometry + postgis | public | st_closestpointofapproach | geometry, geometry | double precision + postgis | public | st_clusterdbscan | geometry, eps double precision, minpoints integer | integer + postgis | public | st_clusterintersecting | 
geometry | geometry[] + postgis | public | st_clusterintersecting | geometry[] | geometry[] + postgis | public | st_clusterkmeans | geom geometry, k integer, max_radius double precision | integer + postgis | public | st_clusterwithin | geometry, double precision | geometry[] + postgis | public | st_clusterwithin | geometry[], double precision | geometry[] + postgis | public | st_collect | geom1 geometry, geom2 geometry | geometry + postgis | public | st_collect | geometry | geometry + postgis | public | st_collect | geometry[] | geometry + postgis | public | st_collectionextract | geometry | geometry + postgis | public | st_collectionextract | geometry, integer | geometry + postgis | public | st_collectionhomogenize | geometry | geometry + postgis | public | st_combinebbox | box2d, geometry | box2d + postgis | public | st_combinebbox | box3d, box3d | box3d + postgis | public | st_combinebbox | box3d, geometry | box3d + postgis | public | st_concavehull | param_geom geometry, param_pctconvex double precision, param_allow_holes boolean | geometry + postgis | public | st_contains | geom1 geometry, geom2 geometry | boolean + postgis | public | st_containsproperly | geom1 geometry, geom2 geometry | boolean + postgis | public | st_convexhull | geometry | geometry + postgis | public | st_coorddim | geometry geometry | smallint + postgis | public | st_coveredby | geog1 geography, geog2 geography | boolean + postgis | public | st_coveredby | geom1 geometry, geom2 geometry | boolean + postgis | public | st_coveredby | text, text | boolean + postgis | public | st_covers | geog1 geography, geog2 geography | boolean + postgis | public | st_covers | geom1 geometry, geom2 geometry | boolean + postgis | public | st_covers | text, text | boolean + postgis | public | st_cpawithin | geometry, geometry, double precision | boolean + postgis | public | st_crosses | geom1 geometry, geom2 geometry | boolean + postgis | public | st_curvetoline | geom geometry, tol double precision, toltype 
integer, flags integer | geometry + postgis | public | st_delaunaytriangles | g1 geometry, tolerance double precision, flags integer | geometry + postgis | public | st_dfullywithin | geom1 geometry, geom2 geometry, double precision | boolean + postgis | public | st_difference | geom1 geometry, geom2 geometry, gridsize double precision | geometry + postgis | public | st_dimension | geometry | integer + postgis | public | st_disjoint | geom1 geometry, geom2 geometry | boolean + postgis | public | st_distance | geog1 geography, geog2 geography, use_spheroid boolean | double precision + postgis | public | st_distance | geom1 geometry, geom2 geometry | double precision + postgis | public | st_distance | text, text | double precision + postgis | public | st_distancecpa | geometry, geometry | double precision + postgis | public | st_distancesphere | geom1 geometry, geom2 geometry | double precision + postgis | public | st_distancesphere | geom1 geometry, geom2 geometry, radius double precision | double precision + postgis | public | st_distancespheroid | geom1 geometry, geom2 geometry | double precision + postgis | public | st_distancespheroid | geom1 geometry, geom2 geometry, spheroid | double precision + postgis | public | st_dump | geometry | SETOF geometry_dump + postgis | public | st_dumppoints | geometry | SETOF geometry_dump + postgis | public | st_dumprings | geometry | SETOF geometry_dump + postgis | public | st_dumpsegments | geometry | SETOF geometry_dump + postgis | public | st_dwithin | geog1 geography, geog2 geography, tolerance double precision, use_spheroid boolean | boolean + postgis | public | st_dwithin | geom1 geometry, geom2 geometry, double precision | boolean + postgis | public | st_dwithin | text, text, double precision | boolean + postgis | public | st_endpoint | geometry | geometry + postgis | public | st_envelope | geometry | geometry + postgis | public | st_equals | geom1 geometry, geom2 geometry | boolean + postgis | public | 
st_estimatedextent | text, text | box2d + postgis | public | st_estimatedextent | text, text, text | box2d + postgis | public | st_estimatedextent | text, text, text, boolean | box2d + postgis | public | st_expand | box box2d, dx double precision, dy double precision | box2d + postgis | public | st_expand | box box3d, dx double precision, dy double precision, dz double precision | box3d + postgis | public | st_expand | box2d, double precision | box2d + postgis | public | st_expand | box3d, double precision | box3d + postgis | public | st_expand | geom geometry, dx double precision, dy double precision, dz double precision, dm double precision | geometry + postgis | public | st_expand | geometry, double precision | geometry + postgis | public | st_extent | geometry | box2d + postgis | public | st_exteriorring | geometry | geometry + postgis | public | st_filterbym | geometry, double precision, double precision, boolean | geometry + postgis | public | st_findextent | text, text | box2d + postgis | public | st_findextent | text, text, text | box2d + postgis | public | st_flipcoordinates | geometry | geometry + postgis | public | st_force2d | geometry | geometry + postgis | public | st_force3d | geom geometry, zvalue double precision | geometry + postgis | public | st_force3dm | geom geometry, mvalue double precision | geometry + postgis | public | st_force3dz | geom geometry, zvalue double precision | geometry + postgis | public | st_force4d | geom geometry, zvalue double precision, mvalue double precision | geometry + postgis | public | st_forcecollection | geometry | geometry + postgis | public | st_forcecurve | geometry | geometry + postgis | public | st_forcepolygonccw | geometry | geometry + postgis | public | st_forcepolygoncw | geometry | geometry + postgis | public | st_forcerhr | geometry | geometry + postgis | public | st_forcesfs | geometry | geometry + postgis | public | st_forcesfs | geometry, version text | geometry + postgis | public | 
st_frechetdistance | geom1 geometry, geom2 geometry, double precision | double precision + postgis | public | st_fromflatgeobuf | anyelement, bytea | SETOF anyelement + postgis | public | st_fromflatgeobuftotable | text, text, bytea | void + postgis | public | st_generatepoints | area geometry, npoints integer | geometry + postgis | public | st_generatepoints | area geometry, npoints integer, seed integer | geometry + postgis | public | st_geogfromtext | text | geography + postgis | public | st_geogfromwkb | bytea | geography + postgis | public | st_geographyfromtext | text | geography + postgis | public | st_geohash | geog geography, maxchars integer | text + postgis | public | st_geohash | geom geometry, maxchars integer | text + postgis | public | st_geomcollfromtext | text | geometry + postgis | public | st_geomcollfromtext | text, integer | geometry + postgis | public | st_geomcollfromwkb | bytea | geometry + postgis | public | st_geomcollfromwkb | bytea, integer | geometry + postgis | public | st_geometricmedian | g geometry, tolerance double precision, max_iter integer, fail_if_not_converged boolean | geometry + postgis | public | st_geometryfromtext | text | geometry + postgis | public | st_geometryfromtext | text, integer | geometry + postgis | public | st_geometryn | geometry, integer | geometry + postgis | public | st_geometrytype | geometry | text + postgis | public | st_geomfromewkb | bytea | geometry + postgis | public | st_geomfromewkt | text | geometry + postgis | public | st_geomfromgeohash | text, integer | geometry + postgis | public | st_geomfromgeojson | json | geometry + postgis | public | st_geomfromgeojson | jsonb | geometry + postgis | public | st_geomfromgeojson | text | geometry + postgis | public | st_geomfromgml | text | geometry + postgis | public | st_geomfromgml | text, integer | geometry + postgis | public | st_geomfromkml | text | geometry + postgis | public | st_geomfrommarc21 | marc21xml text | geometry + postgis | public | 
st_geomfromtext | text | geometry + postgis | public | st_geomfromtext | text, integer | geometry + postgis | public | st_geomfromtwkb | bytea | geometry + postgis | public | st_geomfromwkb | bytea | geometry + postgis | public | st_geomfromwkb | bytea, integer | geometry + postgis | public | st_gmltosql | text | geometry + postgis | public | st_gmltosql | text, integer | geometry + postgis | public | st_hasarc | geometry geometry | boolean + postgis | public | st_hausdorffdistance | geom1 geometry, geom2 geometry | double precision + postgis | public | st_hausdorffdistance | geom1 geometry, geom2 geometry, double precision | double precision + postgis | public | st_hexagon | size double precision, cell_i integer, cell_j integer, origin geometry | geometry + postgis | public | st_hexagongrid | size double precision, bounds geometry, OUT geom geometry, OUT i integer, OUT j integer | SETOF record + postgis | public | st_interiorringn | geometry, integer | geometry + postgis | public | st_interpolatepoint | line geometry, point geometry | double precision + postgis | public | st_intersection | geography, geography | geography + postgis | public | st_intersection | geom1 geometry, geom2 geometry, gridsize double precision | geometry + postgis | public | st_intersection | text, text | geometry + postgis | public | st_intersects | geog1 geography, geog2 geography | boolean + postgis | public | st_intersects | geom1 geometry, geom2 geometry | boolean + postgis | public | st_intersects | text, text | boolean + postgis | public | st_isclosed | geometry | boolean + postgis | public | st_iscollection | geometry | boolean + postgis | public | st_isempty | geometry | boolean + postgis | public | st_ispolygonccw | geometry | boolean + postgis | public | st_ispolygoncw | geometry | boolean + postgis | public | st_isring | geometry | boolean + postgis | public | st_issimple | geometry | boolean + postgis | public | st_isvalid | geometry | boolean + postgis | public | st_isvalid | 
geometry, integer | boolean + postgis | public | st_isvaliddetail | geom geometry, flags integer | valid_detail + postgis | public | st_isvalidreason | geometry | text + postgis | public | st_isvalidreason | geometry, integer | text + postgis | public | st_isvalidtrajectory | geometry | boolean + postgis | public | st_length | geog geography, use_spheroid boolean | double precision + postgis | public | st_length | geometry | double precision + postgis | public | st_length | text | double precision + postgis | public | st_length2d | geometry | double precision + postgis | public | st_length2dspheroid | geometry, spheroid | double precision + postgis | public | st_lengthspheroid | geometry, spheroid | double precision + postgis | public | st_letters | letters text, font json | geometry + postgis | public | st_linecrossingdirection | line1 geometry, line2 geometry | integer + postgis | public | st_linefromencodedpolyline | txtin text, nprecision integer | geometry + postgis | public | st_linefrommultipoint | geometry | geometry + postgis | public | st_linefromtext | text | geometry + postgis | public | st_linefromtext | text, integer | geometry + postgis | public | st_linefromwkb | bytea | geometry + postgis | public | st_linefromwkb | bytea, integer | geometry + postgis | public | st_lineinterpolatepoint | geometry, double precision | geometry + postgis | public | st_lineinterpolatepoints | geometry, double precision, repeat boolean | geometry + postgis | public | st_linelocatepoint | geom1 geometry, geom2 geometry | double precision + postgis | public | st_linemerge | geometry | geometry + postgis | public | st_linemerge | geometry, boolean | geometry + postgis | public | st_linestringfromwkb | bytea | geometry + postgis | public | st_linestringfromwkb | bytea, integer | geometry + postgis | public | st_linesubstring | geometry, double precision, double precision | geometry + postgis | public | st_linetocurve | geometry geometry | geometry + postgis | public | 
st_locatealong | geometry geometry, measure double precision, leftrightoffset double precision | geometry + postgis | public | st_locatebetween | geometry geometry, frommeasure double precision, tomeasure double precision, leftrightoffset double precision | geometry + postgis | public | st_locatebetweenelevations | geometry geometry, fromelevation double precision, toelevation double precision | geometry + postgis | public | st_longestline | geom1 geometry, geom2 geometry | geometry + postgis | public | st_m | geometry | double precision + postgis | public | st_makebox2d | geom1 geometry, geom2 geometry | box2d + postgis | public | st_makeenvelope | double precision, double precision, double precision, double precision, integer | geometry + postgis | public | st_makeline | geom1 geometry, geom2 geometry | geometry + postgis | public | st_makeline | geometry | geometry + postgis | public | st_makeline | geometry[] | geometry + postgis | public | st_makepoint | double precision, double precision | geometry + postgis | public | st_makepoint | double precision, double precision, double precision | geometry + postgis | public | st_makepoint | double precision, double precision, double precision, double precision | geometry + postgis | public | st_makepointm | double precision, double precision, double precision | geometry + postgis | public | st_makepolygon | geometry | geometry + postgis | public | st_makepolygon | geometry, geometry[] | geometry + postgis | public | st_makevalid | geom geometry, params text | geometry + postgis | public | st_makevalid | geometry | geometry + postgis | public | st_maxdistance | geom1 geometry, geom2 geometry | double precision + postgis | public | st_maximuminscribedcircle | geometry, OUT center geometry, OUT nearest geometry, OUT radius double precision | record + postgis | public | st_memcollect | geometry | geometry + postgis | public | st_memsize | geometry | integer + postgis | public | st_memunion | geometry | geometry + postgis 
| public | st_minimumboundingcircle | inputgeom geometry, segs_per_quarter integer | geometry + postgis | public | st_minimumboundingradius | geometry, OUT center geometry, OUT radius double precision | record + postgis | public | st_minimumclearance | geometry | double precision + postgis | public | st_minimumclearanceline | geometry | geometry + postgis | public | st_mlinefromtext | text | geometry + postgis | public | st_mlinefromtext | text, integer | geometry + postgis | public | st_mlinefromwkb | bytea | geometry + postgis | public | st_mlinefromwkb | bytea, integer | geometry + postgis | public | st_mpointfromtext | text | geometry + postgis | public | st_mpointfromtext | text, integer | geometry + postgis | public | st_mpointfromwkb | bytea | geometry + postgis | public | st_mpointfromwkb | bytea, integer | geometry + postgis | public | st_mpolyfromtext | text | geometry + postgis | public | st_mpolyfromtext | text, integer | geometry + postgis | public | st_mpolyfromwkb | bytea | geometry + postgis | public | st_mpolyfromwkb | bytea, integer | geometry + postgis | public | st_multi | geometry | geometry + postgis | public | st_multilinefromwkb | bytea | geometry + postgis | public | st_multilinestringfromtext | text | geometry + postgis | public | st_multilinestringfromtext | text, integer | geometry + postgis | public | st_multipointfromtext | text | geometry + postgis | public | st_multipointfromwkb | bytea | geometry + postgis | public | st_multipointfromwkb | bytea, integer | geometry + postgis | public | st_multipolyfromwkb | bytea | geometry + postgis | public | st_multipolyfromwkb | bytea, integer | geometry + postgis | public | st_multipolygonfromtext | text | geometry + postgis | public | st_multipolygonfromtext | text, integer | geometry + postgis | public | st_ndims | geometry | smallint + postgis | public | st_node | g geometry | geometry + postgis | public | st_normalize | geom geometry | geometry + postgis | public | st_npoints | geometry | 
integer + postgis | public | st_nrings | geometry | integer + postgis | public | st_numgeometries | geometry | integer + postgis | public | st_numinteriorring | geometry | integer + postgis | public | st_numinteriorrings | geometry | integer + postgis | public | st_numpatches | geometry | integer + postgis | public | st_numpoints | geometry | integer + postgis | public | st_offsetcurve | line geometry, distance double precision, params text | geometry + postgis | public | st_orderingequals | geom1 geometry, geom2 geometry | boolean + postgis | public | st_orientedenvelope | geometry | geometry + postgis | public | st_overlaps | geom1 geometry, geom2 geometry | boolean + postgis | public | st_patchn | geometry, integer | geometry + postgis | public | st_perimeter | geog geography, use_spheroid boolean | double precision + postgis | public | st_perimeter | geometry | double precision + postgis | public | st_perimeter2d | geometry | double precision + postgis | public | st_point | double precision, double precision | geometry + postgis | public | st_point | double precision, double precision, srid integer | geometry + postgis | public | st_pointfromgeohash | text, integer | geometry + postgis | public | st_pointfromtext | text | geometry + postgis | public | st_pointfromtext | text, integer | geometry + postgis | public | st_pointfromwkb | bytea | geometry + postgis | public | st_pointfromwkb | bytea, integer | geometry + postgis | public | st_pointinsidecircle | geometry, double precision, double precision, double precision | boolean + postgis | public | st_pointm | xcoordinate double precision, ycoordinate double precision, mcoordinate double precision, srid integer | geometry + postgis | public | st_pointn | geometry, integer | geometry + postgis | public | st_pointonsurface | geometry | geometry + postgis | public | st_points | geometry | geometry + postgis | public | st_pointz | xcoordinate double precision, ycoordinate double precision, zcoordinate double 
precision, srid integer | geometry + postgis | public | st_pointzm | xcoordinate double precision, ycoordinate double precision, zcoordinate double precision, mcoordinate double precision, srid integer | geometry + postgis | public | st_polyfromtext | text | geometry + postgis | public | st_polyfromtext | text, integer | geometry + postgis | public | st_polyfromwkb | bytea | geometry + postgis | public | st_polyfromwkb | bytea, integer | geometry + postgis | public | st_polygon | geometry, integer | geometry + postgis | public | st_polygonfromtext | text | geometry + postgis | public | st_polygonfromtext | text, integer | geometry + postgis | public | st_polygonfromwkb | bytea | geometry + postgis | public | st_polygonfromwkb | bytea, integer | geometry + postgis | public | st_polygonize | geometry | geometry + postgis | public | st_polygonize | geometry[] | geometry + postgis | public | st_project | geog geography, distance double precision, azimuth double precision | geography + postgis | public | st_quantizecoordinates | g geometry, prec_x integer, prec_y integer, prec_z integer, prec_m integer | geometry + postgis | public | st_reduceprecision | geom geometry, gridsize double precision | geometry + postgis | public | st_relate | geom1 geometry, geom2 geometry | text + postgis | public | st_relate | geom1 geometry, geom2 geometry, integer | text + postgis | public | st_relate | geom1 geometry, geom2 geometry, text | boolean + postgis | public | st_relatematch | text, text | boolean + postgis | public | st_removepoint | geometry, integer | geometry + postgis | public | st_removerepeatedpoints | geom geometry, tolerance double precision | geometry + postgis | public | st_reverse | geometry | geometry + postgis | public | st_rotate | geometry, double precision | geometry + postgis | public | st_rotate | geometry, double precision, double precision, double precision | geometry + postgis | public | st_rotate | geometry, double precision, geometry | geometry + postgis 
| public | st_rotatex | geometry, double precision | geometry + postgis | public | st_rotatey | geometry, double precision | geometry + postgis | public | st_rotatez | geometry, double precision | geometry + postgis | public | st_scale | geometry, double precision, double precision | geometry + postgis | public | st_scale | geometry, double precision, double precision, double precision | geometry + postgis | public | st_scale | geometry, geometry | geometry + postgis | public | st_scale | geometry, geometry, origin geometry | geometry + postgis | public | st_scroll | geometry, geometry | geometry + postgis | public | st_segmentize | geog geography, max_segment_length double precision | geography + postgis | public | st_segmentize | geometry, double precision | geometry + postgis | public | st_seteffectivearea | geometry, double precision, integer | geometry + postgis | public | st_setpoint | geometry, integer, geometry | geometry + postgis | public | st_setsrid | geog geography, srid integer | geography + postgis | public | st_setsrid | geom geometry, srid integer | geometry + postgis | public | st_sharedpaths | geom1 geometry, geom2 geometry | geometry + postgis | public | st_shiftlongitude | geometry | geometry + postgis | public | st_shortestline | geom1 geometry, geom2 geometry | geometry + postgis | public | st_simplify | geometry, double precision | geometry + postgis | public | st_simplify | geometry, double precision, boolean | geometry + postgis | public | st_simplifypolygonhull | geom geometry, vertex_fraction double precision, is_outer boolean | geometry + postgis | public | st_simplifypreservetopology | geometry, double precision | geometry + postgis | public | st_simplifyvw | geometry, double precision | geometry + postgis | public | st_snap | geom1 geometry, geom2 geometry, double precision | geometry + postgis | public | st_snaptogrid | geom1 geometry, geom2 geometry, double precision, double precision, double precision, double precision | geometry + 
postgis | public | st_snaptogrid | geometry, double precision | geometry + postgis | public | st_snaptogrid | geometry, double precision, double precision | geometry + postgis | public | st_snaptogrid | geometry, double precision, double precision, double precision, double precision | geometry + postgis | public | st_split | geom1 geometry, geom2 geometry | geometry + postgis | public | st_square | size double precision, cell_i integer, cell_j integer, origin geometry | geometry + postgis | public | st_squaregrid | size double precision, bounds geometry, OUT geom geometry, OUT i integer, OUT j integer | SETOF record + postgis | public | st_srid | geog geography | integer + postgis | public | st_srid | geom geometry | integer + postgis | public | st_startpoint | geometry | geometry + postgis | public | st_subdivide | geom geometry, maxvertices integer, gridsize double precision | SETOF geometry + postgis | public | st_summary | geography | text + postgis | public | st_summary | geometry | text + postgis | public | st_swapordinates | geom geometry, ords cstring | geometry + postgis | public | st_symdifference | geom1 geometry, geom2 geometry, gridsize double precision | geometry + postgis | public | st_symmetricdifference | geom1 geometry, geom2 geometry | geometry + postgis | public | st_tileenvelope | zoom integer, x integer, y integer, bounds geometry, margin double precision | geometry + postgis | public | st_touches | geom1 geometry, geom2 geometry | boolean + postgis | public | st_transform | geom geometry, from_proj text, to_proj text | geometry + postgis | public | st_transform | geom geometry, from_proj text, to_srid integer | geometry + postgis | public | st_transform | geom geometry, to_proj text | geometry + postgis | public | st_transform | geometry, integer | geometry + postgis | public | st_translate | geometry, double precision, double precision | geometry + postgis | public | st_translate | geometry, double precision, double precision, double 
precision | geometry + postgis | public | st_transscale | geometry, double precision, double precision, double precision, double precision | geometry + postgis | public | st_triangulatepolygon | g1 geometry | geometry + postgis | public | st_unaryunion | geometry, gridsize double precision | geometry + postgis | public | st_union | geom1 geometry, geom2 geometry | geometry + postgis | public | st_union | geom1 geometry, geom2 geometry, gridsize double precision | geometry + postgis | public | st_union | geometry | geometry + postgis | public | st_union | geometry, gridsize double precision | geometry + postgis | public | st_union | geometry[] | geometry + postgis | public | st_voronoilines | g1 geometry, tolerance double precision, extend_to geometry | geometry + postgis | public | st_voronoipolygons | g1 geometry, tolerance double precision, extend_to geometry | geometry + postgis | public | st_within | geom1 geometry, geom2 geometry | boolean + postgis | public | st_wkbtosql | wkb bytea | geometry + postgis | public | st_wkttosql | text | geometry + postgis | public | st_wrapx | geom geometry, wrap double precision, move double precision | geometry + postgis | public | st_x | geometry | double precision + postgis | public | st_xmax | box3d | double precision + postgis | public | st_xmin | box3d | double precision + postgis | public | st_y | geometry | double precision + postgis | public | st_ymax | box3d | double precision + postgis | public | st_ymin | box3d | double precision + postgis | public | st_z | geometry | double precision + postgis | public | st_zmax | box3d | double precision + postgis | public | st_zmflag | geometry | smallint + postgis | public | st_zmin | box3d | double precision + postgis | public | text | geometry | text + postgis | public | unlockrows | text | integer + postgis | public | updategeometrysrid | catalogn_name character varying, schema_name character varying, table_name character varying, column_name character varying, new_srid_in 
integer | text + postgis | public | updategeometrysrid | character varying, character varying, character varying, integer | text + postgis | public | updategeometrysrid | character varying, character varying, integer | text + postgis_raster | public | __st_countagg_transfn | agg agg_count, rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | agg_count + postgis_raster | public | _add_overview_constraint | ovschema name, ovtable name, ovcolumn name, refschema name, reftable name, refcolumn name, factor integer | boolean + postgis_raster | public | _add_raster_constraint | cn name, sql text | boolean + postgis_raster | public | _add_raster_constraint_alignment | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _add_raster_constraint_blocksize | rastschema name, rasttable name, rastcolumn name, axis text | boolean + postgis_raster | public | _add_raster_constraint_coverage_tile | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _add_raster_constraint_extent | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _add_raster_constraint_nodata_values | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _add_raster_constraint_num_bands | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _add_raster_constraint_out_db | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _add_raster_constraint_pixel_types | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _add_raster_constraint_scale | rastschema name, rasttable name, rastcolumn name, axis character | boolean + postgis_raster | public | _add_raster_constraint_spatially_unique | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _add_raster_constraint_srid | rastschema name, rasttable name, 
rastcolumn name | boolean + postgis_raster | public | _drop_overview_constraint | ovschema name, ovtable name, ovcolumn name | boolean + postgis_raster | public | _drop_raster_constraint | rastschema name, rasttable name, cn name | boolean + postgis_raster | public | _drop_raster_constraint_alignment | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _drop_raster_constraint_blocksize | rastschema name, rasttable name, rastcolumn name, axis text | boolean + postgis_raster | public | _drop_raster_constraint_coverage_tile | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _drop_raster_constraint_extent | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _drop_raster_constraint_nodata_values | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _drop_raster_constraint_num_bands | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _drop_raster_constraint_out_db | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _drop_raster_constraint_pixel_types | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _drop_raster_constraint_regular_blocking | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _drop_raster_constraint_scale | rastschema name, rasttable name, rastcolumn name, axis character | boolean + postgis_raster | public | _drop_raster_constraint_spatially_unique | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _drop_raster_constraint_srid | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _overview_constraint | ov raster, factor integer, refschema name, reftable name, refcolumn name | boolean + postgis_raster | public | _overview_constraint_info | ovschema name, ovtable name, ovcolumn name, OUT refschema 
name, OUT reftable name, OUT refcolumn name, OUT factor integer | record + postgis_raster | public | _raster_constraint_info_alignment | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _raster_constraint_info_blocksize | rastschema name, rasttable name, rastcolumn name, axis text | integer + postgis_raster | public | _raster_constraint_info_coverage_tile | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _raster_constraint_info_extent | rastschema name, rasttable name, rastcolumn name | geometry + postgis_raster | public | _raster_constraint_info_index | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _raster_constraint_info_nodata_values | rastschema name, rasttable name, rastcolumn name | double precision[] + postgis_raster | public | _raster_constraint_info_num_bands | rastschema name, rasttable name, rastcolumn name | integer + postgis_raster | public | _raster_constraint_info_out_db | rastschema name, rasttable name, rastcolumn name | boolean[] + postgis_raster | public | _raster_constraint_info_pixel_types | rastschema name, rasttable name, rastcolumn name | text[] + postgis_raster | public | _raster_constraint_info_regular_blocking | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _raster_constraint_info_scale | rastschema name, rasttable name, rastcolumn name, axis character | double precision + postgis_raster | public | _raster_constraint_info_spatially_unique | rastschema name, rasttable name, rastcolumn name | boolean + postgis_raster | public | _raster_constraint_info_srid | rastschema name, rasttable name, rastcolumn name | integer + postgis_raster | public | _raster_constraint_nodata_values | rast raster | numeric[] + postgis_raster | public | _raster_constraint_out_db | rast raster | boolean[] + postgis_raster | public | _raster_constraint_pixel_types | rast raster | text[] + postgis_raster | 
public | _st_aspect4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | _st_asraster | geom geometry, scalex double precision, scaley double precision, width integer, height integer, pixeltype text[], value double precision[], nodataval double precision[], upperleftx double precision, upperlefty double precision, gridx double precision, gridy double precision, skewx double precision, skewy double precision, touched boolean | raster + postgis_raster | public | _st_clip | rast raster, nband integer[], geom geometry, nodataval double precision[], crop boolean | raster + postgis_raster | public | _st_colormap | rast raster, nband integer, colormap text, method text | raster + postgis_raster | public | _st_contains | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | _st_containsproperly | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | _st_convertarray4ma | value double precision[] | double precision[] + postgis_raster | public | _st_count | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | bigint + postgis_raster | public | _st_countagg_finalfn | agg agg_count | bigint + postgis_raster | public | _st_countagg_transfn | agg agg_count, rast raster, exclude_nodata_value boolean | agg_count + postgis_raster | public | _st_countagg_transfn | agg agg_count, rast raster, nband integer, exclude_nodata_value boolean | agg_count + postgis_raster | public | _st_countagg_transfn | agg agg_count, rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | agg_count + postgis_raster | public | _st_coveredby | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | _st_covers | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | _st_dfullywithin | rast1 raster, nband1 
integer, rast2 raster, nband2 integer, distance double precision | boolean + postgis_raster | public | _st_dwithin | rast1 raster, nband1 integer, rast2 raster, nband2 integer, distance double precision | boolean + postgis_raster | public | _st_gdalwarp | rast raster, algorithm text, maxerr double precision, srid integer, scalex double precision, scaley double precision, gridx double precision, gridy double precision, skewx double precision, skewy double precision, width integer, height integer | raster + postgis_raster | public | _st_grayscale4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | _st_hillshade4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | _st_histogram | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, bins integer, width double precision[], "right" boolean, min double precision, max double precision, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record + postgis_raster | public | _st_intersects | geom geometry, rast raster, nband integer | boolean + postgis_raster | public | _st_intersects | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | _st_mapalgebra | rastbandargset rastbandarg[], callbackfunc regprocedure, pixeltype text, distancex integer, distancey integer, extenttype text, customextent raster, mask double precision[], weighted boolean, VARIADIC userargs text[] | raster + postgis_raster | public | _st_mapalgebra | rastbandargset rastbandarg[], expression text, pixeltype text, extenttype text, nodata1expr text, nodata2expr text, nodatanodataval double precision | raster + postgis_raster | public | _st_neighborhood | rast raster, band integer, columnx integer, rowy integer, distancex integer, distancey integer, exclude_nodata_value boolean | double 
precision[] + postgis_raster | public | _st_overlaps | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | _st_pixelascentroids | rast raster, band integer, columnx integer, rowy integer, exclude_nodata_value boolean | TABLE(geom geometry, val double precision, x integer, y integer) + postgis_raster | public | _st_pixelaspolygons | rast raster, band integer, columnx integer, rowy integer, exclude_nodata_value boolean | TABLE(geom geometry, val double precision, x integer, y integer) + postgis_raster | public | _st_quantile | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record + postgis_raster | public | _st_rastertoworldcoord | rast raster, columnx integer, rowy integer, OUT longitude double precision, OUT latitude double precision | record + postgis_raster | public | _st_reclass | rast raster, VARIADIC reclassargset reclassarg[] | raster + postgis_raster | public | _st_roughness4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | _st_samealignment_finalfn | agg agg_samealignment | boolean + postgis_raster | public | _st_samealignment_transfn | agg agg_samealignment, rast raster | agg_samealignment + postgis_raster | public | _st_setvalues | rast raster, nband integer, x integer, y integer, newvalueset double precision[], noset boolean[], hasnosetvalue boolean, nosetvalue double precision, keepnodata boolean | raster + postgis_raster | public | _st_slope4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | _st_summarystats | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | summarystats + postgis_raster | public | _st_summarystats_finalfn | internal | summarystats + postgis_raster | public | 
_st_summarystats_transfn | internal, raster, boolean, double precision | internal + postgis_raster | public | _st_summarystats_transfn | internal, raster, integer, boolean | internal + postgis_raster | public | _st_summarystats_transfn | internal, raster, integer, boolean, double precision | internal + postgis_raster | public | _st_tile | rast raster, width integer, height integer, nband integer[], padwithnodata boolean, nodataval double precision | SETOF raster + postgis_raster | public | _st_touches | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | _st_tpi4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | _st_tri4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | _st_union_finalfn | internal | raster + postgis_raster | public | _st_union_transfn | internal, raster | internal + postgis_raster | public | _st_union_transfn | internal, raster, integer | internal + postgis_raster | public | _st_union_transfn | internal, raster, integer, text | internal + postgis_raster | public | _st_union_transfn | internal, raster, text | internal + postgis_raster | public | _st_union_transfn | internal, raster, unionarg[] | internal + postgis_raster | public | _st_valuecount | rast raster, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer, OUT percent double precision | SETOF record + postgis_raster | public | _st_valuecount | rastertable text, rastercolumn text, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer, OUT percent double precision | SETOF record + postgis_raster | public | _st_within | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | 
_st_worldtorastercoord | rast raster, longitude double precision, latitude double precision, OUT columnx integer, OUT rowy integer | record + postgis_raster | public | _updaterastersrid | schema_name name, table_name name, column_name name, new_srid integer | boolean + postgis_raster | public | addoverviewconstraints | ovschema name, ovtable name, ovcolumn name, refschema name, reftable name, refcolumn name, ovfactor integer | boolean + postgis_raster | public | addoverviewconstraints | ovtable name, ovcolumn name, reftable name, refcolumn name, ovfactor integer | boolean + postgis_raster | public | addrasterconstraints | rastschema name, rasttable name, rastcolumn name, VARIADIC constraints text[] | boolean + postgis_raster | public | addrasterconstraints | rastschema name, rasttable name, rastcolumn name, srid boolean, scale_x boolean, scale_y boolean, blocksize_x boolean, blocksize_y boolean, same_alignment boolean, regular_blocking boolean, num_bands boolean, pixel_types boolean, nodata_values boolean, out_db boolean, extent boolean | boolean + postgis_raster | public | addrasterconstraints | rasttable name, rastcolumn name, VARIADIC constraints text[] | boolean + postgis_raster | public | addrasterconstraints | rasttable name, rastcolumn name, srid boolean, scale_x boolean, scale_y boolean, blocksize_x boolean, blocksize_y boolean, same_alignment boolean, regular_blocking boolean, num_bands boolean, pixel_types boolean, nodata_values boolean, out_db boolean, extent boolean | boolean + postgis_raster | public | box3d | raster | box3d + postgis_raster | public | bytea | raster | bytea + postgis_raster | public | dropoverviewconstraints | ovschema name, ovtable name, ovcolumn name | boolean + postgis_raster | public | dropoverviewconstraints | ovtable name, ovcolumn name | boolean + postgis_raster | public | droprasterconstraints | rastschema name, rasttable name, rastcolumn name, VARIADIC constraints text[] | boolean + postgis_raster | public | 
droprasterconstraints | rastschema name, rasttable name, rastcolumn name, srid boolean, scale_x boolean, scale_y boolean, blocksize_x boolean, blocksize_y boolean, same_alignment boolean, regular_blocking boolean, num_bands boolean, pixel_types boolean, nodata_values boolean, out_db boolean, extent boolean | boolean + postgis_raster | public | droprasterconstraints | rasttable name, rastcolumn name, VARIADIC constraints text[] | boolean + postgis_raster | public | droprasterconstraints | rasttable name, rastcolumn name, srid boolean, scale_x boolean, scale_y boolean, blocksize_x boolean, blocksize_y boolean, same_alignment boolean, regular_blocking boolean, num_bands boolean, pixel_types boolean, nodata_values boolean, out_db boolean, extent boolean | boolean + postgis_raster | public | geometry_contained_by_raster | geometry, raster | boolean + postgis_raster | public | geometry_raster_contain | geometry, raster | boolean + postgis_raster | public | geometry_raster_overlap | geometry, raster | boolean + postgis_raster | public | postgis_gdal_version | | text + postgis_raster | public | postgis_noop | raster | geometry + postgis_raster | public | postgis_raster_lib_build_date | | text + postgis_raster | public | postgis_raster_lib_version | | text + postgis_raster | public | postgis_raster_scripts_installed | | text + postgis_raster | public | raster_above | raster, raster | boolean + postgis_raster | public | raster_below | raster, raster | boolean + postgis_raster | public | raster_contain | raster, raster | boolean + postgis_raster | public | raster_contained | raster, raster | boolean + postgis_raster | public | raster_contained_by_geometry | raster, geometry | boolean + postgis_raster | public | raster_eq | raster, raster | boolean + postgis_raster | public | raster_geometry_contain | raster, geometry | boolean + postgis_raster | public | raster_geometry_overlap | raster, geometry | boolean + postgis_raster | public | raster_hash | raster | integer + 
postgis_raster | public | raster_in | cstring | raster + postgis_raster | public | raster_left | raster, raster | boolean + postgis_raster | public | raster_out | raster | cstring + postgis_raster | public | raster_overabove | raster, raster | boolean + postgis_raster | public | raster_overbelow | raster, raster | boolean + postgis_raster | public | raster_overlap | raster, raster | boolean + postgis_raster | public | raster_overleft | raster, raster | boolean + postgis_raster | public | raster_overright | raster, raster | boolean + postgis_raster | public | raster_right | raster, raster | boolean + postgis_raster | public | raster_same | raster, raster | boolean + postgis_raster | public | st_addband | rast raster, addbandargset addbandarg[] | raster + postgis_raster | public | st_addband | rast raster, index integer, outdbfile text, outdbindex integer[], nodataval double precision | raster + postgis_raster | public | st_addband | rast raster, index integer, pixeltype text, initialvalue double precision, nodataval double precision | raster + postgis_raster | public | st_addband | rast raster, outdbfile text, outdbindex integer[], index integer, nodataval double precision | raster + postgis_raster | public | st_addband | rast raster, pixeltype text, initialvalue double precision, nodataval double precision | raster + postgis_raster | public | st_addband | torast raster, fromrast raster, fromband integer, torastindex integer | raster + postgis_raster | public | st_addband | torast raster, fromrasts raster[], fromband integer, torastindex integer | raster + postgis_raster | public | st_approxcount | rast raster, exclude_nodata_value boolean, sample_percent double precision | bigint + postgis_raster | public | st_approxcount | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | bigint + postgis_raster | public | st_approxcount | rast raster, nband integer, sample_percent double precision | bigint + postgis_raster | public | 
st_approxcount | rast raster, sample_percent double precision | bigint + postgis_raster | public | st_approxhistogram | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, bins integer, "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record + postgis_raster | public | st_approxhistogram | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, bins integer, width double precision[], "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record + postgis_raster | public | st_approxhistogram | rast raster, nband integer, sample_percent double precision, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record + postgis_raster | public | st_approxhistogram | rast raster, nband integer, sample_percent double precision, bins integer, "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record + postgis_raster | public | st_approxhistogram | rast raster, nband integer, sample_percent double precision, bins integer, width double precision[], "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record + postgis_raster | public | st_approxhistogram | rast raster, sample_percent double precision, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record + postgis_raster | public | st_approxquantile | rast raster, exclude_nodata_value boolean, quantile double precision | double precision + postgis_raster | public | st_approxquantile | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, quantile double precision | double precision + postgis_raster | public | 
st_approxquantile | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record + postgis_raster | public | st_approxquantile | rast raster, nband integer, sample_percent double precision, quantile double precision | double precision + postgis_raster | public | st_approxquantile | rast raster, nband integer, sample_percent double precision, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record + postgis_raster | public | st_approxquantile | rast raster, quantile double precision | double precision + postgis_raster | public | st_approxquantile | rast raster, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record + postgis_raster | public | st_approxquantile | rast raster, sample_percent double precision, quantile double precision | double precision + postgis_raster | public | st_approxquantile | rast raster, sample_percent double precision, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record + postgis_raster | public | st_approxsummarystats | rast raster, exclude_nodata_value boolean, sample_percent double precision | summarystats + postgis_raster | public | st_approxsummarystats | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | summarystats + postgis_raster | public | st_approxsummarystats | rast raster, nband integer, sample_percent double precision | summarystats + postgis_raster | public | st_approxsummarystats | rast raster, sample_percent double precision | summarystats + postgis_raster | public | st_asbinary | raster, outasin boolean | bytea + postgis_raster | public | st_asgdalraster | rast raster, format text, options text[], srid integer | bytea + postgis_raster | public | st_ashexwkb | raster, outasin boolean | text + postgis_raster | 
public | st_asjpeg | rast raster, nband integer, options text[] | bytea + postgis_raster | public | st_asjpeg | rast raster, nband integer, quality integer | bytea + postgis_raster | public | st_asjpeg | rast raster, nbands integer[], options text[] | bytea + postgis_raster | public | st_asjpeg | rast raster, nbands integer[], quality integer | bytea + postgis_raster | public | st_asjpeg | rast raster, options text[] | bytea + postgis_raster | public | st_aspect | rast raster, nband integer, customextent raster, pixeltype text, units text, interpolate_nodata boolean | raster + postgis_raster | public | st_aspect | rast raster, nband integer, pixeltype text, units text, interpolate_nodata boolean | raster + postgis_raster | public | st_aspng | rast raster, nband integer, compression integer | bytea + postgis_raster | public | st_aspng | rast raster, nband integer, options text[] | bytea + postgis_raster | public | st_aspng | rast raster, nbands integer[], compression integer | bytea + postgis_raster | public | st_aspng | rast raster, nbands integer[], options text[] | bytea + postgis_raster | public | st_aspng | rast raster, options text[] | bytea + postgis_raster | public | st_asraster | geom geometry, ref raster, pixeltype text, value double precision, nodataval double precision, touched boolean | raster + postgis_raster | public | st_asraster | geom geometry, ref raster, pixeltype text[], value double precision[], nodataval double precision[], touched boolean | raster + postgis_raster | public | st_asraster | geom geometry, scalex double precision, scaley double precision, gridx double precision, gridy double precision, pixeltype text, value double precision, nodataval double precision, skewx double precision, skewy double precision, touched boolean | raster + postgis_raster | public | st_asraster | geom geometry, scalex double precision, scaley double precision, gridx double precision, gridy double precision, pixeltype text[], value double precision[], nodataval 
double precision[], skewx double precision, skewy double precision, touched boolean | raster + postgis_raster | public | st_asraster | geom geometry, scalex double precision, scaley double precision, pixeltype text, value double precision, nodataval double precision, upperleftx double precision, upperlefty double precision, skewx double precision, skewy double precision, touched boolean | raster + postgis_raster | public | st_asraster | geom geometry, scalex double precision, scaley double precision, pixeltype text[], value double precision[], nodataval double precision[], upperleftx double precision, upperlefty double precision, skewx double precision, skewy double precision, touched boolean | raster + postgis_raster | public | st_asraster | geom geometry, width integer, height integer, gridx double precision, gridy double precision, pixeltype text, value double precision, nodataval double precision, skewx double precision, skewy double precision, touched boolean | raster + postgis_raster | public | st_asraster | geom geometry, width integer, height integer, gridx double precision, gridy double precision, pixeltype text[], value double precision[], nodataval double precision[], skewx double precision, skewy double precision, touched boolean | raster + postgis_raster | public | st_asraster | geom geometry, width integer, height integer, pixeltype text, value double precision, nodataval double precision, upperleftx double precision, upperlefty double precision, skewx double precision, skewy double precision, touched boolean | raster + postgis_raster | public | st_asraster | geom geometry, width integer, height integer, pixeltype text[], value double precision[], nodataval double precision[], upperleftx double precision, upperlefty double precision, skewx double precision, skewy double precision, touched boolean | raster + postgis_raster | public | st_astiff | rast raster, compression text, srid integer | bytea + postgis_raster | public | st_astiff | rast raster, 
nbands integer[], compression text, srid integer | bytea + postgis_raster | public | st_astiff | rast raster, nbands integer[], options text[], srid integer | bytea + postgis_raster | public | st_astiff | rast raster, options text[], srid integer | bytea + postgis_raster | public | st_aswkb | raster, outasin boolean | bytea + postgis_raster | public | st_band | rast raster, nband integer | raster + postgis_raster | public | st_band | rast raster, nbands integer[] | raster + postgis_raster | public | st_band | rast raster, nbands text, delimiter character | raster + postgis_raster | public | st_bandfilesize | rast raster, band integer | bigint + postgis_raster | public | st_bandfiletimestamp | rast raster, band integer | bigint + postgis_raster | public | st_bandisnodata | rast raster, band integer, forcechecking boolean | boolean + postgis_raster | public | st_bandisnodata | rast raster, forcechecking boolean | boolean + postgis_raster | public | st_bandmetadata | rast raster, band integer | TABLE(pixeltype text, nodatavalue double precision, isoutdb boolean, path text, outdbbandnum integer, filesize bigint, filetimestamp bigint) + postgis_raster | public | st_bandmetadata | rast raster, band integer[] | TABLE(bandnum integer, pixeltype text, nodatavalue double precision, isoutdb boolean, path text, outdbbandnum integer, filesize bigint, filetimestamp bigint) + postgis_raster | public | st_bandnodatavalue | rast raster, band integer | double precision + postgis_raster | public | st_bandpath | rast raster, band integer | text + postgis_raster | public | st_bandpixeltype | rast raster, band integer | text + postgis_raster | public | st_clip | rast raster, geom geometry, crop boolean | raster + postgis_raster | public | st_clip | rast raster, geom geometry, nodataval double precision, crop boolean | raster + postgis_raster | public | st_clip | rast raster, geom geometry, nodataval double precision[], crop boolean | raster + postgis_raster | public | st_clip | rast 
raster, nband integer, geom geometry, crop boolean | raster + postgis_raster | public | st_clip | rast raster, nband integer, geom geometry, nodataval double precision, crop boolean | raster + postgis_raster | public | st_clip | rast raster, nband integer[], geom geometry, nodataval double precision[], crop boolean | raster + postgis_raster | public | st_colormap | rast raster, colormap text, method text | raster + postgis_raster | public | st_colormap | rast raster, nband integer, colormap text, method text | raster + postgis_raster | public | st_contains | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | st_contains | rast1 raster, rast2 raster | boolean + postgis_raster | public | st_containsproperly | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | st_containsproperly | rast1 raster, rast2 raster | boolean + postgis_raster | public | st_contour | rast raster, bandnumber integer, level_interval double precision, level_base double precision, fixed_levels double precision[], polygonize boolean | TABLE(geom geometry, id integer, value double precision) + postgis_raster | public | st_convexhull | raster | geometry + postgis_raster | public | st_count | rast raster, exclude_nodata_value boolean | bigint + postgis_raster | public | st_count | rast raster, nband integer, exclude_nodata_value boolean | bigint + postgis_raster | public | st_countagg | raster, boolean | bigint + postgis_raster | public | st_countagg | raster, integer, boolean | bigint + postgis_raster | public | st_countagg | raster, integer, boolean, double precision | bigint + postgis_raster | public | st_coveredby | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | st_coveredby | rast1 raster, rast2 raster | boolean + postgis_raster | public | st_covers | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | st_covers 
| rast1 raster, rast2 raster | boolean + postgis_raster | public | st_createoverview | tab regclass, col name, factor integer, algo text | regclass + postgis_raster | public | st_dfullywithin | rast1 raster, nband1 integer, rast2 raster, nband2 integer, distance double precision | boolean + postgis_raster | public | st_dfullywithin | rast1 raster, rast2 raster, distance double precision | boolean + postgis_raster | public | st_disjoint | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | st_disjoint | rast1 raster, rast2 raster | boolean + postgis_raster | public | st_distinct4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision + postgis_raster | public | st_distinct4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | st_dumpaspolygons | rast raster, band integer, exclude_nodata_value boolean | SETOF geomval + postgis_raster | public | st_dumpvalues | rast raster, nband integer, exclude_nodata_value boolean | double precision[] + postgis_raster | public | st_dumpvalues | rast raster, nband integer[], exclude_nodata_value boolean | TABLE(nband integer, valarray double precision[]) + postgis_raster | public | st_dwithin | rast1 raster, nband1 integer, rast2 raster, nband2 integer, distance double precision | boolean + postgis_raster | public | st_dwithin | rast1 raster, rast2 raster, distance double precision | boolean + postgis_raster | public | st_envelope | raster | geometry + postgis_raster | public | st_fromgdalraster | gdaldata bytea, srid integer | raster + postgis_raster | public | st_gdaldrivers | OUT idx integer, OUT short_name text, OUT long_name text, OUT can_read boolean, OUT can_write boolean, OUT create_options text | SETOF record + postgis_raster | public | st_georeference | rast raster, format text | text + postgis_raster | public | st_geotransform | raster, OUT imag double precision, OUT jmag double 
precision, OUT theta_i double precision, OUT theta_ij double precision, OUT xoffset double precision, OUT yoffset double precision | record + postgis_raster | public | st_grayscale | rast raster, redband integer, greenband integer, blueband integer, extenttype text | raster + postgis_raster | public | st_grayscale | rastbandargset rastbandarg[], extenttype text | raster + postgis_raster | public | st_hasnoband | rast raster, nband integer | boolean + postgis_raster | public | st_height | raster | integer + postgis_raster | public | st_hillshade | rast raster, nband integer, customextent raster, pixeltype text, azimuth double precision, altitude double precision, max_bright double precision, scale double precision, interpolate_nodata boolean | raster + postgis_raster | public | st_hillshade | rast raster, nband integer, pixeltype text, azimuth double precision, altitude double precision, max_bright double precision, scale double precision, interpolate_nodata boolean | raster + postgis_raster | public | st_histogram | rast raster, nband integer, bins integer, "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record + postgis_raster | public | st_histogram | rast raster, nband integer, bins integer, width double precision[], "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record + postgis_raster | public | st_histogram | rast raster, nband integer, exclude_nodata_value boolean, bins integer, "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record + postgis_raster | public | st_histogram | rast raster, nband integer, exclude_nodata_value boolean, bins integer, width double precision[], "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record + postgis_raster | public | 
st_interpolateraster | geom geometry, options text, rast raster, bandnumber integer | raster + postgis_raster | public | st_intersection | geomin geometry, rast raster, band integer | SETOF geomval + postgis_raster | public | st_intersection | rast raster, band integer, geomin geometry | SETOF geomval + postgis_raster | public | st_intersection | rast raster, geomin geometry | SETOF geomval + postgis_raster | public | st_intersection | rast1 raster, band1 integer, rast2 raster, band2 integer, nodataval double precision | raster + postgis_raster | public | st_intersection | rast1 raster, band1 integer, rast2 raster, band2 integer, nodataval double precision[] | raster + postgis_raster | public | st_intersection | rast1 raster, band1 integer, rast2 raster, band2 integer, returnband text, nodataval double precision | raster + postgis_raster | public | st_intersection | rast1 raster, band1 integer, rast2 raster, band2 integer, returnband text, nodataval double precision[] | raster + postgis_raster | public | st_intersection | rast1 raster, rast2 raster, nodataval double precision | raster + postgis_raster | public | st_intersection | rast1 raster, rast2 raster, nodataval double precision[] | raster + postgis_raster | public | st_intersection | rast1 raster, rast2 raster, returnband text, nodataval double precision | raster + postgis_raster | public | st_intersection | rast1 raster, rast2 raster, returnband text, nodataval double precision[] | raster + postgis_raster | public | st_intersects | geom geometry, rast raster, nband integer | boolean + postgis_raster | public | st_intersects | rast raster, geom geometry, nband integer | boolean + postgis_raster | public | st_intersects | rast raster, nband integer, geom geometry | boolean + postgis_raster | public | st_intersects | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | st_intersects | rast1 raster, rast2 raster | boolean + postgis_raster | public | st_invdistweight4ma 
| value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | st_iscoveragetile | rast raster, coverage raster, tilewidth integer, tileheight integer | boolean + postgis_raster | public | st_isempty | rast raster | boolean + postgis_raster | public | st_makeemptycoverage | tilewidth integer, tileheight integer, width integer, height integer, upperleftx double precision, upperlefty double precision, scalex double precision, scaley double precision, skewx double precision, skewy double precision, srid integer | SETOF raster + postgis_raster | public | st_makeemptyraster | rast raster | raster + postgis_raster | public | st_makeemptyraster | width integer, height integer, upperleftx double precision, upperlefty double precision, pixelsize double precision | raster + postgis_raster | public | st_makeemptyraster | width integer, height integer, upperleftx double precision, upperlefty double precision, scalex double precision, scaley double precision, skewx double precision, skewy double precision, srid integer | raster + postgis_raster | public | st_mapalgebra | rast raster, nband integer, callbackfunc regprocedure, mask double precision[], weighted boolean, pixeltype text, extenttype text, customextent raster, VARIADIC userargs text[] | raster + postgis_raster | public | st_mapalgebra | rast raster, nband integer, callbackfunc regprocedure, pixeltype text, extenttype text, customextent raster, distancex integer, distancey integer, VARIADIC userargs text[] | raster + postgis_raster | public | st_mapalgebra | rast raster, nband integer, pixeltype text, expression text, nodataval double precision | raster + postgis_raster | public | st_mapalgebra | rast raster, nband integer[], callbackfunc regprocedure, pixeltype text, extenttype text, customextent raster, distancex integer, distancey integer, VARIADIC userargs text[] | raster + postgis_raster | public | st_mapalgebra | rast raster, pixeltype text, expression text, 
nodataval double precision | raster + postgis_raster | public | st_mapalgebra | rast1 raster, band1 integer, rast2 raster, band2 integer, expression text, pixeltype text, extenttype text, nodata1expr text, nodata2expr text, nodatanodataval double precision | raster + postgis_raster | public | st_mapalgebra | rast1 raster, nband1 integer, rast2 raster, nband2 integer, callbackfunc regprocedure, pixeltype text, extenttype text, customextent raster, distancex integer, distancey integer, VARIADIC userargs text[] | raster + postgis_raster | public | st_mapalgebra | rast1 raster, rast2 raster, expression text, pixeltype text, extenttype text, nodata1expr text, nodata2expr text, nodatanodataval double precision | raster + postgis_raster | public | st_mapalgebra | rastbandargset rastbandarg[], callbackfunc regprocedure, pixeltype text, extenttype text, customextent raster, distancex integer, distancey integer, VARIADIC userargs text[] | raster + postgis_raster | public | st_mapalgebraexpr | rast raster, band integer, pixeltype text, expression text, nodataval double precision | raster + postgis_raster | public | st_mapalgebraexpr | rast raster, pixeltype text, expression text, nodataval double precision | raster + postgis_raster | public | st_mapalgebraexpr | rast1 raster, band1 integer, rast2 raster, band2 integer, expression text, pixeltype text, extenttype text, nodata1expr text, nodata2expr text, nodatanodataval double precision | raster + postgis_raster | public | st_mapalgebraexpr | rast1 raster, rast2 raster, expression text, pixeltype text, extenttype text, nodata1expr text, nodata2expr text, nodatanodataval double precision | raster + postgis_raster | public | st_mapalgebrafct | rast raster, band integer, onerastuserfunc regprocedure | raster + postgis_raster | public | st_mapalgebrafct | rast raster, band integer, onerastuserfunc regprocedure, VARIADIC args text[] | raster + postgis_raster | public | st_mapalgebrafct | rast raster, band integer, pixeltype text, 
onerastuserfunc regprocedure | raster + postgis_raster | public | st_mapalgebrafct | rast raster, band integer, pixeltype text, onerastuserfunc regprocedure, VARIADIC args text[] | raster + postgis_raster | public | st_mapalgebrafct | rast raster, onerastuserfunc regprocedure | raster + postgis_raster | public | st_mapalgebrafct | rast raster, onerastuserfunc regprocedure, VARIADIC args text[] | raster + postgis_raster | public | st_mapalgebrafct | rast raster, pixeltype text, onerastuserfunc regprocedure | raster + postgis_raster | public | st_mapalgebrafct | rast raster, pixeltype text, onerastuserfunc regprocedure, VARIADIC args text[] | raster + postgis_raster | public | st_mapalgebrafct | rast1 raster, band1 integer, rast2 raster, band2 integer, tworastuserfunc regprocedure, pixeltype text, extenttype text, VARIADIC userargs text[] | raster + postgis_raster | public | st_mapalgebrafct | rast1 raster, rast2 raster, tworastuserfunc regprocedure, pixeltype text, extenttype text, VARIADIC userargs text[] | raster + postgis_raster | public | st_mapalgebrafctngb | rast raster, band integer, pixeltype text, ngbwidth integer, ngbheight integer, onerastngbuserfunc regprocedure, nodatamode text, VARIADIC args text[] | raster + postgis_raster | public | st_max4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision + postgis_raster | public | st_max4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | st_mean4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision + postgis_raster | public | st_mean4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | st_memsize | raster | integer + postgis_raster | public | st_metadata | rast raster, OUT upperleftx double precision, OUT upperlefty double precision, OUT width integer, OUT height integer, OUT scalex double precision, OUT scaley 
double precision, OUT skewx double precision, OUT skewy double precision, OUT srid integer, OUT numbands integer | record + postgis_raster | public | st_min4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision + postgis_raster | public | st_min4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | st_minconvexhull | rast raster, nband integer | geometry + postgis_raster | public | st_mindist4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | st_minpossiblevalue | pixeltype text | double precision + postgis_raster | public | st_nearestvalue | rast raster, band integer, columnx integer, rowy integer, exclude_nodata_value boolean | double precision + postgis_raster | public | st_nearestvalue | rast raster, band integer, pt geometry, exclude_nodata_value boolean | double precision + postgis_raster | public | st_nearestvalue | rast raster, columnx integer, rowy integer, exclude_nodata_value boolean | double precision + postgis_raster | public | st_nearestvalue | rast raster, pt geometry, exclude_nodata_value boolean | double precision + postgis_raster | public | st_neighborhood | rast raster, band integer, columnx integer, rowy integer, distancex integer, distancey integer, exclude_nodata_value boolean | double precision[] + postgis_raster | public | st_neighborhood | rast raster, band integer, pt geometry, distancex integer, distancey integer, exclude_nodata_value boolean | double precision[] + postgis_raster | public | st_neighborhood | rast raster, columnx integer, rowy integer, distancex integer, distancey integer, exclude_nodata_value boolean | double precision[] + postgis_raster | public | st_neighborhood | rast raster, pt geometry, distancex integer, distancey integer, exclude_nodata_value boolean | double precision[] + postgis_raster | public | st_notsamealignmentreason | rast1 raster, rast2 raster 
| text + postgis_raster | public | st_numbands | raster | integer + postgis_raster | public | st_overlaps | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | st_overlaps | rast1 raster, rast2 raster | boolean + postgis_raster | public | st_pixelascentroid | rast raster, x integer, y integer | geometry + postgis_raster | public | st_pixelascentroids | rast raster, band integer, exclude_nodata_value boolean | TABLE(geom geometry, val double precision, x integer, y integer) + postgis_raster | public | st_pixelaspoint | rast raster, x integer, y integer | geometry + postgis_raster | public | st_pixelaspoints | rast raster, band integer, exclude_nodata_value boolean | TABLE(geom geometry, val double precision, x integer, y integer) + postgis_raster | public | st_pixelaspolygon | rast raster, x integer, y integer | geometry + postgis_raster | public | st_pixelaspolygons | rast raster, band integer, exclude_nodata_value boolean | TABLE(geom geometry, val double precision, x integer, y integer) + postgis_raster | public | st_pixelheight | raster | double precision + postgis_raster | public | st_pixelofvalue | rast raster, nband integer, search double precision, exclude_nodata_value boolean | TABLE(x integer, y integer) + postgis_raster | public | st_pixelofvalue | rast raster, nband integer, search double precision[], exclude_nodata_value boolean | TABLE(val double precision, x integer, y integer) + postgis_raster | public | st_pixelofvalue | rast raster, search double precision, exclude_nodata_value boolean | TABLE(x integer, y integer) + postgis_raster | public | st_pixelofvalue | rast raster, search double precision[], exclude_nodata_value boolean | TABLE(val double precision, x integer, y integer) + postgis_raster | public | st_pixelwidth | raster | double precision + postgis_raster | public | st_polygon | rast raster, band integer | geometry + postgis_raster | public | st_quantile | rast raster, exclude_nodata_value 
boolean, quantile double precision | double precision + postgis_raster | public | st_quantile | rast raster, nband integer, exclude_nodata_value boolean, quantile double precision | double precision + postgis_raster | public | st_quantile | rast raster, nband integer, exclude_nodata_value boolean, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record + postgis_raster | public | st_quantile | rast raster, nband integer, quantile double precision | double precision + postgis_raster | public | st_quantile | rast raster, nband integer, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record + postgis_raster | public | st_quantile | rast raster, quantile double precision | double precision + postgis_raster | public | st_quantile | rast raster, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record + postgis_raster | public | st_range4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision + postgis_raster | public | st_range4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | st_rastertoworldcoord | rast raster, columnx integer, rowy integer, OUT longitude double precision, OUT latitude double precision | record + postgis_raster | public | st_rastertoworldcoordx | rast raster, xr integer | double precision + postgis_raster | public | st_rastertoworldcoordx | rast raster, xr integer, yr integer | double precision + postgis_raster | public | st_rastertoworldcoordy | rast raster, xr integer, yr integer | double precision + postgis_raster | public | st_rastertoworldcoordy | rast raster, yr integer | double precision + postgis_raster | public | st_rastfromhexwkb | text | raster + postgis_raster | public | st_rastfromwkb | bytea | raster + postgis_raster | public | st_reclass | rast raster, VARIADIC reclassargset reclassarg[] | raster + 
postgis_raster | public | st_reclass | rast raster, nband integer, reclassexpr text, pixeltype text, nodataval double precision | raster + postgis_raster | public | st_reclass | rast raster, reclassexpr text, pixeltype text | raster + postgis_raster | public | st_resample | rast raster, ref raster, algorithm text, maxerr double precision, usescale boolean | raster + postgis_raster | public | st_resample | rast raster, ref raster, usescale boolean, algorithm text, maxerr double precision | raster + postgis_raster | public | st_resample | rast raster, scalex double precision, scaley double precision, gridx double precision, gridy double precision, skewx double precision, skewy double precision, algorithm text, maxerr double precision | raster + postgis_raster | public | st_resample | rast raster, width integer, height integer, gridx double precision, gridy double precision, skewx double precision, skewy double precision, algorithm text, maxerr double precision | raster + postgis_raster | public | st_rescale | rast raster, scalex double precision, scaley double precision, algorithm text, maxerr double precision | raster + postgis_raster | public | st_rescale | rast raster, scalexy double precision, algorithm text, maxerr double precision | raster + postgis_raster | public | st_resize | rast raster, percentwidth double precision, percentheight double precision, algorithm text, maxerr double precision | raster + postgis_raster | public | st_resize | rast raster, width integer, height integer, algorithm text, maxerr double precision | raster + postgis_raster | public | st_resize | rast raster, width text, height text, algorithm text, maxerr double precision | raster + postgis_raster | public | st_reskew | rast raster, skewx double precision, skewy double precision, algorithm text, maxerr double precision | raster + postgis_raster | public | st_reskew | rast raster, skewxy double precision, algorithm text, maxerr double precision | raster + postgis_raster | public | 
st_retile | tab regclass, col name, ext geometry, sfx double precision, sfy double precision, tw integer, th integer, algo text | SETOF raster + postgis_raster | public | st_rotation | raster | double precision + postgis_raster | public | st_roughness | rast raster, nband integer, customextent raster, pixeltype text, interpolate_nodata boolean | raster + postgis_raster | public | st_roughness | rast raster, nband integer, pixeltype text, interpolate_nodata boolean | raster + postgis_raster | public | st_samealignment | rast1 raster, rast2 raster | boolean + postgis_raster | public | st_samealignment | raster | boolean + postgis_raster | public | st_samealignment | ulx1 double precision, uly1 double precision, scalex1 double precision, scaley1 double precision, skewx1 double precision, skewy1 double precision, ulx2 double precision, uly2 double precision, scalex2 double precision, scaley2 double precision, skewx2 double precision, skewy2 double precision | boolean + postgis_raster | public | st_scalex | raster | double precision + postgis_raster | public | st_scaley | raster | double precision + postgis_raster | public | st_setbandindex | rast raster, band integer, outdbindex integer, force boolean | raster + postgis_raster | public | st_setbandisnodata | rast raster, band integer | raster + postgis_raster | public | st_setbandnodatavalue | rast raster, band integer, nodatavalue double precision, forcechecking boolean | raster + postgis_raster | public | st_setbandnodatavalue | rast raster, nodatavalue double precision | raster + postgis_raster | public | st_setbandpath | rast raster, band integer, outdbpath text, outdbindex integer, force boolean | raster + postgis_raster | public | st_setgeoreference | rast raster, georef text, format text | raster + postgis_raster | public | st_setgeoreference | rast raster, upperleftx double precision, upperlefty double precision, scalex double precision, scaley double precision, skewx double precision, skewy double precision | 
raster + postgis_raster | public | st_setgeotransform | rast raster, imag double precision, jmag double precision, theta_i double precision, theta_ij double precision, xoffset double precision, yoffset double precision | raster + postgis_raster | public | st_setm | rast raster, geom geometry, resample text, band integer | geometry + postgis_raster | public | st_setrotation | rast raster, rotation double precision | raster + postgis_raster | public | st_setscale | rast raster, scale double precision | raster + postgis_raster | public | st_setscale | rast raster, scalex double precision, scaley double precision | raster + postgis_raster | public | st_setskew | rast raster, skew double precision | raster + postgis_raster | public | st_setskew | rast raster, skewx double precision, skewy double precision | raster + postgis_raster | public | st_setsrid | rast raster, srid integer | raster + postgis_raster | public | st_setupperleft | rast raster, upperleftx double precision, upperlefty double precision | raster + postgis_raster | public | st_setvalue | rast raster, band integer, x integer, y integer, newvalue double precision | raster + postgis_raster | public | st_setvalue | rast raster, geom geometry, newvalue double precision | raster + postgis_raster | public | st_setvalue | rast raster, nband integer, geom geometry, newvalue double precision | raster + postgis_raster | public | st_setvalue | rast raster, x integer, y integer, newvalue double precision | raster + postgis_raster | public | st_setvalues | rast raster, nband integer, geomvalset geomval[], keepnodata boolean | raster + postgis_raster | public | st_setvalues | rast raster, nband integer, x integer, y integer, newvalueset double precision[], noset boolean[], keepnodata boolean | raster + postgis_raster | public | st_setvalues | rast raster, nband integer, x integer, y integer, newvalueset double precision[], nosetvalue double precision, keepnodata boolean | raster + postgis_raster | public | st_setvalues 
| rast raster, nband integer, x integer, y integer, width integer, height integer, newvalue double precision, keepnodata boolean | raster + postgis_raster | public | st_setvalues | rast raster, x integer, y integer, width integer, height integer, newvalue double precision, keepnodata boolean | raster + postgis_raster | public | st_setz | rast raster, geom geometry, resample text, band integer | geometry + postgis_raster | public | st_skewx | raster | double precision + postgis_raster | public | st_skewy | raster | double precision + postgis_raster | public | st_slope | rast raster, nband integer, customextent raster, pixeltype text, units text, scale double precision, interpolate_nodata boolean | raster + postgis_raster | public | st_slope | rast raster, nband integer, pixeltype text, units text, scale double precision, interpolate_nodata boolean | raster + postgis_raster | public | st_snaptogrid | rast raster, gridx double precision, gridy double precision, algorithm text, maxerr double precision, scalex double precision, scaley double precision | raster + postgis_raster | public | st_snaptogrid | rast raster, gridx double precision, gridy double precision, scalex double precision, scaley double precision, algorithm text, maxerr double precision | raster + postgis_raster | public | st_snaptogrid | rast raster, gridx double precision, gridy double precision, scalexy double precision, algorithm text, maxerr double precision | raster + postgis_raster | public | st_srid | raster | integer + postgis_raster | public | st_stddev4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision + postgis_raster | public | st_stddev4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | st_sum4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision + postgis_raster | public | st_sum4ma | value double precision[], pos integer[], VARIADIC userargs text[] | 
double precision + postgis_raster | public | st_summary | rast raster | text + postgis_raster | public | st_summarystats | rast raster, exclude_nodata_value boolean | summarystats + postgis_raster | public | st_summarystats | rast raster, nband integer, exclude_nodata_value boolean | summarystats + postgis_raster | public | st_summarystatsagg | raster, boolean, double precision | summarystats + postgis_raster | public | st_summarystatsagg | raster, integer, boolean | summarystats + postgis_raster | public | st_summarystatsagg | raster, integer, boolean, double precision | summarystats + postgis_raster | public | st_tile | rast raster, nband integer, width integer, height integer, padwithnodata boolean, nodataval double precision | SETOF raster + postgis_raster | public | st_tile | rast raster, nband integer[], width integer, height integer, padwithnodata boolean, nodataval double precision | SETOF raster + postgis_raster | public | st_tile | rast raster, width integer, height integer, padwithnodata boolean, nodataval double precision | SETOF raster + postgis_raster | public | st_touches | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | st_touches | rast1 raster, rast2 raster | boolean + postgis_raster | public | st_tpi | rast raster, nband integer, customextent raster, pixeltype text, interpolate_nodata boolean | raster + postgis_raster | public | st_tpi | rast raster, nband integer, pixeltype text, interpolate_nodata boolean | raster + postgis_raster | public | st_transform | rast raster, alignto raster, algorithm text, maxerr double precision | raster + postgis_raster | public | st_transform | rast raster, srid integer, algorithm text, maxerr double precision, scalex double precision, scaley double precision | raster + postgis_raster | public | st_transform | rast raster, srid integer, scalex double precision, scaley double precision, algorithm text, maxerr double precision | raster + postgis_raster | public | 
st_transform | rast raster, srid integer, scalexy double precision, algorithm text, maxerr double precision | raster + postgis_raster | public | st_tri | rast raster, nband integer, customextent raster, pixeltype text, interpolate_nodata boolean | raster + postgis_raster | public | st_tri | rast raster, nband integer, pixeltype text, interpolate_nodata boolean | raster + postgis_raster | public | st_union | raster | raster + postgis_raster | public | st_union | raster, integer | raster + postgis_raster | public | st_union | raster, integer, text | raster + postgis_raster | public | st_union | raster, text | raster + postgis_raster | public | st_union | raster, unionarg[] | raster + postgis_raster | public | st_upperleftx | raster | double precision + postgis_raster | public | st_upperlefty | raster | double precision + postgis_raster | public | st_value | rast raster, band integer, pt geometry, exclude_nodata_value boolean, resample text | double precision + postgis_raster | public | st_value | rast raster, band integer, x integer, y integer, exclude_nodata_value boolean | double precision + postgis_raster | public | st_value | rast raster, pt geometry, exclude_nodata_value boolean | double precision + postgis_raster | public | st_value | rast raster, x integer, y integer, exclude_nodata_value boolean | double precision + postgis_raster | public | st_valuecount | rast raster, nband integer, exclude_nodata_value boolean, searchvalue double precision, roundto double precision | integer + postgis_raster | public | st_valuecount | rast raster, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer | SETOF record + postgis_raster | public | st_valuecount | rast raster, nband integer, searchvalue double precision, roundto double precision | integer + postgis_raster | public | st_valuecount | rast raster, nband integer, searchvalues double precision[], roundto double precision, 
OUT value double precision, OUT count integer | SETOF record + postgis_raster | public | st_valuecount | rast raster, searchvalue double precision, roundto double precision | integer + postgis_raster | public | st_valuecount | rast raster, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer | SETOF record + postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, nband integer, exclude_nodata_value boolean, searchvalue double precision, roundto double precision | integer + postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer | SETOF record + postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, nband integer, searchvalue double precision, roundto double precision | integer + postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, nband integer, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer | SETOF record + postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, searchvalue double precision, roundto double precision | integer + postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer | SETOF record + postgis_raster | public | st_valuepercent | rast raster, nband integer, exclude_nodata_value boolean, searchvalue double precision, roundto double precision | double precision + postgis_raster | public | st_valuepercent | rast raster, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record + postgis_raster | public | st_valuepercent | rast raster, nband 
integer, searchvalue double precision, roundto double precision | double precision + postgis_raster | public | st_valuepercent | rast raster, nband integer, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record + postgis_raster | public | st_valuepercent | rast raster, searchvalue double precision, roundto double precision | double precision + postgis_raster | public | st_valuepercent | rast raster, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record + postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, nband integer, exclude_nodata_value boolean, searchvalue double precision, roundto double precision | double precision + postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record + postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, nband integer, searchvalue double precision, roundto double precision | double precision + postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, nband integer, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record + postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, searchvalue double precision, roundto double precision | double precision + postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record + postgis_raster | public | st_width | raster | integer + postgis_raster | public | st_within | rast1 raster, nband1 integer, rast2 raster, nband2 integer 
| boolean + postgis_raster | public | st_within | rast1 raster, rast2 raster | boolean + postgis_raster | public | st_worldtorastercoord | rast raster, longitude double precision, latitude double precision, OUT columnx integer, OUT rowy integer | record + postgis_raster | public | st_worldtorastercoord | rast raster, pt geometry, OUT columnx integer, OUT rowy integer | record + postgis_raster | public | st_worldtorastercoordx | rast raster, pt geometry | integer + postgis_raster | public | st_worldtorastercoordx | rast raster, xw double precision | integer + postgis_raster | public | st_worldtorastercoordx | rast raster, xw double precision, yw double precision | integer + postgis_raster | public | st_worldtorastercoordy | rast raster, pt geometry | integer + postgis_raster | public | st_worldtorastercoordy | rast raster, xw double precision, yw double precision | integer + postgis_raster | public | st_worldtorastercoordy | rast raster, yw double precision | integer + postgis_raster | public | updaterastersrid | schema_name name, table_name name, column_name name, new_srid integer | boolean + postgis_raster | public | updaterastersrid | table_name name, column_name name, new_srid integer | boolean + postgis_sfcgal | public | postgis_sfcgal_full_version | | text + postgis_sfcgal | public | postgis_sfcgal_noop | geometry | geometry + postgis_sfcgal | public | postgis_sfcgal_scripts_installed | | text + postgis_sfcgal | public | postgis_sfcgal_version | | text + postgis_sfcgal | public | st_3darea | geometry | double precision + postgis_sfcgal | public | st_3dconvexhull | geometry | geometry + postgis_sfcgal | public | st_3ddifference | geom1 geometry, geom2 geometry | geometry + postgis_sfcgal | public | st_3dintersection | geom1 geometry, geom2 geometry | geometry + postgis_sfcgal | public | st_3dunion | geom1 geometry, geom2 geometry | geometry + postgis_sfcgal | public | st_3dunion | geometry | geometry + postgis_sfcgal | public | st_alphashape | g1 geometry, 
alpha double precision, allow_holes boolean | geometry + postgis_sfcgal | public | st_approximatemedialaxis | geometry | geometry + postgis_sfcgal | public | st_constraineddelaunaytriangles | geometry | geometry + postgis_sfcgal | public | st_extrude | geometry, double precision, double precision, double precision | geometry + postgis_sfcgal | public | st_forcelhr | geometry | geometry + postgis_sfcgal | public | st_isplanar | geometry | boolean + postgis_sfcgal | public | st_issolid | geometry | boolean + postgis_sfcgal | public | st_makesolid | geometry | geometry + postgis_sfcgal | public | st_minkowskisum | geometry, geometry | geometry + postgis_sfcgal | public | st_optimalalphashape | g1 geometry, allow_holes boolean, nb_components integer | geometry + postgis_sfcgal | public | st_orientation | geometry | integer + postgis_sfcgal | public | st_straightskeleton | geometry | geometry + postgis_sfcgal | public | st_tesselate | geometry | geometry + postgis_sfcgal | public | st_volume | geometry | double precision + postgis_tiger_geocoder | tiger | count_words | character varying | integer + postgis_tiger_geocoder | tiger | create_census_base_tables | | text + postgis_tiger_geocoder | tiger | cull_null | character varying | character varying + postgis_tiger_geocoder | tiger | diff_zip | zip1 character varying, zip2 character varying | integer + postgis_tiger_geocoder | tiger | drop_dupe_featnames_generate_script | | text + postgis_tiger_geocoder | tiger | drop_indexes_generate_script | tiger_data_schema text | text + postgis_tiger_geocoder | tiger | drop_nation_tables_generate_script | param_schema text | text + postgis_tiger_geocoder | tiger | drop_state_tables_generate_script | param_state text, param_schema text | text + postgis_tiger_geocoder | tiger | end_soundex | character varying | character varying + postgis_tiger_geocoder | tiger | geocode | in_addy tiger.norm_addy, max_results integer, restrict_geom geometry, OUT addy tiger.norm_addy, OUT geomout 
geometry, OUT rating integer | SETOF record + postgis_tiger_geocoder | tiger | geocode | input character varying, max_results integer, restrict_geom geometry, OUT addy tiger.norm_addy, OUT geomout geometry, OUT rating integer | SETOF record + postgis_tiger_geocoder | tiger | geocode_address | parsed tiger.norm_addy, max_results integer, restrict_geom geometry, OUT addy tiger.norm_addy, OUT geomout geometry, OUT rating integer | SETOF record + postgis_tiger_geocoder | tiger | geocode_intersection | roadway1 text, roadway2 text, in_state text, in_city text, in_zip text, num_results integer, OUT addy tiger.norm_addy, OUT geomout geometry, OUT rating integer | SETOF record + postgis_tiger_geocoder | tiger | geocode_location | parsed tiger.norm_addy, restrict_geom geometry, OUT addy tiger.norm_addy, OUT geomout geometry, OUT rating integer | SETOF record + postgis_tiger_geocoder | tiger | get_geocode_setting | setting_name text | text + postgis_tiger_geocoder | tiger | get_last_words | inputstring character varying, count integer | character varying + postgis_tiger_geocoder | tiger | get_tract | loc_geom geometry, output_field text | text + postgis_tiger_geocoder | tiger | greatest_hn | fromhn character varying, tohn character varying | integer + postgis_tiger_geocoder | tiger | includes_address | given_address integer, addr1 integer, addr2 integer, addr3 integer, addr4 integer | boolean + postgis_tiger_geocoder | tiger | install_geocode_settings | | void + postgis_tiger_geocoder | tiger | install_missing_indexes | | boolean + postgis_tiger_geocoder | tiger | install_pagc_tables | | void + postgis_tiger_geocoder | tiger | interpolate_from_address | given_address integer, in_addr1 character varying, in_addr2 character varying, in_road geometry, in_side character varying, in_offset_m double precision | geometry + postgis_tiger_geocoder | tiger | is_pretype | text | boolean + postgis_tiger_geocoder | tiger | least_hn | fromhn character varying, tohn character varying | 
integer + postgis_tiger_geocoder | tiger | levenshtein_ignore_case | character varying, character varying | integer + postgis_tiger_geocoder | tiger | loader_generate_census_script | param_states text[], os text | SETOF text + postgis_tiger_geocoder | tiger | loader_generate_nation_script | os text | SETOF text + postgis_tiger_geocoder | tiger | loader_generate_script | param_states text[], os text | SETOF text + postgis_tiger_geocoder | tiger | loader_load_staged_data | param_staging_table text, param_target_table text | integer + postgis_tiger_geocoder | tiger | loader_load_staged_data | param_staging_table text, param_target_table text, param_columns_exclude text[] | integer + postgis_tiger_geocoder | tiger | loader_macro_replace | param_input text, param_keys text[], param_values text[] | text + postgis_tiger_geocoder | tiger | location_extract | fullstreet character varying, stateabbrev character varying | character varying + postgis_tiger_geocoder | tiger | location_extract_countysub_exact | fullstreet character varying, stateabbrev character varying | character varying + postgis_tiger_geocoder | tiger | location_extract_countysub_fuzzy | fullstreet character varying, stateabbrev character varying | character varying + postgis_tiger_geocoder | tiger | location_extract_place_exact | fullstreet character varying, stateabbrev character varying | character varying + postgis_tiger_geocoder | tiger | location_extract_place_fuzzy | fullstreet character varying, stateabbrev character varying | character varying + postgis_tiger_geocoder | tiger | missing_indexes_generate_script | | text + postgis_tiger_geocoder | tiger | normalize_address | in_rawinput character varying | tiger.norm_addy + postgis_tiger_geocoder | tiger | nullable_levenshtein | character varying, character varying | integer + postgis_tiger_geocoder | tiger | numeric_streets_equal | input_street character varying, output_street character varying | boolean + postgis_tiger_geocoder | tiger | 
pagc_normalize_address | in_rawinput character varying | tiger.norm_addy + postgis_tiger_geocoder | tiger | pprint_addy | input tiger.norm_addy | character varying + postgis_tiger_geocoder | tiger | rate_attributes | dirpa character varying, dirpb character varying, streetnamea character varying, streetnameb character varying, streettypea character varying, streettypeb character varying, dirsa character varying, dirsb character varying, locationa character varying, locationb character varying, prequalabr character varying | integer + postgis_tiger_geocoder | tiger | rate_attributes | dirpa character varying, dirpb character varying, streetnamea character varying, streetnameb character varying, streettypea character varying, streettypeb character varying, dirsa character varying, dirsb character varying, prequalabr character varying | integer + postgis_tiger_geocoder | tiger | reverse_geocode | pt geometry, include_strnum_range boolean, OUT intpt geometry[], OUT addy tiger.norm_addy[], OUT street character varying[] | record + postgis_tiger_geocoder | tiger | set_geocode_setting | setting_name text, setting_value text | text + postgis_tiger_geocoder | tiger | setsearchpathforinstall | a_schema_name text | text + postgis_tiger_geocoder | tiger | state_extract | rawinput character varying | character varying + postgis_tiger_geocoder | tiger | topology_load_tiger | toponame character varying, region_type character varying, region_id character varying | text + postgis_tiger_geocoder | tiger | utmzone | geometry | integer + postgis_tiger_geocoder | tiger | zip_range | zip text, range_start integer, range_end integer | character varying[] + postgis_topology | topology | _asgmledge | edge_id integer, start_node integer, end_node integer, line geometry, visitedtable regclass, nsprefix_in text, prec integer, options integer, idprefix text, gmlver integer | text + postgis_topology | topology | _asgmlface | toponame text, face_id integer, visitedtable regclass, nsprefix_in 
text, prec integer, options integer, idprefix text, gmlver integer | text + postgis_topology | topology | _asgmlnode | id integer, point geometry, nsprefix_in text, prec integer, options integer, idprefix text, gmlver integer | text + postgis_topology | topology | _checkedgelinking | curedge_edge_id integer, prevedge_edge_id integer, prevedge_next_left_edge integer, prevedge_next_right_edge integer | validatetopology_returntype + postgis_topology | topology | _st_adjacentedges | atopology character varying, anode integer, anedge integer | integer[] + postgis_topology | topology | _st_mintolerance | ageom geometry | double precision + postgis_topology | topology | _st_mintolerance | atopology character varying, ageom geometry | double precision + postgis_topology | topology | _validatetopologyedgelinking | bbox geometry | SETOF validatetopology_returntype + postgis_topology | topology | _validatetopologygetfaceshellmaximaledgering | atopology character varying, aface integer | geometry + postgis_topology | topology | _validatetopologygetringedges | starting_edge integer | integer[] + postgis_topology | topology | _validatetopologyrings | bbox geometry | SETOF validatetopology_returntype + postgis_topology | topology | addedge | atopology character varying, aline geometry | integer + postgis_topology | topology | addface | atopology character varying, apoly geometry, force_new boolean | integer + postgis_topology | topology | addnode | atopology character varying, apoint geometry, allowedgesplitting boolean, setcontainingface boolean | integer + postgis_topology | topology | addtopogeometrycolumn | character varying, character varying, character varying, character varying, character varying | integer + postgis_topology | topology | addtopogeometrycolumn | toponame character varying, schema character varying, tbl character varying, col character varying, ltype character varying, child integer | integer + postgis_topology | topology | addtosearchpath | a_schema_name 
character varying | text + postgis_topology | topology | asgml | tg topogeometry | text + postgis_topology | topology | asgml | tg topogeometry, nsprefix text | text + postgis_topology | topology | asgml | tg topogeometry, nsprefix text, prec integer, options integer, vis regclass | text + postgis_topology | topology | asgml | tg topogeometry, nsprefix text, prec integer, options integer, visitedtable regclass, idprefix text | text + postgis_topology | topology | asgml | tg topogeometry, nsprefix text, prec integer, opts integer | text + postgis_topology | topology | asgml | tg topogeometry, nsprefix_in text, precision_in integer, options_in integer, visitedtable regclass, idprefix text, gmlver integer | text + postgis_topology | topology | asgml | tg topogeometry, visitedtable regclass | text + postgis_topology | topology | asgml | tg topogeometry, visitedtable regclass, nsprefix text | text + postgis_topology | topology | astopojson | tg topogeometry, edgemaptable regclass | text + postgis_topology | topology | cleartopogeom | tg topogeometry | topogeometry + postgis_topology | topology | copytopology | atopology character varying, newtopo character varying | integer + postgis_topology | topology | createtopogeom | toponame character varying, tg_type integer, layer_id integer | topogeometry + postgis_topology | topology | createtopogeom | toponame character varying, tg_type integer, layer_id integer, tg_objs topoelementarray | topogeometry + postgis_topology | topology | createtopology | atopology character varying, srid integer, prec double precision, hasz boolean | integer + postgis_topology | topology | createtopology | character varying | integer + postgis_topology | topology | createtopology | character varying, integer | integer + postgis_topology | topology | createtopology | toponame character varying, srid integer, prec double precision | integer + postgis_topology | topology | droptopogeometrycolumn | schema character varying, tbl character varying, col 
character varying | text + postgis_topology | topology | droptopology | atopology character varying | text + postgis_topology | topology | equals | tg1 topogeometry, tg2 topogeometry | boolean + postgis_topology | topology | findlayer | layer_table regclass, feature_column name | layer + postgis_topology | topology | findlayer | schema_name name, table_name name, feature_column name | layer + postgis_topology | topology | findlayer | tg topogeometry | layer + postgis_topology | topology | findlayer | topology_id integer, layer_id integer | layer + postgis_topology | topology | findtopology | integer | topology + postgis_topology | topology | findtopology | name, name, name | topology + postgis_topology | topology | findtopology | regclass, name | topology + postgis_topology | topology | findtopology | text | topology + postgis_topology | topology | findtopology | topogeometry | topology + postgis_topology | topology | geometry | topogeom topogeometry | geometry + postgis_topology | topology | geometrytype | tg topogeometry | text + postgis_topology | topology | getedgebypoint | atopology character varying, apoint geometry, tol1 double precision | integer + postgis_topology | topology | getfacebypoint | atopology character varying, apoint geometry, tol1 double precision | integer + postgis_topology | topology | getfacecontainingpoint | atopology text, apoint geometry | integer + postgis_topology | topology | getnodebypoint | atopology character varying, apoint geometry, tol1 double precision | integer + postgis_topology | topology | getnodeedges | atopology character varying, anode integer | SETOF getfaceedges_returntype + postgis_topology | topology | getringedges | atopology character varying, anedge integer, maxedges integer | SETOF getfaceedges_returntype + postgis_topology | topology | gettopogeomelementarray | tg topogeometry | topoelementarray + postgis_topology | topology | gettopogeomelementarray | toponame character varying, layer_id integer, tgid integer 
| topoelementarray + postgis_topology | topology | gettopogeomelements | tg topogeometry | SETOF topoelement + postgis_topology | topology | gettopogeomelements | toponame character varying, layerid integer, tgid integer | SETOF topoelement + postgis_topology | topology | gettopologyid | toponame character varying | integer + postgis_topology | topology | gettopologyname | topoid integer | character varying + postgis_topology | topology | gettopologysrid | toponame character varying | integer + postgis_topology | topology | intersects | tg1 topogeometry, tg2 topogeometry | boolean + postgis_topology | topology | layertrigger | | trigger + postgis_topology | topology | polygonize | toponame character varying | text + postgis_topology | topology | populate_topology_layer | | TABLE(schema_name text, table_name text, feature_column text) + postgis_topology | topology | postgis_topology_scripts_installed | | text + postgis_topology | topology | relationtrigger | | trigger + postgis_topology | topology | removeunusedprimitives | atopology text, bbox geometry | integer + postgis_topology | topology | st_addedgemodface | atopology character varying, anode integer, anothernode integer, acurve geometry | integer + postgis_topology | topology | st_addedgenewfaces | atopology character varying, anode integer, anothernode integer, acurve geometry | integer + postgis_topology | topology | st_addisoedge | atopology character varying, anode integer, anothernode integer, acurve geometry | integer + postgis_topology | topology | st_addisonode | atopology character varying, aface integer, apoint geometry | integer + postgis_topology | topology | st_changeedgegeom | atopology character varying, anedge integer, acurve geometry | text + postgis_topology | topology | st_createtopogeo | atopology character varying, acollection geometry | text + postgis_topology | topology | st_geometrytype | tg topogeometry | text + postgis_topology | topology | st_getfaceedges | toponame character 
varying, face_id integer | SETOF getfaceedges_returntype + postgis_topology | topology | st_getfacegeometry | toponame character varying, aface integer | geometry + postgis_topology | topology | st_inittopogeo | atopology character varying | text + postgis_topology | topology | st_modedgeheal | toponame character varying, e1id integer, e2id integer | integer + postgis_topology | topology | st_modedgesplit | atopology character varying, anedge integer, apoint geometry | integer + postgis_topology | topology | st_moveisonode | atopology character varying, anode integer, apoint geometry | text + postgis_topology | topology | st_newedgeheal | toponame character varying, e1id integer, e2id integer | integer + postgis_topology | topology | st_newedgessplit | atopology character varying, anedge integer, apoint geometry | integer + postgis_topology | topology | st_remedgemodface | toponame character varying, e1id integer | integer + postgis_topology | topology | st_remedgenewface | toponame character varying, e1id integer | integer + postgis_topology | topology | st_remisonode | character varying, integer | text + postgis_topology | topology | st_removeisoedge | atopology character varying, anedge integer | text + postgis_topology | topology | st_removeisonode | atopology character varying, anode integer | text + postgis_topology | topology | st_simplify | tg topogeometry, tolerance double precision | geometry + postgis_topology | topology | st_srid | tg topogeometry | integer + postgis_topology | topology | topoelementarray_agg | topoelement | topoelementarray + postgis_topology | topology | topoelementarray_append | topoelementarray, topoelement | topoelementarray + postgis_topology | topology | topogeo_addgeometry | atopology character varying, ageom geometry, tolerance double precision | void + postgis_topology | topology | topogeo_addlinestring | atopology character varying, aline geometry, tolerance double precision | SETOF integer + postgis_topology | topology | 
topogeo_addpoint | atopology character varying, apoint geometry, tolerance double precision | integer + postgis_topology | topology | topogeo_addpolygon | atopology character varying, apoly geometry, tolerance double precision | SETOF integer + postgis_topology | topology | topogeom_addelement | tg topogeometry, el topoelement | topogeometry + postgis_topology | topology | topogeom_addtopogeom | tgt topogeometry, src topogeometry | topogeometry + postgis_topology | topology | topogeom_remelement | tg topogeometry, el topoelement | topogeometry + postgis_topology | topology | topologysummary | atopology character varying | text + postgis_topology | topology | totopogeom | ageom geometry, atopology character varying, alayer integer, atolerance double precision | topogeometry + postgis_topology | topology | totopogeom | ageom geometry, tg topogeometry, atolerance double precision | topogeometry + postgis_topology | topology | validatetopology | toponame character varying, bbox geometry | SETOF validatetopology_returntype + postgis_topology | topology | validatetopologyrelation | toponame character varying | TABLE(error text, layer_id integer, topogeo_id integer, element_id integer) + postgres_fdw | public | postgres_fdw_disconnect | text | boolean + postgres_fdw | public | postgres_fdw_disconnect_all | | boolean + postgres_fdw | public | postgres_fdw_get_connections | OUT server_name text, OUT valid boolean | SETOF record + postgres_fdw | public | postgres_fdw_handler | | fdw_handler + postgres_fdw | public | postgres_fdw_validator | text[], oid | void + refint | public | check_foreign_key | | trigger + refint | public | check_primary_key | | trigger + rum | public | rum_anyarray_config | internal | void + rum | public | rum_anyarray_consistent | internal, smallint, anyarray, integer, internal, internal, internal, internal | boolean + rum | public | rum_anyarray_distance | anyarray, anyarray | double precision + rum | public | rum_anyarray_ordering | internal, 
smallint, anyarray, integer, internal, internal, internal, internal, internal | double precision + rum | public | rum_anyarray_similar | anyarray, anyarray | boolean + rum | public | rum_bit_compare_prefix | bit, bit, smallint, internal | integer + rum | public | rum_bit_extract_query | bit, internal, smallint, internal, internal | internal + rum | public | rum_bit_extract_value | bit, internal | internal + rum | public | rum_btree_consistent | internal, smallint, internal, integer, internal, internal, internal, internal | boolean + rum | public | rum_bytea_compare_prefix | bytea, bytea, smallint, internal | integer + rum | public | rum_bytea_extract_query | bytea, internal, smallint, internal, internal | internal + rum | public | rum_bytea_extract_value | bytea, internal | internal + rum | public | rum_char_compare_prefix | "char", "char", smallint, internal | integer + rum | public | rum_char_extract_query | "char", internal, smallint, internal, internal | internal + rum | public | rum_char_extract_value | "char", internal | internal + rum | public | rum_cidr_compare_prefix | cidr, cidr, smallint, internal | integer + rum | public | rum_cidr_extract_query | cidr, internal, smallint, internal, internal | internal + rum | public | rum_cidr_extract_value | cidr, internal | internal + rum | public | rum_date_compare_prefix | date, date, smallint, internal | integer + rum | public | rum_date_extract_query | date, internal, smallint, internal, internal | internal + rum | public | rum_date_extract_value | date, internal | internal + rum | public | rum_extract_anyarray | anyarray, internal, internal, internal, internal | internal + rum | public | rum_extract_anyarray_query | anyarray, internal, smallint, internal, internal, internal, internal | internal + rum | public | rum_extract_tsquery | tsquery, internal, smallint, internal, internal, internal, internal | internal + rum | public | rum_extract_tsquery_hash | tsquery, internal, smallint, internal, internal, internal, 
internal | internal + rum | public | rum_extract_tsvector | tsvector, internal, internal, internal, internal | internal + rum | public | rum_extract_tsvector_hash | tsvector, internal, internal, internal, internal | internal + rum | public | rum_float4_compare_prefix | real, real, smallint, internal | integer + rum | public | rum_float4_config | internal | void + rum | public | rum_float4_distance | real, real | double precision + rum | public | rum_float4_extract_query | real, internal, smallint, internal, internal | internal + rum | public | rum_float4_extract_value | real, internal | internal + rum | public | rum_float4_key_distance | real, real, smallint | double precision + rum | public | rum_float4_left_distance | real, real | double precision + rum | public | rum_float4_outer_distance | real, real, smallint | double precision + rum | public | rum_float4_right_distance | real, real | double precision + rum | public | rum_float8_compare_prefix | double precision, double precision, smallint, internal | integer + rum | public | rum_float8_config | internal | void + rum | public | rum_float8_distance | double precision, double precision | double precision + rum | public | rum_float8_extract_query | double precision, internal, smallint, internal, internal | internal + rum | public | rum_float8_extract_value | double precision, internal | internal + rum | public | rum_float8_key_distance | double precision, double precision, smallint | double precision + rum | public | rum_float8_left_distance | double precision, double precision | double precision + rum | public | rum_float8_outer_distance | double precision, double precision, smallint | double precision + rum | public | rum_float8_right_distance | double precision, double precision | double precision + rum | public | rum_inet_compare_prefix | inet, inet, smallint, internal | integer + rum | public | rum_inet_extract_query | inet, internal, smallint, internal, internal | internal + rum | public | 
rum_inet_extract_value | inet, internal | internal + rum | public | rum_int2_compare_prefix | smallint, smallint, smallint, internal | integer + rum | public | rum_int2_config | internal | void + rum | public | rum_int2_distance | smallint, smallint | double precision + rum | public | rum_int2_extract_query | smallint, internal, smallint, internal, internal | internal + rum | public | rum_int2_extract_value | smallint, internal | internal + rum | public | rum_int2_key_distance | smallint, smallint, smallint | double precision + rum | public | rum_int2_left_distance | smallint, smallint | double precision + rum | public | rum_int2_outer_distance | smallint, smallint, smallint | double precision + rum | public | rum_int2_right_distance | smallint, smallint | double precision + rum | public | rum_int4_compare_prefix | integer, integer, smallint, internal | integer + rum | public | rum_int4_config | internal | void + rum | public | rum_int4_distance | integer, integer | double precision + rum | public | rum_int4_extract_query | integer, internal, smallint, internal, internal | internal + rum | public | rum_int4_extract_value | integer, internal | internal + rum | public | rum_int4_key_distance | integer, integer, smallint | double precision + rum | public | rum_int4_left_distance | integer, integer | double precision + rum | public | rum_int4_outer_distance | integer, integer, smallint | double precision + rum | public | rum_int4_right_distance | integer, integer | double precision + rum | public | rum_int8_compare_prefix | bigint, bigint, smallint, internal | integer + rum | public | rum_int8_config | internal | void + rum | public | rum_int8_distance | bigint, bigint | double precision + rum | public | rum_int8_extract_query | bigint, internal, smallint, internal, internal | internal + rum | public | rum_int8_extract_value | bigint, internal | internal + rum | public | rum_int8_key_distance | bigint, bigint, smallint | double precision + rum | public | 
rum_int8_left_distance | bigint, bigint | double precision + rum | public | rum_int8_outer_distance | bigint, bigint, smallint | double precision + rum | public | rum_int8_right_distance | bigint, bigint | double precision + rum | public | rum_interval_compare_prefix | interval, interval, smallint, internal | integer + rum | public | rum_interval_extract_query | interval, internal, smallint, internal, internal | internal + rum | public | rum_interval_extract_value | interval, internal | internal + rum | public | rum_macaddr_compare_prefix | macaddr, macaddr, smallint, internal | integer + rum | public | rum_macaddr_extract_query | macaddr, internal, smallint, internal, internal | internal + rum | public | rum_macaddr_extract_value | macaddr, internal | internal + rum | public | rum_money_compare_prefix | money, money, smallint, internal | integer + rum | public | rum_money_config | internal | void + rum | public | rum_money_distance | money, money | double precision + rum | public | rum_money_extract_query | money, internal, smallint, internal, internal | internal + rum | public | rum_money_extract_value | money, internal | internal + rum | public | rum_money_key_distance | money, money, smallint | double precision + rum | public | rum_money_left_distance | money, money | double precision + rum | public | rum_money_outer_distance | money, money, smallint | double precision + rum | public | rum_money_right_distance | money, money | double precision + rum | public | rum_numeric_cmp | numeric, numeric | integer + rum | public | rum_numeric_compare_prefix | numeric, numeric, smallint, internal | integer + rum | public | rum_numeric_extract_query | numeric, internal, smallint, internal, internal | internal + rum | public | rum_numeric_extract_value | numeric, internal | internal + rum | public | rum_oid_compare_prefix | oid, oid, smallint, internal | integer + rum | public | rum_oid_config | internal | void + rum | public | rum_oid_distance | oid, oid | double precision 
+ rum | public | rum_oid_extract_query | oid, internal, smallint, internal, internal | internal + rum | public | rum_oid_extract_value | oid, internal | internal + rum | public | rum_oid_key_distance | oid, oid, smallint | double precision + rum | public | rum_oid_left_distance | oid, oid | double precision + rum | public | rum_oid_outer_distance | oid, oid, smallint | double precision + rum | public | rum_oid_right_distance | oid, oid | double precision + rum | public | rum_text_compare_prefix | text, text, smallint, internal | integer + rum | public | rum_text_extract_query | text, internal, smallint, internal, internal | internal + rum | public | rum_text_extract_value | text, internal | internal + rum | public | rum_time_compare_prefix | time without time zone, time without time zone, smallint, internal | integer + rum | public | rum_time_extract_query | time without time zone, internal, smallint, internal, internal | internal + rum | public | rum_time_extract_value | time without time zone, internal | internal + rum | public | rum_timestamp_compare_prefix | timestamp without time zone, timestamp without time zone, smallint, internal | integer + rum | public | rum_timestamp_config | internal | void + rum | public | rum_timestamp_consistent | internal, smallint, timestamp without time zone, integer, internal, internal, internal, internal | boolean + rum | public | rum_timestamp_distance | timestamp without time zone, timestamp without time zone | double precision + rum | public | rum_timestamp_extract_query | timestamp without time zone, internal, smallint, internal, internal, internal, internal | internal + rum | public | rum_timestamp_extract_value | timestamp without time zone, internal, internal, internal, internal | internal + rum | public | rum_timestamp_key_distance | timestamp without time zone, timestamp without time zone, smallint | double precision + rum | public | rum_timestamp_left_distance | timestamp without time zone, timestamp without time zone 
| double precision + rum | public | rum_timestamp_outer_distance | timestamp without time zone, timestamp without time zone, smallint | double precision + rum | public | rum_timestamp_right_distance | timestamp without time zone, timestamp without time zone | double precision + rum | public | rum_timestamptz_distance | timestamp with time zone, timestamp with time zone | double precision + rum | public | rum_timestamptz_key_distance | timestamp with time zone, timestamp with time zone, smallint | double precision + rum | public | rum_timestamptz_left_distance | timestamp with time zone, timestamp with time zone | double precision + rum | public | rum_timestamptz_right_distance | timestamp with time zone, timestamp with time zone | double precision + rum | public | rum_timetz_compare_prefix | time with time zone, time with time zone, smallint, internal | integer + rum | public | rum_timetz_extract_query | time with time zone, internal, smallint, internal, internal | internal + rum | public | rum_timetz_extract_value | time with time zone, internal | internal + rum | public | rum_ts_distance | tsvector, rum_distance_query | real + rum | public | rum_ts_distance | tsvector, tsquery | real + rum | public | rum_ts_distance | tsvector, tsquery, integer | real + rum | public | rum_ts_join_pos | internal, internal | bytea + rum | public | rum_ts_score | tsvector, rum_distance_query | real + rum | public | rum_ts_score | tsvector, tsquery | real + rum | public | rum_ts_score | tsvector, tsquery, integer | real + rum | public | rum_tsquery_addon_consistent | internal, smallint, tsvector, integer, internal, internal, internal, internal | boolean + rum | public | rum_tsquery_consistent | internal, smallint, tsvector, integer, internal, internal, internal, internal | boolean + rum | public | rum_tsquery_distance | internal, smallint, tsvector, integer, internal, internal, internal, internal, internal | double precision + rum | public | rum_tsquery_pre_consistent | internal, 
smallint, tsvector, integer, internal, internal, internal, internal | boolean + rum | public | rum_tsvector_config | internal | void + rum | public | rum_varbit_compare_prefix | bit varying, bit varying, smallint, internal | integer + rum | public | rum_varbit_extract_query | bit varying, internal, smallint, internal, internal | internal + rum | public | rum_varbit_extract_value | bit varying, internal | internal + rum | public | rumhandler | internal | index_am_handler + rum | public | ruminv_extract_tsquery | tsquery, internal, internal, internal, internal | internal + rum | public | ruminv_extract_tsvector | tsvector, internal, smallint, internal, internal, internal, internal | internal + rum | public | ruminv_tsquery_config | internal | void + rum | public | ruminv_tsvector_consistent | internal, smallint, tsvector, integer, internal, internal, internal, internal | boolean + rum | public | tsquery_to_distance_query | tsquery | rum_distance_query + seg | public | gseg_consistent | internal, seg, smallint, oid, internal | boolean + seg | public | gseg_penalty | internal, internal, internal | internal + seg | public | gseg_picksplit | internal, internal | internal + seg | public | gseg_same | seg, seg, internal | internal + seg | public | gseg_union | internal, internal | seg + seg | public | seg_center | seg | real + seg | public | seg_cmp | seg, seg | integer + seg | public | seg_contained | seg, seg | boolean + seg | public | seg_contains | seg, seg | boolean + seg | public | seg_different | seg, seg | boolean + seg | public | seg_ge | seg, seg | boolean + seg | public | seg_gt | seg, seg | boolean + seg | public | seg_in | cstring | seg + seg | public | seg_inter | seg, seg | seg + seg | public | seg_le | seg, seg | boolean + seg | public | seg_left | seg, seg | boolean + seg | public | seg_lower | seg | real + seg | public | seg_lt | seg, seg | boolean + seg | public | seg_out | seg | cstring + seg | public | seg_over_left | seg, seg | boolean + seg | public 
| seg_over_right | seg, seg | boolean + seg | public | seg_overlap | seg, seg | boolean + seg | public | seg_right | seg, seg | boolean + seg | public | seg_same | seg, seg | boolean + seg | public | seg_size | seg | real + seg | public | seg_union | seg, seg | seg + seg | public | seg_upper | seg | real + sslinfo | public | ssl_cipher | | text + sslinfo | public | ssl_client_cert_present | | boolean + sslinfo | public | ssl_client_dn | | text + sslinfo | public | ssl_client_dn_field | text | text + sslinfo | public | ssl_client_serial | | numeric + sslinfo | public | ssl_extension_info | OUT name text, OUT value text, OUT critical boolean | SETOF record + sslinfo | public | ssl_is_used | | boolean + sslinfo | public | ssl_issuer_dn | | text + sslinfo | public | ssl_issuer_field | text | text + sslinfo | public | ssl_version | | text + tealbase_vault | vault | create_secret | new_secret text, new_name text, new_description text, new_key_id uuid | uuid + tealbase_vault | vault | update_secret | secret_id uuid, new_secret text, new_name text, new_description text, new_key_id uuid | void + tablefunc | public | connectby | text, text, text, text, integer | SETOF record + tablefunc | public | connectby | text, text, text, text, integer, text | SETOF record + tablefunc | public | connectby | text, text, text, text, text, integer | SETOF record + tablefunc | public | connectby | text, text, text, text, text, integer, text | SETOF record + tablefunc | public | crosstab | text | SETOF record + tablefunc | public | crosstab | text, integer | SETOF record + tablefunc | public | crosstab | text, text | SETOF record + tablefunc | public | crosstab2 | text | SETOF tablefunc_crosstab_2 + tablefunc | public | crosstab3 | text | SETOF tablefunc_crosstab_3 + tablefunc | public | crosstab4 | text | SETOF tablefunc_crosstab_4 + tablefunc | public | normal_rand | integer, double precision, double precision | SETOF double precision + tcn | public | triggered_change_notification | | 
trigger + timescaledb | _timescaledb_debug | extension_state | | text + timescaledb | _timescaledb_functions | alter_job_set_hypertable_id | job_id integer, hypertable regclass | integer + timescaledb | _timescaledb_functions | attach_osm_table_chunk | hypertable regclass, chunk regclass | boolean + timescaledb | _timescaledb_functions | bookend_deserializefunc | bytea, internal | internal + timescaledb | _timescaledb_functions | bookend_finalfunc | internal, anyelement, "any" | anyelement + timescaledb | _timescaledb_functions | bookend_serializefunc | internal | bytea + timescaledb | _timescaledb_functions | cagg_get_bucket_function_info | mat_hypertable_id integer, OUT bucket_func regprocedure, OUT bucket_width text, OUT bucket_origin text, OUT bucket_offset text, OUT bucket_timezone text, OUT bucket_fixed_width boolean | record + timescaledb | _timescaledb_functions | cagg_migrate_create_plan | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _cagg_name_new text, IN _override boolean, IN _drop_old boolean | + timescaledb | _timescaledb_functions | cagg_migrate_execute_copy_data | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | + timescaledb | _timescaledb_functions | cagg_migrate_execute_copy_policies | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | + timescaledb | _timescaledb_functions | cagg_migrate_execute_create_new_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | + timescaledb | _timescaledb_functions | cagg_migrate_execute_disable_policies | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | + timescaledb | _timescaledb_functions | cagg_migrate_execute_drop_old_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step 
_timescaledb_catalog.continuous_agg_migrate_plan_step | + timescaledb | _timescaledb_functions | cagg_migrate_execute_enable_policies | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | + timescaledb | _timescaledb_functions | cagg_migrate_execute_override_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | + timescaledb | _timescaledb_functions | cagg_migrate_execute_plan | IN _cagg_data _timescaledb_catalog.continuous_agg | + timescaledb | _timescaledb_functions | cagg_migrate_execute_refresh_new_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | + timescaledb | _timescaledb_functions | cagg_migrate_plan_exists | _hypertable_id integer | boolean + timescaledb | _timescaledb_functions | cagg_migrate_pre_validation | _cagg_schema text, _cagg_name text, _cagg_name_new text | _timescaledb_catalog.continuous_agg + timescaledb | _timescaledb_functions | cagg_migrate_to_time_bucket | IN cagg regclass | + timescaledb | _timescaledb_functions | cagg_validate_query | query text, OUT is_valid boolean, OUT error_level text, OUT error_code text, OUT error_message text, OUT error_detail text, OUT error_hint text | record + timescaledb | _timescaledb_functions | cagg_watermark | hypertable_id integer | bigint + timescaledb | _timescaledb_functions | cagg_watermark_materialized | hypertable_id integer | bigint + timescaledb | _timescaledb_functions | calculate_chunk_interval | dimension_id integer, dimension_coord bigint, chunk_target_size bigint | bigint + timescaledb | _timescaledb_functions | chunk_constraint_add_table_constraint | chunk_constraint_row _timescaledb_catalog.chunk_constraint | void + timescaledb | _timescaledb_functions | chunk_id_from_relid | relid oid | integer + timescaledb | _timescaledb_functions | chunk_index_clone | chunk_index_oid 
oid | oid + timescaledb | _timescaledb_functions | chunk_index_replace | chunk_index_oid_old oid, chunk_index_oid_new oid | void + timescaledb | _timescaledb_functions | chunk_status | regclass | integer + timescaledb | _timescaledb_functions | chunks_local_size | schema_name_in name, table_name_in name | TABLE(chunk_id integer, chunk_schema name, chunk_name name, table_bytes bigint, index_bytes bigint, toast_bytes bigint, total_bytes bigint) + timescaledb | _timescaledb_functions | compressed_chunk_local_stats | schema_name_in name, table_name_in name | TABLE(chunk_schema name, chunk_name name, compression_status text, before_compression_table_bytes bigint, before_compression_index_bytes bigint, before_compression_toast_bytes bigint, before_compression_total_bytes bigint, after_compression_table_bytes bigint, after_compression_index_bytes bigint, after_compression_toast_bytes bigint, after_compression_total_bytes bigint) + timescaledb | _timescaledb_functions | compressed_data_in | cstring | _timescaledb_internal.compressed_data + timescaledb | _timescaledb_functions | compressed_data_out | _timescaledb_internal.compressed_data | cstring + timescaledb | _timescaledb_functions | compressed_data_recv | internal | _timescaledb_internal.compressed_data + timescaledb | _timescaledb_functions | compressed_data_send | _timescaledb_internal.compressed_data | bytea + timescaledb | _timescaledb_functions | constraint_clone | constraint_oid oid, target_oid regclass | void + timescaledb | _timescaledb_functions | continuous_agg_invalidation_trigger | | trigger + timescaledb | _timescaledb_functions | create_chunk | hypertable regclass, slices jsonb, schema_name name, table_name name, chunk_table regclass | TABLE(chunk_id integer, hypertable_id integer, schema_name name, table_name name, relkind "char", slices jsonb, created boolean) + timescaledb | _timescaledb_functions | create_chunk_table | hypertable regclass, slices jsonb, schema_name name, table_name name | boolean + 
timescaledb | _timescaledb_functions | create_compressed_chunk | chunk regclass, chunk_table regclass, uncompressed_heap_size bigint, uncompressed_toast_size bigint, uncompressed_index_size bigint, compressed_heap_size bigint, compressed_toast_size bigint, compressed_index_size bigint, numrows_pre_compression bigint, numrows_post_compression bigint | regclass + timescaledb | _timescaledb_functions | dimension_info_in | cstring | _timescaledb_internal.dimension_info + timescaledb | _timescaledb_functions | dimension_info_out | _timescaledb_internal.dimension_info | cstring + timescaledb | _timescaledb_functions | drop_chunk | chunk regclass | boolean + timescaledb | _timescaledb_functions | finalize_agg | agg_name text, inner_agg_collation_schema name, inner_agg_collation_name name, inner_agg_input_types name[], inner_agg_serialized_state bytea, return_type_dummy_val anyelement | anyelement + timescaledb | _timescaledb_functions | finalize_agg_ffunc | tstate internal, aggfn text, inner_agg_collation_schema name, inner_agg_collation_name name, inner_agg_input_types name[], inner_agg_serialized_state bytea, return_type_dummy_val anyelement | anyelement + timescaledb | _timescaledb_functions | finalize_agg_sfunc | tstate internal, aggfn text, inner_agg_collation_schema name, inner_agg_collation_name name, inner_agg_input_types name[], inner_agg_serialized_state bytea, return_type_dummy_val anyelement | internal + timescaledb | _timescaledb_functions | first_combinefunc | internal, internal | internal + timescaledb | _timescaledb_functions | first_sfunc | internal, anyelement, "any" | internal + timescaledb | _timescaledb_functions | freeze_chunk | chunk regclass | boolean + timescaledb | _timescaledb_functions | generate_uuid | | uuid + timescaledb | _timescaledb_functions | get_approx_row_count | relation regclass | bigint + timescaledb | _timescaledb_functions | get_compressed_chunk_index_for_recompression | uncompressed_chunk regclass | regclass + timescaledb | 
_timescaledb_functions | get_create_command | table_name name | text + timescaledb | _timescaledb_functions | get_git_commit | | TABLE(commit_tag text, commit_hash text, commit_time timestamp with time zone) + timescaledb | _timescaledb_functions | get_orderby_defaults | relation regclass, segment_by_cols text[] | jsonb + timescaledb | _timescaledb_functions | get_os_info | | TABLE(sysname text, version text, release text, version_pretty text) + timescaledb | _timescaledb_functions | get_partition_for_key | val anyelement | integer + timescaledb | _timescaledb_functions | get_partition_hash | val anyelement | integer + timescaledb | _timescaledb_functions | get_segmentby_defaults | relation regclass | jsonb + timescaledb | _timescaledb_functions | hist_combinefunc | state1 internal, state2 internal | internal + timescaledb | _timescaledb_functions | hist_deserializefunc | bytea, internal | internal + timescaledb | _timescaledb_functions | hist_finalfunc | state internal, val double precision, min double precision, max double precision, nbuckets integer | integer[] + timescaledb | _timescaledb_functions | hist_serializefunc | internal | bytea + timescaledb | _timescaledb_functions | hist_sfunc | state internal, val double precision, min double precision, max double precision, nbuckets integer | internal + timescaledb | _timescaledb_functions | hypertable_local_size | schema_name_in name, table_name_in name | TABLE(table_bytes bigint, index_bytes bigint, toast_bytes bigint, total_bytes bigint) + timescaledb | _timescaledb_functions | hypertable_osm_range_update | hypertable regclass, range_start anyelement, range_end anyelement, empty boolean | boolean + timescaledb | _timescaledb_functions | indexes_local_size | schema_name_in name, index_name_in name | TABLE(hypertable_id integer, total_bytes bigint) + timescaledb | _timescaledb_functions | insert_blocker | | trigger + timescaledb | _timescaledb_functions | interval_to_usec | chunk_interval interval | bigint + 
timescaledb | _timescaledb_functions | last_combinefunc | internal, internal | internal + timescaledb | _timescaledb_functions | last_sfunc | internal, anyelement, "any" | internal + timescaledb | _timescaledb_functions | makeaclitem | regrole, regrole, text, boolean | aclitem + timescaledb | _timescaledb_functions | metadata_insert_trigger | | trigger + timescaledb | _timescaledb_functions | partialize_agg | arg anyelement | bytea + timescaledb | _timescaledb_functions | policy_compression | IN job_id integer, IN config jsonb | + timescaledb | _timescaledb_functions | policy_compression_check | config jsonb | void + timescaledb | _timescaledb_functions | policy_compression_execute | IN job_id integer, IN htid integer, IN lag anyelement, IN maxchunks integer, IN verbose_log boolean, IN recompress_enabled boolean, IN use_creation_time boolean | + timescaledb | _timescaledb_functions | policy_job_stat_history_retention | job_id integer, config jsonb | integer + timescaledb | _timescaledb_functions | policy_job_stat_history_retention_check | config jsonb | void + timescaledb | _timescaledb_functions | policy_recompression | IN job_id integer, IN config jsonb | + timescaledb | _timescaledb_functions | policy_refresh_continuous_aggregate | IN job_id integer, IN config jsonb | + timescaledb | _timescaledb_functions | policy_refresh_continuous_aggregate_check | config jsonb | void + timescaledb | _timescaledb_functions | policy_reorder | IN job_id integer, IN config jsonb | + timescaledb | _timescaledb_functions | policy_reorder_check | config jsonb | void + timescaledb | _timescaledb_functions | policy_retention | IN job_id integer, IN config jsonb | + timescaledb | _timescaledb_functions | policy_retention_check | config jsonb | void + timescaledb | _timescaledb_functions | process_ddl_event | | event_trigger + timescaledb | _timescaledb_functions | range_value_to_pretty | time_value bigint, column_type regtype | text + timescaledb | _timescaledb_functions | 
recompress_chunk_segmentwise | uncompressed_chunk regclass, if_compressed boolean | regclass + timescaledb | _timescaledb_functions | relation_approximate_size | relation regclass | TABLE(total_size bigint, heap_size bigint, index_size bigint, toast_size bigint) + timescaledb | _timescaledb_functions | relation_size | relation regclass | TABLE(total_size bigint, heap_size bigint, index_size bigint, toast_size bigint) + timescaledb | _timescaledb_functions | remove_dropped_chunk_metadata | _hypertable_id integer | integer + timescaledb | _timescaledb_functions | repair_relation_acls | | + timescaledb | _timescaledb_functions | restart_background_workers | | boolean + timescaledb | _timescaledb_functions | show_chunk | chunk regclass | TABLE(chunk_id integer, hypertable_id integer, schema_name name, table_name name, relkind "char", slices jsonb) + timescaledb | _timescaledb_functions | start_background_workers | | boolean + timescaledb | _timescaledb_functions | stop_background_workers | | boolean + timescaledb | _timescaledb_functions | subtract_integer_from_now | hypertable_relid regclass, lag bigint | bigint + timescaledb | _timescaledb_functions | time_to_internal | time_val anyelement | bigint + timescaledb | _timescaledb_functions | to_date | unixtime_us bigint | date + timescaledb | _timescaledb_functions | to_interval | unixtime_us bigint | interval + timescaledb | _timescaledb_functions | to_timestamp | unixtime_us bigint | timestamp with time zone + timescaledb | _timescaledb_functions | to_timestamp_without_timezone | unixtime_us bigint | timestamp without time zone + timescaledb | _timescaledb_functions | to_unix_microseconds | ts timestamp with time zone | bigint + timescaledb | _timescaledb_functions | tsl_loaded | | boolean + timescaledb | _timescaledb_functions | unfreeze_chunk | chunk regclass | boolean + timescaledb | _timescaledb_internal | alter_job_set_hypertable_id | job_id integer, hypertable regclass | integer + timescaledb | 
_timescaledb_internal | attach_osm_table_chunk | hypertable regclass, chunk regclass | boolean + timescaledb | _timescaledb_internal | cagg_migrate_create_plan | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _cagg_name_new text, IN _override boolean, IN _drop_old boolean | + timescaledb | _timescaledb_internal | cagg_migrate_execute_copy_data | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | + timescaledb | _timescaledb_internal | cagg_migrate_execute_copy_policies | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | + timescaledb | _timescaledb_internal | cagg_migrate_execute_create_new_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | + timescaledb | _timescaledb_internal | cagg_migrate_execute_disable_policies | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | + timescaledb | _timescaledb_internal | cagg_migrate_execute_drop_old_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | + timescaledb | _timescaledb_internal | cagg_migrate_execute_enable_policies | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | + timescaledb | _timescaledb_internal | cagg_migrate_execute_override_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | + timescaledb | _timescaledb_internal | cagg_migrate_execute_plan | IN _cagg_data _timescaledb_catalog.continuous_agg | + timescaledb | _timescaledb_internal | cagg_migrate_execute_refresh_new_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | + 
timescaledb | _timescaledb_internal | cagg_migrate_plan_exists | _hypertable_id integer | boolean + timescaledb | _timescaledb_internal | cagg_migrate_pre_validation | _cagg_schema text, _cagg_name text, _cagg_name_new text | _timescaledb_catalog.continuous_agg + timescaledb | _timescaledb_internal | cagg_watermark | hypertable_id integer | bigint + timescaledb | _timescaledb_internal | cagg_watermark_materialized | hypertable_id integer | bigint + timescaledb | _timescaledb_internal | calculate_chunk_interval | dimension_id integer, dimension_coord bigint, chunk_target_size bigint | bigint + timescaledb | _timescaledb_internal | chunk_constraint_add_table_constraint | chunk_constraint_row _timescaledb_catalog.chunk_constraint | void + timescaledb | _timescaledb_internal | chunk_id_from_relid | relid oid | integer + timescaledb | _timescaledb_internal | chunk_index_clone | chunk_index_oid oid | oid + timescaledb | _timescaledb_internal | chunk_index_replace | chunk_index_oid_old oid, chunk_index_oid_new oid | void + timescaledb | _timescaledb_internal | chunk_status | regclass | integer + timescaledb | _timescaledb_internal | chunks_local_size | schema_name_in name, table_name_in name | TABLE(chunk_id integer, chunk_schema name, chunk_name name, table_bytes bigint, index_bytes bigint, toast_bytes bigint, total_bytes bigint) + timescaledb | _timescaledb_internal | compressed_chunk_local_stats | schema_name_in name, table_name_in name | TABLE(chunk_schema name, chunk_name name, compression_status text, before_compression_table_bytes bigint, before_compression_index_bytes bigint, before_compression_toast_bytes bigint, before_compression_total_bytes bigint, after_compression_table_bytes bigint, after_compression_index_bytes bigint, after_compression_toast_bytes bigint, after_compression_total_bytes bigint) + timescaledb | _timescaledb_internal | compressed_chunk_remote_stats | schema_name_in name, table_name_in name | TABLE(chunk_schema name, chunk_name name, 
compression_status text, before_compression_table_bytes bigint, before_compression_index_bytes bigint, before_compression_toast_bytes bigint, before_compression_total_bytes bigint, after_compression_table_bytes bigint, after_compression_index_bytes bigint, after_compression_toast_bytes bigint, after_compression_total_bytes bigint, node_name name) + timescaledb | _timescaledb_internal | continuous_agg_invalidation_trigger | | trigger + timescaledb | _timescaledb_internal | create_chunk | hypertable regclass, _slices jsonb, _schema_name name, _table_name name, chunk_table regclass | TABLE(chunk_id integer, hypertable_id integer, schema_name name, table_name name, relkind "char", slices jsonb, created boolean) + timescaledb | _timescaledb_internal | create_chunk_table | hypertable regclass, slices jsonb, schema_name name, table_name name | boolean + timescaledb | _timescaledb_internal | create_compressed_chunk | chunk regclass, chunk_table regclass, uncompressed_heap_size bigint, uncompressed_toast_size bigint, uncompressed_index_size bigint, compressed_heap_size bigint, compressed_toast_size bigint, compressed_index_size bigint, numrows_pre_compression bigint, numrows_post_compression bigint | regclass + timescaledb | _timescaledb_internal | drop_chunk | chunk regclass | boolean + timescaledb | _timescaledb_internal | finalize_agg | agg_name text, inner_agg_collation_schema name, inner_agg_collation_name name, inner_agg_input_types name[], inner_agg_serialized_state bytea, return_type_dummy_val anyelement | anyelement + timescaledb | _timescaledb_internal | finalize_agg_ffunc | tstate internal, aggfn text, inner_agg_collation_schema name, inner_agg_collation_name name, inner_agg_input_types name[], inner_agg_serialized_state bytea, return_type_dummy_val anyelement | anyelement + timescaledb | _timescaledb_internal | finalize_agg_sfunc | tstate internal, aggfn text, inner_agg_collation_schema name, inner_agg_collation_name name, inner_agg_input_types name[], 
inner_agg_serialized_state bytea, return_type_dummy_val anyelement | internal + timescaledb | _timescaledb_internal | freeze_chunk | chunk regclass | boolean + timescaledb | _timescaledb_internal | generate_uuid | | uuid + timescaledb | _timescaledb_internal | get_approx_row_count | relation regclass | bigint + timescaledb | _timescaledb_internal | get_compressed_chunk_index_for_recompression | uncompressed_chunk regclass | regclass + timescaledb | _timescaledb_internal | get_create_command | table_name name | text + timescaledb | _timescaledb_internal | get_git_commit | | TABLE(commit_tag text, commit_hash text, commit_time timestamp with time zone) + timescaledb | _timescaledb_internal | get_os_info | | TABLE(sysname text, version text, release text, version_pretty text) + timescaledb | _timescaledb_internal | get_partition_for_key | val anyelement | integer + timescaledb | _timescaledb_internal | get_partition_hash | val anyelement | integer + timescaledb | _timescaledb_internal | hypertable_local_size | schema_name_in name, table_name_in name | TABLE(table_bytes bigint, index_bytes bigint, toast_bytes bigint, total_bytes bigint) + timescaledb | _timescaledb_internal | indexes_local_size | schema_name_in name, table_name_in name | TABLE(hypertable_id integer, total_bytes bigint) + timescaledb | _timescaledb_internal | insert_blocker | | trigger + timescaledb | _timescaledb_internal | interval_to_usec | chunk_interval interval | bigint + timescaledb | _timescaledb_internal | partialize_agg | arg anyelement | bytea + timescaledb | _timescaledb_internal | policy_compression | IN job_id integer, IN config jsonb | + timescaledb | _timescaledb_internal | policy_compression_check | config jsonb | void + timescaledb | _timescaledb_internal | policy_compression_execute | IN job_id integer, IN htid integer, IN lag anyelement, IN maxchunks integer, IN verbose_log boolean, IN recompress_enabled boolean, IN use_creation_time boolean | + timescaledb | _timescaledb_internal | 
policy_job_stat_history_retention | job_id integer, config jsonb | integer + timescaledb | _timescaledb_internal | policy_job_stat_history_retention_check | config jsonb | void + timescaledb | _timescaledb_internal | policy_recompression | IN job_id integer, IN config jsonb | + timescaledb | _timescaledb_internal | policy_refresh_continuous_aggregate | IN job_id integer, IN config jsonb | + timescaledb | _timescaledb_internal | policy_refresh_continuous_aggregate_check | config jsonb | void + timescaledb | _timescaledb_internal | policy_reorder | IN job_id integer, IN config jsonb | + timescaledb | _timescaledb_internal | policy_reorder_check | config jsonb | void + timescaledb | _timescaledb_internal | policy_retention | IN job_id integer, IN config jsonb | + timescaledb | _timescaledb_internal | policy_retention_check | config jsonb | void + timescaledb | _timescaledb_internal | process_ddl_event | | event_trigger + timescaledb | _timescaledb_internal | range_value_to_pretty | time_value bigint, column_type regtype | text + timescaledb | _timescaledb_internal | recompress_chunk_segmentwise | uncompressed_chunk regclass, if_compressed boolean | regclass + timescaledb | _timescaledb_internal | relation_size | relation regclass | TABLE(total_size bigint, heap_size bigint, index_size bigint, toast_size bigint) + timescaledb | _timescaledb_internal | restart_background_workers | | boolean + timescaledb | _timescaledb_internal | show_chunk | chunk regclass | TABLE(chunk_id integer, hypertable_id integer, schema_name name, table_name name, relkind "char", slices jsonb) + timescaledb | _timescaledb_internal | start_background_workers | | boolean + timescaledb | _timescaledb_internal | stop_background_workers | | boolean + timescaledb | _timescaledb_internal | subtract_integer_from_now | hypertable_relid regclass, lag bigint | bigint + timescaledb | _timescaledb_internal | time_to_internal | time_val anyelement | bigint + timescaledb | _timescaledb_internal | to_date | 
unixtime_us bigint | date + timescaledb | _timescaledb_internal | to_interval | unixtime_us bigint | interval + timescaledb | _timescaledb_internal | to_timestamp | unixtime_us bigint | timestamp with time zone + timescaledb | _timescaledb_internal | to_timestamp_without_timezone | unixtime_us bigint | timestamp without time zone + timescaledb | _timescaledb_internal | to_unix_microseconds | ts timestamp with time zone | bigint + timescaledb | _timescaledb_internal | tsl_loaded | | boolean + timescaledb | _timescaledb_internal | unfreeze_chunk | chunk regclass | boolean + timescaledb | public | add_compression_policy | hypertable regclass, compress_after "any", if_not_exists boolean, schedule_interval interval, initial_start timestamp with time zone, timezone text, compress_created_before interval | integer + timescaledb | public | add_continuous_aggregate_policy | continuous_aggregate regclass, start_offset "any", end_offset "any", schedule_interval interval, if_not_exists boolean, initial_start timestamp with time zone, timezone text | integer + timescaledb | public | add_dimension | hypertable regclass, column_name name, number_partitions integer, chunk_time_interval anyelement, partitioning_func regproc, if_not_exists boolean | TABLE(dimension_id integer, schema_name name, table_name name, column_name name, created boolean) + timescaledb | public | add_dimension | hypertable regclass, dimension _timescaledb_internal.dimension_info, if_not_exists boolean | TABLE(dimension_id integer, created boolean) + timescaledb | public | add_job | proc regproc, schedule_interval interval, config jsonb, initial_start timestamp with time zone, scheduled boolean, check_config regproc, fixed_schedule boolean, timezone text | integer + timescaledb | public | add_reorder_policy | hypertable regclass, index_name name, if_not_exists boolean, initial_start timestamp with time zone, timezone text | integer + timescaledb | public | add_retention_policy | relation regclass, drop_after 
"any", if_not_exists boolean, schedule_interval interval, initial_start timestamp with time zone, timezone text, drop_created_before interval | integer + timescaledb | public | alter_job | job_id integer, schedule_interval interval, max_runtime interval, max_retries integer, retry_period interval, scheduled boolean, config jsonb, next_start timestamp with time zone, if_exists boolean, check_config regproc, fixed_schedule boolean, initial_start timestamp with time zone, timezone text | TABLE(job_id integer, schedule_interval interval, max_runtime interval, max_retries integer, retry_period interval, scheduled boolean, config jsonb, next_start timestamp with time zone, check_config text, fixed_schedule boolean, initial_start timestamp with time zone, timezone text) + timescaledb | public | approximate_row_count | relation regclass | bigint + timescaledb | public | attach_tablespace | tablespace name, hypertable regclass, if_not_attached boolean | void + timescaledb | public | by_hash | column_name name, number_partitions integer, partition_func regproc | _timescaledb_internal.dimension_info + timescaledb | public | by_range | column_name name, partition_interval anyelement, partition_func regproc | _timescaledb_internal.dimension_info + timescaledb | public | cagg_migrate | IN cagg regclass, IN override boolean, IN drop_old boolean | + timescaledb | public | chunk_compression_stats | hypertable regclass | TABLE(chunk_schema name, chunk_name name, compression_status text, before_compression_table_bytes bigint, before_compression_index_bytes bigint, before_compression_toast_bytes bigint, before_compression_total_bytes bigint, after_compression_table_bytes bigint, after_compression_index_bytes bigint, after_compression_toast_bytes bigint, after_compression_total_bytes bigint, node_name name) + timescaledb | public | chunks_detailed_size | hypertable regclass | TABLE(chunk_schema name, chunk_name name, table_bytes bigint, index_bytes bigint, toast_bytes bigint, 
total_bytes bigint, node_name name) + timescaledb | public | compress_chunk | uncompressed_chunk regclass, if_not_compressed boolean, recompress boolean | regclass + timescaledb | public | create_hypertable | relation regclass, dimension _timescaledb_internal.dimension_info, create_default_indexes boolean, if_not_exists boolean, migrate_data boolean | TABLE(hypertable_id integer, created boolean) + timescaledb | public | create_hypertable | relation regclass, time_column_name name, partitioning_column name, number_partitions integer, associated_schema_name name, associated_table_prefix name, chunk_time_interval anyelement, create_default_indexes boolean, if_not_exists boolean, partitioning_func regproc, migrate_data boolean, chunk_target_size text, chunk_sizing_func regproc, time_partitioning_func regproc | TABLE(hypertable_id integer, schema_name name, table_name name, created boolean) + timescaledb | public | decompress_chunk | uncompressed_chunk regclass, if_compressed boolean | regclass + timescaledb | public | delete_job | job_id integer | void + timescaledb | public | detach_tablespace | tablespace name, hypertable regclass, if_attached boolean | integer + timescaledb | public | detach_tablespaces | hypertable regclass | integer + timescaledb | public | disable_chunk_skipping | hypertable regclass, column_name name, if_not_exists boolean | TABLE(hypertable_id integer, column_name name, disabled boolean) + timescaledb | public | drop_chunks | relation regclass, older_than "any", newer_than "any", "verbose" boolean, created_before "any", created_after "any" | SETOF text + timescaledb | public | enable_chunk_skipping | hypertable regclass, column_name name, if_not_exists boolean | TABLE(column_stats_id integer, enabled boolean) + timescaledb | public | first | anyelement, "any" | anyelement + timescaledb | public | get_telemetry_report | | jsonb + timescaledb | public | histogram | double precision, double precision, double precision, integer | integer[] + 
timescaledb | public | hypertable_approximate_detailed_size | relation regclass | TABLE(table_bytes bigint, index_bytes bigint, toast_bytes bigint, total_bytes bigint) + timescaledb | public | hypertable_approximate_size | hypertable regclass | bigint + timescaledb | public | hypertable_compression_stats | hypertable regclass | TABLE(total_chunks bigint, number_compressed_chunks bigint, before_compression_table_bytes bigint, before_compression_index_bytes bigint, before_compression_toast_bytes bigint, before_compression_total_bytes bigint, after_compression_table_bytes bigint, after_compression_index_bytes bigint, after_compression_toast_bytes bigint, after_compression_total_bytes bigint, node_name name) + timescaledb | public | hypertable_detailed_size | hypertable regclass | TABLE(table_bytes bigint, index_bytes bigint, toast_bytes bigint, total_bytes bigint, node_name name) + timescaledb | public | hypertable_index_size | index_name regclass | bigint + timescaledb | public | hypertable_size | hypertable regclass | bigint + timescaledb | public | interpolate | value bigint, prev record, next record | bigint + timescaledb | public | interpolate | value double precision, prev record, next record | double precision + timescaledb | public | interpolate | value integer, prev record, next record | integer + timescaledb | public | interpolate | value real, prev record, next record | real + timescaledb | public | interpolate | value smallint, prev record, next record | smallint + timescaledb | public | last | anyelement, "any" | anyelement + timescaledb | public | locf | value anyelement, prev anyelement, treat_null_as_missing boolean | anyelement + timescaledb | public | move_chunk | chunk regclass, destination_tablespace name, index_destination_tablespace name, reorder_index regclass, "verbose" boolean | void + timescaledb | public | recompress_chunk | IN chunk regclass, IN if_not_compressed boolean | + timescaledb | public | refresh_continuous_aggregate | IN 
continuous_aggregate regclass, IN window_start "any", IN window_end "any" | + timescaledb | public | remove_compression_policy | hypertable regclass, if_exists boolean | boolean + timescaledb | public | remove_continuous_aggregate_policy | continuous_aggregate regclass, if_not_exists boolean, if_exists boolean | void + timescaledb | public | remove_reorder_policy | hypertable regclass, if_exists boolean | void + timescaledb | public | remove_retention_policy | relation regclass, if_exists boolean | void + timescaledb | public | reorder_chunk | chunk regclass, index regclass, "verbose" boolean | void + timescaledb | public | run_job | IN job_id integer | + timescaledb | public | set_adaptive_chunking | hypertable regclass, chunk_target_size text, INOUT chunk_sizing_func regproc, OUT chunk_target_size bigint | record + timescaledb | public | set_chunk_time_interval | hypertable regclass, chunk_time_interval anyelement, dimension_name name | void + timescaledb | public | set_integer_now_func | hypertable regclass, integer_now_func regproc, replace_if_exists boolean | void + timescaledb | public | set_number_partitions | hypertable regclass, number_partitions integer, dimension_name name | void + timescaledb | public | set_partitioning_interval | hypertable regclass, partition_interval anyelement, dimension_name name | void + timescaledb | public | show_chunks | relation regclass, older_than "any", newer_than "any", created_before "any", created_after "any" | SETOF regclass + timescaledb | public | show_tablespaces | hypertable regclass | SETOF name + timescaledb | public | time_bucket | bucket_width bigint, ts bigint | bigint + timescaledb | public | time_bucket | bucket_width bigint, ts bigint, "offset" bigint | bigint + timescaledb | public | time_bucket | bucket_width integer, ts integer | integer + timescaledb | public | time_bucket | bucket_width integer, ts integer, "offset" integer | integer + timescaledb | public | time_bucket | bucket_width interval, ts date 
| date + timescaledb | public | time_bucket | bucket_width interval, ts date, "offset" interval | date + timescaledb | public | time_bucket | bucket_width interval, ts date, origin date | date + timescaledb | public | time_bucket | bucket_width interval, ts timestamp with time zone | timestamp with time zone + timescaledb | public | time_bucket | bucket_width interval, ts timestamp with time zone, "offset" interval | timestamp with time zone + timescaledb | public | time_bucket | bucket_width interval, ts timestamp with time zone, origin timestamp with time zone | timestamp with time zone + timescaledb | public | time_bucket | bucket_width interval, ts timestamp with time zone, timezone text, origin timestamp with time zone, "offset" interval | timestamp with time zone + timescaledb | public | time_bucket | bucket_width interval, ts timestamp without time zone | timestamp without time zone + timescaledb | public | time_bucket | bucket_width interval, ts timestamp without time zone, "offset" interval | timestamp without time zone + timescaledb | public | time_bucket | bucket_width interval, ts timestamp without time zone, origin timestamp without time zone | timestamp without time zone + timescaledb | public | time_bucket | bucket_width smallint, ts smallint | smallint + timescaledb | public | time_bucket | bucket_width smallint, ts smallint, "offset" smallint | smallint + timescaledb | public | time_bucket_gapfill | bucket_width bigint, ts bigint, start bigint, finish bigint | bigint + timescaledb | public | time_bucket_gapfill | bucket_width integer, ts integer, start integer, finish integer | integer + timescaledb | public | time_bucket_gapfill | bucket_width interval, ts date, start date, finish date | date + timescaledb | public | time_bucket_gapfill | bucket_width interval, ts timestamp with time zone, start timestamp with time zone, finish timestamp with time zone | timestamp with time zone + timescaledb | public | time_bucket_gapfill | bucket_width interval, 
ts timestamp with time zone, timezone text, start timestamp with time zone, finish timestamp with time zone | timestamp with time zone + timescaledb | public | time_bucket_gapfill | bucket_width interval, ts timestamp without time zone, start timestamp without time zone, finish timestamp without time zone | timestamp without time zone + timescaledb | public | time_bucket_gapfill | bucket_width smallint, ts smallint, start smallint, finish smallint | smallint + timescaledb | public | timescaledb_post_restore | | boolean + timescaledb | public | timescaledb_pre_restore | | boolean + timescaledb | timescaledb_experimental | add_policies | relation regclass, if_not_exists boolean, refresh_start_offset "any", refresh_end_offset "any", compress_after "any", drop_after "any" | boolean + timescaledb | timescaledb_experimental | alter_policies | relation regclass, if_exists boolean, refresh_start_offset "any", refresh_end_offset "any", compress_after "any", drop_after "any" | boolean + timescaledb | timescaledb_experimental | remove_all_policies | relation regclass, if_exists boolean | boolean + timescaledb | timescaledb_experimental | remove_policies | relation regclass, if_exists boolean, VARIADIC policy_names text[] | boolean + timescaledb | timescaledb_experimental | show_policies | relation regclass | SETOF jsonb + timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts date | date + timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts date, origin date | date + timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts timestamp with time zone | timestamp with time zone + timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts timestamp with time zone, origin timestamp with time zone | timestamp with time zone + timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts timestamp with time zone, origin timestamp with time zone, 
timezone text | timestamp with time zone + timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts timestamp with time zone, timezone text | timestamp with time zone + timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts timestamp without time zone | timestamp without time zone + timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts timestamp without time zone, origin timestamp without time zone | timestamp without time zone + tsm_system_rows | public | system_rows | internal | tsm_handler + tsm_system_time | public | system_time | internal | tsm_handler + unaccent | public | unaccent | regdictionary, text | text + unaccent | public | unaccent | text | text + unaccent | public | unaccent_init | internal | internal + unaccent | public | unaccent_lexize | internal, internal, internal, internal | internal + uuid-ossp | public | uuid_generate_v1 | | uuid + uuid-ossp | public | uuid_generate_v1mc | | uuid + uuid-ossp | public | uuid_generate_v3 | namespace uuid, name text | uuid + uuid-ossp | public | uuid_generate_v4 | | uuid + uuid-ossp | public | uuid_generate_v5 | namespace uuid, name text | uuid + uuid-ossp | public | uuid_nil | | uuid + uuid-ossp | public | uuid_ns_dns | | uuid + uuid-ossp | public | uuid_ns_oid | | uuid + uuid-ossp | public | uuid_ns_url | | uuid + uuid-ossp | public | uuid_ns_x500 | | uuid + vector | public | array_to_halfvec | double precision[], integer, boolean | halfvec + vector | public | array_to_halfvec | integer[], integer, boolean | halfvec + vector | public | array_to_halfvec | numeric[], integer, boolean | halfvec + vector | public | array_to_halfvec | real[], integer, boolean | halfvec + vector | public | array_to_sparsevec | double precision[], integer, boolean | sparsevec + vector | public | array_to_sparsevec | integer[], integer, boolean | sparsevec + vector | public | array_to_sparsevec | numeric[], integer, boolean | sparsevec + vector | 
public | array_to_sparsevec | real[], integer, boolean | sparsevec + vector | public | array_to_vector | double precision[], integer, boolean | vector + vector | public | array_to_vector | integer[], integer, boolean | vector + vector | public | array_to_vector | numeric[], integer, boolean | vector + vector | public | array_to_vector | real[], integer, boolean | vector + vector | public | avg | halfvec | halfvec + vector | public | avg | vector | vector + vector | public | binary_quantize | halfvec | bit + vector | public | binary_quantize | vector | bit + vector | public | cosine_distance | halfvec, halfvec | double precision + vector | public | cosine_distance | sparsevec, sparsevec | double precision + vector | public | cosine_distance | vector, vector | double precision + vector | public | halfvec | halfvec, integer, boolean | halfvec + vector | public | halfvec_accum | double precision[], halfvec | double precision[] + vector | public | halfvec_add | halfvec, halfvec | halfvec + vector | public | halfvec_avg | double precision[] | halfvec + vector | public | halfvec_cmp | halfvec, halfvec | integer + vector | public | halfvec_combine | double precision[], double precision[] | double precision[] + vector | public | halfvec_concat | halfvec, halfvec | halfvec + vector | public | halfvec_eq | halfvec, halfvec | boolean + vector | public | halfvec_ge | halfvec, halfvec | boolean + vector | public | halfvec_gt | halfvec, halfvec | boolean + vector | public | halfvec_in | cstring, oid, integer | halfvec + vector | public | halfvec_l2_squared_distance | halfvec, halfvec | double precision + vector | public | halfvec_le | halfvec, halfvec | boolean + vector | public | halfvec_lt | halfvec, halfvec | boolean + vector | public | halfvec_mul | halfvec, halfvec | halfvec + vector | public | halfvec_ne | halfvec, halfvec | boolean + vector | public | halfvec_negative_inner_product | halfvec, halfvec | double precision + vector | public | halfvec_out | halfvec | cstring + 
vector | public | halfvec_recv | internal, oid, integer | halfvec + vector | public | halfvec_send | halfvec | bytea + vector | public | halfvec_spherical_distance | halfvec, halfvec | double precision + vector | public | halfvec_sub | halfvec, halfvec | halfvec + vector | public | halfvec_to_float4 | halfvec, integer, boolean | real[] + vector | public | halfvec_to_sparsevec | halfvec, integer, boolean | sparsevec + vector | public | halfvec_to_vector | halfvec, integer, boolean | vector + vector | public | halfvec_typmod_in | cstring[] | integer + vector | public | hamming_distance | bit, bit | double precision + vector | public | hnsw_bit_support | internal | internal + vector | public | hnsw_halfvec_support | internal | internal + vector | public | hnsw_sparsevec_support | internal | internal + vector | public | hnswhandler | internal | index_am_handler + vector | public | inner_product | halfvec, halfvec | double precision + vector | public | inner_product | sparsevec, sparsevec | double precision + vector | public | inner_product | vector, vector | double precision + vector | public | ivfflat_bit_support | internal | internal + vector | public | ivfflat_halfvec_support | internal | internal + vector | public | ivfflathandler | internal | index_am_handler + vector | public | jaccard_distance | bit, bit | double precision + vector | public | l1_distance | halfvec, halfvec | double precision + vector | public | l1_distance | sparsevec, sparsevec | double precision + vector | public | l1_distance | vector, vector | double precision + vector | public | l2_distance | halfvec, halfvec | double precision + vector | public | l2_distance | sparsevec, sparsevec | double precision + vector | public | l2_distance | vector, vector | double precision + vector | public | l2_norm | halfvec | double precision + vector | public | l2_norm | sparsevec | double precision + vector | public | l2_normalize | halfvec | halfvec + vector | public | l2_normalize | sparsevec | sparsevec + 
vector | public | l2_normalize | vector | vector + vector | public | sparsevec | sparsevec, integer, boolean | sparsevec + vector | public | sparsevec_cmp | sparsevec, sparsevec | integer + vector | public | sparsevec_eq | sparsevec, sparsevec | boolean + vector | public | sparsevec_ge | sparsevec, sparsevec | boolean + vector | public | sparsevec_gt | sparsevec, sparsevec | boolean + vector | public | sparsevec_in | cstring, oid, integer | sparsevec + vector | public | sparsevec_l2_squared_distance | sparsevec, sparsevec | double precision + vector | public | sparsevec_le | sparsevec, sparsevec | boolean + vector | public | sparsevec_lt | sparsevec, sparsevec | boolean + vector | public | sparsevec_ne | sparsevec, sparsevec | boolean + vector | public | sparsevec_negative_inner_product | sparsevec, sparsevec | double precision + vector | public | sparsevec_out | sparsevec | cstring + vector | public | sparsevec_recv | internal, oid, integer | sparsevec + vector | public | sparsevec_send | sparsevec | bytea + vector | public | sparsevec_to_halfvec | sparsevec, integer, boolean | halfvec + vector | public | sparsevec_to_vector | sparsevec, integer, boolean | vector + vector | public | sparsevec_typmod_in | cstring[] | integer + vector | public | subvector | halfvec, integer, integer | halfvec + vector | public | subvector | vector, integer, integer | vector + vector | public | sum | halfvec | halfvec + vector | public | sum | vector | vector + vector | public | vector | vector, integer, boolean | vector + vector | public | vector_accum | double precision[], vector | double precision[] + vector | public | vector_add | vector, vector | vector + vector | public | vector_avg | double precision[] | vector + vector | public | vector_cmp | vector, vector | integer + vector | public | vector_combine | double precision[], double precision[] | double precision[] + vector | public | vector_concat | vector, vector | vector + vector | public | vector_dims | halfvec | integer + 
vector | public | vector_dims | vector | integer + vector | public | vector_eq | vector, vector | boolean + vector | public | vector_ge | vector, vector | boolean + vector | public | vector_gt | vector, vector | boolean + vector | public | vector_in | cstring, oid, integer | vector + vector | public | vector_l2_squared_distance | vector, vector | double precision + vector | public | vector_le | vector, vector | boolean + vector | public | vector_lt | vector, vector | boolean + vector | public | vector_mul | vector, vector | vector + vector | public | vector_ne | vector, vector | boolean + vector | public | vector_negative_inner_product | vector, vector | double precision + vector | public | vector_norm | vector | double precision + vector | public | vector_out | vector | cstring + vector | public | vector_recv | internal, oid, integer | vector + vector | public | vector_send | vector | bytea + vector | public | vector_spherical_distance | vector, vector | double precision + vector | public | vector_sub | vector, vector | vector + vector | public | vector_to_float4 | vector, integer, boolean | real[] + vector | public | vector_to_halfvec | vector, integer, boolean | halfvec + vector | public | vector_to_sparsevec | vector, integer, boolean | sparsevec + vector | public | vector_typmod_in | cstring[] | integer + wrappers | public | airtable_fdw_handler | | fdw_handler + wrappers | public | airtable_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | airtable_fdw_validator | options text[], catalog oid | void + wrappers | public | auth0_fdw_handler | | fdw_handler + wrappers | public | auth0_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | auth0_fdw_validator | options text[], catalog oid | void + wrappers | public | big_query_fdw_handler | | fdw_handler + wrappers | public | big_query_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | 
big_query_fdw_validator | options text[], catalog oid | void + wrappers | public | click_house_fdw_handler | | fdw_handler + wrappers | public | click_house_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | click_house_fdw_validator | options text[], catalog oid | void + wrappers | public | cognito_fdw_handler | | fdw_handler + wrappers | public | cognito_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | cognito_fdw_validator | options text[], catalog oid | void + wrappers | public | firebase_fdw_handler | | fdw_handler + wrappers | public | firebase_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | firebase_fdw_validator | options text[], catalog oid | void + wrappers | public | hello_world_fdw_handler | | fdw_handler + wrappers | public | hello_world_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | hello_world_fdw_validator | options text[], catalog oid | void + wrappers | public | logflare_fdw_handler | | fdw_handler + wrappers | public | logflare_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | logflare_fdw_validator | options text[], catalog oid | void + wrappers | public | mssql_fdw_handler | | fdw_handler + wrappers | public | mssql_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | mssql_fdw_validator | options text[], catalog oid | void + wrappers | public | redis_fdw_handler | | fdw_handler + wrappers | public | redis_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | redis_fdw_validator | options text[], catalog oid | void + wrappers | public | s3_fdw_handler | | fdw_handler + wrappers | public | s3_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | s3_fdw_validator | options text[], catalog oid | void + wrappers | 
public | stripe_fdw_handler | | fdw_handler + wrappers | public | stripe_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | stripe_fdw_validator | options text[], catalog oid | void + wrappers | public | wasm_fdw_handler | | fdw_handler + wrappers | public | wasm_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | wasm_fdw_validator | options text[], catalog oid | void + xml2 | public | xml_encode_special_chars | text | text + xml2 | public | xml_valid | text | boolean + xml2 | public | xpath_bool | text, text | boolean + xml2 | public | xpath_list | text, text | text + xml2 | public | xpath_list | text, text, text | text + xml2 | public | xpath_nodeset | text, text | text + xml2 | public | xpath_nodeset | text, text, text | text + xml2 | public | xpath_nodeset | text, text, text, text | text + xml2 | public | xpath_number | text, text | real + xml2 | public | xpath_string | text, text | text + xml2 | public | xpath_table | text, text, text, text, text | SETOF record + xml2 | public | xslt_process | text, text | text + xml2 | public | xslt_process | text, text, text | text +(5037 rows) + +/* + +Monitor extension public table/view/matview/index interface + +*/ +select + e.extname as extension_name, + n.nspname as schema_name, + pc.relname as entity_name, + pa.attname +from + pg_catalog.pg_class pc + join pg_catalog.pg_namespace n + on n.oid = pc.relnamespace + join pg_catalog.pg_depend d + on d.objid = pc.oid + join pg_catalog.pg_extension e + on e.oid = d.refobjid + left join pg_catalog.pg_attribute pa + on pa.attrelid = pc.oid + and pa.attnum > 0 + and not pa.attisdropped +where + d.deptype = 'e' + and pc.relkind in ('r', 'v', 'm', 'i') +order by + e.extname, + pc.relname, + pa.attname; + extension_name | schema_name | entity_name | attname 
+------------------------------+--------------------------+--------------------------------------------------+----------------------------------- + address_standardizer_data_us | public | us_gaz | id + address_standardizer_data_us | public | us_gaz | is_custom + address_standardizer_data_us | public | us_gaz | seq + address_standardizer_data_us | public | us_gaz | stdword + address_standardizer_data_us | public | us_gaz | token + address_standardizer_data_us | public | us_gaz | word + address_standardizer_data_us | public | us_lex | id + address_standardizer_data_us | public | us_lex | is_custom + address_standardizer_data_us | public | us_lex | seq + address_standardizer_data_us | public | us_lex | stdword + address_standardizer_data_us | public | us_lex | token + address_standardizer_data_us | public | us_lex | word + address_standardizer_data_us | public | us_rules | id + address_standardizer_data_us | public | us_rules | is_custom + address_standardizer_data_us | public | us_rules | rule + hypopg | public | hypopg_hidden_indexes | am_name + hypopg | public | hypopg_hidden_indexes | index_name + hypopg | public | hypopg_hidden_indexes | indexrelid + hypopg | public | hypopg_hidden_indexes | is_hypo + hypopg | public | hypopg_hidden_indexes | schema_name + hypopg | public | hypopg_hidden_indexes | table_name + hypopg | public | hypopg_list_indexes | am_name + hypopg | public | hypopg_list_indexes | index_name + hypopg | public | hypopg_list_indexes | indexrelid + hypopg | public | hypopg_list_indexes | schema_name + hypopg | public | hypopg_list_indexes | table_name + pg_buffercache | public | pg_buffercache | bufferid + pg_buffercache | public | pg_buffercache | isdirty + pg_buffercache | public | pg_buffercache | pinning_backends + pg_buffercache | public | pg_buffercache | relblocknumber + pg_buffercache | public | pg_buffercache | reldatabase + pg_buffercache | public | pg_buffercache | relfilenode + pg_buffercache | public | pg_buffercache | relforknumber + 
pg_buffercache | public | pg_buffercache | reltablespace + pg_buffercache | public | pg_buffercache | usagecount + pg_net | net | _http_response | content + pg_net | net | _http_response | content_type + pg_net | net | _http_response | created + pg_net | net | _http_response | error_msg + pg_net | net | _http_response | headers + pg_net | net | _http_response | id + pg_net | net | _http_response | status_code + pg_net | net | _http_response | timed_out + pg_net | net | http_request_queue | body + pg_net | net | http_request_queue | headers + pg_net | net | http_request_queue | id + pg_net | net | http_request_queue | method + pg_net | net | http_request_queue | timeout_milliseconds + pg_net | net | http_request_queue | url + pg_repack | repack | primary_keys | indexrelid + pg_repack | repack | primary_keys | indrelid + pg_repack | repack | tables | alter_col_storage + pg_repack | repack | tables | ckey + pg_repack | repack | tables | ckid + pg_repack | repack | tables | copy_data + pg_repack | repack | tables | create_log + pg_repack | repack | tables | create_pktype + pg_repack | repack | tables | create_table + pg_repack | repack | tables | create_trigger + pg_repack | repack | tables | delete_log + pg_repack | repack | tables | drop_columns + pg_repack | repack | tables | enable_trigger + pg_repack | repack | tables | lock_table + pg_repack | repack | tables | pkid + pg_repack | repack | tables | relid + pg_repack | repack | tables | relname + pg_repack | repack | tables | reltoastidxid + pg_repack | repack | tables | reltoastrelid + pg_repack | repack | tables | schemaname + pg_repack | repack | tables | sql_delete + pg_repack | repack | tables | sql_insert + pg_repack | repack | tables | sql_peek + pg_repack | repack | tables | sql_pop + pg_repack | repack | tables | sql_update + pg_repack | repack | tables | tablespace_orig + pg_stat_monitor | public | pg_stat_monitor | application_name + pg_stat_monitor | public | pg_stat_monitor | blk_read_time + 
pg_stat_monitor | public | pg_stat_monitor | blk_write_time + pg_stat_monitor | public | pg_stat_monitor | bucket + pg_stat_monitor | public | pg_stat_monitor | bucket_done + pg_stat_monitor | public | pg_stat_monitor | bucket_start_time + pg_stat_monitor | public | pg_stat_monitor | calls + pg_stat_monitor | public | pg_stat_monitor | client_ip + pg_stat_monitor | public | pg_stat_monitor | cmd_type + pg_stat_monitor | public | pg_stat_monitor | cmd_type_text + pg_stat_monitor | public | pg_stat_monitor | comments + pg_stat_monitor | public | pg_stat_monitor | cpu_sys_time + pg_stat_monitor | public | pg_stat_monitor | cpu_user_time + pg_stat_monitor | public | pg_stat_monitor | datname + pg_stat_monitor | public | pg_stat_monitor | dbid + pg_stat_monitor | public | pg_stat_monitor | elevel + pg_stat_monitor | public | pg_stat_monitor | jit_emission_count + pg_stat_monitor | public | pg_stat_monitor | jit_emission_time + pg_stat_monitor | public | pg_stat_monitor | jit_functions + pg_stat_monitor | public | pg_stat_monitor | jit_generation_time + pg_stat_monitor | public | pg_stat_monitor | jit_inlining_count + pg_stat_monitor | public | pg_stat_monitor | jit_inlining_time + pg_stat_monitor | public | pg_stat_monitor | jit_optimization_count + pg_stat_monitor | public | pg_stat_monitor | jit_optimization_time + pg_stat_monitor | public | pg_stat_monitor | local_blks_dirtied + pg_stat_monitor | public | pg_stat_monitor | local_blks_hit + pg_stat_monitor | public | pg_stat_monitor | local_blks_read + pg_stat_monitor | public | pg_stat_monitor | local_blks_written + pg_stat_monitor | public | pg_stat_monitor | max_exec_time + pg_stat_monitor | public | pg_stat_monitor | max_plan_time + pg_stat_monitor | public | pg_stat_monitor | mean_exec_time + pg_stat_monitor | public | pg_stat_monitor | mean_plan_time + pg_stat_monitor | public | pg_stat_monitor | message + pg_stat_monitor | public | pg_stat_monitor | min_exec_time + pg_stat_monitor | public | pg_stat_monitor | 
min_plan_time + pg_stat_monitor | public | pg_stat_monitor | pgsm_query_id + pg_stat_monitor | public | pg_stat_monitor | planid + pg_stat_monitor | public | pg_stat_monitor | plans + pg_stat_monitor | public | pg_stat_monitor | query + pg_stat_monitor | public | pg_stat_monitor | query_plan + pg_stat_monitor | public | pg_stat_monitor | queryid + pg_stat_monitor | public | pg_stat_monitor | relations + pg_stat_monitor | public | pg_stat_monitor | resp_calls + pg_stat_monitor | public | pg_stat_monitor | rows + pg_stat_monitor | public | pg_stat_monitor | shared_blks_dirtied + pg_stat_monitor | public | pg_stat_monitor | shared_blks_hit + pg_stat_monitor | public | pg_stat_monitor | shared_blks_read + pg_stat_monitor | public | pg_stat_monitor | shared_blks_written + pg_stat_monitor | public | pg_stat_monitor | sqlcode + pg_stat_monitor | public | pg_stat_monitor | stddev_exec_time + pg_stat_monitor | public | pg_stat_monitor | stddev_plan_time + pg_stat_monitor | public | pg_stat_monitor | temp_blk_read_time + pg_stat_monitor | public | pg_stat_monitor | temp_blk_write_time + pg_stat_monitor | public | pg_stat_monitor | temp_blks_read + pg_stat_monitor | public | pg_stat_monitor | temp_blks_written + pg_stat_monitor | public | pg_stat_monitor | top_query + pg_stat_monitor | public | pg_stat_monitor | top_queryid + pg_stat_monitor | public | pg_stat_monitor | toplevel + pg_stat_monitor | public | pg_stat_monitor | total_exec_time + pg_stat_monitor | public | pg_stat_monitor | total_plan_time + pg_stat_monitor | public | pg_stat_monitor | userid + pg_stat_monitor | public | pg_stat_monitor | username + pg_stat_monitor | public | pg_stat_monitor | wal_bytes + pg_stat_monitor | public | pg_stat_monitor | wal_fpi + pg_stat_monitor | public | pg_stat_monitor | wal_records + pg_stat_statements | public | pg_stat_statements | blk_read_time + pg_stat_statements | public | pg_stat_statements | blk_write_time + pg_stat_statements | public | pg_stat_statements | calls + 
pg_stat_statements | public | pg_stat_statements | dbid + pg_stat_statements | public | pg_stat_statements | jit_emission_count + pg_stat_statements | public | pg_stat_statements | jit_emission_time + pg_stat_statements | public | pg_stat_statements | jit_functions + pg_stat_statements | public | pg_stat_statements | jit_generation_time + pg_stat_statements | public | pg_stat_statements | jit_inlining_count + pg_stat_statements | public | pg_stat_statements | jit_inlining_time + pg_stat_statements | public | pg_stat_statements | jit_optimization_count + pg_stat_statements | public | pg_stat_statements | jit_optimization_time + pg_stat_statements | public | pg_stat_statements | local_blks_dirtied + pg_stat_statements | public | pg_stat_statements | local_blks_hit + pg_stat_statements | public | pg_stat_statements | local_blks_read + pg_stat_statements | public | pg_stat_statements | local_blks_written + pg_stat_statements | public | pg_stat_statements | max_exec_time + pg_stat_statements | public | pg_stat_statements | max_plan_time + pg_stat_statements | public | pg_stat_statements | mean_exec_time + pg_stat_statements | public | pg_stat_statements | mean_plan_time + pg_stat_statements | public | pg_stat_statements | min_exec_time + pg_stat_statements | public | pg_stat_statements | min_plan_time + pg_stat_statements | public | pg_stat_statements | plans + pg_stat_statements | public | pg_stat_statements | query + pg_stat_statements | public | pg_stat_statements | queryid + pg_stat_statements | public | pg_stat_statements | rows + pg_stat_statements | public | pg_stat_statements | shared_blks_dirtied + pg_stat_statements | public | pg_stat_statements | shared_blks_hit + pg_stat_statements | public | pg_stat_statements | shared_blks_read + pg_stat_statements | public | pg_stat_statements | shared_blks_written + pg_stat_statements | public | pg_stat_statements | stddev_exec_time + pg_stat_statements | public | pg_stat_statements | stddev_plan_time + 
pg_stat_statements | public | pg_stat_statements | temp_blk_read_time + pg_stat_statements | public | pg_stat_statements | temp_blk_write_time + pg_stat_statements | public | pg_stat_statements | temp_blks_read + pg_stat_statements | public | pg_stat_statements | temp_blks_written + pg_stat_statements | public | pg_stat_statements | toplevel + pg_stat_statements | public | pg_stat_statements | total_exec_time + pg_stat_statements | public | pg_stat_statements | total_plan_time + pg_stat_statements | public | pg_stat_statements | userid + pg_stat_statements | public | pg_stat_statements | wal_bytes + pg_stat_statements | public | pg_stat_statements | wal_fpi + pg_stat_statements | public | pg_stat_statements | wal_records + pg_stat_statements | public | pg_stat_statements_info | dealloc + pg_stat_statements | public | pg_stat_statements_info | stats_reset + pg_tle | pgtle | feature_info | feature + pg_tle | pgtle | feature_info | obj_identity + pg_tle | pgtle | feature_info | proname + pg_tle | pgtle | feature_info | schema_name + pgmq | pgmq | meta | created_at + pgmq | pgmq | meta | is_partitioned + pgmq | pgmq | meta | is_unlogged + pgmq | pgmq | meta | queue_name + pgsodium | pgsodium | decrypted_key | associated_data + pgsodium | pgsodium | decrypted_key | comment + pgsodium | pgsodium | decrypted_key | created + pgsodium | pgsodium | decrypted_key | decrypted_raw_key + pgsodium | pgsodium | decrypted_key | expires + pgsodium | pgsodium | decrypted_key | id + pgsodium | pgsodium | decrypted_key | key_context + pgsodium | pgsodium | decrypted_key | key_id + pgsodium | pgsodium | decrypted_key | key_type + pgsodium | pgsodium | decrypted_key | name + pgsodium | pgsodium | decrypted_key | parent_key + pgsodium | pgsodium | decrypted_key | raw_key + pgsodium | pgsodium | decrypted_key | raw_key_nonce + pgsodium | pgsodium | decrypted_key | status + pgsodium | pgsodium | key | associated_data + pgsodium | pgsodium | key | comment + pgsodium | pgsodium | key | 
created + pgsodium | pgsodium | key | expires + pgsodium | pgsodium | key | id + pgsodium | pgsodium | key | key_context + pgsodium | pgsodium | key | key_id + pgsodium | pgsodium | key | key_type + pgsodium | pgsodium | key | name + pgsodium | pgsodium | key | parent_key + pgsodium | pgsodium | key | raw_key + pgsodium | pgsodium | key | raw_key_nonce + pgsodium | pgsodium | key | status + pgsodium | pgsodium | key | user_data + pgsodium | pgsodium | mask_columns | associated_columns + pgsodium | pgsodium | mask_columns | attname + pgsodium | pgsodium | mask_columns | attrelid + pgsodium | pgsodium | mask_columns | format_type + pgsodium | pgsodium | mask_columns | key_id + pgsodium | pgsodium | mask_columns | key_id_column + pgsodium | pgsodium | mask_columns | nonce_column + pgsodium | pgsodium | masking_rule | associated_columns + pgsodium | pgsodium | masking_rule | attname + pgsodium | pgsodium | masking_rule | attnum + pgsodium | pgsodium | masking_rule | attrelid + pgsodium | pgsodium | masking_rule | col_description + pgsodium | pgsodium | masking_rule | format_type + pgsodium | pgsodium | masking_rule | key_id + pgsodium | pgsodium | masking_rule | key_id_column + pgsodium | pgsodium | masking_rule | nonce_column + pgsodium | pgsodium | masking_rule | priority + pgsodium | pgsodium | masking_rule | relname + pgsodium | pgsodium | masking_rule | relnamespace + pgsodium | pgsodium | masking_rule | security_invoker + pgsodium | pgsodium | masking_rule | view_name + pgsodium | pgsodium | valid_key | associated_data + pgsodium | pgsodium | valid_key | created + pgsodium | pgsodium | valid_key | expires + pgsodium | pgsodium | valid_key | id + pgsodium | pgsodium | valid_key | key_context + pgsodium | pgsodium | valid_key | key_id + pgsodium | pgsodium | valid_key | key_type + pgsodium | pgsodium | valid_key | name + pgsodium | pgsodium | valid_key | status + pgtap | public | pg_all_foreign_keys | fk_columns + pgtap | public | pg_all_foreign_keys | 
fk_constraint_name + pgtap | public | pg_all_foreign_keys | fk_schema_name + pgtap | public | pg_all_foreign_keys | fk_table_name + pgtap | public | pg_all_foreign_keys | fk_table_oid + pgtap | public | pg_all_foreign_keys | is_deferrable + pgtap | public | pg_all_foreign_keys | is_deferred + pgtap | public | pg_all_foreign_keys | match_type + pgtap | public | pg_all_foreign_keys | on_delete + pgtap | public | pg_all_foreign_keys | on_update + pgtap | public | pg_all_foreign_keys | pk_columns + pgtap | public | pg_all_foreign_keys | pk_constraint_name + pgtap | public | pg_all_foreign_keys | pk_index_name + pgtap | public | pg_all_foreign_keys | pk_schema_name + pgtap | public | pg_all_foreign_keys | pk_table_name + pgtap | public | pg_all_foreign_keys | pk_table_oid + pgtap | public | tap_funky | args + pgtap | public | tap_funky | is_definer + pgtap | public | tap_funky | is_strict + pgtap | public | tap_funky | is_visible + pgtap | public | tap_funky | kind + pgtap | public | tap_funky | langoid + pgtap | public | tap_funky | name + pgtap | public | tap_funky | oid + pgtap | public | tap_funky | owner + pgtap | public | tap_funky | returns + pgtap | public | tap_funky | returns_set + pgtap | public | tap_funky | schema + pgtap | public | tap_funky | volatility + postgis | public | geography_columns | coord_dimension + postgis | public | geography_columns | f_geography_column + postgis | public | geography_columns | f_table_catalog + postgis | public | geography_columns | f_table_name + postgis | public | geography_columns | f_table_schema + postgis | public | geography_columns | srid + postgis | public | geography_columns | type + postgis | public | geometry_columns | coord_dimension + postgis | public | geometry_columns | f_geometry_column + postgis | public | geometry_columns | f_table_catalog + postgis | public | geometry_columns | f_table_name + postgis | public | geometry_columns | f_table_schema + postgis | public | geometry_columns | srid + postgis | 
public | geometry_columns | type + postgis | public | spatial_ref_sys | auth_name + postgis | public | spatial_ref_sys | auth_srid + postgis | public | spatial_ref_sys | proj4text + postgis | public | spatial_ref_sys | srid + postgis | public | spatial_ref_sys | srtext + postgis_raster | public | raster_columns | blocksize_x + postgis_raster | public | raster_columns | blocksize_y + postgis_raster | public | raster_columns | extent + postgis_raster | public | raster_columns | nodata_values + postgis_raster | public | raster_columns | num_bands + postgis_raster | public | raster_columns | out_db + postgis_raster | public | raster_columns | pixel_types + postgis_raster | public | raster_columns | r_raster_column + postgis_raster | public | raster_columns | r_table_catalog + postgis_raster | public | raster_columns | r_table_name + postgis_raster | public | raster_columns | r_table_schema + postgis_raster | public | raster_columns | regular_blocking + postgis_raster | public | raster_columns | same_alignment + postgis_raster | public | raster_columns | scale_x + postgis_raster | public | raster_columns | scale_y + postgis_raster | public | raster_columns | spatial_index + postgis_raster | public | raster_columns | srid + postgis_raster | public | raster_overviews | o_raster_column + postgis_raster | public | raster_overviews | o_table_catalog + postgis_raster | public | raster_overviews | o_table_name + postgis_raster | public | raster_overviews | o_table_schema + postgis_raster | public | raster_overviews | overview_factor + postgis_raster | public | raster_overviews | r_raster_column + postgis_raster | public | raster_overviews | r_table_catalog + postgis_raster | public | raster_overviews | r_table_name + postgis_raster | public | raster_overviews | r_table_schema + postgis_tiger_geocoder | tiger | addr | arid + postgis_tiger_geocoder | tiger | addr | fromarmid + postgis_tiger_geocoder | tiger | addr | fromhn + postgis_tiger_geocoder | tiger | addr | fromtyp + 
postgis_tiger_geocoder | tiger | addr | gid + postgis_tiger_geocoder | tiger | addr | mtfcc + postgis_tiger_geocoder | tiger | addr | plus4 + postgis_tiger_geocoder | tiger | addr | side + postgis_tiger_geocoder | tiger | addr | statefp + postgis_tiger_geocoder | tiger | addr | tlid + postgis_tiger_geocoder | tiger | addr | toarmid + postgis_tiger_geocoder | tiger | addr | tohn + postgis_tiger_geocoder | tiger | addr | totyp + postgis_tiger_geocoder | tiger | addr | zip + postgis_tiger_geocoder | tiger | addrfeat | aridl + postgis_tiger_geocoder | tiger | addrfeat | aridr + postgis_tiger_geocoder | tiger | addrfeat | edge_mtfcc + postgis_tiger_geocoder | tiger | addrfeat | fullname + postgis_tiger_geocoder | tiger | addrfeat | gid + postgis_tiger_geocoder | tiger | addrfeat | lfromhn + postgis_tiger_geocoder | tiger | addrfeat | lfromtyp + postgis_tiger_geocoder | tiger | addrfeat | linearid + postgis_tiger_geocoder | tiger | addrfeat | ltohn + postgis_tiger_geocoder | tiger | addrfeat | ltotyp + postgis_tiger_geocoder | tiger | addrfeat | offsetl + postgis_tiger_geocoder | tiger | addrfeat | offsetr + postgis_tiger_geocoder | tiger | addrfeat | parityl + postgis_tiger_geocoder | tiger | addrfeat | parityr + postgis_tiger_geocoder | tiger | addrfeat | plus4l + postgis_tiger_geocoder | tiger | addrfeat | plus4r + postgis_tiger_geocoder | tiger | addrfeat | rfromhn + postgis_tiger_geocoder | tiger | addrfeat | rfromtyp + postgis_tiger_geocoder | tiger | addrfeat | rtohn + postgis_tiger_geocoder | tiger | addrfeat | rtotyp + postgis_tiger_geocoder | tiger | addrfeat | statefp + postgis_tiger_geocoder | tiger | addrfeat | the_geom + postgis_tiger_geocoder | tiger | addrfeat | tlid + postgis_tiger_geocoder | tiger | addrfeat | zipl + postgis_tiger_geocoder | tiger | addrfeat | zipr + postgis_tiger_geocoder | tiger | bg | aland + postgis_tiger_geocoder | tiger | bg | awater + postgis_tiger_geocoder | tiger | bg | bg_id + postgis_tiger_geocoder | tiger | bg | blkgrpce + 
postgis_tiger_geocoder | tiger | bg | countyfp + postgis_tiger_geocoder | tiger | bg | funcstat + postgis_tiger_geocoder | tiger | bg | gid + postgis_tiger_geocoder | tiger | bg | intptlat + postgis_tiger_geocoder | tiger | bg | intptlon + postgis_tiger_geocoder | tiger | bg | mtfcc + postgis_tiger_geocoder | tiger | bg | namelsad + postgis_tiger_geocoder | tiger | bg | statefp + postgis_tiger_geocoder | tiger | bg | the_geom + postgis_tiger_geocoder | tiger | bg | tractce + postgis_tiger_geocoder | tiger | county | aland + postgis_tiger_geocoder | tiger | county | awater + postgis_tiger_geocoder | tiger | county | cbsafp + postgis_tiger_geocoder | tiger | county | classfp + postgis_tiger_geocoder | tiger | county | cntyidfp + postgis_tiger_geocoder | tiger | county | countyfp + postgis_tiger_geocoder | tiger | county | countyns + postgis_tiger_geocoder | tiger | county | csafp + postgis_tiger_geocoder | tiger | county | funcstat + postgis_tiger_geocoder | tiger | county | gid + postgis_tiger_geocoder | tiger | county | intptlat + postgis_tiger_geocoder | tiger | county | intptlon + postgis_tiger_geocoder | tiger | county | lsad + postgis_tiger_geocoder | tiger | county | metdivfp + postgis_tiger_geocoder | tiger | county | mtfcc + postgis_tiger_geocoder | tiger | county | name + postgis_tiger_geocoder | tiger | county | namelsad + postgis_tiger_geocoder | tiger | county | statefp + postgis_tiger_geocoder | tiger | county | the_geom + postgis_tiger_geocoder | tiger | county_lookup | co_code + postgis_tiger_geocoder | tiger | county_lookup | name + postgis_tiger_geocoder | tiger | county_lookup | st_code + postgis_tiger_geocoder | tiger | county_lookup | state + postgis_tiger_geocoder | tiger | countysub_lookup | co_code + postgis_tiger_geocoder | tiger | countysub_lookup | county + postgis_tiger_geocoder | tiger | countysub_lookup | cs_code + postgis_tiger_geocoder | tiger | countysub_lookup | name + postgis_tiger_geocoder | tiger | countysub_lookup | st_code + 
postgis_tiger_geocoder | tiger | countysub_lookup | state + postgis_tiger_geocoder | tiger | cousub | aland + postgis_tiger_geocoder | tiger | cousub | awater + postgis_tiger_geocoder | tiger | cousub | classfp + postgis_tiger_geocoder | tiger | cousub | cnectafp + postgis_tiger_geocoder | tiger | cousub | cosbidfp + postgis_tiger_geocoder | tiger | cousub | countyfp + postgis_tiger_geocoder | tiger | cousub | cousubfp + postgis_tiger_geocoder | tiger | cousub | cousubns + postgis_tiger_geocoder | tiger | cousub | funcstat + postgis_tiger_geocoder | tiger | cousub | gid + postgis_tiger_geocoder | tiger | cousub | intptlat + postgis_tiger_geocoder | tiger | cousub | intptlon + postgis_tiger_geocoder | tiger | cousub | lsad + postgis_tiger_geocoder | tiger | cousub | mtfcc + postgis_tiger_geocoder | tiger | cousub | name + postgis_tiger_geocoder | tiger | cousub | namelsad + postgis_tiger_geocoder | tiger | cousub | nctadvfp + postgis_tiger_geocoder | tiger | cousub | nectafp + postgis_tiger_geocoder | tiger | cousub | statefp + postgis_tiger_geocoder | tiger | cousub | the_geom + postgis_tiger_geocoder | tiger | direction_lookup | abbrev + postgis_tiger_geocoder | tiger | direction_lookup | name + postgis_tiger_geocoder | tiger | edges | artpath + postgis_tiger_geocoder | tiger | edges | countyfp + postgis_tiger_geocoder | tiger | edges | deckedroad + postgis_tiger_geocoder | tiger | edges | divroad + postgis_tiger_geocoder | tiger | edges | exttyp + postgis_tiger_geocoder | tiger | edges | featcat + postgis_tiger_geocoder | tiger | edges | fullname + postgis_tiger_geocoder | tiger | edges | gcseflg + postgis_tiger_geocoder | tiger | edges | gid + postgis_tiger_geocoder | tiger | edges | hydroflg + postgis_tiger_geocoder | tiger | edges | lfromadd + postgis_tiger_geocoder | tiger | edges | ltoadd + postgis_tiger_geocoder | tiger | edges | mtfcc + postgis_tiger_geocoder | tiger | edges | offsetl + postgis_tiger_geocoder | tiger | edges | offsetr + 
postgis_tiger_geocoder | tiger | edges | olfflg + postgis_tiger_geocoder | tiger | edges | passflg + postgis_tiger_geocoder | tiger | edges | persist + postgis_tiger_geocoder | tiger | edges | railflg + postgis_tiger_geocoder | tiger | edges | rfromadd + postgis_tiger_geocoder | tiger | edges | roadflg + postgis_tiger_geocoder | tiger | edges | rtoadd + postgis_tiger_geocoder | tiger | edges | smid + postgis_tiger_geocoder | tiger | edges | statefp + postgis_tiger_geocoder | tiger | edges | tfidl + postgis_tiger_geocoder | tiger | edges | tfidr + postgis_tiger_geocoder | tiger | edges | the_geom + postgis_tiger_geocoder | tiger | edges | tlid + postgis_tiger_geocoder | tiger | edges | tnidf + postgis_tiger_geocoder | tiger | edges | tnidt + postgis_tiger_geocoder | tiger | edges | ttyp + postgis_tiger_geocoder | tiger | edges | zipl + postgis_tiger_geocoder | tiger | edges | zipr + postgis_tiger_geocoder | tiger | faces | aiannhce + postgis_tiger_geocoder | tiger | faces | aiannhce00 + postgis_tiger_geocoder | tiger | faces | aiannhfp + postgis_tiger_geocoder | tiger | faces | aiannhfp00 + postgis_tiger_geocoder | tiger | faces | anrcfp + postgis_tiger_geocoder | tiger | faces | anrcfp00 + postgis_tiger_geocoder | tiger | faces | atotal + postgis_tiger_geocoder | tiger | faces | blkgrpce + postgis_tiger_geocoder | tiger | faces | blkgrpce00 + postgis_tiger_geocoder | tiger | faces | blkgrpce20 + postgis_tiger_geocoder | tiger | faces | blockce + postgis_tiger_geocoder | tiger | faces | blockce00 + postgis_tiger_geocoder | tiger | faces | blockce20 + postgis_tiger_geocoder | tiger | faces | cbsafp + postgis_tiger_geocoder | tiger | faces | cd108fp + postgis_tiger_geocoder | tiger | faces | cd111fp + postgis_tiger_geocoder | tiger | faces | cnectafp + postgis_tiger_geocoder | tiger | faces | comptyp + postgis_tiger_geocoder | tiger | faces | comptyp00 + postgis_tiger_geocoder | tiger | faces | conctyfp + postgis_tiger_geocoder | tiger | faces | conctyfp00 + 
postgis_tiger_geocoder | tiger | faces | countyfp + postgis_tiger_geocoder | tiger | faces | countyfp00 + postgis_tiger_geocoder | tiger | faces | countyfp20 + postgis_tiger_geocoder | tiger | faces | cousubfp + postgis_tiger_geocoder | tiger | faces | cousubfp00 + postgis_tiger_geocoder | tiger | faces | csafp + postgis_tiger_geocoder | tiger | faces | elsdlea + postgis_tiger_geocoder | tiger | faces | elsdlea00 + postgis_tiger_geocoder | tiger | faces | gid + postgis_tiger_geocoder | tiger | faces | intptlat + postgis_tiger_geocoder | tiger | faces | intptlon + postgis_tiger_geocoder | tiger | faces | lwflag + postgis_tiger_geocoder | tiger | faces | metdivfp + postgis_tiger_geocoder | tiger | faces | nctadvfp + postgis_tiger_geocoder | tiger | faces | nectafp + postgis_tiger_geocoder | tiger | faces | offset + postgis_tiger_geocoder | tiger | faces | placefp + postgis_tiger_geocoder | tiger | faces | placefp00 + postgis_tiger_geocoder | tiger | faces | puma5ce + postgis_tiger_geocoder | tiger | faces | puma5ce00 + postgis_tiger_geocoder | tiger | faces | scsdlea + postgis_tiger_geocoder | tiger | faces | scsdlea00 + postgis_tiger_geocoder | tiger | faces | sldlst + postgis_tiger_geocoder | tiger | faces | sldlst00 + postgis_tiger_geocoder | tiger | faces | sldust + postgis_tiger_geocoder | tiger | faces | sldust00 + postgis_tiger_geocoder | tiger | faces | statefp + postgis_tiger_geocoder | tiger | faces | statefp00 + postgis_tiger_geocoder | tiger | faces | statefp20 + postgis_tiger_geocoder | tiger | faces | submcdfp + postgis_tiger_geocoder | tiger | faces | submcdfp00 + postgis_tiger_geocoder | tiger | faces | tazce + postgis_tiger_geocoder | tiger | faces | tazce00 + postgis_tiger_geocoder | tiger | faces | tblkgpce + postgis_tiger_geocoder | tiger | faces | tfid + postgis_tiger_geocoder | tiger | faces | the_geom + postgis_tiger_geocoder | tiger | faces | tractce + postgis_tiger_geocoder | tiger | faces | tractce00 + postgis_tiger_geocoder | tiger | faces 
| tractce20 + postgis_tiger_geocoder | tiger | faces | trsubce + postgis_tiger_geocoder | tiger | faces | trsubce00 + postgis_tiger_geocoder | tiger | faces | trsubfp + postgis_tiger_geocoder | tiger | faces | trsubfp00 + postgis_tiger_geocoder | tiger | faces | ttractce + postgis_tiger_geocoder | tiger | faces | uace + postgis_tiger_geocoder | tiger | faces | uace00 + postgis_tiger_geocoder | tiger | faces | ugace + postgis_tiger_geocoder | tiger | faces | ugace00 + postgis_tiger_geocoder | tiger | faces | unsdlea + postgis_tiger_geocoder | tiger | faces | unsdlea00 + postgis_tiger_geocoder | tiger | faces | vtdst + postgis_tiger_geocoder | tiger | faces | vtdst00 + postgis_tiger_geocoder | tiger | faces | zcta5ce + postgis_tiger_geocoder | tiger | faces | zcta5ce00 + postgis_tiger_geocoder | tiger | featnames | fullname + postgis_tiger_geocoder | tiger | featnames | gid + postgis_tiger_geocoder | tiger | featnames | linearid + postgis_tiger_geocoder | tiger | featnames | mtfcc + postgis_tiger_geocoder | tiger | featnames | name + postgis_tiger_geocoder | tiger | featnames | paflag + postgis_tiger_geocoder | tiger | featnames | predir + postgis_tiger_geocoder | tiger | featnames | predirabrv + postgis_tiger_geocoder | tiger | featnames | prequal + postgis_tiger_geocoder | tiger | featnames | prequalabr + postgis_tiger_geocoder | tiger | featnames | pretyp + postgis_tiger_geocoder | tiger | featnames | pretypabrv + postgis_tiger_geocoder | tiger | featnames | statefp + postgis_tiger_geocoder | tiger | featnames | sufdir + postgis_tiger_geocoder | tiger | featnames | sufdirabrv + postgis_tiger_geocoder | tiger | featnames | sufqual + postgis_tiger_geocoder | tiger | featnames | sufqualabr + postgis_tiger_geocoder | tiger | featnames | suftyp + postgis_tiger_geocoder | tiger | featnames | suftypabrv + postgis_tiger_geocoder | tiger | featnames | tlid + postgis_tiger_geocoder | tiger | geocode_settings | category + postgis_tiger_geocoder | tiger | geocode_settings | 
name + postgis_tiger_geocoder | tiger | geocode_settings | setting + postgis_tiger_geocoder | tiger | geocode_settings | short_desc + postgis_tiger_geocoder | tiger | geocode_settings | unit + postgis_tiger_geocoder | tiger | geocode_settings_default | category + postgis_tiger_geocoder | tiger | geocode_settings_default | name + postgis_tiger_geocoder | tiger | geocode_settings_default | setting + postgis_tiger_geocoder | tiger | geocode_settings_default | short_desc + postgis_tiger_geocoder | tiger | geocode_settings_default | unit + postgis_tiger_geocoder | tiger | loader_lookuptables | columns_exclude + postgis_tiger_geocoder | tiger | loader_lookuptables | insert_mode + postgis_tiger_geocoder | tiger | loader_lookuptables | level_county + postgis_tiger_geocoder | tiger | loader_lookuptables | level_nation + postgis_tiger_geocoder | tiger | loader_lookuptables | level_state + postgis_tiger_geocoder | tiger | loader_lookuptables | load + postgis_tiger_geocoder | tiger | loader_lookuptables | lookup_name + postgis_tiger_geocoder | tiger | loader_lookuptables | post_load_process + postgis_tiger_geocoder | tiger | loader_lookuptables | pre_load_process + postgis_tiger_geocoder | tiger | loader_lookuptables | process_order + postgis_tiger_geocoder | tiger | loader_lookuptables | single_geom_mode + postgis_tiger_geocoder | tiger | loader_lookuptables | single_mode + postgis_tiger_geocoder | tiger | loader_lookuptables | table_name + postgis_tiger_geocoder | tiger | loader_lookuptables | website_root_override + postgis_tiger_geocoder | tiger | loader_platform | county_process_command + postgis_tiger_geocoder | tiger | loader_platform | declare_sect + postgis_tiger_geocoder | tiger | loader_platform | environ_set_command + postgis_tiger_geocoder | tiger | loader_platform | loader + postgis_tiger_geocoder | tiger | loader_platform | os + postgis_tiger_geocoder | tiger | loader_platform | path_sep + postgis_tiger_geocoder | tiger | loader_platform | pgbin + 
postgis_tiger_geocoder | tiger | loader_platform | psql + postgis_tiger_geocoder | tiger | loader_platform | unzip_command + postgis_tiger_geocoder | tiger | loader_platform | wget + postgis_tiger_geocoder | tiger | loader_variables | data_schema + postgis_tiger_geocoder | tiger | loader_variables | staging_fold + postgis_tiger_geocoder | tiger | loader_variables | staging_schema + postgis_tiger_geocoder | tiger | loader_variables | tiger_year + postgis_tiger_geocoder | tiger | loader_variables | website_root + postgis_tiger_geocoder | tiger | pagc_gaz | id + postgis_tiger_geocoder | tiger | pagc_gaz | is_custom + postgis_tiger_geocoder | tiger | pagc_gaz | seq + postgis_tiger_geocoder | tiger | pagc_gaz | stdword + postgis_tiger_geocoder | tiger | pagc_gaz | token + postgis_tiger_geocoder | tiger | pagc_gaz | word + postgis_tiger_geocoder | tiger | pagc_lex | id + postgis_tiger_geocoder | tiger | pagc_lex | is_custom + postgis_tiger_geocoder | tiger | pagc_lex | seq + postgis_tiger_geocoder | tiger | pagc_lex | stdword + postgis_tiger_geocoder | tiger | pagc_lex | token + postgis_tiger_geocoder | tiger | pagc_lex | word + postgis_tiger_geocoder | tiger | pagc_rules | id + postgis_tiger_geocoder | tiger | pagc_rules | is_custom + postgis_tiger_geocoder | tiger | pagc_rules | rule + postgis_tiger_geocoder | tiger | place | aland + postgis_tiger_geocoder | tiger | place | awater + postgis_tiger_geocoder | tiger | place | classfp + postgis_tiger_geocoder | tiger | place | cpi + postgis_tiger_geocoder | tiger | place | funcstat + postgis_tiger_geocoder | tiger | place | gid + postgis_tiger_geocoder | tiger | place | intptlat + postgis_tiger_geocoder | tiger | place | intptlon + postgis_tiger_geocoder | tiger | place | lsad + postgis_tiger_geocoder | tiger | place | mtfcc + postgis_tiger_geocoder | tiger | place | name + postgis_tiger_geocoder | tiger | place | namelsad + postgis_tiger_geocoder | tiger | place | pcicbsa + postgis_tiger_geocoder | tiger | place | 
pcinecta + postgis_tiger_geocoder | tiger | place | placefp + postgis_tiger_geocoder | tiger | place | placens + postgis_tiger_geocoder | tiger | place | plcidfp + postgis_tiger_geocoder | tiger | place | statefp + postgis_tiger_geocoder | tiger | place | the_geom + postgis_tiger_geocoder | tiger | place_lookup | name + postgis_tiger_geocoder | tiger | place_lookup | pl_code + postgis_tiger_geocoder | tiger | place_lookup | st_code + postgis_tiger_geocoder | tiger | place_lookup | state + postgis_tiger_geocoder | tiger | secondary_unit_lookup | abbrev + postgis_tiger_geocoder | tiger | secondary_unit_lookup | name + postgis_tiger_geocoder | tiger | state | aland + postgis_tiger_geocoder | tiger | state | awater + postgis_tiger_geocoder | tiger | state | division + postgis_tiger_geocoder | tiger | state | funcstat + postgis_tiger_geocoder | tiger | state | gid + postgis_tiger_geocoder | tiger | state | intptlat + postgis_tiger_geocoder | tiger | state | intptlon + postgis_tiger_geocoder | tiger | state | lsad + postgis_tiger_geocoder | tiger | state | mtfcc + postgis_tiger_geocoder | tiger | state | name + postgis_tiger_geocoder | tiger | state | region + postgis_tiger_geocoder | tiger | state | statefp + postgis_tiger_geocoder | tiger | state | statens + postgis_tiger_geocoder | tiger | state | stusps + postgis_tiger_geocoder | tiger | state | the_geom + postgis_tiger_geocoder | tiger | state_lookup | abbrev + postgis_tiger_geocoder | tiger | state_lookup | name + postgis_tiger_geocoder | tiger | state_lookup | st_code + postgis_tiger_geocoder | tiger | state_lookup | statefp + postgis_tiger_geocoder | tiger | street_type_lookup | abbrev + postgis_tiger_geocoder | tiger | street_type_lookup | is_hw + postgis_tiger_geocoder | tiger | street_type_lookup | name + postgis_tiger_geocoder | tiger | tabblock | aland + postgis_tiger_geocoder | tiger | tabblock | awater + postgis_tiger_geocoder | tiger | tabblock | blockce + postgis_tiger_geocoder | tiger | tabblock | 
countyfp + postgis_tiger_geocoder | tiger | tabblock | funcstat + postgis_tiger_geocoder | tiger | tabblock | gid + postgis_tiger_geocoder | tiger | tabblock | intptlat + postgis_tiger_geocoder | tiger | tabblock | intptlon + postgis_tiger_geocoder | tiger | tabblock | mtfcc + postgis_tiger_geocoder | tiger | tabblock | name + postgis_tiger_geocoder | tiger | tabblock | statefp + postgis_tiger_geocoder | tiger | tabblock | tabblock_id + postgis_tiger_geocoder | tiger | tabblock | the_geom + postgis_tiger_geocoder | tiger | tabblock | tractce + postgis_tiger_geocoder | tiger | tabblock | uace + postgis_tiger_geocoder | tiger | tabblock | ur + postgis_tiger_geocoder | tiger | tabblock20 | aland + postgis_tiger_geocoder | tiger | tabblock20 | awater + postgis_tiger_geocoder | tiger | tabblock20 | blockce + postgis_tiger_geocoder | tiger | tabblock20 | countyfp + postgis_tiger_geocoder | tiger | tabblock20 | funcstat + postgis_tiger_geocoder | tiger | tabblock20 | geoid + postgis_tiger_geocoder | tiger | tabblock20 | housing + postgis_tiger_geocoder | tiger | tabblock20 | intptlat + postgis_tiger_geocoder | tiger | tabblock20 | intptlon + postgis_tiger_geocoder | tiger | tabblock20 | mtfcc + postgis_tiger_geocoder | tiger | tabblock20 | name + postgis_tiger_geocoder | tiger | tabblock20 | pop + postgis_tiger_geocoder | tiger | tabblock20 | statefp + postgis_tiger_geocoder | tiger | tabblock20 | the_geom + postgis_tiger_geocoder | tiger | tabblock20 | tractce + postgis_tiger_geocoder | tiger | tabblock20 | uace + postgis_tiger_geocoder | tiger | tabblock20 | uatype + postgis_tiger_geocoder | tiger | tabblock20 | ur + postgis_tiger_geocoder | tiger | tract | aland + postgis_tiger_geocoder | tiger | tract | awater + postgis_tiger_geocoder | tiger | tract | countyfp + postgis_tiger_geocoder | tiger | tract | funcstat + postgis_tiger_geocoder | tiger | tract | gid + postgis_tiger_geocoder | tiger | tract | intptlat + postgis_tiger_geocoder | tiger | tract | intptlon + 
postgis_tiger_geocoder | tiger | tract | mtfcc + postgis_tiger_geocoder | tiger | tract | name + postgis_tiger_geocoder | tiger | tract | namelsad + postgis_tiger_geocoder | tiger | tract | statefp + postgis_tiger_geocoder | tiger | tract | the_geom + postgis_tiger_geocoder | tiger | tract | tract_id + postgis_tiger_geocoder | tiger | tract | tractce + postgis_tiger_geocoder | tiger | zcta5 | aland + postgis_tiger_geocoder | tiger | zcta5 | awater + postgis_tiger_geocoder | tiger | zcta5 | classfp + postgis_tiger_geocoder | tiger | zcta5 | funcstat + postgis_tiger_geocoder | tiger | zcta5 | gid + postgis_tiger_geocoder | tiger | zcta5 | intptlat + postgis_tiger_geocoder | tiger | zcta5 | intptlon + postgis_tiger_geocoder | tiger | zcta5 | mtfcc + postgis_tiger_geocoder | tiger | zcta5 | partflg + postgis_tiger_geocoder | tiger | zcta5 | statefp + postgis_tiger_geocoder | tiger | zcta5 | the_geom + postgis_tiger_geocoder | tiger | zcta5 | zcta5ce + postgis_tiger_geocoder | tiger | zip_lookup | cnt + postgis_tiger_geocoder | tiger | zip_lookup | co_code + postgis_tiger_geocoder | tiger | zip_lookup | county + postgis_tiger_geocoder | tiger | zip_lookup | cousub + postgis_tiger_geocoder | tiger | zip_lookup | cs_code + postgis_tiger_geocoder | tiger | zip_lookup | pl_code + postgis_tiger_geocoder | tiger | zip_lookup | place + postgis_tiger_geocoder | tiger | zip_lookup | st_code + postgis_tiger_geocoder | tiger | zip_lookup | state + postgis_tiger_geocoder | tiger | zip_lookup | zip + postgis_tiger_geocoder | tiger | zip_lookup_all | cnt + postgis_tiger_geocoder | tiger | zip_lookup_all | co_code + postgis_tiger_geocoder | tiger | zip_lookup_all | county + postgis_tiger_geocoder | tiger | zip_lookup_all | cousub + postgis_tiger_geocoder | tiger | zip_lookup_all | cs_code + postgis_tiger_geocoder | tiger | zip_lookup_all | pl_code + postgis_tiger_geocoder | tiger | zip_lookup_all | place + postgis_tiger_geocoder | tiger | zip_lookup_all | st_code + 
postgis_tiger_geocoder | tiger | zip_lookup_all | state + postgis_tiger_geocoder | tiger | zip_lookup_all | zip + postgis_tiger_geocoder | tiger | zip_lookup_base | city + postgis_tiger_geocoder | tiger | zip_lookup_base | county + postgis_tiger_geocoder | tiger | zip_lookup_base | state + postgis_tiger_geocoder | tiger | zip_lookup_base | statefp + postgis_tiger_geocoder | tiger | zip_lookup_base | zip + postgis_tiger_geocoder | tiger | zip_state | statefp + postgis_tiger_geocoder | tiger | zip_state | stusps + postgis_tiger_geocoder | tiger | zip_state | zip + postgis_tiger_geocoder | tiger | zip_state_loc | place + postgis_tiger_geocoder | tiger | zip_state_loc | statefp + postgis_tiger_geocoder | tiger | zip_state_loc | stusps + postgis_tiger_geocoder | tiger | zip_state_loc | zip + postgis_topology | topology | layer | child_id + postgis_topology | topology | layer | feature_column + postgis_topology | topology | layer | feature_type + postgis_topology | topology | layer | layer_id + postgis_topology | topology | layer | level + postgis_topology | topology | layer | schema_name + postgis_topology | topology | layer | table_name + postgis_topology | topology | layer | topology_id + postgis_topology | topology | topology | hasz + postgis_topology | topology | topology | id + postgis_topology | topology | topology | name + postgis_topology | topology | topology | precision + postgis_topology | topology | topology | srid + tealbase_vault | vault | secrets | created_at + tealbase_vault | vault | secrets | description + tealbase_vault | vault | secrets | id + tealbase_vault | vault | secrets | key_id + tealbase_vault | vault | secrets | name + tealbase_vault | vault | secrets | nonce + tealbase_vault | vault | secrets | secret + tealbase_vault | vault | secrets | updated_at + timescaledb | _timescaledb_config | bgw_job | application_name + timescaledb | _timescaledb_config | bgw_job | check_name + timescaledb | _timescaledb_config | bgw_job | check_schema + 
timescaledb | _timescaledb_config | bgw_job | config + timescaledb | _timescaledb_config | bgw_job | fixed_schedule + timescaledb | _timescaledb_config | bgw_job | hypertable_id + timescaledb | _timescaledb_config | bgw_job | id + timescaledb | _timescaledb_config | bgw_job | initial_start + timescaledb | _timescaledb_config | bgw_job | max_retries + timescaledb | _timescaledb_config | bgw_job | max_runtime + timescaledb | _timescaledb_config | bgw_job | owner + timescaledb | _timescaledb_config | bgw_job | proc_name + timescaledb | _timescaledb_config | bgw_job | proc_schema + timescaledb | _timescaledb_config | bgw_job | retry_period + timescaledb | _timescaledb_config | bgw_job | schedule_interval + timescaledb | _timescaledb_config | bgw_job | scheduled + timescaledb | _timescaledb_config | bgw_job | timezone + timescaledb | _timescaledb_internal | bgw_job_stat | consecutive_crashes + timescaledb | _timescaledb_internal | bgw_job_stat | consecutive_failures + timescaledb | _timescaledb_internal | bgw_job_stat | flags + timescaledb | _timescaledb_internal | bgw_job_stat | job_id + timescaledb | _timescaledb_internal | bgw_job_stat | last_finish + timescaledb | _timescaledb_internal | bgw_job_stat | last_run_success + timescaledb | _timescaledb_internal | bgw_job_stat | last_start + timescaledb | _timescaledb_internal | bgw_job_stat | last_successful_finish + timescaledb | _timescaledb_internal | bgw_job_stat | next_start + timescaledb | _timescaledb_internal | bgw_job_stat | total_crashes + timescaledb | _timescaledb_internal | bgw_job_stat | total_duration + timescaledb | _timescaledb_internal | bgw_job_stat | total_duration_failures + timescaledb | _timescaledb_internal | bgw_job_stat | total_failures + timescaledb | _timescaledb_internal | bgw_job_stat | total_runs + timescaledb | _timescaledb_internal | bgw_job_stat | total_successes + timescaledb | _timescaledb_internal | bgw_job_stat_history | data + timescaledb | _timescaledb_internal | 
bgw_job_stat_history | execution_finish + timescaledb | _timescaledb_internal | bgw_job_stat_history | execution_start + timescaledb | _timescaledb_internal | bgw_job_stat_history | id + timescaledb | _timescaledb_internal | bgw_job_stat_history | job_id + timescaledb | _timescaledb_internal | bgw_job_stat_history | pid + timescaledb | _timescaledb_internal | bgw_job_stat_history | succeeded + timescaledb | _timescaledb_internal | bgw_policy_chunk_stats | chunk_id + timescaledb | _timescaledb_internal | bgw_policy_chunk_stats | job_id + timescaledb | _timescaledb_internal | bgw_policy_chunk_stats | last_time_job_run + timescaledb | _timescaledb_internal | bgw_policy_chunk_stats | num_times_job_run + timescaledb | _timescaledb_cache | cache_inval_bgw_job | + timescaledb | _timescaledb_cache | cache_inval_extension | + timescaledb | _timescaledb_cache | cache_inval_hypertable | + timescaledb | _timescaledb_catalog | chunk | compressed_chunk_id + timescaledb | _timescaledb_catalog | chunk | creation_time + timescaledb | _timescaledb_catalog | chunk | dropped + timescaledb | _timescaledb_catalog | chunk | hypertable_id + timescaledb | _timescaledb_catalog | chunk | id + timescaledb | _timescaledb_catalog | chunk | osm_chunk + timescaledb | _timescaledb_catalog | chunk | schema_name + timescaledb | _timescaledb_catalog | chunk | status + timescaledb | _timescaledb_catalog | chunk | table_name + timescaledb | _timescaledb_catalog | chunk_column_stats | chunk_id + timescaledb | _timescaledb_catalog | chunk_column_stats | column_name + timescaledb | _timescaledb_catalog | chunk_column_stats | hypertable_id + timescaledb | _timescaledb_catalog | chunk_column_stats | id + timescaledb | _timescaledb_catalog | chunk_column_stats | range_end + timescaledb | _timescaledb_catalog | chunk_column_stats | range_start + timescaledb | _timescaledb_catalog | chunk_column_stats | valid + timescaledb | timescaledb_information | chunk_compression_settings | chunk + timescaledb | 
timescaledb_information | chunk_compression_settings | hypertable + timescaledb | timescaledb_information | chunk_compression_settings | orderby + timescaledb | timescaledb_information | chunk_compression_settings | segmentby + timescaledb | _timescaledb_catalog | chunk_constraint | chunk_id + timescaledb | _timescaledb_catalog | chunk_constraint | constraint_name + timescaledb | _timescaledb_catalog | chunk_constraint | dimension_slice_id + timescaledb | _timescaledb_catalog | chunk_constraint | hypertable_constraint_name + timescaledb | _timescaledb_catalog | chunk_index | chunk_id + timescaledb | _timescaledb_catalog | chunk_index | hypertable_id + timescaledb | _timescaledb_catalog | chunk_index | hypertable_index_name + timescaledb | _timescaledb_catalog | chunk_index | index_name + timescaledb | timescaledb_information | chunks | chunk_creation_time + timescaledb | timescaledb_information | chunks | chunk_name + timescaledb | timescaledb_information | chunks | chunk_schema + timescaledb | timescaledb_information | chunks | chunk_tablespace + timescaledb | timescaledb_information | chunks | hypertable_name + timescaledb | timescaledb_information | chunks | hypertable_schema + timescaledb | timescaledb_information | chunks | is_compressed + timescaledb | timescaledb_information | chunks | primary_dimension + timescaledb | timescaledb_information | chunks | primary_dimension_type + timescaledb | timescaledb_information | chunks | range_end + timescaledb | timescaledb_information | chunks | range_end_integer + timescaledb | timescaledb_information | chunks | range_start + timescaledb | timescaledb_information | chunks | range_start_integer + timescaledb | _timescaledb_internal | compressed_chunk_stats | chunk_name + timescaledb | _timescaledb_internal | compressed_chunk_stats | chunk_schema + timescaledb | _timescaledb_internal | compressed_chunk_stats | compressed_heap_size + timescaledb | _timescaledb_internal | compressed_chunk_stats | compressed_index_size + 
timescaledb | _timescaledb_internal | compressed_chunk_stats | compressed_toast_size + timescaledb | _timescaledb_internal | compressed_chunk_stats | compressed_total_size + timescaledb | _timescaledb_internal | compressed_chunk_stats | compression_status + timescaledb | _timescaledb_internal | compressed_chunk_stats | hypertable_name + timescaledb | _timescaledb_internal | compressed_chunk_stats | hypertable_schema + timescaledb | _timescaledb_internal | compressed_chunk_stats | uncompressed_heap_size + timescaledb | _timescaledb_internal | compressed_chunk_stats | uncompressed_index_size + timescaledb | _timescaledb_internal | compressed_chunk_stats | uncompressed_toast_size + timescaledb | _timescaledb_internal | compressed_chunk_stats | uncompressed_total_size + timescaledb | _timescaledb_catalog | compression_algorithm | description + timescaledb | _timescaledb_catalog | compression_algorithm | id + timescaledb | _timescaledb_catalog | compression_algorithm | name + timescaledb | _timescaledb_catalog | compression_algorithm | version + timescaledb | _timescaledb_catalog | compression_chunk_size | chunk_id + timescaledb | _timescaledb_catalog | compression_chunk_size | compressed_chunk_id + timescaledb | _timescaledb_catalog | compression_chunk_size | compressed_heap_size + timescaledb | _timescaledb_catalog | compression_chunk_size | compressed_index_size + timescaledb | _timescaledb_catalog | compression_chunk_size | compressed_toast_size + timescaledb | _timescaledb_catalog | compression_chunk_size | numrows_frozen_immediately + timescaledb | _timescaledb_catalog | compression_chunk_size | numrows_post_compression + timescaledb | _timescaledb_catalog | compression_chunk_size | numrows_pre_compression + timescaledb | _timescaledb_catalog | compression_chunk_size | uncompressed_heap_size + timescaledb | _timescaledb_catalog | compression_chunk_size | uncompressed_index_size + timescaledb | _timescaledb_catalog | compression_chunk_size | uncompressed_toast_size 
+ timescaledb | timescaledb_information | compression_settings | attname + timescaledb | timescaledb_information | compression_settings | hypertable_name + timescaledb | timescaledb_information | compression_settings | hypertable_schema + timescaledb | _timescaledb_catalog | compression_settings | orderby + timescaledb | timescaledb_information | compression_settings | orderby_asc + timescaledb | timescaledb_information | compression_settings | orderby_column_index + timescaledb | _timescaledb_catalog | compression_settings | orderby_desc + timescaledb | timescaledb_information | compression_settings | orderby_nullsfirst + timescaledb | _timescaledb_catalog | compression_settings | orderby_nullsfirst + timescaledb | _timescaledb_catalog | compression_settings | relid + timescaledb | _timescaledb_catalog | compression_settings | segmentby + timescaledb | timescaledb_information | compression_settings | segmentby_column_index + timescaledb | _timescaledb_catalog | continuous_agg | direct_view_name + timescaledb | _timescaledb_catalog | continuous_agg | direct_view_schema + timescaledb | _timescaledb_catalog | continuous_agg | finalized + timescaledb | _timescaledb_catalog | continuous_agg | mat_hypertable_id + timescaledb | _timescaledb_catalog | continuous_agg | materialized_only + timescaledb | _timescaledb_catalog | continuous_agg | parent_mat_hypertable_id + timescaledb | _timescaledb_catalog | continuous_agg | partial_view_name + timescaledb | _timescaledb_catalog | continuous_agg | partial_view_schema + timescaledb | _timescaledb_catalog | continuous_agg | raw_hypertable_id + timescaledb | _timescaledb_catalog | continuous_agg | user_view_name + timescaledb | _timescaledb_catalog | continuous_agg | user_view_schema + timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan | end_ts + timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan | mat_hypertable_id + timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan | start_ts + 
timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan | user_view_definition + timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | config + timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | end_ts + timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | mat_hypertable_id + timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | start_ts + timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | status + timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | step_id + timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | type + timescaledb | timescaledb_information | continuous_aggregates | compression_enabled + timescaledb | timescaledb_information | continuous_aggregates | finalized + timescaledb | timescaledb_information | continuous_aggregates | hypertable_name + timescaledb | timescaledb_information | continuous_aggregates | hypertable_schema + timescaledb | timescaledb_information | continuous_aggregates | materialization_hypertable_name + timescaledb | timescaledb_information | continuous_aggregates | materialization_hypertable_schema + timescaledb | timescaledb_information | continuous_aggregates | materialized_only + timescaledb | timescaledb_information | continuous_aggregates | view_definition + timescaledb | timescaledb_information | continuous_aggregates | view_name + timescaledb | timescaledb_information | continuous_aggregates | view_owner + timescaledb | timescaledb_information | continuous_aggregates | view_schema + timescaledb | _timescaledb_catalog | continuous_aggs_bucket_function | bucket_fixed_width + timescaledb | _timescaledb_catalog | continuous_aggs_bucket_function | bucket_func + timescaledb | _timescaledb_catalog | continuous_aggs_bucket_function | bucket_offset + timescaledb | _timescaledb_catalog | continuous_aggs_bucket_function | bucket_origin + timescaledb | _timescaledb_catalog | 
continuous_aggs_bucket_function | bucket_timezone + timescaledb | _timescaledb_catalog | continuous_aggs_bucket_function | bucket_width + timescaledb | _timescaledb_catalog | continuous_aggs_bucket_function | mat_hypertable_id + timescaledb | _timescaledb_catalog | continuous_aggs_hypertable_invalidation_log | greatest_modified_value + timescaledb | _timescaledb_catalog | continuous_aggs_hypertable_invalidation_log | hypertable_id + timescaledb | _timescaledb_catalog | continuous_aggs_hypertable_invalidation_log | lowest_modified_value + timescaledb | _timescaledb_catalog | continuous_aggs_invalidation_threshold | hypertable_id + timescaledb | _timescaledb_catalog | continuous_aggs_invalidation_threshold | watermark + timescaledb | _timescaledb_catalog | continuous_aggs_materialization_invalidation_log | greatest_modified_value + timescaledb | _timescaledb_catalog | continuous_aggs_materialization_invalidation_log | lowest_modified_value + timescaledb | _timescaledb_catalog | continuous_aggs_materialization_invalidation_log | materialization_id + timescaledb | _timescaledb_catalog | continuous_aggs_watermark | mat_hypertable_id + timescaledb | _timescaledb_catalog | continuous_aggs_watermark | watermark + timescaledb | _timescaledb_catalog | dimension | aligned + timescaledb | _timescaledb_catalog | dimension | column_name + timescaledb | _timescaledb_catalog | dimension | column_type + timescaledb | _timescaledb_catalog | dimension | compress_interval_length + timescaledb | _timescaledb_catalog | dimension | hypertable_id + timescaledb | _timescaledb_catalog | dimension | id + timescaledb | _timescaledb_catalog | dimension | integer_now_func + timescaledb | _timescaledb_catalog | dimension | integer_now_func_schema + timescaledb | _timescaledb_catalog | dimension | interval_length + timescaledb | _timescaledb_catalog | dimension | num_slices + timescaledb | _timescaledb_catalog | dimension | partitioning_func + timescaledb | _timescaledb_catalog | dimension | 
partitioning_func_schema + timescaledb | _timescaledb_catalog | dimension_slice | dimension_id + timescaledb | _timescaledb_catalog | dimension_slice | id + timescaledb | _timescaledb_catalog | dimension_slice | range_end + timescaledb | _timescaledb_catalog | dimension_slice | range_start + timescaledb | timescaledb_information | dimensions | column_name + timescaledb | timescaledb_information | dimensions | column_type + timescaledb | timescaledb_information | dimensions | dimension_number + timescaledb | timescaledb_information | dimensions | dimension_type + timescaledb | timescaledb_information | dimensions | hypertable_name + timescaledb | timescaledb_information | dimensions | hypertable_schema + timescaledb | timescaledb_information | dimensions | integer_interval + timescaledb | timescaledb_information | dimensions | integer_now_func + timescaledb | timescaledb_information | dimensions | num_partitions + timescaledb | timescaledb_information | dimensions | time_interval + timescaledb | _timescaledb_catalog | hypertable | associated_schema_name + timescaledb | _timescaledb_catalog | hypertable | associated_table_prefix + timescaledb | _timescaledb_catalog | hypertable | chunk_sizing_func_name + timescaledb | _timescaledb_catalog | hypertable | chunk_sizing_func_schema + timescaledb | _timescaledb_catalog | hypertable | chunk_target_size + timescaledb | _timescaledb_catalog | hypertable | compressed_hypertable_id + timescaledb | _timescaledb_catalog | hypertable | compression_state + timescaledb | _timescaledb_catalog | hypertable | id + timescaledb | _timescaledb_catalog | hypertable | num_dimensions + timescaledb | _timescaledb_catalog | hypertable | schema_name + timescaledb | _timescaledb_catalog | hypertable | status + timescaledb | _timescaledb_catalog | hypertable | table_name + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | chunk_id + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | chunk_name + timescaledb | 
_timescaledb_internal | hypertable_chunk_local_size | chunk_schema + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | compressed_heap_size + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | compressed_index_size + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | compressed_toast_size + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | compressed_total_size + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | heap_bytes + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | hypertable_id + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | hypertable_name + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | hypertable_schema + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | index_bytes + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | toast_bytes + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | total_bytes + timescaledb | timescaledb_information | hypertable_compression_settings | compress_interval_length + timescaledb | timescaledb_information | hypertable_compression_settings | hypertable + timescaledb | timescaledb_information | hypertable_compression_settings | orderby + timescaledb | timescaledb_information | hypertable_compression_settings | segmentby + timescaledb | timescaledb_information | hypertables | compression_enabled + timescaledb | timescaledb_information | hypertables | hypertable_name + timescaledb | timescaledb_information | hypertables | hypertable_schema + timescaledb | timescaledb_information | hypertables | num_chunks + timescaledb | timescaledb_information | hypertables | num_dimensions + timescaledb | timescaledb_information | hypertables | owner + timescaledb | timescaledb_information | hypertables | tablespaces + timescaledb | timescaledb_information | job_errors | err_message + timescaledb | timescaledb_information | job_errors | 
finish_time + timescaledb | timescaledb_information | job_errors | job_id + timescaledb | timescaledb_information | job_errors | pid + timescaledb | timescaledb_information | job_errors | proc_name + timescaledb | timescaledb_information | job_errors | proc_schema + timescaledb | timescaledb_information | job_errors | sqlerrcode + timescaledb | timescaledb_information | job_errors | start_time + timescaledb | timescaledb_information | job_history | config + timescaledb | timescaledb_information | job_history | err_message + timescaledb | timescaledb_information | job_history | finish_time + timescaledb | timescaledb_information | job_history | id + timescaledb | timescaledb_information | job_history | job_id + timescaledb | timescaledb_information | job_history | pid + timescaledb | timescaledb_information | job_history | proc_name + timescaledb | timescaledb_information | job_history | proc_schema + timescaledb | timescaledb_information | job_history | sqlerrcode + timescaledb | timescaledb_information | job_history | start_time + timescaledb | timescaledb_information | job_history | succeeded + timescaledb | timescaledb_information | job_stats | hypertable_name + timescaledb | timescaledb_information | job_stats | hypertable_schema + timescaledb | timescaledb_information | job_stats | job_id + timescaledb | timescaledb_information | job_stats | job_status + timescaledb | timescaledb_information | job_stats | last_run_duration + timescaledb | timescaledb_information | job_stats | last_run_started_at + timescaledb | timescaledb_information | job_stats | last_run_status + timescaledb | timescaledb_information | job_stats | last_successful_finish + timescaledb | timescaledb_information | job_stats | next_start + timescaledb | timescaledb_information | job_stats | total_failures + timescaledb | timescaledb_information | job_stats | total_runs + timescaledb | timescaledb_information | job_stats | total_successes + timescaledb | timescaledb_information | jobs | 
application_name + timescaledb | timescaledb_information | jobs | check_name + timescaledb | timescaledb_information | jobs | check_schema + timescaledb | timescaledb_information | jobs | config + timescaledb | timescaledb_information | jobs | fixed_schedule + timescaledb | timescaledb_information | jobs | hypertable_name + timescaledb | timescaledb_information | jobs | hypertable_schema + timescaledb | timescaledb_information | jobs | initial_start + timescaledb | timescaledb_information | jobs | job_id + timescaledb | timescaledb_information | jobs | max_retries + timescaledb | timescaledb_information | jobs | max_runtime + timescaledb | timescaledb_information | jobs | next_start + timescaledb | timescaledb_information | jobs | owner + timescaledb | timescaledb_information | jobs | proc_name + timescaledb | timescaledb_information | jobs | proc_schema + timescaledb | timescaledb_information | jobs | retry_period + timescaledb | timescaledb_information | jobs | schedule_interval + timescaledb | timescaledb_information | jobs | scheduled + timescaledb | _timescaledb_catalog | metadata | include_in_telemetry + timescaledb | _timescaledb_catalog | metadata | key + timescaledb | _timescaledb_catalog | metadata | value + timescaledb | timescaledb_experimental | policies | config + timescaledb | timescaledb_experimental | policies | hypertable_name + timescaledb | timescaledb_experimental | policies | hypertable_schema + timescaledb | timescaledb_experimental | policies | proc_name + timescaledb | timescaledb_experimental | policies | proc_schema + timescaledb | timescaledb_experimental | policies | relation_name + timescaledb | timescaledb_experimental | policies | relation_schema + timescaledb | timescaledb_experimental | policies | schedule_interval + timescaledb | _timescaledb_catalog | tablespace | hypertable_id + timescaledb | _timescaledb_catalog | tablespace | id + timescaledb | _timescaledb_catalog | tablespace | tablespace_name + timescaledb | 
_timescaledb_catalog | telemetry_event | body + timescaledb | _timescaledb_catalog | telemetry_event | created + timescaledb | _timescaledb_catalog | telemetry_event | tag + wrappers | public | wrappers_fdw_stats | bytes_in + wrappers | public | wrappers_fdw_stats | bytes_out + wrappers | public | wrappers_fdw_stats | create_times + wrappers | public | wrappers_fdw_stats | created_at + wrappers | public | wrappers_fdw_stats | fdw_name + wrappers | public | wrappers_fdw_stats | metadata + wrappers | public | wrappers_fdw_stats | rows_in + wrappers | public | wrappers_fdw_stats | rows_out + wrappers | public | wrappers_fdw_stats | updated_at +(1086 rows) + diff --git a/nix/tests/expected/hypopg.out b/nix/tests/expected/hypopg.out new file mode 100644 index 0000000..35c8a5b --- /dev/null +++ b/nix/tests/expected/hypopg.out @@ -0,0 +1,14 @@ +create schema v; +create table v.samp( + id int +); +select 1 from hypopg_create_index($$ + create index on v.samp(id) +$$); + ?column? +---------- + 1 +(1 row) + +drop schema v cascade; +NOTICE: drop cascades to table v.samp diff --git a/nix/tests/expected/index_advisor.out b/nix/tests/expected/index_advisor.out new file mode 100644 index 0000000..5a269ba --- /dev/null +++ b/nix/tests/expected/index_advisor.out @@ -0,0 +1,16 @@ +create schema v; +create table v.book( + id int primary key, + title text not null +); +select + index_statements, errors +from + index_advisor('select id from v.book where title = $1'); + index_statements | errors +------------------------------------------------+-------- + {"CREATE INDEX ON v.book USING btree (title)"} | {} +(1 row) + +drop schema v cascade; +NOTICE: drop cascades to table v.book diff --git a/nix/tests/expected/pg-safeupdate.out b/nix/tests/expected/pg-safeupdate.out new file mode 100644 index 0000000..f910011 --- /dev/null +++ b/nix/tests/expected/pg-safeupdate.out @@ -0,0 +1,12 @@ +load 'safeupdate'; +set safeupdate.enabled=1; +create schema v; +create table v.foo( + id int, + val text 
+); +update v.foo + set val = 'bar'; +ERROR: UPDATE requires a WHERE clause +drop schema v cascade; +NOTICE: drop cascades to table v.foo diff --git a/nix/tests/expected/pg_graphql.out b/nix/tests/expected/pg_graphql.out new file mode 100644 index 0000000..63a3520 --- /dev/null +++ b/nix/tests/expected/pg_graphql.out @@ -0,0 +1,259 @@ +begin; + comment on schema public is '@graphql({"inflect_names": true})'; + create table account( + id serial primary key, + email varchar(255) not null, + priority int, + status text default 'active' + ); + create table blog( + id serial primary key, + owner_id integer not null references account(id) + ); + comment on table blog is e'@graphql({"totalCount": {"enabled": true}})'; + -- Make sure functions still work + create function _echo_email(account) + returns text + language sql + as $$ select $1.email $$; + /* + Literals + */ + select graphql.resolve($$ + mutation { + insertIntoAccountCollection(objects: [ + { email: "foo@barsley.com", priority: 1 }, + { email: "bar@foosworth.com" } + ]) { + affectedCount + records { + id + status + echoEmail + blogCollection { + totalCount + } + } + } + } + $$); + resolve +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + {"data": {"insertIntoAccountCollection": {"records": [{"id": 1, "status": "active", "echoEmail": "foo@barsley.com", "blogCollection": {"totalCount": 0}}, {"id": 2, "status": "active", "echoEmail": "bar@foosworth.com", "blogCollection": {"totalCount": 0}}], "affectedCount": 2}}} +(1 row) + + select graphql.resolve($$ + mutation { + insertIntoBlogCollection(objects: [{ + ownerId: 1 + }]) { + records { + id + owner { + id + } + } + } + } + $$); + resolve +-------------------------------------------------------------------------------------- + {"data": 
{"insertIntoBlogCollection": {"records": [{"id": 1, "owner": {"id": 1}}]}}} +(1 row) + + -- Override a default on status with null + select graphql.resolve($$ + mutation { + insertIntoAccountCollection(objects: [ + { email: "baz@baz.com", status: null }, + ]) { + affectedCount + records { + email + status + } + } + } + $$); + resolve +------------------------------------------------------------------------------------------------------------------------ + {"data": {"insertIntoAccountCollection": {"records": [{"email": "baz@baz.com", "status": null}], "affectedCount": 1}}} +(1 row) + + /* + Variables + */ + select graphql.resolve($$ + mutation newAccount($emailAddress: String) { + xyz: insertIntoAccountCollection(objects: [ + { email: $emailAddress }, + { email: "other@email.com" } + ]) { + affectedCount + records { + id + email + } + } + } + $$, + variables := '{"emailAddress": "foo@bar.com"}'::jsonb + ); + resolve +-------------------------------------------------------------------------------------------------------------------------------- + {"data": {"xyz": {"records": [{"id": 4, "email": "foo@bar.com"}, {"id": 5, "email": "other@email.com"}], "affectedCount": 2}}} +(1 row) + + -- Variable override of default with null results in null + select graphql.resolve($$ + mutation newAccount($status: String) { + xyz: insertIntoAccountCollection(objects: [ + { email: "1@email.com", status: $status} + ]) { + affectedCount + records { + email + status + } + } + } + $$, + variables := '{"status": null}'::jsonb + ); + resolve +------------------------------------------------------------------------------------------------ + {"data": {"xyz": {"records": [{"email": "1@email.com", "status": null}], "affectedCount": 1}}} +(1 row) + + -- Skipping variable override of default results in default + select graphql.resolve($$ + mutation newAccount($status: String) { + xyz: insertIntoAccountCollection(objects: [ + { email: "x@y.com", status: $status}, + ]) { + affectedCount + records 
{ + email + status + } + } + } + $$, + variables := '{}'::jsonb + ); + resolve +------------------------------------------------------------------------------------------------ + {"data": {"xyz": {"records": [{"email": "x@y.com", "status": "active"}], "affectedCount": 1}}} +(1 row) + + select graphql.resolve($$ + mutation newAccount($acc: AccountInsertInput!) { + insertIntoAccountCollection(objects: [$acc]) { + affectedCount + records { + id + email + } + } + } + $$, + variables := '{"acc": {"email": "bar@foo.com"}}'::jsonb + ); + resolve +----------------------------------------------------------------------------------------------------------------- + {"data": {"insertIntoAccountCollection": {"records": [{"id": 8, "email": "bar@foo.com"}], "affectedCount": 1}}} +(1 row) + + select graphql.resolve($$ + mutation newAccounts($acc: [AccountInsertInput!]!) { + insertIntoAccountCollection(objects: $accs) { + affectedCount + records { + id + email + } + } + } + $$, + variables := '{"accs": [{"email": "bar@foo.com"}]}'::jsonb + ); + resolve +----------------------------------------------------------------------------------------------------------------- + {"data": {"insertIntoAccountCollection": {"records": [{"id": 9, "email": "bar@foo.com"}], "affectedCount": 1}}} +(1 row) + + -- Single object coerces to a list + select graphql.resolve($$ + mutation { + insertIntoBlogCollection(objects: {ownerId: 1}) { + affectedCount + } + } + $$); + resolve +-------------------------------------------------------------- + {"data": {"insertIntoBlogCollection": {"affectedCount": 1}}} +(1 row) + + /* + Errors + */ + -- Field does not exist + select graphql.resolve($$ + mutation createAccount($acc: AccountInsertInput) { + insertIntoAccountCollection(objects: [$acc]) { + affectedCount + records { + id + email + } + } + } + $$, + variables := '{"acc": {"doesNotExist": "other"}}'::jsonb + ); + resolve 
+--------------------------------------------------------------------------------------------------------------------- + {"data": null, "errors": [{"message": "Input for type AccountInsertInput contains extra keys [\"doesNotExist\"]"}]} +(1 row) + + -- Wrong input type (list of string, not list of object) + select graphql.resolve($$ + mutation { + insertIntoBlogCollection(objects: ["not an object"]) { + affectedCount + } + } + $$); + resolve +----------------------------------------------------------------------------------- + {"data": null, "errors": [{"message": "Invalid input for BlogInsertInput type"}]} +(1 row) + + -- objects argument is missing + select graphql.resolve($$ + mutation { + insertIntoBlogCollection { + affectedCount + } + } + $$); + resolve +--------------------------------------------------------------------------- + {"data": null, "errors": [{"message": "Invalid input for NonNull type"}]} +(1 row) + + -- Empty call + select graphql.resolve($$ + mutation { + insertIntoBlogCollection(objects: []) { + affectedCount + } + } + $$); + resolve +-------------------------------------------------------------------------------------------- + {"data": null, "errors": [{"message": "At least one record must be provided to objects"}]} +(1 row) + +rollback; diff --git a/nix/tests/expected/pg_hashids.out b/nix/tests/expected/pg_hashids.out new file mode 100644 index 0000000..393218e --- /dev/null +++ b/nix/tests/expected/pg_hashids.out @@ -0,0 +1,36 @@ +select id_encode(1001); -- Result: jNl + id_encode +----------- + jNl +(1 row) + +select id_encode(1234567, 'This is my salt'); -- Result: Pdzxp + id_encode +----------- + Pdzxp +(1 row) + +select id_encode(1234567, 'This is my salt', 10); -- Result: PlRPdzxpR7 + id_encode +------------ + PlRPdzxpR7 +(1 row) + +select id_encode(1234567, 'This is my salt', 10, 'abcdefghijABCDxFGHIJ1234567890'); -- Result: 3GJ956J9B9 + id_encode +------------ + 3GJ956J9B9 +(1 row) + +select id_decode('PlRPdzxpR7', 'This is my 
salt', 10); -- Result: 1234567 + id_decode +----------- + {1234567} +(1 row) + +select id_decode('3GJ956J9B9', 'This is my salt', 10, 'abcdefghijABCDxFGHIJ1234567890'); -- Result: 1234567 + id_decode +----------- + {1234567} +(1 row) + diff --git a/nix/tests/expected/pg_jsonschema.out b/nix/tests/expected/pg_jsonschema.out new file mode 100644 index 0000000..c291141 --- /dev/null +++ b/nix/tests/expected/pg_jsonschema.out @@ -0,0 +1,73 @@ +begin; +-- Test json_matches_schema +create table customer( + id serial primary key, + metadata json, + check ( + json_matches_schema( + '{ + "type": "object", + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string", + "maxLength": 16 + } + } + } + }', + metadata + ) + ) +); +insert into customer(metadata) +values ('{"tags": ["vip", "darkmode-ui"]}'); +-- Test jsonb_matches_schema +select + jsonb_matches_schema( + '{ + "type": "object", + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string", + "maxLength": 16 + } + } + } + }', + '{"tags": ["vip", "darkmode-ui"]}'::jsonb +); + jsonb_matches_schema +---------------------- + t +(1 row) + +-- Test jsonschema_is_valid +select + jsonschema_is_valid( + '{ + "type": "object", + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string", + "maxLength": 16 + } + } + } + }'); + jsonschema_is_valid +--------------------- + t +(1 row) + +-- Test invalid payload +insert into customer(metadata) +values ('{"tags": [1, 3]}'); +ERROR: new row for relation "customer" violates check constraint "customer_metadata_check" +DETAIL: Failing row contains (2, {"tags": [1, 3]}). 
+rollback; diff --git a/nix/tests/expected/pg_net.out b/nix/tests/expected/pg_net.out new file mode 100644 index 0000000..6b3ca5b --- /dev/null +++ b/nix/tests/expected/pg_net.out @@ -0,0 +1,11 @@ +-- This is a very basic test because you can't get the value returned +-- by a pg_net request in the same transaction that created it; +select + net.http_get ( + 'https://postman-echo.com/get?foo1=bar1&foo2=bar2' + ) as request_id; + request_id +------------ + 1 +(1 row) + diff --git a/nix/tests/expected/pg_plan_filter.out b/nix/tests/expected/pg_plan_filter.out new file mode 100644 index 0000000..4bdcd65 --- /dev/null +++ b/nix/tests/expected/pg_plan_filter.out @@ -0,0 +1,16 @@ +begin; + load 'plan_filter'; + create schema v; + -- create a sample table + create table v.test_table ( + id serial primary key, + data text + ); + -- insert some test data + insert into v.test_table (data) + values ('sample1'), ('sample2'), ('sample3'); + set local plan_filter.statement_cost_limit = 0.001; + select * from v.test_table; +ERROR: plan cost limit exceeded +HINT: The plan for your query shows that it would probably have an excessive run time. This may be due to a logic error in the SQL, or it maybe just a very costly query. Rewrite your query or increase the configuration parameter "plan_filter.statement_cost_limit". 
+rollback; diff --git a/nix/tests/expected/pg_stat_monitor.out b/nix/tests/expected/pg_stat_monitor.out new file mode 100644 index 0000000..f4d9069 --- /dev/null +++ b/nix/tests/expected/pg_stat_monitor.out @@ -0,0 +1,10 @@ +select + * +from + pg_stat_monitor +where + false; + bucket | bucket_start_time | userid | username | dbid | datname | client_ip | pgsm_query_id | queryid | toplevel | top_queryid | query | comments | planid | query_plan | top_query | application_name | relations | cmd_type | cmd_type_text | elevel | sqlcode | message | calls | total_exec_time | min_exec_time | max_exec_time | mean_exec_time | stddev_exec_time | rows | shared_blks_hit | shared_blks_read | shared_blks_dirtied | shared_blks_written | local_blks_hit | local_blks_read | local_blks_dirtied | local_blks_written | temp_blks_read | temp_blks_written | blk_read_time | blk_write_time | temp_blk_read_time | temp_blk_write_time | resp_calls | cpu_user_time | cpu_sys_time | wal_records | wal_fpi | wal_bytes | bucket_done | plans | total_plan_time | min_plan_time | max_plan_time | mean_plan_time | stddev_plan_time | jit_functions | jit_generation_time | jit_inlining_count | jit_inlining_time | jit_optimization_count | jit_optimization_time | jit_emission_count | jit_emission_time 
+--------+-------------------+--------+----------+------+---------+-----------+---------------+---------+----------+-------------+-------+----------+--------+------------+-----------+------------------+-----------+----------+---------------+--------+---------+---------+-------+-----------------+---------------+---------------+----------------+------------------+------+-----------------+------------------+---------------------+---------------------+----------------+-----------------+--------------------+--------------------+----------------+-------------------+---------------+----------------+--------------------+---------------------+------------+---------------+--------------+-------------+---------+-----------+-------------+-------+-----------------+---------------+---------------+----------------+------------------+---------------+---------------------+--------------------+-------------------+------------------------+-----------------------+--------------------+------------------- +(0 rows) + diff --git a/nix/tests/expected/pg_tle.out b/nix/tests/expected/pg_tle.out new file mode 100644 index 0000000..cffce1d --- /dev/null +++ b/nix/tests/expected/pg_tle.out @@ -0,0 +1,91 @@ +select + pgtle.install_extension( + 'pg_distance', + '0.1', + 'Distance functions for two points', + $_pg_tle_$ + CREATE FUNCTION dist(x1 float8, y1 float8, x2 float8, y2 float8, norm int) + RETURNS float8 + AS $$ + SELECT (abs(x2 - x1) ^ norm + abs(y2 - y1) ^ norm) ^ (1::float8 / norm); + $$ LANGUAGE SQL; + + CREATE FUNCTION manhattan_dist(x1 float8, y1 float8, x2 float8, y2 float8) + RETURNS float8 + AS $$ + SELECT dist(x1, y1, x2, y2, 1); + $$ LANGUAGE SQL; + + CREATE FUNCTION euclidean_dist(x1 float8, y1 float8, x2 float8, y2 float8) + RETURNS float8 + AS $$ + SELECT dist(x1, y1, x2, y2, 2); + $$ LANGUAGE SQL; + $_pg_tle_$ + ); + install_extension +------------------- + t +(1 row) + +create extension pg_distance; +select manhattan_dist(1, 1, 5, 5); + manhattan_dist +---------------- + 8 
+(1 row) + +select euclidean_dist(1, 1, 5, 5); + euclidean_dist +------------------- + 5.656854249492381 +(1 row) + +SELECT pgtle.install_update_path( + 'pg_distance', + '0.1', + '0.2', + $_pg_tle_$ + CREATE OR REPLACE FUNCTION dist(x1 float8, y1 float8, x2 float8, y2 float8, norm int) + RETURNS float8 + AS $$ + SELECT (abs(x2 - x1) ^ norm + abs(y2 - y1) ^ norm) ^ (1::float8 / norm); + $$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE; + + CREATE OR REPLACE FUNCTION manhattan_dist(x1 float8, y1 float8, x2 float8, y2 float8) + RETURNS float8 + AS $$ + SELECT dist(x1, y1, x2, y2, 1); + $$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE; + + CREATE OR REPLACE FUNCTION euclidean_dist(x1 float8, y1 float8, x2 float8, y2 float8) + RETURNS float8 + AS $$ + SELECT dist(x1, y1, x2, y2, 2); + $$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE; + $_pg_tle_$ + ); + install_update_path +--------------------- + t +(1 row) + +select + pgtle.set_default_version('pg_distance', '0.2'); + set_default_version +--------------------- + t +(1 row) + +alter extension pg_distance update; +drop extension pg_distance; +select + pgtle.uninstall_extension('pg_distance'); + uninstall_extension +--------------------- + t +(1 row) + +-- Restore original state if any of the above fails +drop extension pg_tle cascade; +create extension pg_tle; diff --git a/nix/tests/expected/pgaudit.out b/nix/tests/expected/pgaudit.out new file mode 100644 index 0000000..1937be6 --- /dev/null +++ b/nix/tests/expected/pgaudit.out @@ -0,0 +1,24 @@ +-- Note: there is no test that the logs were correctly output. 
Only checking for exceptions +set pgaudit.log = 'write, ddl'; +set pgaudit.log_relation = on; +set pgaudit.log_level = notice; +create schema v; +create table v.account( + id int, + name text, + password text, + description text +); +insert into v.account (id, name, password, description) +values (1, 'user1', 'HASH1', 'blah, blah'); +select + * +from + v.account; + id | name | password | description +----+-------+----------+------------- + 1 | user1 | HASH1 | blah, blah +(1 row) + +drop schema v cascade; +NOTICE: drop cascades to table v.account diff --git a/nix/tests/expected/pgjwt.out b/nix/tests/expected/pgjwt.out new file mode 100644 index 0000000..4e4500f --- /dev/null +++ b/nix/tests/expected/pgjwt.out @@ -0,0 +1,22 @@ +select + sign( + payload := '{"sub":"1234567890","name":"John Doe","iat":1516239022}', + secret := 'secret', + algorithm := 'HS256' + ); + sign +------------------------------------------------------------------------------------------------------------------------------------------------------------- + eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.XbPfbIHMI6arZ3Y922BhjWgQzWXcXNrz0ogtVhfEd2o +(1 row) + +select + verify( + token := 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoiRm9vIn0.Q8hKjuadCEhnCPuqIj9bfLhTh_9QSxshTRsA5Aq4IuM', + secret := 'secret', + algorithm := 'HS256' + ); + verify +---------------------------------------------------------------- + ("{""alg"":""HS256"",""typ"":""JWT""}","{""name"":""Foo""}",t) +(1 row) + diff --git a/nix/tests/expected/pgmq.out b/nix/tests/expected/pgmq.out new file mode 100644 index 0000000..9fb1819 --- /dev/null +++ b/nix/tests/expected/pgmq.out @@ -0,0 +1,141 @@ +-- Test the standard flow +select + pgmq.create('Foo'); + create +-------- + +(1 row) + +select + * +from + pgmq.send( + queue_name:='Foo', + msg:='{"foo": "bar1"}' + ); + send +------ + 1 +(1 row) + +-- Test queue is not case sensitive +select + * +from + pgmq.send( + 
queue_name:='foo', -- note: lowercase useage + msg:='{"foo": "bar2"}', + delay:=5 + ); + send +------ + 2 +(1 row) + +select + msg_id, + read_ct, + message +from + pgmq.read( + queue_name:='Foo', + vt:=30, + qty:=2 + ); + msg_id | read_ct | message +--------+---------+----------------- + 1 | 1 | {"foo": "bar1"} +(1 row) + +select + msg_id, + read_ct, + message +from + pgmq.pop('Foo'); + msg_id | read_ct | message +--------+---------+--------- +(0 rows) + +-- Archive message with msg_id=2. +select + pgmq.archive( + queue_name:='Foo', + msg_id:=2 + ); + archive +--------- + t +(1 row) + +select + pgmq.create('my_queue'); + create +-------- + +(1 row) + +select + pgmq.send_batch( + queue_name:='my_queue', + msgs:=array['{"foo": "bar3"}','{"foo": "bar4"}','{"foo": "bar5"}']::jsonb[] +); + send_batch +------------ + 1 + 2 + 3 +(3 rows) + +select + pgmq.archive( + queue_name:='my_queue', + msg_ids:=array[3, 4, 5] + ); + archive +--------- + 3 +(1 row) + +select + pgmq.delete('my_queue', 6); + delete +-------- + f +(1 row) + +select + pgmq.drop_queue('my_queue'); + drop_queue +------------ + t +(1 row) + +/* +-- Disabled until pg_partman goes back into the image +select + pgmq.create_partitioned( + 'my_partitioned_queue', + '5 seconds', + '10 seconds' +); +*/ +-- Make sure SQLI enabling characters are blocked +select pgmq.create('F--oo'); +ERROR: queue name contains invalid characters: $, ;, --, or \' +CONTEXT: PL/pgSQL function pgmq.format_table_name(text,text) line 5 at RAISE +PL/pgSQL function pgmq.create_non_partitioned(text) line 3 during statement block local variable initialization +SQL statement "SELECT pgmq.create_non_partitioned(queue_name)" +PL/pgSQL function pgmq."create"(text) line 3 at PERFORM +select pgmq.create('F$oo'); +ERROR: queue name contains invalid characters: $, ;, --, or \' +CONTEXT: PL/pgSQL function pgmq.format_table_name(text,text) line 5 at RAISE +PL/pgSQL function pgmq.create_non_partitioned(text) line 3 during statement block local variable 
initialization +SQL statement "SELECT pgmq.create_non_partitioned(queue_name)" +PL/pgSQL function pgmq."create"(text) line 3 at PERFORM +select pgmq.create($$F'oo$$); +ERROR: queue name contains invalid characters: $, ;, --, or \' +CONTEXT: PL/pgSQL function pgmq.format_table_name(text,text) line 5 at RAISE +PL/pgSQL function pgmq.create_non_partitioned(text) line 3 during statement block local variable initialization +SQL statement "SELECT pgmq.create_non_partitioned(queue_name)" +PL/pgSQL function pgmq."create"(text) line 3 at PERFORM diff --git a/nix/tests/expected/pgroonga.out b/nix/tests/expected/pgroonga.out new file mode 100644 index 0000000..5ceeed2 --- /dev/null +++ b/nix/tests/expected/pgroonga.out @@ -0,0 +1,76 @@ +create schema v; +create table v.roon( + id serial primary key, + content text +); +with tokenizers as ( + select + x + from + jsonb_array_elements( + (select pgroonga_command('tokenizer_list'))::jsonb + ) x(val) + limit + 1 + offset + 1 -- first record is unrelated and not stable +) +select + t.x::jsonb ->> 'name' +from + jsonb_array_elements((select * from tokenizers)) t(x) +order by + t.x::jsonb ->> 'name'; + ?column? 
+--------------------------------------------- + TokenBigram + TokenBigramIgnoreBlank + TokenBigramIgnoreBlankSplitSymbol + TokenBigramIgnoreBlankSplitSymbolAlpha + TokenBigramIgnoreBlankSplitSymbolAlphaDigit + TokenBigramSplitSymbol + TokenBigramSplitSymbolAlpha + TokenBigramSplitSymbolAlphaDigit + TokenDelimit + TokenDelimitNull + TokenDocumentVectorBM25 + TokenDocumentVectorTFIDF + TokenMecab + TokenNgram + TokenPattern + TokenRegexp + TokenTable + TokenTrigram + TokenUnigram +(19 rows) + +insert into v.roon (content) +values + ('Hello World'), + ('PostgreSQL with PGroonga is a thing'), + ('This is a full-text search test'), + ('PGroonga supports various languages'); +-- Create default index +create index pgroonga_index on v.roon using pgroonga (content); +-- Create mecab tokenizer index since we had a bug with this one once +create index pgroonga_index_mecab on v.roon using pgroonga (content) with (tokenizer='TokenMecab'); +-- Run some queries to test the index +select * from v.roon where content &@~ 'Hello'; + id | content +----+------------- + 1 | Hello World +(1 row) + +select * from v.roon where content &@~ 'powerful'; + id | content +----+--------- +(0 rows) + +select * from v.roon where content &@~ 'supports'; + id | content +----+------------------------------------- + 4 | PGroonga supports various languages +(1 row) + +drop schema v cascade; +NOTICE: drop cascades to table v.roon diff --git a/nix/tests/expected/pgrouting.out b/nix/tests/expected/pgrouting.out new file mode 100644 index 0000000..2362a72 --- /dev/null +++ b/nix/tests/expected/pgrouting.out @@ -0,0 +1,31 @@ +create schema v; +-- create the roads table +create table v.roads ( + id serial primary key, + source integer, + target integer, + cost double precision +); +-- insert sample data into roads table +insert into v.roads (source, target, cost) values +(1, 2, 1.0), +(2, 3, 1.0), +(3, 4, 1.0), +(1, 3, 2.5), +(3, 5, 2.0); +-- create a function to use pgRouting to find the shortest path 
+select * from pgr_dijkstra( + 'select id, source, target, cost from v.roads', + 1, -- start node + 4 -- end node +); + seq | path_seq | node | edge | cost | agg_cost +-----+----------+------+------+------+---------- + 1 | 1 | 1 | 1 | 1 | 0 + 2 | 2 | 2 | 2 | 1 | 1 + 3 | 3 | 3 | 3 | 1 | 2 + 4 | 4 | 4 | -1 | 0 | 3 +(4 rows) + +drop schema v cascade; +NOTICE: drop cascades to table v.roads diff --git a/nix/tests/expected/pgsodium.out b/nix/tests/expected/pgsodium.out new file mode 100644 index 0000000..418bf2d --- /dev/null +++ b/nix/tests/expected/pgsodium.out @@ -0,0 +1,9 @@ +select + status +from + pgsodium.create_key(); + status +-------- + valid +(1 row) + diff --git a/nix/tests/expected/pgtap.out b/nix/tests/expected/pgtap.out new file mode 100644 index 0000000..272d838 --- /dev/null +++ b/nix/tests/expected/pgtap.out @@ -0,0 +1,21 @@ +begin; +select plan(1); + plan +------ + 1..1 +(1 row) + +-- Run the tests. +select pass( 'My test passed, w00t!' ); + pass +------------------------------ + ok 1 - My test passed, w00t! +(1 row) + +-- Finish the tests and clean up. +select * from finish(); + finish +-------- +(0 rows) + +rollback; diff --git a/nix/tests/expected/pgvector.out b/nix/tests/expected/pgvector.out new file mode 100644 index 0000000..6564be5 --- /dev/null +++ b/nix/tests/expected/pgvector.out @@ -0,0 +1,90 @@ +create schema v; +create table v.items( + id serial primary key, + embedding vector(3), + half_embedding halfvec(3), + bit_embedding bit(3), + sparse_embedding sparsevec(3) +); +-- vector ops +create index on v.items using hnsw (embedding vector_l2_ops); +create index on v.items using hnsw (embedding vector_cosine_ops); +create index on v.items using hnsw (embedding vector_l1_ops); +create index on v.items using ivfflat (embedding vector_l2_ops); +NOTICE: ivfflat index created with little data +DETAIL: This will cause low recall. +HINT: Drop the index until the table has more data. 
+create index on v.items using ivfflat (embedding vector_cosine_ops); +NOTICE: ivfflat index created with little data +DETAIL: This will cause low recall. +HINT: Drop the index until the table has more data. +-- halfvec ops +create index on v.items using hnsw (half_embedding halfvec_l2_ops); +create index on v.items using hnsw (half_embedding halfvec_cosine_ops); +create index on v.items using hnsw (half_embedding halfvec_l1_ops); +create index on v.items using ivfflat (half_embedding halfvec_l2_ops); +NOTICE: ivfflat index created with little data +DETAIL: This will cause low recall. +HINT: Drop the index until the table has more data. +create index on v.items using ivfflat (half_embedding halfvec_cosine_ops); +NOTICE: ivfflat index created with little data +DETAIL: This will cause low recall. +HINT: Drop the index until the table has more data. +-- sparsevec +create index on v.items using hnsw (sparse_embedding sparsevec_l2_ops); +create index on v.items using hnsw (sparse_embedding sparsevec_cosine_ops); +create index on v.items using hnsw (sparse_embedding sparsevec_l1_ops); +-- bit ops +create index on v.items using hnsw (bit_embedding bit_hamming_ops); +create index on v.items using ivfflat (bit_embedding bit_hamming_ops); +NOTICE: ivfflat index created with little data +DETAIL: This will cause low recall. +HINT: Drop the index until the table has more data. 
+-- Populate some records +insert into v.items( + embedding, + half_embedding, + bit_embedding, + sparse_embedding +) +values + ('[1,2,3]', '[1,2,3]', '101', '{1:4}/3'), + ('[2,3,4]', '[2,3,4]', '010', '{1:7,3:0}/3'); +-- Test op types +select + * +from + v.items +order by + embedding <-> '[2,3,5]', + embedding <=> '[2,3,5]', + embedding <+> '[2,3,5]', + embedding <#> '[2,3,5]', + half_embedding <-> '[2,3,5]', + half_embedding <=> '[2,3,5]', + half_embedding <+> '[2,3,5]', + half_embedding <#> '[2,3,5]', + sparse_embedding <-> '{2:4,3:1}/3', + sparse_embedding <=> '{2:4,3:1}/3', + sparse_embedding <+> '{2:4,3:1}/3', + sparse_embedding <#> '{2:4,3:1}/3', + bit_embedding <~> '011'; + id | embedding | half_embedding | bit_embedding | sparse_embedding +----+-----------+----------------+---------------+------------------ + 2 | [2,3,4] | [2,3,4] | 010 | {1:7}/3 + 1 | [1,2,3] | [1,2,3] | 101 | {1:4}/3 +(2 rows) + +select + avg(embedding), + avg(half_embedding) +from + v.items; + avg | avg +---------------+--------------- + [1.5,2.5,3.5] | [1.5,2.5,3.5] +(1 row) + +-- Cleanup +drop schema v cascade; +NOTICE: drop cascades to table v.items diff --git a/nix/tests/expected/plpgsql-check.out b/nix/tests/expected/plpgsql-check.out new file mode 100644 index 0000000..2b5bf82 --- /dev/null +++ b/nix/tests/expected/plpgsql-check.out @@ -0,0 +1,35 @@ +create schema v; +create table v.t1( + a int, + b int +); +create or replace function v.f1() + returns void + language plpgsql +as $$ +declare r record; +begin + for r in select * from v.t1 + loop + raise notice '%', r.c; -- there is bug - table t1 missing "c" column + end loop; +end; +$$; +select * from v.f1(); + f1 +---- + +(1 row) + +-- use plpgsql_check_function to check the function for errors +select * from plpgsql_check_function('v.f1()'); + plpgsql_check_function +------------------------------------------------- + error:42703:6:RAISE:record "r" has no field "c" + Context: SQL expression "r.c" +(2 rows) + +drop schema v 
cascade; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table v.t1 +drop cascades to function v.f1() diff --git a/nix/tests/expected/plv8.out b/nix/tests/expected/plv8.out new file mode 100644 index 0000000..f24c858 --- /dev/null +++ b/nix/tests/expected/plv8.out @@ -0,0 +1,17 @@ +create schema v; +-- create a function to perform some JavaScript operations +create function v.multiply_numbers(a integer, b integer) + returns integer + language plv8 +as $$ + return a * b; +$$; +select + v.multiply_numbers(3, 4); + multiply_numbers +------------------ + 12 +(1 row) + +drop schema v cascade; +NOTICE: drop cascades to function v.multiply_numbers(integer,integer) diff --git a/nix/tests/expected/postgis.out b/nix/tests/expected/postgis.out new file mode 100644 index 0000000..53194d8 --- /dev/null +++ b/nix/tests/expected/postgis.out @@ -0,0 +1,59 @@ +create schema v; +-- create a table to store geographic points +create table v.places ( + id serial primary key, + name text, + geom geometry(point, 4326) -- using WGS 84 coordinate system +); +-- insert some sample geographic points into the places table +insert into v.places (name, geom) +values + ('place_a', st_setsrid(st_makepoint(-73.9857, 40.7484), 4326)), -- latitude and longitude for a location + ('place_b', st_setsrid(st_makepoint(-74.0060, 40.7128), 4326)), -- another location + ('place_c', st_setsrid(st_makepoint(-73.9687, 40.7851), 4326)); -- yet another location +-- calculate the distance between two points (in meters) +select + a.name as place_a, + b.name as place_b, + st_distance(a.geom::geography, b.geom::geography) as distance_meters +from + v.places a, + v.places b +where + a.name = 'place_a' + and b.name = 'place_b'; + place_a | place_b | distance_meters +---------+---------+----------------- + place_a | place_b | 4309.25283351 +(1 row) + +-- find all places within a 5km radius of 'place_a' +select + name, + st_distance( + geom::geography, + ( + select + geom + from + v.places + where + 
name = 'place_a' + )::geography) as distance_meters +from + v.places +where + st_dwithin( + geom::geography, + (select geom from v.places where name = 'place_a')::geography, + 5000 + ) + and name != 'place_a'; + name | distance_meters +---------+----------------- + place_b | 4309.25283351 + place_c | 4320.8765634 +(2 rows) + +drop schema v cascade; +NOTICE: drop cascades to table v.places diff --git a/nix/tests/expected/rum.out b/nix/tests/expected/rum.out new file mode 100644 index 0000000..ba8a402 --- /dev/null +++ b/nix/tests/expected/rum.out @@ -0,0 +1,38 @@ +create schema v; +create table v.test_rum( + t text, + a tsvector +); +create trigger tsvectorupdate + before update or insert on v.test_rum + for each row + execute procedure + tsvector_update_trigger( + 'a', + 'pg_catalog.english', + 't' + ); +insert into v.test_rum(t) +values + ('the situation is most beautiful'), + ('it is a beautiful'), + ('it looks like a beautiful place'); +create index rumidx on v.test_rum using rum (a rum_tsvector_ops); +select + t, + a <=> to_tsquery('english', 'beautiful | place') as rank +from + v.test_rum +where + a @@ to_tsquery('english', 'beautiful | place') +order by + a <=> to_tsquery('english', 'beautiful | place'); + t | rank +---------------------------------+---------- + it looks like a beautiful place | 8.22467 + the situation is most beautiful | 16.44934 + it is a beautiful | 16.44934 +(3 rows) + +drop schema v cascade; +NOTICE: drop cascades to table v.test_rum diff --git a/nix/tests/expected/timescale.out b/nix/tests/expected/timescale.out new file mode 100644 index 0000000..0812954 --- /dev/null +++ b/nix/tests/expected/timescale.out @@ -0,0 +1,47 @@ +-- Confirm we're running the apache version +show timescaledb.license; + timescaledb.license +--------------------- + apache +(1 row) + +-- Create schema v +create schema v; +-- Create a table in the v schema +create table v.sensor_data ( + time timestamptz not null, + sensor_id int not null, + temperature double 
precision not null, + humidity double precision not null +); +-- Convert the table to a hypertable +select create_hypertable('v.sensor_data', 'time'); + create_hypertable +--------------------- + (1,v,sensor_data,t) +(1 row) + +-- Insert some data into the hypertable +insert into v.sensor_data (time, sensor_id, temperature, humidity) +values + ('2024-08-09', 1, 22.5, 60.2), + ('2024-08-08', 1, 23.0, 59.1), + ('2024-08-07', 2, 21.7, 63.3); +-- Select data from the hypertable +select + * +from + v.sensor_data; + time | sensor_id | temperature | humidity +------------------------------+-----------+-------------+---------- + Fri Aug 09 00:00:00 2024 PDT | 1 | 22.5 | 60.2 + Thu Aug 08 00:00:00 2024 PDT | 1 | 23 | 59.1 + Wed Aug 07 00:00:00 2024 PDT | 2 | 21.7 | 63.3 +(3 rows) + +-- Drop schema v and all its entities +drop schema v cascade; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to table v.sensor_data +drop cascades to table _timescaledb_internal._hyper_1_1_chunk +drop cascades to table _timescaledb_internal._hyper_1_2_chunk diff --git a/nix/tests/expected/vault.out b/nix/tests/expected/vault.out new file mode 100644 index 0000000..e4eaff2 --- /dev/null +++ b/nix/tests/expected/vault.out @@ -0,0 +1,42 @@ +select + 1 +from + vault.create_secret('my_s3kre3t'); + ?column? +---------- + 1 +(1 row) + +select + 1 +from + vault.create_secret( + 'another_s3kre3t', + 'unique_name', + 'This is the description' + ); + ?column? 
+---------- + 1 +(1 row) + +insert into vault.secrets (secret) +values + ('s3kre3t_k3y'); +select + name, + description +from + vault.decrypted_secrets +order by + created_at desc +limit + 3; + name | description +-------------+------------------------- + | + unique_name | This is the description + | +(3 rows) + + diff --git a/nix/tests/expected/wal2json.out b/nix/tests/expected/wal2json.out new file mode 100644 index 0000000..6edc359 --- /dev/null +++ b/nix/tests/expected/wal2json.out @@ -0,0 +1,42 @@ +create schema v; +create table v.foo( + id int primary key +); +select + 1 +from + pg_create_logical_replication_slot('reg_test', 'wal2json', false); + ?column? +---------- + 1 +(1 row) + +insert into v.foo(id) values (1); +select + data +from + pg_logical_slot_get_changes( + 'reg_test', + null, + null, + 'include-pk', '1', + 'include-transaction', 'false', + 'include-timestamp', 'false', + 'include-type-oids', 'false', + 'format-version', '2', + 'actions', 'insert,update,delete' + ) x; + data +-------------------------------------------------------------------------------------------------------------------------------------- + {"action":"I","schema":"v","table":"foo","columns":[{"name":"id","type":"integer","value":1}],"pk":[{"name":"id","type":"integer"}]} +(1 row) + +select + pg_drop_replication_slot('reg_test'); + pg_drop_replication_slot +-------------------------- + +(1 row) + +drop schema v cascade; +NOTICE: drop cascades to table v.foo diff --git a/nix/tests/migrations/data.sql b/nix/tests/migrations/data.sql new file mode 100644 index 0000000..36396e6 --- /dev/null +++ b/nix/tests/migrations/data.sql @@ -0,0 +1,21 @@ +create table account( + id int primary key, + is_verified bool, + name text, + phone text +); + +insert into public.account(id, is_verified, name, phone) +values + (1, true, 'foo', '1111111111'), + (2, true, 'bar', null), + (3, false, 'baz', '33333333333'); + +select id as test_new_key_id from pgsodium.create_key(name:='test_new_key') \gset + 
+select vault.create_secret ( + 's3kr3t_k3y', 'a_name', 'this is the foo secret key') test_secret_id \gset + +select vault.create_secret ( + 's3kr3t_k3y_2', 'another_name', 'this is another foo key', + (select id from pgsodium.key where name = 'test_new_key')) test_secret_id_2 \gset diff --git a/nix/tests/postgresql.conf.in b/nix/tests/postgresql.conf.in new file mode 100644 index 0000000..ef860af --- /dev/null +++ b/nix/tests/postgresql.conf.in @@ -0,0 +1,800 @@ +# ----------------------------- +# PostgreSQL configuration file + +# ----------------------------- +# +# This file consists of lines of the form: +# +# name = value +# +# (The "=" is optional.) Whitespace may be used. Comments are introduced with +# "#" anywhere on a line. The complete list of parameter names and allowed +# values can be found in the PostgreSQL documentation. +# +# The commented-out settings shown in this file represent the default values. +# Re-commenting a setting is NOT sufficient to revert it to the default value; +# you need to reload the server. +# +# This file is read on server startup and when the server receives a SIGHUP +# signal. If you edit the file on a running system, you have to SIGHUP the +# server for the changes to take effect, run "pg_ctl reload", or execute +# "SELECT pg_reload_conf()". Some parameters, which are marked below, +# require a server shutdown and restart to take effect. +# +# Any parameter can also be given as a command-line option to the server, e.g., +# "postgres -c log_connections=on". Some parameters can be changed at run time +# with the "SET" SQL command. 
+# +# Memory units: B = bytes Time units: us = microseconds +# kB = kilobytes ms = milliseconds +# MB = megabytes s = seconds +# GB = gigabytes min = minutes +# TB = terabytes h = hours +# d = days + + +#------------------------------------------------------------------------------ +# FILE LOCATIONS +#------------------------------------------------------------------------------ + +# The default values of these variables are driven from the -D command-line +# option or PGDATA environment variable, represented here as ConfigDir. + +#data_directory = 'ConfigDir' # use data in another directory + # (change requires restart) +#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file + # (change requires restart) +#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file + # (change requires restart) + +# If external_pid_file is not explicitly set, no extra PID file is written. +#external_pid_file = '' # write an extra PID file + # (change requires restart) + + +#------------------------------------------------------------------------------ +# CONNECTIONS AND AUTHENTICATION +#------------------------------------------------------------------------------ + +# - Connection Settings - + +listen_addresses = '*' # what IP address(es) to listen on; +#port = @PGSQL_DEFAULT_PORT@ # (change requires restart) +max_connections = 100 # (change requires restart) +#superuser_reserved_connections = 3 # (change requires restart) +unix_socket_directories = '/tmp' # comma-separated list of directories + # (change requires restart) +#unix_socket_group = '' # (change requires restart) +#unix_socket_permissions = 0777 # begin with 0 to use octal notation + # (change requires restart) +#bonjour = off # advertise server via Bonjour + # (change requires restart) +#bonjour_name = '' # defaults to the computer name + # (change requires restart) + +# - TCP settings - +# see "man tcp" for details + +#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; + # 0 selects the system 
default +#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; + # 0 selects the system default +#tcp_keepalives_count = 0 # TCP_KEEPCNT; + # 0 selects the system default +#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds; + # 0 selects the system default + +#client_connection_check_interval = 0 # time between checks for client + # disconnection while running queries; + # 0 for never + +# - Authentication - + +#authentication_timeout = 1min # 1s-600s +#password_encryption = scram-sha-256 # scram-sha-256 or md5 +#db_user_namespace = off + +# GSSAPI using Kerberos +#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab' +#krb_caseins_users = off + +# - SSL - + +#ssl = off +#ssl_ca_file = '' +#ssl_cert_file = 'server.crt' +#ssl_crl_file = '' +#ssl_crl_dir = '' +#ssl_key_file = 'server.key' +#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers +#ssl_prefer_server_ciphers = on +#ssl_ecdh_curve = 'prime256v1' +#ssl_min_protocol_version = 'TLSv1.2' +#ssl_max_protocol_version = '' +#ssl_dh_params_file = '' +#ssl_passphrase_command = '' +#ssl_passphrase_command_supports_reload = off + + +#------------------------------------------------------------------------------ +# RESOURCE USAGE (except WAL) +#------------------------------------------------------------------------------ + +# - Memory - + +shared_buffers = 128MB # min 128kB + # (change requires restart) +#huge_pages = try # on, off, or try + # (change requires restart) +#huge_page_size = 0 # zero for system default + # (change requires restart) +#temp_buffers = 8MB # min 800kB +#max_prepared_transactions = 0 # zero disables the feature + # (change requires restart) +# Caution: it is not advisable to set max_prepared_transactions nonzero unless +# you actively intend to use prepared transactions. 
+#work_mem = 4MB # min 64kB +#hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem +#maintenance_work_mem = 64MB # min 1MB +#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem +#logical_decoding_work_mem = 64MB # min 64kB +#max_stack_depth = 2MB # min 100kB +#shared_memory_type = mmap # the default is the first option + # supported by the operating system: + # mmap + # sysv + # windows + # (change requires restart) +dynamic_shared_memory_type = posix # the default is the first option + # supported by the operating system: + # posix + # sysv + # windows + # mmap + # (change requires restart) +#min_dynamic_shared_memory = 0MB # (change requires restart) + +# - Disk - + +#temp_file_limit = -1 # limits per-process temp file space + # in kilobytes, or -1 for no limit + +# - Kernel Resources - + +#max_files_per_process = 1000 # min 64 + # (change requires restart) + +# - Cost-Based Vacuum Delay - + +#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables) +#vacuum_cost_page_hit = 1 # 0-10000 credits +#vacuum_cost_page_miss = 2 # 0-10000 credits +#vacuum_cost_page_dirty = 20 # 0-10000 credits +#vacuum_cost_limit = 200 # 1-10000 credits + +# - Background Writer - + +#bgwriter_delay = 200ms # 10-10000ms between rounds +#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables +#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round +#bgwriter_flush_after = 512kB # measured in pages, 0 disables + +# - Asynchronous Behavior - + +#backend_flush_after = 0 # measured in pages, 0 disables +#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching +#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching +#max_worker_processes = 8 # (change requires restart) +#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers +#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers +#max_parallel_workers = 8 # maximum number of max_worker_processes that + # can be used in parallel 
operations +#parallel_leader_participation = on +#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate + # (change requires restart) + + +#------------------------------------------------------------------------------ +# WRITE-AHEAD LOG +#------------------------------------------------------------------------------ + +# - Settings - + +wal_level = logical # minimal, replica, or logical + # (change requires restart) +#fsync = on # flush data to disk for crash safety + # (turning this off can cause + # unrecoverable data corruption) +#synchronous_commit = on # synchronization level; + # off, local, remote_write, remote_apply, or on +#wal_sync_method = fsync # the default is the first option + # supported by the operating system: + # open_datasync + # fdatasync (default on Linux and FreeBSD) + # fsync + # fsync_writethrough + # open_sync +#full_page_writes = on # recover from partial page writes +wal_log_hints = on # also do full page writes of non-critical updates + # (change requires restart) +#wal_compression = off # enable compression of full-page writes +#wal_init_zero = on # zero-fill new WAL files +#wal_recycle = on # recycle WAL files +#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers + # (change requires restart) +#wal_writer_delay = 200ms # 1-10000 milliseconds +#wal_writer_flush_after = 1MB # measured in pages, 0 disables +#wal_skip_threshold = 2MB + +#commit_delay = 0 # range 0-100000, in microseconds +#commit_siblings = 5 # range 1-1000 + +# - Checkpoints - + +#checkpoint_timeout = 5min # range 30s-1d +#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0 +#checkpoint_flush_after = 256kB # measured in pages, 0 disables +#checkpoint_warning = 30s # 0 disables +max_wal_size = 1GB +min_wal_size = 80MB + +# - Archiving - + +#archive_mode = off # enables archiving; off, on, or always + # (change requires restart) +#archive_command = '' # command to use to archive a logfile segment + # placeholders: %p = path of 
file to archive + # %f = file name only + # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' +#archive_timeout = 0 # force a logfile segment switch after this + # number of seconds; 0 disables + +# - Archive Recovery - + +# These are only used in recovery mode. + +#restore_command = '' # command to use to restore an archived logfile segment + # placeholders: %p = path of file to restore + # %f = file name only + # e.g. 'cp /mnt/server/archivedir/%f %p' +#archive_cleanup_command = '' # command to execute at every restartpoint +#recovery_end_command = '' # command to execute at completion of recovery + +# - Recovery Target - + +# Set these only when performing a targeted recovery. + +#recovery_target = '' # 'immediate' to end recovery as soon as a + # consistent state is reached + # (change requires restart) +#recovery_target_name = '' # the named restore point to which recovery will proceed + # (change requires restart) +#recovery_target_time = '' # the time stamp up to which recovery will proceed + # (change requires restart) +#recovery_target_xid = '' # the transaction ID up to which recovery will proceed + # (change requires restart) +#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed + # (change requires restart) +#recovery_target_inclusive = on # Specifies whether to stop: + # just after the specified recovery target (on) + # just before the recovery target (off) + # (change requires restart) +#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID + # (change requires restart) +#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown' + # (change requires restart) + + +#------------------------------------------------------------------------------ +# REPLICATION +#------------------------------------------------------------------------------ + +# - Sending Servers - + +# Set these on the primary and on any standby that will send replication data. 
+ +#max_wal_senders = 10 # max number of walsender processes + # (change requires restart) +#max_replication_slots = 10 # max number of replication slots + # (change requires restart) +#wal_keep_size = 0 # in megabytes; 0 disables +#max_slot_wal_keep_size = -1 # in megabytes; -1 disables +#wal_sender_timeout = 60s # in milliseconds; 0 disables +#track_commit_timestamp = off # collect timestamp of transaction commit + # (change requires restart) + +# - Primary Server - + +# These settings are ignored on a standby server. + +#synchronous_standby_names = '' # standby servers that provide sync rep + # method to choose sync standbys, number of sync standbys, + # and comma-separated list of application_name + # from standby(s); '*' = all +#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed + +# - Standby Servers - + +# These settings are ignored on a primary server. + +#primary_conninfo = '' # connection string to sending server +#primary_slot_name = '' # replication slot on sending server +#promote_trigger_file = '' # file name whose presence ends recovery +#hot_standby = on # "off" disallows queries during recovery + # (change requires restart) +#max_standby_archive_delay = 30s # max delay before canceling queries + # when reading WAL from archive; + # -1 allows indefinite delay +#max_standby_streaming_delay = 30s # max delay before canceling queries + # when reading streaming WAL; + # -1 allows indefinite delay +#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name + # is not set +#wal_receiver_status_interval = 10s # send replies at least this often + # 0 disables +#hot_standby_feedback = off # send info from standby to prevent + # query conflicts +#wal_receiver_timeout = 60s # time that receiver waits for + # communication from primary + # in milliseconds; 0 disables +#wal_retrieve_retry_interval = 5s # time to wait before retrying to + # retrieve WAL after a failed attempt +#recovery_min_apply_delay = 0 # minimum delay 
for applying changes during recovery + +# - Subscribers - + +# These settings are ignored on a publisher. + +#max_logical_replication_workers = 4 # taken from max_worker_processes + # (change requires restart) +#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers + + +#------------------------------------------------------------------------------ +# QUERY TUNING +#------------------------------------------------------------------------------ + +# - Planner Method Configuration - + +#enable_async_append = on +#enable_bitmapscan = on +#enable_gathermerge = on +#enable_hashagg = on +#enable_hashjoin = on +#enable_incremental_sort = on +#enable_indexscan = on +#enable_indexonlyscan = on +#enable_material = on +#enable_memoize = on +#enable_mergejoin = on +#enable_nestloop = on +#enable_parallel_append = on +#enable_parallel_hash = on +#enable_partition_pruning = on +#enable_partitionwise_join = off +#enable_partitionwise_aggregate = off +#enable_seqscan = on +#enable_sort = on +#enable_tidscan = on + +# - Planner Cost Constants - + +#seq_page_cost = 1.0 # measured on an arbitrary scale +#random_page_cost = 4.0 # same scale as above +#cpu_tuple_cost = 0.01 # same scale as above +#cpu_index_tuple_cost = 0.005 # same scale as above +#cpu_operator_cost = 0.0025 # same scale as above +#parallel_setup_cost = 1000.0 # same scale as above +#parallel_tuple_cost = 0.1 # same scale as above +#min_parallel_table_scan_size = 8MB +#min_parallel_index_scan_size = 512kB +#effective_cache_size = 4GB + +#jit_above_cost = 100000 # perform JIT compilation if available + # and query more expensive than this; + # -1 disables +#jit_inline_above_cost = 500000 # inline small functions if query is + # more expensive than this; -1 disables +#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if + # query is more expensive than this; + # -1 disables + +# - Genetic Query Optimizer - + +#geqo = on +#geqo_threshold = 12 +#geqo_effort = 5 # range 1-10 
+#geqo_pool_size = 0 # selects default based on effort +#geqo_generations = 0 # selects default based on effort +#geqo_selection_bias = 2.0 # range 1.5-2.0 +#geqo_seed = 0.0 # range 0.0-1.0 + +# - Other Planner Options - + +#default_statistics_target = 100 # range 1-10000 +#constraint_exclusion = partition # on, off, or partition +#cursor_tuple_fraction = 0.1 # range 0.0-1.0 +#from_collapse_limit = 8 +#jit = on # allow JIT compilation +#join_collapse_limit = 8 # 1 disables collapsing of explicit + # JOIN clauses +#plan_cache_mode = auto # auto, force_generic_plan or + # force_custom_plan + + +#------------------------------------------------------------------------------ +# REPORTING AND LOGGING +#------------------------------------------------------------------------------ + +# - Where to Log - + +#log_destination = 'stderr' # Valid values are combinations of + # stderr, csvlog, syslog, and eventlog, + # depending on platform. csvlog + # requires logging_collector to be on. + +# This is used when logging to stderr: +#logging_collector = off # Enable capturing of stderr and csvlog + # into log files. Required to be on for + # csvlogs. + # (change requires restart) + +# These are only used if logging_collector is on: +#log_directory = 'log' # directory where log files are written, + # can be absolute or relative to PGDATA +#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern, + # can include strftime() escapes +#log_file_mode = 0600 # creation mode for log files, + # begin with 0 to use octal notation +#log_rotation_age = 1d # Automatic rotation of logfiles will + # happen after that time. 0 disables. +#log_rotation_size = 10MB # Automatic rotation of logfiles will + # happen after that much log output. + # 0 disables. +#log_truncate_on_rotation = off # If on, an existing log file with the + # same name as the new log file will be + # truncated rather than appended to. 
+ # But such truncation only occurs on + # time-driven rotation, not on restarts + # or size-driven rotation. Default is + # off, meaning append to existing files + # in all cases. + +# These are relevant when logging to syslog: +#syslog_facility = 'LOCAL0' +#syslog_ident = 'postgres' +#syslog_sequence_numbers = on +#syslog_split_messages = on + +# This is only relevant when logging to eventlog (Windows): +# (change requires restart) +#event_source = 'PostgreSQL' + +# - When to Log - + +#log_min_messages = warning # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic + +#log_min_error_statement = error # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic (effectively off) + +#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements + # and their durations, > 0 logs only + # statements running at least this number + # of milliseconds + +#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements + # and their durations, > 0 logs only a sample of + # statements running at least this number + # of milliseconds; + # sample fraction is determined by log_statement_sample_rate + +#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding + # log_min_duration_sample to be logged; + # 1.0 logs all such statements, 0.0 never logs + + +#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements + # are logged regardless of their duration; 1.0 logs all + # statements from all transactions, 0.0 never logs + +# - What to Log - + +#debug_print_parse = off +#debug_print_rewritten = off +#debug_print_plan = off +#debug_pretty_print = on +#log_autovacuum_min_duration = -1 # log autovacuum activity; + # -1 disables, 0 logs all actions and + # their durations, > 0 logs only + # actions running at 
least this number + # of milliseconds. +#log_checkpoints = off +#log_connections = off +#log_disconnections = off +#log_duration = off +#log_error_verbosity = default # terse, default, or verbose messages +#log_hostname = off +#log_line_prefix = '%m [%p] ' # special values: + # %a = application name + # %u = user name + # %d = database name + # %r = remote host and port + # %h = remote host + # %b = backend type + # %p = process ID + # %P = process ID of parallel group leader + # %t = timestamp without milliseconds + # %m = timestamp with milliseconds + # %n = timestamp with milliseconds (as a Unix epoch) + # %Q = query ID (0 if none or not computed) + # %i = command tag + # %e = SQL state + # %c = session ID + # %l = session line number + # %s = session start timestamp + # %v = virtual transaction ID + # %x = transaction ID (0 if none) + # %q = stop here in non-session + # processes + # %% = '%' + # e.g. '<%u%%%d> ' +#log_lock_waits = off # log lock waits >= deadlock_timeout +#log_recovery_conflict_waits = off # log standby recovery conflict waits + # >= deadlock_timeout +#log_parameter_max_length = -1 # when logging statements, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +#log_parameter_max_length_on_error = 0 # when logging an error, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +#log_statement = 'none' # none, ddl, mod, all +#log_replication_commands = off +#log_temp_files = -1 # log temporary files equal or larger + # than the specified size in kilobytes; + # -1 disables, 0 logs all temp files +log_timezone = 'America/Chicago' + + +#------------------------------------------------------------------------------ +# PROCESS TITLE +#------------------------------------------------------------------------------ + +#cluster_name = '' # added to process titles if nonempty + # (change requires restart) +#update_process_title = on + + 
+#------------------------------------------------------------------------------ +# STATISTICS +#------------------------------------------------------------------------------ + +# - Query and Index Statistics Collector - + +#track_activities = on +#track_activity_query_size = 1024 # (change requires restart) +#track_counts = on +#track_io_timing = off +#track_wal_io_timing = off +#track_functions = none # none, pl, all +#stats_temp_directory = 'pg_stat_tmp' + + +# - Monitoring - + +#compute_query_id = auto +#log_statement_stats = off +#log_parser_stats = off +#log_planner_stats = off +#log_executor_stats = off + + +#------------------------------------------------------------------------------ +# AUTOVACUUM +#------------------------------------------------------------------------------ + +#autovacuum = on # Enable autovacuum subprocess? 'on' + # requires track_counts to also be on. +#autovacuum_max_workers = 3 # max number of autovacuum subprocesses + # (change requires restart) +#autovacuum_naptime = 1min # time between autovacuum runs +#autovacuum_vacuum_threshold = 50 # min number of row updates before + # vacuum +#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts + # before vacuum; -1 disables insert + # vacuums +#autovacuum_analyze_threshold = 50 # min number of row updates before + # analyze +#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum +#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table + # size before insert vacuum +#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze +#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum + # (change requires restart) +#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age + # before forced vacuum + # (change requires restart) +#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for + # autovacuum, in milliseconds; + # -1 means use vacuum_cost_delay 
+#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for + # autovacuum, -1 means use + # vacuum_cost_limit + + +#------------------------------------------------------------------------------ +# CLIENT CONNECTION DEFAULTS +#------------------------------------------------------------------------------ + +# - Statement Behavior - + +#client_min_messages = notice # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # log + # notice + # warning + # error +#search_path = '"$user", public' # schema names +#row_security = on +#default_table_access_method = 'heap' +#default_tablespace = '' # a tablespace name, '' uses the default +#default_toast_compression = 'pglz' # 'pglz' or 'lz4' +#temp_tablespaces = '' # a list of tablespace names, '' uses + # only default tablespace +#check_function_bodies = on +#default_transaction_isolation = 'read committed' +#default_transaction_read_only = off +#default_transaction_deferrable = off +#session_replication_role = 'origin' +#statement_timeout = 0 # in milliseconds, 0 is disabled +#lock_timeout = 0 # in milliseconds, 0 is disabled +#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled +#idle_session_timeout = 0 # in milliseconds, 0 is disabled +#vacuum_freeze_table_age = 150000000 +#vacuum_freeze_min_age = 50000000 +#vacuum_failsafe_age = 1600000000 +#vacuum_multixact_freeze_table_age = 150000000 +#vacuum_multixact_freeze_min_age = 5000000 +#vacuum_multixact_failsafe_age = 1600000000 +#bytea_output = 'hex' # hex, escape +#xmlbinary = 'base64' +#xmloption = 'content' +#gin_pending_list_limit = 4MB + +# - Locale and Formatting - + +datestyle = 'iso, mdy' +#intervalstyle = 'postgres' +timezone = 'America/Chicago' +#timezone_abbreviations = 'Default' # Select the set of available time zone + # abbreviations. Currently, there are + # Default + # Australia (historical usage) + # India + # You can create your own file in + # share/timezonesets/. 
+#extra_float_digits = 1 # min -15, max 3; any value >0 actually + # selects precise output mode +#client_encoding = sql_ascii # actually, defaults to database + # encoding + +# These settings are initialized by initdb, but they can be changed. +lc_messages = 'C' # locale for system error message + # strings +lc_monetary = 'C' # locale for monetary formatting +lc_numeric = 'C' # locale for number formatting +lc_time = 'C' # locale for time formatting + +# default configuration for text search +default_text_search_config = 'pg_catalog.english' + +# - Shared Library Preloading - + +#local_preload_libraries = '' +#session_preload_libraries = '' +shared_preload_libraries = 'pg_stat_statements, pgaudit, plpgsql, plpgsql_check, pg_cron, pg_net, pgsodium, timescaledb, auto_explain, pg_tle, plan_filter, pg_backtrace' # (change requires restart) +jit_provider = 'llvmjit' # JIT library to use + + +# - Other Defaults - + +#dynamic_library_path = '$libdir' +#gin_fuzzy_search_limit = 0 + + +#------------------------------------------------------------------------------ +# LOCK MANAGEMENT +#------------------------------------------------------------------------------ + +#deadlock_timeout = 1s +#max_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_relation = -2 # negative values mean + # (max_pred_locks_per_transaction + # / -max_pred_locks_per_relation) - 1 +#max_pred_locks_per_page = 2 # min 0 + + +#------------------------------------------------------------------------------ +# VERSION AND PLATFORM COMPATIBILITY +#------------------------------------------------------------------------------ + +# - Previous PostgreSQL Versions - + +#array_nulls = on +#backslash_quote = safe_encoding # on, off, or safe_encoding +#escape_string_warning = on +#lo_compat_privileges = off +#quote_all_identifiers = off +#standard_conforming_strings = on +#synchronize_seqscans = on + 
+# - Other Platforms and Clients - + +#transform_null_equals = off + + +#------------------------------------------------------------------------------ +# ERROR HANDLING +#------------------------------------------------------------------------------ + +#exit_on_error = off # terminate session on any error? +#restart_after_crash = on # reinitialize after backend crash? +#data_sync_retry = off # retry or panic on failure to fsync + # data? + # (change requires restart) +#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+) + + +#------------------------------------------------------------------------------ +# CONFIG FILE INCLUDES +#------------------------------------------------------------------------------ + +# These options allow settings to be loaded from files other than the +# default postgresql.conf. Note that these are directives, not variable +# assignments, so they can usefully be given more than once. + +#include_dir = '...' # include files ending in '.conf' from + # a directory, e.g., 'conf.d' +#include_if_exists = '...' # include file only if it exists +#include = '...' 
# include file + + +#------------------------------------------------------------------------------ +# CUSTOMIZED OPTIONS +#------------------------------------------------------------------------------ + +# Add settings for extensions here + +pgsodium.getkey_script = '@PGSODIUM_GETKEY_SCRIPT@' + +auto_explain.log_min_duration = 10s +cron.database_name = 'postgres' diff --git a/nix/tests/prime.sql b/nix/tests/prime.sql new file mode 100644 index 0000000..dbcd69c --- /dev/null +++ b/nix/tests/prime.sql @@ -0,0 +1,98 @@ +create role postgres; +create extension address_standardizer; +create extension address_standardizer_data_us; +create extension adminpack; +create extension amcheck; +create extension autoinc; +create extension bloom; +create extension btree_gin; +create extension btree_gist; +create extension citext; +create extension cube; +create extension dblink; +create extension dict_int; +create extension dict_xsyn; +create extension earthdistance; +create extension file_fdw; +create extension fuzzystrmatch; +create extension http; +create extension hstore; +create extension hypopg; +create extension index_advisor; +create extension insert_username; +create extension intagg; +create extension intarray; +create extension isn; +create extension lo; +create extension ltree; +create extension moddatetime; +create extension old_snapshot; +create extension pageinspect; +create extension pg_buffercache; + +/* +TODO: Does not enable locally mode +requires a change to postgresql.conf to set +cron.database_name = 'testing' +*/ +-- create extension pg_cron; + +create extension pg_net; +create extension pg_graphql; +create extension pg_freespacemap; +create extension pg_hashids; +create extension pg_prewarm; +create extension pgmq; +create extension pg_jsonschema; +create extension pg_repack; +create extension pg_stat_monitor; +create extension pg_stat_statements; +create extension pg_surgery; +create extension pg_tle; +create extension pg_trgm; +create extension 
pg_visibility; +create extension pg_walinspect; +create extension pgaudit; +create extension pgcrypto; +create extension pgtap; +create extension pgjwt; +create extension pgroonga; +create extension pgroonga_database; +create extension pgsodium; +create extension pgrowlocks; +create extension pgstattuple; +create extension plpgsql_check; + +create extension plv8; +create extension plcoffee; +create extension plls; + +create extension postgis; +create extension postgis_raster; +create extension postgis_sfcgal; +create extension postgis_tiger_geocoder; +create extension postgis_topology; +create extension pgrouting; -- requires postgis +create extension postgres_fdw; +create extension rum; +create extension refint; +create extension seg; +create extension sslinfo; +create extension tealbase_vault; +create extension tablefunc; +create extension tcn; +create extension timescaledb; +create extension tsm_system_rows; +create extension tsm_system_time; +create extension unaccent; +create extension "uuid-ossp"; +create extension vector; +create extension wrappers; +create extension xml2; + + + + + + +CREATE EXTENSION IF NOT EXISTS pg_backtrace; diff --git a/nix/tests/smoke/0000-hello-world.sql b/nix/tests/smoke/0000-hello-world.sql new file mode 100644 index 0000000..d6f002d --- /dev/null +++ b/nix/tests/smoke/0000-hello-world.sql @@ -0,0 +1,10 @@ +-- Start transaction and plan the tests. +BEGIN; +SELECT plan(1); + +-- Run the tests. +SELECT pass( 'My test passed, w00t!' ); + +-- Finish the tests and clean up. +SELECT * FROM finish(); +ROLLBACK; diff --git a/nix/tests/smoke/0001-pg_graphql.sql b/nix/tests/smoke/0001-pg_graphql.sql new file mode 100644 index 0000000..80e3cb2 --- /dev/null +++ b/nix/tests/smoke/0001-pg_graphql.sql @@ -0,0 +1,59 @@ +-- Start transaction and plan the tests. 
+begin; + select plan(1); + + create extension if not exists pg_graphql; + + create table account( + id int primary key, + is_verified bool, + name text, + phone text + ); + + insert into public.account(id, is_verified, name, phone) + values + (1, true, 'foo', '1111111111'), + (2, true, 'bar', null), + (3, false, 'baz', '33333333333'); + + select is( + graphql.resolve($$ + { + accountCollection { + edges { + node { + id + } + } + } + } + $$), + '{ + "data": { + "accountCollection": { + "edges": [ + { + "node": { + "id": 1 + } + }, + { + "node": { + "id": 2 + } + }, + { + "node": { + "id": 3 + } + } + ] + } + } + }'::jsonb + ); + + + select * from finish(); +rollback; diff --git a/nix/tests/smoke/0002-supautils.sql b/nix/tests/smoke/0002-supautils.sql new file mode 100644 index 0000000..7a21606 --- /dev/null +++ b/nix/tests/smoke/0002-supautils.sql @@ -0,0 +1,17 @@ +BEGIN; +SELECT plan(2); + +-- the setting doesn't exist when supautils is not loaded +SELECT throws_ok($$ + select current_setting('supautils.privileged_extensions', false) +$$); + +LOAD 'supautils'; + +-- now it does +SELECT ok( + current_setting('supautils.privileged_extensions', false) = '' +); + +SELECT * FROM finish(); +ROLLBACK; diff --git a/nix/tests/smoke/0003-pgsodium-vault.sql b/nix/tests/smoke/0003-pgsodium-vault.sql new file mode 100644 index 0000000..1c9cedf --- /dev/null +++ b/nix/tests/smoke/0003-pgsodium-vault.sql @@ -0,0 +1,40 @@ +BEGIN; + +select plan(3); + +select id as test_new_key_id from pgsodium.create_key(name:='test_new_key') \gset + +select vault.create_secret ( + 's3kr3t_k3y', 'a_name', 'this is the foo secret key') test_secret_id \gset + +select vault.create_secret ( + 's3kr3t_k3y_2', 'another_name', 'this is another foo key', + (select id from pgsodium.key where name = 'test_new_key')) test_secret_id_2 \gset + +SELECT results_eq( + $$ + SELECT decrypted_secret = 's3kr3t_k3y', description = 'this is the foo secret key' + FROM vault.decrypted_secrets WHERE name = 'a_name'; + 
$$, + $$VALUES (true, true)$$, + 'can select from masking view with custom key'); + +SELECT results_eq( + $$ + SELECT decrypted_secret = 's3kr3t_k3y_2', description = 'this is another foo key' + FROM vault.decrypted_secrets WHERE key_id = (select id from pgsodium.key where name = 'test_new_key'); + $$, + $$VALUES (true, true)$$, + 'can select from masking view'); + +SELECT lives_ok( + format($test$ + select vault.update_secret( + %L::uuid, new_name:='a_new_name', + new_secret:='new_s3kr3t_k3y', new_description:='this is the bar key') + $test$, :'test_secret_id'), + 'can update name, secret and description' + ); + +SELECT * FROM finish(); +ROLLBACK; diff --git a/nix/tests/smoke/0004-index_advisor.sql b/nix/tests/smoke/0004-index_advisor.sql new file mode 100644 index 0000000..53170f6 --- /dev/null +++ b/nix/tests/smoke/0004-index_advisor.sql @@ -0,0 +1,19 @@ +-- Start transaction and plan the tests. +begin; + select plan(1); + + create extension if not exists index_advisor; + + create table account( + id int primary key, + is_verified bool + ); + + select is( + (select count(1) from index_advisor('select id from public.account where is_verified;'))::int, + 1, + 'index_advisor returns 1 row' + ); + + select * from finish(); +rollback; diff --git a/nix/tests/smoke/0005-test_pgroonga_mecab.sql b/nix/tests/smoke/0005-test_pgroonga_mecab.sql new file mode 100644 index 0000000..7341d5f --- /dev/null +++ b/nix/tests/smoke/0005-test_pgroonga_mecab.sql @@ -0,0 +1,36 @@ +-- File: 0005-test_pgroonga_revised.sql + +begin; + -- Plan for 3 tests: extension, table, and index + select plan(3); + + -- Create the PGroonga extension + create extension if not exists pgroonga; + + -- -- Test 1: Check if PGroonga extension exists + select has_extension('pgroonga', 'The pgroonga extension should exist.'); + + -- Create the table + create table notes( + id integer primary key, + content text + ); + + -- Test 2: Check if the table was created + SELECT has_table('public', 'notes', 'The notes 
table should exist.'); + -- Create the PGroonga index + CREATE INDEX pgroonga_content_index + ON notes + USING pgroonga (content) + WITH (tokenizer='TokenMecab'); + + -- -- Test 3: Check if the index was created + SELECT has_index('public', 'notes', 'pgroonga_content_index', 'The pgroonga_content_index should exist.'); + + -- -- Cleanup (this won't affect the test results as they've already been checked) + DROP INDEX IF EXISTS pgroonga_content_index; + DROP TABLE IF EXISTS notes; + + -- Finish the test plan + select * from finish(); +rollback; \ No newline at end of file diff --git a/nix/tests/sql/extensions_sql_interface.sql b/nix/tests/sql/extensions_sql_interface.sql new file mode 100644 index 0000000..ce75802 --- /dev/null +++ b/nix/tests/sql/extensions_sql_interface.sql @@ -0,0 +1,101 @@ +/* + +The purpose of this test is to monitor the SQL interface exposed +by Postgres extensions so we have to manually review/approve any difference +that emerge as versions change. + +*/ + + +/* + +List all extensions that are not enabled +If a new entry shows up in this list, that means a new extension has been +added and you should `create extension ...` to enable it in ./nix/tests/prime + +*/ + +select + name +from + pg_available_extensions +where + installed_version is null +order by + name asc; + + +/* + +Monitor relocatability and config of each extension +- lesson learned from pg_cron + +*/ + +select + extname as extension_name, + extrelocatable as is_relocatable +from + pg_extension +order by + extname asc; + + +/* + +Monitor extension public function interface + +*/ + +select + e.extname as extension_name, + n.nspname as schema_name, + p.proname as function_name, + pg_catalog.pg_get_function_identity_arguments(p.oid) as argument_types, + pg_catalog.pg_get_function_result(p.oid) as return_type +from + pg_catalog.pg_proc p + join pg_catalog.pg_namespace n + on n.oid = p.pronamespace + join pg_catalog.pg_depend d + on d.objid = p.oid + join pg_catalog.pg_extension e + 
on e.oid = d.refobjid +where + d.deptype = 'e' +order by + e.extname, + n.nspname, + p.proname, + pg_catalog.pg_get_function_identity_arguments(p.oid); + +/* + +Monitor extension public table/view/matview/index interface + +*/ + +select + e.extname as extension_name, + n.nspname as schema_name, + pc.relname as entity_name, + pa.attname +from + pg_catalog.pg_class pc + join pg_catalog.pg_namespace n + on n.oid = pc.relnamespace + join pg_catalog.pg_depend d + on d.objid = pc.oid + join pg_catalog.pg_extension e + on e.oid = d.refobjid + left join pg_catalog.pg_attribute pa + on pa.attrelid = pc.oid + and pa.attnum > 0 + and not pa.attisdropped +where + d.deptype = 'e' + and pc.relkind in ('r', 'v', 'm', 'i') +order by + e.extname, + pc.relname, + pa.attname; diff --git a/nix/tests/sql/hypopg.sql b/nix/tests/sql/hypopg.sql new file mode 100644 index 0000000..6aabb69 --- /dev/null +++ b/nix/tests/sql/hypopg.sql @@ -0,0 +1,13 @@ +create schema v; + +create table v.samp( + id int +); + +select 1 from hypopg_create_index($$ + create index on v.samp(id) +$$); + +drop schema v cascade; + + diff --git a/nix/tests/sql/index_advisor.sql b/nix/tests/sql/index_advisor.sql new file mode 100644 index 0000000..3911d6e --- /dev/null +++ b/nix/tests/sql/index_advisor.sql @@ -0,0 +1,13 @@ +create schema v; + +create table v.book( + id int primary key, + title text not null +); + +select + index_statements, errors +from + index_advisor('select id from v.book where title = $1'); + +drop schema v cascade; diff --git a/nix/tests/sql/pg-safeupdate.sql b/nix/tests/sql/pg-safeupdate.sql new file mode 100644 index 0000000..790ec79 --- /dev/null +++ b/nix/tests/sql/pg-safeupdate.sql @@ -0,0 +1,15 @@ +load 'safeupdate'; + +set safeupdate.enabled=1; + +create schema v; + +create table v.foo( + id int, + val text +); + +update v.foo + set val = 'bar'; + +drop schema v cascade; diff --git a/nix/tests/sql/pg_graphql.sql b/nix/tests/sql/pg_graphql.sql new file mode 100644 index 0000000..03f844d --- 
/dev/null +++ b/nix/tests/sql/pg_graphql.sql @@ -0,0 +1,219 @@ +begin; + comment on schema public is '@graphql({"inflect_names": true})'; + + create table account( + id serial primary key, + email varchar(255) not null, + priority int, + status text default 'active' + ); + + create table blog( + id serial primary key, + owner_id integer not null references account(id) + ); + comment on table blog is e'@graphql({"totalCount": {"enabled": true}})'; + + -- Make sure functions still work + create function _echo_email(account) + returns text + language sql + as $$ select $1.email $$; + + /* + Literals + */ + + select graphql.resolve($$ + mutation { + insertIntoAccountCollection(objects: [ + { email: "foo@barsley.com", priority: 1 }, + { email: "bar@foosworth.com" } + ]) { + affectedCount + records { + id + status + echoEmail + blogCollection { + totalCount + } + } + } + } + $$); + + select graphql.resolve($$ + mutation { + insertIntoBlogCollection(objects: [{ + ownerId: 1 + }]) { + records { + id + owner { + id + } + } + } + } + $$); + + + -- Override a default on status with null + select graphql.resolve($$ + mutation { + insertIntoAccountCollection(objects: [ + { email: "baz@baz.com", status: null }, + ]) { + affectedCount + records { + email + status + } + } + } + $$); + + + /* + Variables + */ + + select graphql.resolve($$ + mutation newAccount($emailAddress: String) { + xyz: insertIntoAccountCollection(objects: [ + { email: $emailAddress }, + { email: "other@email.com" } + ]) { + affectedCount + records { + id + email + } + } + } + $$, + variables := '{"emailAddress": "foo@bar.com"}'::jsonb + ); + + + -- Variable override of default with null results in null + select graphql.resolve($$ + mutation newAccount($status: String) { + xyz: insertIntoAccountCollection(objects: [ + { email: "1@email.com", status: $status} + ]) { + affectedCount + records { + email + status + } + } + } + $$, + variables := '{"status": null}'::jsonb + ); + + -- Skipping variable override of 
default results in default + select graphql.resolve($$ + mutation newAccount($status: String) { + xyz: insertIntoAccountCollection(objects: [ + { email: "x@y.com", status: $status}, + ]) { + affectedCount + records { + email + status + } + } + } + $$, + variables := '{}'::jsonb + ); + + + select graphql.resolve($$ + mutation newAccount($acc: AccountInsertInput!) { + insertIntoAccountCollection(objects: [$acc]) { + affectedCount + records { + id + email + } + } + } + $$, + variables := '{"acc": {"email": "bar@foo.com"}}'::jsonb + ); + + select graphql.resolve($$ + mutation newAccounts($acc: [AccountInsertInput!]!) { + insertIntoAccountCollection(objects: $accs) { + affectedCount + records { + id + email + } + } + } + $$, + variables := '{"accs": [{"email": "bar@foo.com"}]}'::jsonb + ); + + -- Single object coerces to a list + select graphql.resolve($$ + mutation { + insertIntoBlogCollection(objects: {ownerId: 1}) { + affectedCount + } + } + $$); + + + /* + Errors + */ + + -- Field does not exist + select graphql.resolve($$ + mutation createAccount($acc: AccountInsertInput) { + insertIntoAccountCollection(objects: [$acc]) { + affectedCount + records { + id + email + } + } + } + $$, + variables := '{"acc": {"doesNotExist": "other"}}'::jsonb + ); + + -- Wrong input type (list of string, not list of object) + select graphql.resolve($$ + mutation { + insertIntoBlogCollection(objects: ["not an object"]) { + affectedCount + } + } + $$); + + -- objects argument is missing + select graphql.resolve($$ + mutation { + insertIntoBlogCollection { + affectedCount + } + } + $$); + + -- Empty call + select graphql.resolve($$ + mutation { + insertIntoBlogCollection(objects: []) { + affectedCount + } + } + $$); + +rollback; diff --git a/nix/tests/sql/pg_hashids.sql b/nix/tests/sql/pg_hashids.sql new file mode 100644 index 0000000..1b82eee --- /dev/null +++ b/nix/tests/sql/pg_hashids.sql @@ -0,0 +1,6 @@ +select id_encode(1001); -- Result: jNl +select id_encode(1234567, 'This is my 
salt'); -- Result: Pdzxp +select id_encode(1234567, 'This is my salt', 10); -- Result: PlRPdzxpR7 +select id_encode(1234567, 'This is my salt', 10, 'abcdefghijABCDxFGHIJ1234567890'); -- Result: 3GJ956J9B9 +select id_decode('PlRPdzxpR7', 'This is my salt', 10); -- Result: 1234567 +select id_decode('3GJ956J9B9', 'This is my salt', 10, 'abcdefghijABCDxFGHIJ1234567890'); -- Result: 1234567 diff --git a/nix/tests/sql/pg_jsonschema.sql b/nix/tests/sql/pg_jsonschema.sql new file mode 100644 index 0000000..f5d7c8c --- /dev/null +++ b/nix/tests/sql/pg_jsonschema.sql @@ -0,0 +1,68 @@ +begin; + +-- Test json_matches_schema +create table customer( + id serial primary key, + metadata json, + + check ( + json_matches_schema( + '{ + "type": "object", + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string", + "maxLength": 16 + } + } + } + }', + metadata + ) + ) +); + +insert into customer(metadata) +values ('{"tags": ["vip", "darkmode-ui"]}'); + +-- Test jsonb_matches_schema +select + jsonb_matches_schema( + '{ + "type": "object", + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string", + "maxLength": 16 + } + } + } + }', + '{"tags": ["vip", "darkmode-ui"]}'::jsonb +); + +-- Test jsonschema_is_valid +select + jsonschema_is_valid( + '{ + "type": "object", + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string", + "maxLength": 16 + } + } + } + }'); + +-- Test invalid payload +insert into customer(metadata) +values ('{"tags": [1, 3]}'); + +rollback; diff --git a/nix/tests/sql/pg_net.sql b/nix/tests/sql/pg_net.sql new file mode 100644 index 0000000..bf44db5 --- /dev/null +++ b/nix/tests/sql/pg_net.sql @@ -0,0 +1,7 @@ +-- This is a very basic test because you can't get the value returned +-- by a pg_net request in the same transaction that created it; + +select + net.http_get ( + 'https://postman-echo.com/get?foo1=bar1&foo2=bar2' + ) as request_id; diff --git a/nix/tests/sql/pg_plan_filter.sql 
b/nix/tests/sql/pg_plan_filter.sql new file mode 100644 index 0000000..b49834d --- /dev/null +++ b/nix/tests/sql/pg_plan_filter.sql @@ -0,0 +1,22 @@ +begin; + load 'plan_filter'; + + create schema v; + + -- create a sample table + create table v.test_table ( + id serial primary key, + data text + ); + + -- insert some test data + insert into v.test_table (data) + values ('sample1'), ('sample2'), ('sample3'); + + set local plan_filter.statement_cost_limit = 0.001; + + select * from v.test_table; + +rollback; + + diff --git a/nix/tests/sql/pg_stat_monitor.sql b/nix/tests/sql/pg_stat_monitor.sql new file mode 100644 index 0000000..69d996b --- /dev/null +++ b/nix/tests/sql/pg_stat_monitor.sql @@ -0,0 +1,6 @@ +select + * +from + pg_stat_monitor +where + false; diff --git a/nix/tests/sql/pg_tle.sql b/nix/tests/sql/pg_tle.sql new file mode 100644 index 0000000..3af1280 --- /dev/null +++ b/nix/tests/sql/pg_tle.sql @@ -0,0 +1,70 @@ +select + pgtle.install_extension( + 'pg_distance', + '0.1', + 'Distance functions for two points', + $_pg_tle_$ + CREATE FUNCTION dist(x1 float8, y1 float8, x2 float8, y2 float8, norm int) + RETURNS float8 + AS $$ + SELECT (abs(x2 - x1) ^ norm + abs(y2 - y1) ^ norm) ^ (1::float8 / norm); + $$ LANGUAGE SQL; + + CREATE FUNCTION manhattan_dist(x1 float8, y1 float8, x2 float8, y2 float8) + RETURNS float8 + AS $$ + SELECT dist(x1, y1, x2, y2, 1); + $$ LANGUAGE SQL; + + CREATE FUNCTION euclidean_dist(x1 float8, y1 float8, x2 float8, y2 float8) + RETURNS float8 + AS $$ + SELECT dist(x1, y1, x2, y2, 2); + $$ LANGUAGE SQL; + $_pg_tle_$ + ); + +create extension pg_distance; + +select manhattan_dist(1, 1, 5, 5); +select euclidean_dist(1, 1, 5, 5); + +SELECT pgtle.install_update_path( + 'pg_distance', + '0.1', + '0.2', + $_pg_tle_$ + CREATE OR REPLACE FUNCTION dist(x1 float8, y1 float8, x2 float8, y2 float8, norm int) + RETURNS float8 + AS $$ + SELECT (abs(x2 - x1) ^ norm + abs(y2 - y1) ^ norm) ^ (1::float8 / norm); + $$ LANGUAGE SQL IMMUTABLE PARALLEL 
SAFE; + + CREATE OR REPLACE FUNCTION manhattan_dist(x1 float8, y1 float8, x2 float8, y2 float8) + RETURNS float8 + AS $$ + SELECT dist(x1, y1, x2, y2, 1); + $$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE; + + CREATE OR REPLACE FUNCTION euclidean_dist(x1 float8, y1 float8, x2 float8, y2 float8) + RETURNS float8 + AS $$ + SELECT dist(x1, y1, x2, y2, 2); + $$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE; + $_pg_tle_$ + ); + + +select + pgtle.set_default_version('pg_distance', '0.2'); + +alter extension pg_distance update; + +drop extension pg_distance; + +select + pgtle.uninstall_extension('pg_distance'); + +-- Restore original state if any of the above fails +drop extension pg_tle cascade; +create extension pg_tle; diff --git a/nix/tests/sql/pgaudit.sql b/nix/tests/sql/pgaudit.sql new file mode 100644 index 0000000..c071c6e --- /dev/null +++ b/nix/tests/sql/pgaudit.sql @@ -0,0 +1,23 @@ +-- Note: there is no test that the logs were correctly output. Only checking for exceptions +set pgaudit.log = 'write, ddl'; +set pgaudit.log_relation = on; +set pgaudit.log_level = notice; + +create schema v; + +create table v.account( + id int, + name text, + password text, + description text +); + +insert into v.account (id, name, password, description) +values (1, 'user1', 'HASH1', 'blah, blah'); + +select + * +from + v.account; + +drop schema v cascade; diff --git a/nix/tests/sql/pgjwt.sql b/nix/tests/sql/pgjwt.sql new file mode 100644 index 0000000..24179e7 --- /dev/null +++ b/nix/tests/sql/pgjwt.sql @@ -0,0 +1,13 @@ +select + sign( + payload := '{"sub":"1234567890","name":"John Doe","iat":1516239022}', + secret := 'secret', + algorithm := 'HS256' + ); + +select + verify( + token := 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoiRm9vIn0.Q8hKjuadCEhnCPuqIj9bfLhTh_9QSxshTRsA5Aq4IuM', + secret := 'secret', + algorithm := 'HS256' + ); diff --git a/nix/tests/sql/pgmq.sql b/nix/tests/sql/pgmq.sql new file mode 100644 index 0000000..cd47cc3 --- /dev/null +++ b/nix/tests/sql/pgmq.sql @@ -0,0 +1,90 
@@ +-- Test the standard flow +select + pgmq.create('Foo'); + +select + * +from + pgmq.send( + queue_name:='Foo', + msg:='{"foo": "bar1"}' + ); + +-- Test queue is not case sensitive +select + * +from + pgmq.send( + queue_name:='foo', -- note: lowercase useage + msg:='{"foo": "bar2"}', + delay:=5 + ); + +select + msg_id, + read_ct, + message +from + pgmq.read( + queue_name:='Foo', + vt:=30, + qty:=2 + ); + +select + msg_id, + read_ct, + message +from + pgmq.pop('Foo'); + + +-- Archive message with msg_id=2. +select + pgmq.archive( + queue_name:='Foo', + msg_id:=2 + ); + + +select + pgmq.create('my_queue'); + +select + pgmq.send_batch( + queue_name:='my_queue', + msgs:=array['{"foo": "bar3"}','{"foo": "bar4"}','{"foo": "bar5"}']::jsonb[] +); + +select + pgmq.archive( + queue_name:='my_queue', + msg_ids:=array[3, 4, 5] + ); + +select + pgmq.delete('my_queue', 6); + + +select + pgmq.drop_queue('my_queue'); + +/* +-- Disabled until pg_partman goes back into the image +select + pgmq.create_partitioned( + 'my_partitioned_queue', + '5 seconds', + '10 seconds' +); +*/ + + +-- Make sure SQLI enabling characters are blocked +select pgmq.create('F--oo'); +select pgmq.create('F$oo'); +select pgmq.create($$F'oo$$); + + + + diff --git a/nix/tests/sql/pgroonga.sql b/nix/tests/sql/pgroonga.sql new file mode 100644 index 0000000..503f266 --- /dev/null +++ b/nix/tests/sql/pgroonga.sql @@ -0,0 +1,48 @@ +create schema v; + +create table v.roon( + id serial primary key, + content text +); + + +with tokenizers as ( + select + x + from + jsonb_array_elements( + (select pgroonga_command('tokenizer_list'))::jsonb + ) x(val) + limit + 1 + offset + 1 -- first record is unrelated and not stable +) +select + t.x::jsonb ->> 'name' +from + jsonb_array_elements((select * from tokenizers)) t(x) +order by + t.x::jsonb ->> 'name'; + + +insert into v.roon (content) +values + ('Hello World'), + ('PostgreSQL with PGroonga is a thing'), + ('This is a full-text search test'), + ('PGroonga supports 
various languages'); + +-- Create default index +create index pgroonga_index on v.roon using pgroonga (content); + +-- Create mecab tokenizer index since we had a bug with this one once +create index pgroonga_index_mecab on v.roon using pgroonga (content) with (tokenizer='TokenMecab'); + +-- Run some queries to test the index +select * from v.roon where content &@~ 'Hello'; +select * from v.roon where content &@~ 'powerful'; +select * from v.roon where content &@~ 'supports'; + + +drop schema v cascade; diff --git a/nix/tests/sql/pgrouting.sql b/nix/tests/sql/pgrouting.sql new file mode 100644 index 0000000..e3af562 --- /dev/null +++ b/nix/tests/sql/pgrouting.sql @@ -0,0 +1,27 @@ +create schema v; + +-- create the roads table +create table v.roads ( + id serial primary key, + source integer, + target integer, + cost double precision +); + +-- insert sample data into roads table +insert into v.roads (source, target, cost) values +(1, 2, 1.0), +(2, 3, 1.0), +(3, 4, 1.0), +(1, 3, 2.5), +(3, 5, 2.0); + +-- create a function to use pgRouting to find the shortest path +select * from pgr_dijkstra( + 'select id, source, target, cost from v.roads', + 1, -- start node + 4 -- end node +); + +drop schema v cascade; + diff --git a/nix/tests/sql/pgsodium.sql b/nix/tests/sql/pgsodium.sql new file mode 100644 index 0000000..cd3c382 --- /dev/null +++ b/nix/tests/sql/pgsodium.sql @@ -0,0 +1,4 @@ +select + status +from + pgsodium.create_key(); diff --git a/nix/tests/sql/pgtap.sql b/nix/tests/sql/pgtap.sql new file mode 100644 index 0000000..b99976a --- /dev/null +++ b/nix/tests/sql/pgtap.sql @@ -0,0 +1,11 @@ +begin; + +select plan(1); + +-- Run the tests. +select pass( 'My test passed, w00t!' ); + +-- Finish the tests and clean up. 
+select * from finish(); + +rollback; diff --git a/nix/tests/sql/pgvector.sql b/nix/tests/sql/pgvector.sql new file mode 100644 index 0000000..f2de305 --- /dev/null +++ b/nix/tests/sql/pgvector.sql @@ -0,0 +1,72 @@ +create schema v; + +create table v.items( + id serial primary key, + embedding vector(3), + half_embedding halfvec(3), + bit_embedding bit(3), + sparse_embedding sparsevec(3) +); + +-- vector ops +create index on v.items using hnsw (embedding vector_l2_ops); +create index on v.items using hnsw (embedding vector_cosine_ops); +create index on v.items using hnsw (embedding vector_l1_ops); +create index on v.items using ivfflat (embedding vector_l2_ops); +create index on v.items using ivfflat (embedding vector_cosine_ops); + +-- halfvec ops +create index on v.items using hnsw (half_embedding halfvec_l2_ops); +create index on v.items using hnsw (half_embedding halfvec_cosine_ops); +create index on v.items using hnsw (half_embedding halfvec_l1_ops); +create index on v.items using ivfflat (half_embedding halfvec_l2_ops); +create index on v.items using ivfflat (half_embedding halfvec_cosine_ops); + +-- sparsevec +create index on v.items using hnsw (sparse_embedding sparsevec_l2_ops); +create index on v.items using hnsw (sparse_embedding sparsevec_cosine_ops); +create index on v.items using hnsw (sparse_embedding sparsevec_l1_ops); + +-- bit ops +create index on v.items using hnsw (bit_embedding bit_hamming_ops); +create index on v.items using ivfflat (bit_embedding bit_hamming_ops); + +-- Populate some records +insert into v.items( + embedding, + half_embedding, + bit_embedding, + sparse_embedding +) +values + ('[1,2,3]', '[1,2,3]', '101', '{1:4}/3'), + ('[2,3,4]', '[2,3,4]', '010', '{1:7,3:0}/3'); + +-- Test op types +select + * +from + v.items +order by + embedding <-> '[2,3,5]', + embedding <=> '[2,3,5]', + embedding <+> '[2,3,5]', + embedding <#> '[2,3,5]', + half_embedding <-> '[2,3,5]', + half_embedding <=> '[2,3,5]', + half_embedding <+> '[2,3,5]', + 
half_embedding <#> '[2,3,5]', + sparse_embedding <-> '{2:4,3:1}/3', + sparse_embedding <=> '{2:4,3:1}/3', + sparse_embedding <+> '{2:4,3:1}/3', + sparse_embedding <#> '{2:4,3:1}/3', + bit_embedding <~> '011'; + +select + avg(embedding), + avg(half_embedding) +from + v.items; + +-- Cleanup +drop schema v cascade; diff --git a/nix/tests/sql/plpgsql-check.sql b/nix/tests/sql/plpgsql-check.sql new file mode 100644 index 0000000..d54d2c4 --- /dev/null +++ b/nix/tests/sql/plpgsql-check.sql @@ -0,0 +1,26 @@ +create schema v; + +create table v.t1( + a int, + b int +); + +create or replace function v.f1() + returns void + language plpgsql +as $$ +declare r record; +begin + for r in select * from v.t1 + loop + raise notice '%', r.c; -- there is bug - table t1 missing "c" column + end loop; +end; +$$; + +select * from v.f1(); + +-- use plpgsql_check_function to check the function for errors +select * from plpgsql_check_function('v.f1()'); + +drop schema v cascade; diff --git a/nix/tests/sql/plv8.sql b/nix/tests/sql/plv8.sql new file mode 100644 index 0000000..f58360f --- /dev/null +++ b/nix/tests/sql/plv8.sql @@ -0,0 +1,14 @@ +create schema v; + +-- create a function to perform some JavaScript operations +create function v.multiply_numbers(a integer, b integer) + returns integer + language plv8 +as $$ + return a * b; +$$; + +select + v.multiply_numbers(3, 4); + +drop schema v cascade; diff --git a/nix/tests/sql/postgis.sql b/nix/tests/sql/postgis.sql new file mode 100644 index 0000000..766844b --- /dev/null +++ b/nix/tests/sql/postgis.sql @@ -0,0 +1,52 @@ +create schema v; + +-- create a table to store geographic points +create table v.places ( + id serial primary key, + name text, + geom geometry(point, 4326) -- using WGS 84 coordinate system +); + +-- insert some sample geographic points into the places table +insert into v.places (name, geom) +values + ('place_a', st_setsrid(st_makepoint(-73.9857, 40.7484), 4326)), -- latitude and longitude for a location + ('place_b', 
st_setsrid(st_makepoint(-74.0060, 40.7128), 4326)), -- another location + ('place_c', st_setsrid(st_makepoint(-73.9687, 40.7851), 4326)); -- yet another location + +-- calculate the distance between two points (in meters) +select + a.name as place_a, + b.name as place_b, + st_distance(a.geom::geography, b.geom::geography) as distance_meters +from + v.places a, + v.places b +where + a.name = 'place_a' + and b.name = 'place_b'; + +-- find all places within a 5km radius of 'place_a' +select + name, + st_distance( + geom::geography, + ( + select + geom + from + v.places + where + name = 'place_a' + )::geography) as distance_meters +from + v.places +where + st_dwithin( + geom::geography, + (select geom from v.places where name = 'place_a')::geography, + 5000 + ) + and name != 'place_a'; + +drop schema v cascade; diff --git a/nix/tests/sql/rum.sql b/nix/tests/sql/rum.sql new file mode 100644 index 0000000..4686c12 --- /dev/null +++ b/nix/tests/sql/rum.sql @@ -0,0 +1,37 @@ +create schema v; + +create table v.test_rum( + t text, + a tsvector +); + +create trigger tsvectorupdate + before update or insert on v.test_rum + for each row + execute procedure + tsvector_update_trigger( + 'a', + 'pg_catalog.english', + 't' + ); + +insert into v.test_rum(t) +values + ('the situation is most beautiful'), + ('it is a beautiful'), + ('it looks like a beautiful place'); + +create index rumidx on v.test_rum using rum (a rum_tsvector_ops); + +select + t, + a <=> to_tsquery('english', 'beautiful | place') as rank +from + v.test_rum +where + a @@ to_tsquery('english', 'beautiful | place') +order by + a <=> to_tsquery('english', 'beautiful | place'); + + +drop schema v cascade; diff --git a/nix/tests/sql/timescale.sql b/nix/tests/sql/timescale.sql new file mode 100644 index 0000000..baa96f4 --- /dev/null +++ b/nix/tests/sql/timescale.sql @@ -0,0 +1,33 @@ +-- Confirm we're running the apache version +show timescaledb.license; + +-- Create schema v +create schema v; + +-- Create a table in the 
v schema +create table v.sensor_data ( + time timestamptz not null, + sensor_id int not null, + temperature double precision not null, + humidity double precision not null +); + +-- Convert the table to a hypertable +select create_hypertable('v.sensor_data', 'time'); + +-- Insert some data into the hypertable +insert into v.sensor_data (time, sensor_id, temperature, humidity) +values + ('2024-08-09', 1, 22.5, 60.2), + ('2024-08-08', 1, 23.0, 59.1), + ('2024-08-07', 2, 21.7, 63.3); + +-- Select data from the hypertable +select + * +from + v.sensor_data; + +-- Drop schema v and all its entities +drop schema v cascade; + diff --git a/nix/tests/sql/vault.sql b/nix/tests/sql/vault.sql new file mode 100644 index 0000000..bafcb4d --- /dev/null +++ b/nix/tests/sql/vault.sql @@ -0,0 +1,30 @@ +select + 1 +from + vault.create_secret('my_s3kre3t'); + +select + 1 +from + vault.create_secret( + 'another_s3kre3t', + 'unique_name', + 'This is the description' + ); + +insert into vault.secrets (secret) +values + ('s3kre3t_k3y'); + +select + name, + description +from + vault.decrypted_secrets +order by + created_at desc +limit + 3; + + + diff --git a/nix/tests/sql/wal2json.sql b/nix/tests/sql/wal2json.sql new file mode 100644 index 0000000..6ec4a6d --- /dev/null +++ b/nix/tests/sql/wal2json.sql @@ -0,0 +1,32 @@ +create schema v; + +create table v.foo( + id int primary key +); + +select + 1 +from + pg_create_logical_replication_slot('reg_test', 'wal2json', false); + +insert into v.foo(id) values (1); + +select + data +from + pg_logical_slot_get_changes( + 'reg_test', + null, + null, + 'include-pk', '1', + 'include-transaction', 'false', + 'include-timestamp', 'false', + 'include-type-oids', 'false', + 'format-version', '2', + 'actions', 'insert,update,delete' + ) x; + +select + pg_drop_replication_slot('reg_test'); + +drop schema v cascade; diff --git a/nix/tests/util/pgsodium_getkey.sh b/nix/tests/util/pgsodium_getkey.sh new file mode 100755 index 0000000..106e3bf --- /dev/null +++ 
b/nix/tests/util/pgsodium_getkey.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +set -euo pipefail + +KEY_FILE="${1:-/tmp/pgsodium.key}" + +if [[ ! -f "${KEY_FILE}" ]]; then + head -c 32 /dev/urandom | od -A n -t x1 | tr -d ' \n' > "${KEY_FILE}" +fi +cat $KEY_FILE \ No newline at end of file diff --git a/nix/tests/util/pgsodium_getkey_arb.sh b/nix/tests/util/pgsodium_getkey_arb.sh new file mode 100755 index 0000000..446dbba --- /dev/null +++ b/nix/tests/util/pgsodium_getkey_arb.sh @@ -0,0 +1 @@ +echo -n 8359dafbba5c05568799c1c24eb6c2fbff497654bc6aa5e9a791c666768875a1 \ No newline at end of file diff --git a/nix/tools/README.md b/nix/tools/README.md new file mode 100644 index 0000000..2606a57 --- /dev/null +++ b/nix/tools/README.md @@ -0,0 +1,2 @@ +This directory just contains tools, but you can't run them directly. For the +sake of robustness, you should use `nix run` on this repository to do so. diff --git a/nix/tools/migrate-tool.sh.in b/nix/tools/migrate-tool.sh.in new file mode 100644 index 0000000..277ee81 --- /dev/null +++ b/nix/tools/migrate-tool.sh.in @@ -0,0 +1,123 @@ +#!/usr/bin/env bash + +[ ! -z "$DEBUG" ] && set -x + +# first argument is the old version; a path 15 or 16 +if [[ $1 == /nix/store* ]]; then + if [ ! -L "$1/receipt.json" ] || [ ! -e "$1/receipt.json" ]; then + echo "ERROR: $1 does not look like a valid Postgres install" + exit 1 + fi + OLDVER="$1" +elif [ "$1" == "15" ]; then + PSQL15=@PSQL15_BINDIR@ + OLDVER="$PSQL15" +elif [ "$1" == "16" ]; then + PSQL16=@PSQL16_BINDIR@ + OLDVER="$PSQL16" +else + echo "Please provide a valid Postgres version (15 or 16), or a /nix/store path" + exit 1 +fi + +# second argument is the new version; 15 or 16 +if [[ $2 == /nix/store* ]]; then + if [ ! -L "$2/receipt.json" ] || [ ! 
-e "$2/receipt.json" ]; then + echo "ERROR: $2 does not look like a valid Postgres install" + exit 1 + fi + NEWVER="$2" +elif [ "$2" == "15" ]; then + PSQL15=@PSQL15_BINDIR@ + NEWVER="$PSQL15" +elif [ "$2" == "16" ]; then + PSQL16=@PSQL16_BINDIR@ + NEWVER="$PSQL16" + echo "NEWVER IS $NEWVER" +else + echo "Please provide a valid Postgres version (15 or 16), or a /nix/store path" + exit 1 +fi + +# third argument is the upgrade method: either pg_dumpall or pg_upgrade +if [ "$3" != "pg_dumpall" ] && [ "$3" != "pg_upgrade" ]; then + echo "Please provide a valid upgrade method (pg_dumpall or pg_upgrade)" + exit 1 +fi +UPGRADE_METHOD="$3" + +echo "Old server build: PSQL $1" +echo "New server build: PSQL $2" +echo "Upgrade method: $UPGRADE_METHOD" + +PORTNO="${2:-@PGSQL_DEFAULT_PORT@}" +DATDIR=$(mktemp -d) +NEWDAT=$(mktemp -d) +mkdir -p "$DATDIR" "$NEWDAT" + +echo "NOTE: using temporary directory $DATDIR for PSQL $1 data, which will not be removed" +echo "NOTE: you are free to re-use this data directory at will" +echo + +$OLDVER/bin/initdb -D "$DATDIR" --locale=C --username=tealbase_admin +$NEWVER/bin/initdb -D "$NEWDAT" --locale=C --username=tealbase_admin + +# NOTE (aseipp): we need to patch postgresql.conf to have the right pgsodium_getkey script +PSQL_CONF_FILE=@PSQL_CONF_FILE@ +PGSODIUM_GETKEY_SCRIPT=@PGSODIUM_GETKEY@ +echo "NOTE: patching postgresql.conf files" +for x in "$DATDIR" "$NEWDAT"; do + sed \ + "s#@PGSODIUM_GETKEY_SCRIPT@#$PGSODIUM_GETKEY_SCRIPT#g" \ + $PSQL_CONF_FILE > "$x/postgresql.conf" +done + +echo "NOTE: Starting first server (v${1}) to load data into the system" +$OLDVER/bin/pg_ctl start -D "$DATDIR" + +PRIMING_SCRIPT=@PRIMING_SCRIPT@ +MIGRATION_DATA=@MIGRATION_DATA@ + +$OLDVER/bin/psql -h localhost -d postgres -Xf "$PRIMING_SCRIPT" +$OLDVER/bin/psql -h localhost -d postgres -Xf "$MIGRATION_DATA" + +if [ "$UPGRADE_METHOD" == "pg_upgrade" ]; then + echo "NOTE: Stopping old server (v${1}) to prepare for migration" + $OLDVER/bin/pg_ctl stop -D "$DATDIR" 
+ + echo "NOTE: Migrating old data $DATDIR to $NEWDAT using pg_upgrade" + + export PGDATAOLD="$DATDIR" + export PGDATANEW="$NEWDAT" + export PGBINOLD="$OLDVER/bin" + export PGBINNEW="$NEWVER/bin" + + if ! $NEWVER/bin/pg_upgrade --check; then + echo "ERROR: pg_upgrade check failed" + exit 1 + fi + + echo "NOTE: pg_upgrade check passed, proceeding with migration" + $NEWVER/bin/pg_upgrade + rm -f delete_old_cluster.sh # we don't need this + exit 0 +fi + +if [ "$UPGRADE_METHOD" == "pg_dumpall" ]; then + SQLDAT="$DATDIR/dump.sql" + echo "NOTE: Exporting data via pg_dumpall ($SQLDAT)" + $NEWVER/bin/pg_dumpall -h localhost > "$SQLDAT" + + echo "NOTE: Stopping old server (v${1}) to prepare for migration" + $OLDVER/bin/pg_ctl stop -D "$DATDIR" + + echo "NOTE: Starting second server (v${2}) to load data into the system" + $NEWVER/bin/pg_ctl start -D "$NEWDAT" + + echo "NOTE: Loading data into new server (v${2}) via 'cat | psql'" + cat "$SQLDAT" | $NEWVER/bin/psql -h localhost -d postgres + + printf "\n\n\n\n" + echo "NOTE: Done, check logs. 
Stopping the server; new database is located at $NEWDAT" + $NEWVER/bin/pg_ctl stop -D "$NEWDAT" +fi diff --git a/nix/tools/postgresql_schema.sql b/nix/tools/postgresql_schema.sql new file mode 100644 index 0000000..76518a6 --- /dev/null +++ b/nix/tools/postgresql_schema.sql @@ -0,0 +1,11 @@ +ALTER DATABASE postgres SET "app.settings.jwt_secret" TO 'my_jwt_secret_which_is_not_so_secret'; +ALTER DATABASE postgres SET "app.settings.jwt_exp" TO 3600; +ALTER USER tealbase_admin WITH PASSWORD 'postgres'; +ALTER USER postgres WITH PASSWORD 'postgres'; +ALTER USER authenticator WITH PASSWORD 'postgres'; +ALTER USER pgbouncer WITH PASSWORD 'postgres'; +ALTER USER tealbase_auth_admin WITH PASSWORD 'postgres'; +ALTER USER tealbase_storage_admin WITH PASSWORD 'postgres'; +ALTER USER tealbase_replication_admin WITH PASSWORD 'postgres'; +ALTER ROLE tealbase_read_only_user WITH PASSWORD 'postgres'; +ALTER ROLE tealbase_admin SET search_path TO "$user",public,auth,extensions; diff --git a/nix/tools/run-client.sh.in b/nix/tools/run-client.sh.in new file mode 100644 index 0000000..f50e605 --- /dev/null +++ b/nix/tools/run-client.sh.in @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +# shellcheck shell=bash + +[ ! 
-z "$DEBUG" ] && set -x + +# first argument should be '15' or '16' for the version +if [ "$1" == "15" ]; then + echo "Starting client for PSQL 15" + PSQL15=@PSQL15_BINDIR@ + BINDIR="$PSQL15" +elif [ "$1" == "16" ]; then + echo "Starting client for PSQL 16" + PSQL16=@PSQL16_BINDIR@ + BINDIR="$PSQL16" +elif [ "$1" == "orioledb-16" ]; then + echo "Starting client for PSQL ORIOLEDB 16" + PSQLORIOLEDB16=@PSQLORIOLEDB16_BINDIR@ + BINDIR="$PSQLORIOLEDB16" +else + echo "Please provide a valid Postgres version (15, 16, or orioledb-16)" + exit 1 +fi +#vars for migration.sh +export PATH=$BINDIR/bin:$PATH +export POSTGRES_DB=postgres +export POSTGRES_HOST=localhost +export POSTGRES_PORT=@PGSQL_DEFAULT_PORT@ +PORTNO="${2:-@PGSQL_DEFAULT_PORT@}" +PGSQL_SUPERUSER=@PGSQL_SUPERUSER@ +MIGRATIONS_DIR=@MIGRATIONS_DIR@ +POSTGRESQL_SCHEMA_SQL=@POSTGRESQL_SCHEMA_SQL@ +PGBOUNCER_AUTH_SCHEMA_SQL=@PGBOUNCER_AUTH_SCHEMA_SQL@ +STAT_EXTENSION_SQL=@STAT_EXTENSION_SQL@ +psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U "$PGSQL_SUPERUSER" -p "$PORTNO" -h localhost -d postgres <<-EOSQL + create role postgres superuser login password '$PGPASSWORD'; + alter database postgres owner to postgres; +EOSQL +for sql in "$MIGRATIONS_DIR"/init-scripts/*.sql; do + echo "$0: running $sql" + psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -p "$PORTNO" -h localhost -f "$sql" postgres +done +psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -p "$PORTNO" -h localhost -c "ALTER USER tealbase_admin WITH PASSWORD '$PGPASSWORD'" +psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -p "$PORTNO" -h localhost -d postgres -f "$PGBOUNCER_AUTH_SCHEMA_SQL" +psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -p "$PORTNO" -h localhost -d postgres -f "$STAT_EXTENSION_SQL" +# run migrations as super user - postgres user demoted in post-setup +for sql in "$MIGRATIONS_DIR"/migrations/*.sql; do + echo "$0: running $sql" + psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U 
tealbase_admin -p "$PORTNO" -h localhost -f "$sql" postgres +done +psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U tealbase_admin -p "$PORTNO" -h localhost -f "$POSTGRESQL_SCHEMA_SQL" postgres +# TODO Do we need to reset stats when running migrations locally? +#psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U tealbase_admin -p "$PORTNO" -h localhost -c 'SELECT extensions.pg_stat_statements_reset(); SELECT pg_stat_reset();' postgres || true + +exec psql -U postgres -p "$PORTNO" -h localhost postgres diff --git a/nix/tools/run-replica.sh.in b/nix/tools/run-replica.sh.in new file mode 100644 index 0000000..e2096b1 --- /dev/null +++ b/nix/tools/run-replica.sh.in @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# shellcheck shell=bash + +[ ! -z "$DEBUG" ] && set -x + +# first argument should be '15' or '16' for the version +if [ "$1" == "15" ]; then + echo "Starting server for PSQL 15" + PSQL15=@PSQL15_BINDIR@ + BINDIR="$PSQL15" +elif [ "$1" == "16" ]; then + echo "Starting server for PSQL 16" + PSQL16=@PSQL16_BINDIR@ + BINDIR="$PSQL16" +elif [ "$1" == "orioledb-16" ]; then + echo "Starting server for PSQL ORIOLEDB 16" + PSQLORIOLEDB16=@PSQLORIOLEDB16_BINDIR@ + BINDIR="$PSQLORIOLEDB16" +else + echo "Please provide a valid Postgres version (15, 16 or orioledb-16)" + exit 1 +fi + +export PATH=$BINDIR/bin:$PATH + +PGSQL_SUPERUSER=@PGSQL_SUPERUSER@ +MASTER_PORTNO="$2" +REPLICA_PORTNO="$3" +REPLICA_SLOT="replica_$RANDOM" +DATDIR=$(mktemp -d) +mkdir -p "$DATDIR" + +echo "NOTE: runing pg_basebackup for server on port $MASTER_PORTNO" +echo "NOTE: using replica slot $REPLICA_SLOT" + +pg_basebackup -p "$MASTER_PORTNO" -h localhost -U "${PGSQL_SUPERUSER}" -X stream -C -S "$REPLICA_SLOT" -v -R -D "$DATDIR" + +echo "NOTE: using port $REPLICA_PORTNO for replica" +echo "NOTE: using temporary directory $DATDIR for data, which will not be removed" +echo "NOTE: you are free to re-use this data directory at will" +echo + +exec postgres -p "$REPLICA_PORTNO" -D "$DATDIR" -k /tmp diff --git 
a/nix/tools/run-server.sh.in b/nix/tools/run-server.sh.in new file mode 100644 index 0000000..977a437 --- /dev/null +++ b/nix/tools/run-server.sh.in @@ -0,0 +1,65 @@ +#!/usr/bin/env bash +# shellcheck shell=bash +[ ! -z "$DEBUG" ] && set -x +# first argument should be '15' or '16' for the version +if [ "$1" == "15" ]; then + echo "Starting server for PSQL 15" + PSQL15=@PSQL15_BINDIR@ + BINDIR="$PSQL15" +elif [ "$1" == "16" ]; then + echo "Starting server for PSQL 16" + PSQL16=@PSQL16_BINDIR@ + BINDIR="$PSQL16" +elif [ "$1" == "orioledb-16" ]; then + echo "Starting server for PSQL ORIOLEDB 16" + PSQLORIOLEDB16=@PSQLORIOLEDB16_BINDIR@ + BINDIR="$PSQLORIOLEDB16" +else + echo "Please provide a valid Postgres version (15, 16 or orioledb-16)" + exit 1 +fi +export PATH=$BINDIR/bin:$PATH +PGSQL_SUPERUSER=@PGSQL_SUPERUSER@ +PSQL_CONF_FILE=@PSQL_CONF_FILE@ +PGSODIUM_GETKEY_SCRIPT=@PGSODIUM_GETKEY@ +PORTNO="${2:-@PGSQL_DEFAULT_PORT@}" +SUPAUTILS_CONFIG_FILE=@SUPAUTILS_CONF_FILE@ +LOGGING_CONFIG_FILE=@LOGGING_CONF_FILE@ +READREPL_CONFIG_FILE=@READREPL_CONF_FILE@ +PG_HBA_FILE=@PG_HBA@ +PG_IDENT_FILE=@PG_IDENT@ +EXTENSION_CUSTOM_SCRIPTS=@EXTENSION_CUSTOM_SCRIPTS_DIR@ +GROONGA=@GROONGA_DIR@ +DATDIR=$(mktemp -d) +LOCALE_ARCHIVE=@LOCALES@ +export LOCALE_ARCHIVE +export LANG=en_US.UTF-8 +export LANGUAGE=en_US.UTF-8 +export LC_ALL=en_US.UTF-8 +export LANG=en_US.UTF-8 +export LC_CTYPE=en_US.UTF-8 +mkdir -p "$DATDIR" +echo "NOTE: using port $PORTNO for server" +echo "NOTE: using temporary directory $DATDIR for data, which will not be removed" +echo "NOTE: you are free to re-use this data directory at will" +initdb -U "$PGSQL_SUPERUSER" -D "$DATDIR" +echo "NOTE: patching postgresql.conf files" +cp "$PG_HBA_FILE" "$DATDIR/pg_hba.conf" +cp "$PG_IDENT_FILE" "$DATDIR/pg_ident.conf" +cp "$READREPL_CONFIG_FILE" "$DATDIR/read-replica.conf" +cp -r "$EXTENSION_CUSTOM_SCRIPTS" "$DATDIR" +sed "s|supautils.privileged_extensions_custom_scripts_path = 
'/etc/postgresql-custom/extension-custom-scripts'|supautils.privileged_extensions_custom_scripts_path = '$DATDIR/extension-custom-scripts'|" "$SUPAUTILS_CONFIG_FILE" > "$DATDIR/supautils.conf" +sed -e "1i\\ +include = '$DATDIR/supautils.conf'" \ +-e "\$a\\ +pgsodium.getkey_script = '$PGSODIUM_GETKEY_SCRIPT'" \ +-e "s|data_directory = '/var/lib/postgresql/data'|data_directory = '$DATDIR'|" \ +-e "s|hba_file = '/etc/postgresql/pg_hba.conf'|hba_file = '$DATDIR/pg_hba.conf'|" \ +-e "s|ident_file = '/etc/postgresql/pg_ident.conf'|ident_file = '$DATDIR/pg_ident.conf'|" \ +-e "s|include = '/etc/postgresql/logging.conf'|#&|" \ +-e "s|include = '/etc/postgresql-custom/read-replica.conf'|include = '$DATDIR/read-replica.conf'|" \ +-e "\$a\\ +session_preload_libraries = 'supautils'" \ +"$PSQL_CONF_FILE" > "$DATDIR/postgresql.conf" +export GRN_PLUGINS_DIR=$GROONGA/lib/groonga/plugins +postgres --config-file="$DATDIR/postgresql.conf" -p "$PORTNO" -D "$DATDIR" -k /tmp diff --git a/nix/tools/sync-exts-versions.sh.in b/nix/tools/sync-exts-versions.sh.in new file mode 100644 index 0000000..1b120e9 --- /dev/null +++ b/nix/tools/sync-exts-versions.sh.in @@ -0,0 +1,282 @@ +#!/usr/bin/env bash +# shellcheck shell=bash + +[ ! -z "$DEBUG" ] && set -x + +#pass in env vars supplied by nix +yq=@YQ@ +jq=@JQ@ +editor=@NIX_EDITOR@ +ansible_vars=$($yq '.' 
$PWD/ansible/vars.yml) +prefetchurl=@NIXPREFETCHURL@ +_nix=@NIX@ +fetch_source_url() { + local source_url=${1//\"/} # Remove double quotes + source_url=${source_url//\'/} # Remove single quotes + + # Check if the source URL is provided + if [ -z "$source_url" ]; then + echo "Usage: fetch_nix_url " + return 1 + fi + + echo "$source_url" + + # Run nix-prefetch-url command + local initial_hash=$($prefetchurl --type sha256 "$source_url" --unpack | cut -d ' ' -f 2) + #once we can bump up nix version, we can use nix hash convert --hash-algo sha256 + local final_hash=$($_nix hash to-sri --type sha256 $initial_hash) + echo "$final_hash" +} + +sync_version() { + + local package_name=$1 + local version="\"$2\"" + local hash="\"$3\"" + + + # Update the version and hash in the Nix expression + $editor $PWD/nix/ext/$package_name.nix version --inplace -v "$version" + $editor $PWD/nix/ext/$package_name.nix src.hash --inplace -v $hash +} + +run_sync() { + local varname=$1 + local package_name=$2 + + version=$(echo $ansible_vars | $jq -r '.'$varname'') + echo "$key: $version" + url=$($_nix eval .#psql_15/exts/$package_name.src.url) + hash=$(fetch_source_url $url | tail -n 1) + $(sync_version $package_name $version $hash) + echo "synced $package_name to version $version with hash $hash" + + +} + +#for use where nix uses fetchurl +# instead of fetchFromGithub +fetchurl_source_url() { + local source_url=${1//\"/} # Remove double quotes + source_url=${source_url//\'/} # Remove single quotes + + # Check if the source URL is provided + if [ -z "$source_url" ]; then + echo "Usage: fetch_nix_url " + return 1 + fi + + echo "$source_url" + + # Run nix-prefetch-url command + local initial_hash=$($prefetchurl --type sha256 "$source_url" | cut -d ' ' -f 2) + #once we can bump up nix version, we can use nix hash convert --hash-algo sha256 + local final_hash=$($_nix hash to-sri --type sha256 $initial_hash) + echo "$final_hash" +} + +sync_version_fetchurl() { + + local package_name=$1 + local 
version="\"$2\"" + local hash="\"$3\"" + + + # Update the version and hash in the Nix expression + $editor $PWD/nix/ext/$package_name.nix version --inplace -v "$version" + $editor $PWD/nix/ext/$package_name.nix src.sha256 --inplace -v $hash +} + + +run_sync_fetchurl() { + local varname=$1 + local package_name=$2 + + version=$(echo $ansible_vars | $jq -r '.'$varname'') + echo "$key: $version" + url=$($_nix eval .#psql_15/exts/$package_name.src.url) + hash=$(fetchurl_source_url $url | tail -n 1) + $(sync_version_fetchurl $package_name $version $hash) + echo "synced $package_name to version $version with hash $hash" + + +} + +#for use on derivations that use cargoHash +update_cargo_vendor_hash() { + local package_name=$1 + $editor $PWD/nix/ext/$package_name.nix cargoHash --inplace -v "" + output=$($_nix build .#psql_15/exts/$package_name 2>&1) + + # Check if the command exited with an error + if [ $? -ne 0 ]; then + # Extract the hash value after "got: " + hash_value_scraped=$(echo "$output" | grep "got:" | awk '{for (i=1; i<=NF; i++) if ($i ~ /^sha/) print $i}') + hash_value="\"$hash_value_scraped\"" + # Continue using the captured hash value + $editor $PWD/nix/ext/$package_name.nix cargoHash --inplace -v $hash_value + echo "Updated cargoHash for $package_name to $hash_value" + else + echo "$package_name builds successfully, moving on..." 
+ fi +} + +#iterate values in ansible vars, case statement +# to match ansible var to package name +keys=$(echo "$ansible_vars" | $jq -r 'keys[]') + +for key in $keys; do + case $key in + "pg_hashids_release") + varname="pg_hashids_release" + package_name="pg_hashids" + run_sync $varname $package_name + ;; + "hypopg_release") + varname="hypopg_release" + package_name="hypopg" + run_sync $varname $package_name + ;; + "pg_graphql_release") + varname="pg_graphql_release" + package_name="pg_graphql" + run_sync $varname $package_name + update_cargo_vendor_hash $package_name + ;; + "pg_cron_release") + varname="pg_cron_release" + package_name="pg_cron" + run_sync $varname $package_name + ;; + "pgsql_http_release") + varname="pgsql_http_release" + package_name="pgsql-http" + run_sync $varname $package_name + ;; + "pg_jsonschema_release") + varname="pg_jsonschema_release" + package_name="pg_jsonschema" + run_sync $varname $package_name + update_cargo_vendor_hash $package_name + ;; + "pg_net_release") + varname="pg_net_release" + package_name="pg_net" + run_sync $varname $package_name + ;; + "pg_plan_filter_release") + varname="pg_plan_filter_release" + package_name="pg_plan_filter" + run_sync $varname $package_name + ;; + "pg_safeupdate_release") + varname="pg_safeupdate_release" + package_name="pg-safeupdate" + run_sync $varname $package_name + ;; + "pgsodium_release") + varname="pgsodium_release" + package_name="pgsodium" + run_sync $varname $package_name + ;; + "pg_repack_release") + varname="pg_repack_release" + package_name="pg_repack" + run_sync $varname $package_name + ;; + "pgrouting_release") + varname="pgrouting_release" + package_name="pgrouting" + run_sync $varname $package_name + ;; + "pgtap_release") + varname="pgtap_release" + package_name="pgtap" + run_sync $varname $package_name + ;; + "pg_stat_monitor_release") + varname="pg_stat_monitor_release" + package_name="pg_stat_monitor" + run_sync $varname $package_name + ;; + "pg_tle_release") + 
varname="pg_tle_release" + package_name="pg_tle" + run_sync $varname $package_name + ;; + "pgaudit_release") + varname="pgaudit_release" + package_name="pgaudit" + run_sync $varname $package_name + ;; + "plpgsql_check_release") + varname="plpgsql_check_release" + package_name="plpgsql-check" + run_sync $varname $package_name + ;; + "pgvector_release") + varname="pgvector_release" + package_name="pgvector" + run_sync $varname $package_name + ;; + "pgjwt_release") + varname="pgjwt_release" + package_name="pgjwt" + run_sync $varname $package_name + ;; + "plv8_release") + varname="plv8_release" + package_name="plv8" + run_sync $varname $package_name + ;; + "postgis_release") + varname="postgis_release" + package_name="postgis" + run_sync_fetchurl $varname $package_name + ;; + "pgroonga_release") + varname="pgroonga_release" + package_name="pgroonga" + run_sync_fetchurl $varname $package_name + ;; + "rum_release") + varname="rum_release" + package_name="rum" + run_sync $varname $package_name + ;; + "timescaledb_release") + varname="timescaledb_release" + package_name="timescaledb" + run_sync $varname $package_name + ;; + "supautils_release") + varname="supautils_release" + package_name="supautils" + run_sync $varname $package_name + ;; + "vault_release") + varname="vault_release" + package_name="vault" + run_sync $varname $package_name + ;; + "wal2json_release") + varname="wal2json_release" + package_name="wal2json" + run_sync $varname $package_name + ;; + *) + ;; + esac +done + +# url=$($_nix eval .#psql_16/exts/pgvector.src.url) + +# fetch_nix_url "$url" + +#res=$editor /home/sam/postgres/nix/ext/pgvector.nix src +#echo $res +# url=$($_nix eval .#psql_16/exts/pgvector.src.url) +# #echo $url +# hash=$(fetch_source_url $url | tail -n 1) +# echo "$hash" diff --git a/postgresql.gpg.key b/postgresql.gpg.key new file mode 100644 index 0000000..443bf58 --- /dev/null +++ b/postgresql.gpg.key @@ -0,0 +1,64 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + 
+mQINBE6XR8IBEACVdDKT2HEH1IyHzXkb4nIWAY7echjRxo7MTcj4vbXAyBKOfjja +UrBEJWHN6fjKJXOYWXHLIYg0hOGeW9qcSiaa1/rYIbOzjfGfhE4x0Y+NJHS1db0V +G6GUj3qXaeyqIJGS2z7m0Thy4Lgr/LpZlZ78Nf1fliSzBlMo1sV7PpP/7zUO+aA4 +bKa8Rio3weMXQOZgclzgeSdqtwKnyKTQdXY5MkH1QXyFIk1nTfWwyqpJjHlgtwMi +c2cxjqG5nnV9rIYlTTjYG6RBglq0SmzF/raBnF4Lwjxq4qRqvRllBXdFu5+2pMfC +IZ10HPRdqDCTN60DUix+BTzBUT30NzaLhZbOMT5RvQtvTVgWpeIn20i2NrPWNCUh +hj490dKDLpK/v+A5/i8zPvN4c6MkDHi1FZfaoz3863dylUBR3Ip26oM0hHXf4/2U +A/oA4pCl2W0hc4aNtozjKHkVjRx5Q8/hVYu+39csFWxo6YSB/KgIEw+0W8DiTII3 +RQj/OlD68ZDmGLyQPiJvaEtY9fDrcSpI0Esm0i4sjkNbuuh0Cvwwwqo5EF1zfkVj +Tqz2REYQGMJGc5LUbIpk5sMHo1HWV038TWxlDRwtOdzw08zQA6BeWe9FOokRPeR2 +AqhyaJJwOZJodKZ76S+LDwFkTLzEKnYPCzkoRwLrEdNt1M7wQBThnC5z6wARAQAB +tBxQb3N0Z3JlU1FMIERlYmlhbiBSZXBvc2l0b3J5iQJOBBMBCAA4AhsDBQsJCAcD +BRUKCQgLBRYCAwEAAh4BAheAFiEEuXsK/KoaR/BE8kSgf8x9RqzMTPgFAlhtCD8A +CgkQf8x9RqzMTPgECxAAk8uL+dwveTv6eH21tIHcltt8U3Ofajdo+D/ayO53LiYO +xi27kdHD0zvFMUWXLGxQtWyeqqDRvDagfWglHucIcaLxoxNwL8+e+9hVFIEskQAY +kVToBCKMXTQDLarz8/J030Pmcv3ihbwB+jhnykMuyyNmht4kq0CNgnlcMCdVz0d3 +z/09puryIHJrD+A8y3TD4RM74snQuwc9u5bsckvRtRJKbP3GX5JaFZAqUyZNRJRJ +Tn2OQRBhCpxhlZ2afkAPFIq2aVnEt/Ie6tmeRCzsW3lOxEH2K7MQSfSu/kRz7ELf +Cz3NJHj7rMzC+76Rhsas60t9CjmvMuGONEpctijDWONLCuch3Pdj6XpC+MVxpgBy +2VUdkunb48YhXNW0jgFGM/BFRj+dMQOUbY8PjJjsmVV0joDruWATQG/M4C7O8iU0 +B7o6yVv4m8LDEN9CiR6r7H17m4xZseT3f+0QpMe7iQjz6XxTUFRQxXqzmNnloA1T +7VjwPqIIzkj/u0V8nICG/ktLzp1OsCFatWXh7LbU+hwYl6gsFH/mFDqVxJ3+DKQi +vyf1NatzEwl62foVjGUSpvh3ymtmtUQ4JUkNDsXiRBWczaiGSuzD9Qi0ONdkAX3b +ewqmN4TfE+XIpCPxxHXwGq9Rv1IFjOdCX0iG436GHyTLC1tTUIKF5xV4Y0+cXIOJ +Aj0EEwEIACcCGwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlLpFRkFCQ6EJy0A +CgkQf8x9RqzMTPjOZA//Zp0e25pcvle7cLc0YuFr9pBv2JIkLzPm83nkcwKmxaWa +yUIG4Sv6pH6hm8+S/CHQij/yFCX+o3ngMw2J9HBUvafZ4bnbI0RGJ70GsAwraQ0V +lkIfg7GUw3TzvoGYO42rZTru9S0K/6nFP6D1HUu+U+AsJONLeb6oypQgInfXQExP +ZyliUnHdipei4WR1YFW6sjSkZT/5C3J1wkAvPl5lvOVthI9Zs6bZlJLZwusKxU0U +M4Btgu1Sf3nnJcHmzisixwS9PMHE+AgPWIGSec/N27a0KmTTvImV6K6nEjXJey0K 
+2+EYJuIBsYUNorOGBwDFIhfRk9qGlpgt0KRyguV+AP5qvgry95IrYtrOuE7307Si +dEbSnvO5ezNemE7gT9Z1tM7IMPfmoKph4BfpNoH7aXiQh1Wo+ChdP92hZUtQrY2N +m13cmkxYjQ4ZgMWfYMC+DA/GooSgZM5i6hYqyyfAuUD9kwRN6BqTbuAUAp+hCWYe +N4D88sLYpFh3paDYNKJ+Gf7Yyi6gThcV956RUFDH3ys5Dk0vDL9NiWwdebWfRFbz +oRM3dyGP889aOyLzS3mh6nHzZrNGhW73kslSQek8tjKrB+56hXOnb4HaElTZGDvD +5wmrrhN94kbyGtz3cydIohvNO9d90+29h0eGEDYti7j7maHkBKUAwlcPvMg5m3aJ +Aj0EEwEIACcCGwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlEqbZUFCQg2wEEA +CgkQf8x9RqzMTPhFMQ//WxAfKMdpSIA9oIC/yPD/dJpY/+DyouOljpE6MucMy/Ar +BECjFTBwi/j9NYM4ynAk34IkhuNexc1i9/05f5RM6+riLCLgAOsADDbHD4miZzoS +xiVr6GQ3YXMbOGld9kV9Sy6mGNjcUov7iFcf5Hy5w3AjPfKuR9zXswyfzIU1YXOb +iiZT38l55pp/BSgvGVQsvbNjsff5CbEKXS7q3xW+WzN0QWF6YsfNVhFjRGj8hKtH +vwKcA02wwjLeLXVTm6915ZUKhZXUFc0vM4Pj4EgNswH8Ojw9AJaKWJIZmLyW+aP+ +wpu6YwVCicxBY59CzBO2pPJDfKFQzUtrErk9irXeuCCLesDyirxJhv8o0JAvmnMA +KOLhNFUrSQ2m+3EnF7zhfz70gHW+EG8X8mL/EN3/dUM09j6TVrjtw43RLxBzwMDe +ariFF9yC+5bLtnGgxjsB9Ik6GV5v34/NEEGf1qBiAzFmDVFRZlrNDkq6gmpvGnA5 +hUWNr+y0i01LjGyaLSWHYjgw2UEQOqcUtTFK9MNzbZze4mVaHMEz9/aMfX25R6qb +iNqCChveIm8mYr5Ds2zdZx+G5bAKdzX7nx2IUAxFQJEE94VLSp3npAaTWv3sHr7d +R8tSyUJ9poDwgw4W9BIcnAM7zvFYbLF5FNggg/26njHCCN70sHt8zGxKQINMc6SJ +Aj0EEwEIACcCGwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlB5KywFCQPDFt8A +CgkQf8x9RqzMTPhuCQ//QAjRSAOCQ02qmUAikT+mTB6baOAakkYq6uHbEO7qPZkv +4E/M+HPIJ4wdnBNeSQjfvdNcZBA/x0hr5EMcBneKKPDj4hJ0panOIRQmNSTThQw9 +OU351gm3YQctAMPRUu1fTJAL/AuZUQf9ESmhyVtWNlH/56HBfYjE4iVeaRkkNLJy +X3vkWdJSMwC/LO3Lw/0M3R8itDsm74F8w4xOdSQ52nSRFRh7PunFtREl+QzQ3EA/ +WB4AIj3VohIGkWDfPFCzV3cyZQiEnjAe9gG5pHsXHUWQsDFZ12t784JgkGyO5wT2 +6pzTiuApWM3k/9V+o3HJSgH5hn7wuTi3TelEFwP1fNzI5iUUtZdtxbFOfWMnZAyp +EhaLmXNkg4zDkH44r0ss9fR0DAgUav1a25UnbOn4PgIEQy2fgHKHwRpCy20d6oCS +lmgyWsR40EPPYvtGq49A2aK6ibXmdvvFT+Ts8Z+q2SkFpoYFX20mR2nsF0fbt1lf +H65P64dukxeRGteWIeNakDD40bAAOH8+OaoTGVBJ2ACJfLVNM53PEoftavAwUYMr +R910qvwYfd/46rh46g1Frr9SFMKYE9uvIJIgDsQB3QBp71houU4H55M5GD8XURYs ++bfiQpJG1p7eB8e5jZx1SagNWc4XwL2FzQ9svrkbg1Y+359buUiP7T6QXX2zY+8= +=XSRU +-----END 
PGP PUBLIC KEY BLOCK----- diff --git a/scripts/90-cleanup.sh b/scripts/90-cleanup.sh index 2e6a3f1..f2e1968 100644 --- a/scripts/90-cleanup.sh +++ b/scripts/90-cleanup.sh @@ -37,7 +37,13 @@ elif [ -n "$(command -v apt-get)" ]; then libcgal-dev \ libgcc-9-dev \ libgcc-8-dev \ - linux-headers-5.11.0-1021-aws + ansible + + add-apt-repository --yes --remove ppa:ansible/ansible + + source /etc/os-release + apt-get -y remove --purge linux-headers-5.11.0-1021-aws + apt-get -y update apt-get -y upgrade apt-get -y autoremove diff --git a/scripts/91-log_cleanup.sh b/scripts/91-log_cleanup.sh index 39e5ff3..24073af 100644 --- a/scripts/91-log_cleanup.sh +++ b/scripts/91-log_cleanup.sh @@ -4,6 +4,9 @@ echo "Clearing all log files" rm -rf /var/log/* +# creating system stats directory +mkdir /var/log/sysstat + # https://github.com/fail2ban/fail2ban/issues/1593 touch /var/log/auth.log @@ -15,7 +18,7 @@ chown postgres:postgres /var/log/postgresql mkdir /var/log/wal-g cd /var/log/wal-g -touch backup-push.log backup-fetch.log wal-push.log wal-fetch.log +touch backup-push.log backup-fetch.log wal-push.log wal-fetch.log pitr.log chown -R postgres:postgres /var/log/wal-g chmod -R 0300 /var/log/wal-g diff --git a/scripts/nix-provision.sh b/scripts/nix-provision.sh new file mode 100644 index 0000000..5ed678d --- /dev/null +++ b/scripts/nix-provision.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash +# shellcheck shell=bash + +set -o errexit +set -o pipefail +set -o xtrace + +function install_packages { + # Setup Ansible on host VM + sudo apt-get update && sudo apt-get install software-properties-common -y + sudo add-apt-repository --yes --update ppa:ansible/ansible && sudo apt-get install ansible -y + ansible-galaxy collection install community.general + +} + + + +function install_nix() { + sudo su -c "curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install --no-confirm \ + --extra-conf \"substituters = https://cache.nixos.org 
https://nix-postgres-artifacts.s3.amazonaws.com\" \ + --extra-conf \"trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=\" " -s /bin/bash root + . /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh + +} + + +function execute_stage2_playbook { + sudo tee /etc/ansible/ansible.cfg < str: + inspect_results = docker_client.api.inspect_container(container.name) + return inspect_results["State"]["Health"]["Status"] + + attempts = 0 + + # containers might appear healthy but crash during bootstrap + sleep(3) + + while True: + health = get_health(container) + if health == "healthy": + break + if attempts > 60 or health == "exited": + # print container logs for debugging + print(container.logs().decode("utf-8")) + + # write logs to file to be displayed in GHA output + with open("testinfra-aio-container-logs.log", "w") as f: + f.write(container.logs().decode("utf-8")) + + raise TimeoutError("Container failed to become healthy.") + attempts += 1 + sleep(1) + + # return a testinfra connection to the container + yield testinfra.get_host("docker://" + cast(str, container.name)) + + # at the end of the test suite, destroy the container + container.remove(v=True, force=True) + + +@pytest.mark.parametrize("service_name", [ + 'adminapi', + 'lsn-checkpoint-push', + 'pg_egress_collect', + 'postgresql', + 'logrotate', + 'supa-shutdown', + 'services:kong', + 'services:postgrest', + 'services:gotrue', +]) +def test_service_is_running(host, service_name): + assert host.supervisor(service_name).is_running + + +def test_postgrest_responds_to_requests(): + res = requests.get( + "http://localhost:8000/rest/v1/", + headers={ + "apikey": all_in_one_envs["ANON_KEY"], + "authorization": f"Bearer {all_in_one_envs['ANON_KEY']}", + }, + ) + assert res.ok + + +def test_postgrest_can_connect_to_db(): + res = requests.get( + "http://localhost:8000/rest/v1/buckets", + headers={ + "apikey": 
all_in_one_envs["SERVICE_ROLE_KEY"], + "authorization": f"Bearer {all_in_one_envs['SERVICE_ROLE_KEY']}", + "accept-profile": "storage", + }, + ) + assert res.ok diff --git a/testinfra/test_ami.py b/testinfra/test_ami.py new file mode 100644 index 0000000..314daa9 --- /dev/null +++ b/testinfra/test_ami.py @@ -0,0 +1,439 @@ +import base64 +import boto3 +import gzip +import logging +import os +import pytest +import requests +import socket +import testinfra +from ec2instanceconnectcli.EC2InstanceConnectLogger import EC2InstanceConnectLogger +from ec2instanceconnectcli.EC2InstanceConnectKey import EC2InstanceConnectKey +from time import sleep + +# if GITHUB_RUN_ID is not set, use a default value that includes the user and hostname +RUN_ID = os.environ.get("GITHUB_RUN_ID", "unknown-ci-run-" + os.environ.get("USER", "unknown-user") + '@' + socket.gethostname()) + +postgresql_schema_sql_content = """ +ALTER DATABASE postgres SET "app.settings.jwt_secret" TO 'my_jwt_secret_which_is_not_so_secret'; +ALTER DATABASE postgres SET "app.settings.jwt_exp" TO 3600; + +ALTER USER tealbase_admin WITH PASSWORD 'postgres'; +ALTER USER postgres WITH PASSWORD 'postgres'; +ALTER USER authenticator WITH PASSWORD 'postgres'; +ALTER USER pgbouncer WITH PASSWORD 'postgres'; +ALTER USER tealbase_auth_admin WITH PASSWORD 'postgres'; +ALTER USER tealbase_storage_admin WITH PASSWORD 'postgres'; +ALTER USER tealbase_replication_admin WITH PASSWORD 'postgres'; +ALTER ROLE tealbase_read_only_user WITH PASSWORD 'postgres'; +ALTER ROLE tealbase_admin SET search_path TO "$user",public,auth,extensions; +""" +realtime_env_content = "" +adminapi_yaml_content = """ +port: 8085 +host: 0.0.0.0 +ref: aaaaaaaaaaaaaaaaaaaa +jwt_secret: my_jwt_secret_which_is_not_so_secret +metric_collectors: + - filesystem + - meminfo + - netdev + - loadavg + - cpu + - diskstats + - vmstat +node_exporter_additional_args: + - '--collector.filesystem.ignored-mount-points=^/(boot|sys|dev|run).*' + - 
'--collector.netdev.device-exclude=lo' +cert_path: /etc/ssl/adminapi/server.crt +key_path: /etc/ssl/adminapi/server.key +upstream_metrics_refresh_duration: 60s +pgbouncer_endpoints: + - 'postgres://pgbouncer:postgres@localhost:6543/pgbouncer' +fail2ban_socket: /var/run/fail2ban/fail2ban.sock +upstream_metrics_sources: + - + name: system + url: 'https://localhost:8085/metrics' + labels_to_attach: [{name: tealbase_project_ref, value: aaaaaaaaaaaaaaaaaaaa}, {name: service_type, value: db}] + skip_tls_verify: true + - + name: postgresql + url: 'http://localhost:9187/metrics' + labels_to_attach: [{name: tealbase_project_ref, value: aaaaaaaaaaaaaaaaaaaa}, {name: service_type, value: postgresql}] + - + name: gotrue + url: 'http://localhost:9122/metrics' + labels_to_attach: [{name: tealbase_project_ref, value: aaaaaaaaaaaaaaaaaaaa}, {name: service_type, value: gotrue}] +monitoring: + disk_usage: + enabled: true +firewall: + enabled: true + internal_ports: + - 9187 + - 8085 + - 9122 + privileged_ports: + - 22 + privileged_ports_allowlist: + - 0.0.0.0/0 + filtered_ports: + - 5432 + - 6543 + unfiltered_ports: + - 80 + - 443 + managed_rules_file: /etc/nftables/tealbase_managed.conf +pg_egress_collect_path: /tmp/pg_egress_collect.txt +aws_config: + creds: + enabled: false + check_frequency: 1h + refresh_buffer_duration: 6h +""" +pgsodium_root_key_content = ( + "0000000000000000000000000000000000000000000000000000000000000000" +) +postgrest_base_conf_content = """ +db-uri = "postgres://authenticator:postgres@localhost:5432/postgres?application_name=postgrest" +db-schema = "public, storage, graphql_public" +db-anon-role = "anon" +jwt-secret = "my_jwt_secret_which_is_not_so_secret" +role-claim-key = ".role" +openapi-mode = "ignore-privileges" +db-use-legacy-gucs = true +admin-server-port = 3001 +server-host = "*6" +db-pool-acquisition-timeout = 10 +max-rows = 1000 +db-extra-search-path = "public, extensions" +""" +gotrue_env_content = """ +API_EXTERNAL_URL=http://localhost 
+GOTRUE_API_HOST=0.0.0.0 +GOTRUE_SITE_URL= +GOTRUE_DB_DRIVER=postgres +GOTRUE_DB_DATABASE_URL=postgres://tealbase_auth_admin@localhost/postgres?sslmode=disable +GOTRUE_JWT_ADMIN_ROLES=tealbase_admin,service_role +GOTRUE_JWT_AUD=authenticated +GOTRUE_JWT_SECRET=my_jwt_secret_which_is_not_so_secret +""" +walg_config_json_content = """ +{ + "AWS_REGION": "ap-southeast-1", + "WALG_S3_PREFIX": "", + "PGDATABASE": "postgres", + "PGUSER": "tealbase_admin", + "PGPORT": 5432, + "WALG_DELTA_MAX_STEPS": 6, + "WALG_COMPRESSION_METHOD": "lz4" +} +""" +anon_key = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6ImFhYWFhYWFhYWFhYWFhYWFhYWFhIiwicm9sZSI6ImFub24iLCJpYXQiOjE2OTYyMjQ5NjYsImV4cCI6MjAxMTgwMDk2Nn0.QW95aRPA-4QuLzuvaIeeoFKlJP9J2hvAIpJ3WJ6G5zo" +service_role_key = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6ImFhYWFhYWFhYWFhYWFhYWFhYWFhIiwicm9sZSI6InNlcnZpY2Vfcm9sZSIsImlhdCI6MTY5NjIyNDk2NiwiZXhwIjoyMDExODAwOTY2fQ.Om7yqv15gC3mLGitBmvFRB3M4IsLsX9fXzTQnFM7lu0" +tealbase_admin_key = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6ImFhYWFhYWFhYWFhYWFhYWFhYWFhIiwicm9sZSI6InN1cGFiYXNlX2FkbWluIiwiaWF0IjoxNjk2MjI0OTY2LCJleHAiOjIwMTE4MDA5NjZ9.jrD3j2rBWiIx0vhVZzd1CXFv7qkAP392nBMadvXxk1c" +init_json_content = f""" +{{ + "jwt_secret": "my_jwt_secret_which_is_not_so_secret", + "project_ref": "aaaaaaaaaaaaaaaaaaaa", + "logflare_api_key": "", + "logflare_pitr_errors_source": "", + "logflare_postgrest_source": "", + "logflare_pgbouncer_source": "", + "logflare_db_source": "", + "logflare_gotrue_source": "", + "anon_key": "{anon_key}", + "service_key": "{service_role_key}", + "tealbase_admin_key": "{tealbase_admin_key}", + "common_name": "db.aaaaaaaaaaaaaaaaaaaa.tealbase.red", + "region": "ap-southeast-1", + "init_database_only": false +}} +""" + +logger = logging.getLogger("ami-tests") +handler = logging.StreamHandler() +formatter = logging.Formatter( + '%(asctime)s %(name)-12s %(levelname)-8s %(message)s') 
+handler.setFormatter(formatter) +logger.addHandler(handler) +logger.setLevel(logging.DEBUG) + +# scope='session' uses the same container for all the tests; +# scope='function' uses a new container per test function. +@pytest.fixture(scope="session") +def host(): + ec2 = boto3.resource("ec2", region_name="ap-southeast-1") + images = list( + ec2.images.filter( + Filters=[{"Name": "name", "Values": ["tealbase-postgres-ci-ami-test"]}] + ) + ) + assert len(images) == 1 + image = images[0] + + def gzip_then_base64_encode(s: str) -> str: + return base64.b64encode(gzip.compress(s.encode())).decode() + + instance = list( + ec2.create_instances( + BlockDeviceMappings=[ + { + "DeviceName": "/dev/sda1", + "Ebs": { + "VolumeSize": 8, # gb + "Encrypted": True, + "DeleteOnTermination": True, + "VolumeType": "gp3", + }, + }, + ], + MetadataOptions={ + "HttpTokens": "required", + "HttpEndpoint": "enabled", + }, + IamInstanceProfile={"Name": "pg-ap-southeast-1"}, + InstanceType="t4g.micro", + MinCount=1, + MaxCount=1, + ImageId=image.id, + NetworkInterfaces=[ + { + "DeviceIndex": 0, + "AssociatePublicIpAddress": True, + "Groups": ["sg-0a883ca614ebfbae0", "sg-014d326be5a1627dc"], + } + ], + UserData=f"""#cloud-config +hostname: db-aaaaaaaaaaaaaaaaaaaa +write_files: + - {{path: /etc/postgresql.schema.sql, content: {gzip_then_base64_encode(postgresql_schema_sql_content)}, permissions: '0600', encoding: gz+b64}} + - {{path: /etc/realtime.env, content: {gzip_then_base64_encode(realtime_env_content)}, permissions: '0664', encoding: gz+b64}} + - {{path: /etc/adminapi/adminapi.yaml, content: {gzip_then_base64_encode(adminapi_yaml_content)}, permissions: '0600', owner: 'adminapi:root', encoding: gz+b64}} + - {{path: /etc/postgresql-custom/pgsodium_root.key, content: {gzip_then_base64_encode(pgsodium_root_key_content)}, permissions: '0600', owner: 'postgres:postgres', encoding: gz+b64}} + - {{path: /etc/postgrest/base.conf, content: {gzip_then_base64_encode(postgrest_base_conf_content)}, 
permissions: '0664', encoding: gz+b64}} + - {{path: /etc/gotrue.env, content: {gzip_then_base64_encode(gotrue_env_content)}, permissions: '0664', encoding: gz+b64}} + - {{path: /etc/wal-g/config.json, content: {gzip_then_base64_encode(walg_config_json_content)}, permissions: '0664', owner: 'wal-g:wal-g', encoding: gz+b64}} + - {{path: /tmp/init.json, content: {gzip_then_base64_encode(init_json_content)}, permissions: '0600', encoding: gz+b64}} +runcmd: + - 'sudo echo \"pgbouncer\" \"postgres\" >> /etc/pgbouncer/userlist.txt' + - 'cd /tmp && aws s3 cp --region ap-southeast-1 s3://init-scripts-staging/project/init.sh .' + - 'bash init.sh "staging"' + - 'rm -rf /tmp/*' +""", + TagSpecifications=[ + { + "ResourceType": "instance", + "Tags": [ + {"Key": "Name", "Value": "ci-ami-test"}, + {"Key": "creator", "Value": "testinfra-ci"}, + {"Key": "testinfra-run-id", "Value": RUN_ID} + ], + } + ], + ) + )[0] + instance.wait_until_running() + + ec2logger = EC2InstanceConnectLogger(debug=False) + temp_key = EC2InstanceConnectKey(ec2logger.get_logger()) + ec2ic = boto3.client("ec2-instance-connect", region_name="ap-southeast-1") + response = ec2ic.send_ssh_public_key( + InstanceId=instance.id, + InstanceOSUser="ubuntu", + SSHPublicKey=temp_key.get_pub_key(), + ) + assert response["Success"] + + # instance doesn't have public ip yet + while not instance.public_ip_address: + logger.warning("waiting for ip to be available") + sleep(5) + instance.reload() + + while True: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if sock.connect_ex((instance.public_ip_address, 22)) == 0: + break + else: + logger.warning("waiting for ssh to be available") + sleep(10) + + host = testinfra.get_host( + # paramiko is an ssh backend + f"paramiko://ubuntu@{instance.public_ip_address}?timeout=60", + ssh_identity_file=temp_key.get_priv_key_file(), + ) + + def is_healthy(host) -> bool: + cmd = host.run("pg_isready -U postgres") + if cmd.failed is True: + logger.warning("pg not ready") + 
return False + + cmd = host.run(f"curl -sf -k --connect-timeout 30 --max-time 60 https://localhost:8085/health -H 'apikey: {tealbase_admin_key}'") + if cmd.failed is True: + logger.warning("adminapi not ready") + return False + + cmd = host.run("curl -sf --connect-timeout 30 --max-time 60 http://localhost:3001/ready") + if cmd.failed is True: + logger.warning("postgrest not ready") + return False + + cmd = host.run("curl -sf --connect-timeout 30 --max-time 60 http://localhost:8081/health") + if cmd.failed is True: + logger.warning("gotrue not ready") + return False + + # TODO(thebengeu): switch to checking Envoy once it's the default. + cmd = host.run("sudo kong health") + if cmd.failed is True: + logger.warning("kong not ready") + return False + + cmd = host.run("sudo fail2ban-client status") + if cmd.failed is True: + logger.warning("fail2ban not ready") + return False + + return True + + while True: + if is_healthy(host): + break + sleep(1) + + # return a testinfra connection to the instance + yield host + + # at the end of the test suite, destroy the instance + instance.terminate() + + +def test_postgrest_is_running(host): + postgrest = host.service("postgrest") + assert postgrest.is_running + + +def test_postgrest_responds_to_requests(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/", + headers={ + "apikey": anon_key, + "authorization": f"Bearer {anon_key}", + }, + ) + assert res.ok + + +def test_postgrest_can_connect_to_db(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "apikey": service_role_key, + "authorization": f"Bearer {service_role_key}", + "accept-profile": "storage", + }, + ) + assert res.ok + + +# There would be an error if the `apikey` query parameter isn't removed, +# since PostgREST treats query parameters as conditions. +# +# Worth testing since remove_apikey_query_parameters uses regexp instead +# of parsed query parameters. 
+def test_postgrest_starting_apikey_query_parameter_is_removed(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "apikey": service_role_key, + "id": "eq.absent", + "name": "eq.absent", + }, + ) + assert res.ok + + +def test_postgrest_middle_apikey_query_parameter_is_removed(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "id": "eq.absent", + "apikey": service_role_key, + "name": "eq.absent", + }, + ) + assert res.ok + + +def test_postgrest_ending_apikey_query_parameter_is_removed(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "id": "eq.absent", + "name": "eq.absent", + "apikey": service_role_key, + }, + ) + assert res.ok + +# There would be an error if the empty key query parameter isn't removed, +# since PostgREST treats empty key query parameters as malformed input. +# +# Worth testing since remove_apikey_and_empty_key_query_parameters uses regexp instead +# of parsed query parameters. 
+def test_postgrest_starting_empty_key_query_parameter_is_removed(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "": "empty_key", + "id": "eq.absent", + "apikey": service_role_key, + }, + ) + assert res.ok + + +def test_postgrest_middle_empty_key_query_parameter_is_removed(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "apikey": service_role_key, + "": "empty_key", + "id": "eq.absent", + }, + ) + assert res.ok + + +def test_postgrest_ending_empty_key_query_parameter_is_removed(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "id": "eq.absent", + "apikey": service_role_key, + "": "empty_key", + }, + ) + assert res.ok diff --git a/testinfra/test_ami_nix.py b/testinfra/test_ami_nix.py new file mode 100644 index 0000000..fe4be1a --- /dev/null +++ b/testinfra/test_ami_nix.py @@ -0,0 +1,439 @@ +import base64 +import boto3 +import gzip +import logging +import os +import pytest +import requests +import socket +import testinfra +from ec2instanceconnectcli.EC2InstanceConnectLogger import EC2InstanceConnectLogger +from ec2instanceconnectcli.EC2InstanceConnectKey import EC2InstanceConnectKey +from time import sleep + +# if GITHUB_RUN_ID is not set, use a default value that includes the user and hostname +RUN_ID = os.environ.get("GITHUB_RUN_ID", "unknown-ci-run-" + os.environ.get("USER", "unknown-user") + '@' + socket.gethostname()) +AMI_NAME = os.environ.get('AMI_NAME') +postgresql_schema_sql_content = """ +ALTER DATABASE postgres SET "app.settings.jwt_secret" TO 'my_jwt_secret_which_is_not_so_secret'; +ALTER DATABASE postgres SET "app.settings.jwt_exp" TO 3600; + +ALTER USER tealbase_admin WITH PASSWORD 'postgres'; +ALTER USER postgres WITH PASSWORD 'postgres'; +ALTER USER 
authenticator WITH PASSWORD 'postgres'; +ALTER USER pgbouncer WITH PASSWORD 'postgres'; +ALTER USER tealbase_auth_admin WITH PASSWORD 'postgres'; +ALTER USER tealbase_storage_admin WITH PASSWORD 'postgres'; +ALTER USER tealbase_replication_admin WITH PASSWORD 'postgres'; +ALTER ROLE tealbase_read_only_user WITH PASSWORD 'postgres'; +ALTER ROLE tealbase_admin SET search_path TO "$user",public,auth,extensions; +""" +realtime_env_content = "" +adminapi_yaml_content = """ +port: 8085 +host: 0.0.0.0 +ref: aaaaaaaaaaaaaaaaaaaa +jwt_secret: my_jwt_secret_which_is_not_so_secret +metric_collectors: + - filesystem + - meminfo + - netdev + - loadavg + - cpu + - diskstats + - vmstat +node_exporter_additional_args: + - '--collector.filesystem.ignored-mount-points=^/(boot|sys|dev|run).*' + - '--collector.netdev.device-exclude=lo' +cert_path: /etc/ssl/adminapi/server.crt +key_path: /etc/ssl/adminapi/server.key +upstream_metrics_refresh_duration: 60s +pgbouncer_endpoints: + - 'postgres://pgbouncer:postgres@localhost:6543/pgbouncer' +fail2ban_socket: /var/run/fail2ban/fail2ban.sock +upstream_metrics_sources: + - + name: system + url: 'https://localhost:8085/metrics' + labels_to_attach: [{name: tealbase_project_ref, value: aaaaaaaaaaaaaaaaaaaa}, {name: service_type, value: db}] + skip_tls_verify: true + - + name: postgresql + url: 'http://localhost:9187/metrics' + labels_to_attach: [{name: tealbase_project_ref, value: aaaaaaaaaaaaaaaaaaaa}, {name: service_type, value: postgresql}] + - + name: gotrue + url: 'http://localhost:9122/metrics' + labels_to_attach: [{name: tealbase_project_ref, value: aaaaaaaaaaaaaaaaaaaa}, {name: service_type, value: gotrue}] +monitoring: + disk_usage: + enabled: true +firewall: + enabled: true + internal_ports: + - 9187 + - 8085 + - 9122 + privileged_ports: + - 22 + privileged_ports_allowlist: + - 0.0.0.0/0 + filtered_ports: + - 5432 + - 6543 + unfiltered_ports: + - 80 + - 443 + managed_rules_file: /etc/nftables/tealbase_managed.conf 
+pg_egress_collect_path: /tmp/pg_egress_collect.txt +aws_config: + creds: + enabled: false + check_frequency: 1h + refresh_buffer_duration: 6h +""" +pgsodium_root_key_content = ( + "0000000000000000000000000000000000000000000000000000000000000000" +) +postgrest_base_conf_content = """ +db-uri = "postgres://authenticator:postgres@localhost:5432/postgres?application_name=postgrest" +db-schema = "public, storage, graphql_public" +db-anon-role = "anon" +jwt-secret = "my_jwt_secret_which_is_not_so_secret" +role-claim-key = ".role" +openapi-mode = "ignore-privileges" +db-use-legacy-gucs = true +admin-server-port = 3001 +server-host = "*6" +db-pool-acquisition-timeout = 10 +max-rows = 1000 +db-extra-search-path = "public, extensions" +""" +gotrue_env_content = """ +API_EXTERNAL_URL=http://localhost +GOTRUE_API_HOST=0.0.0.0 +GOTRUE_SITE_URL= +GOTRUE_DB_DRIVER=postgres +GOTRUE_DB_DATABASE_URL=postgres://tealbase_auth_admin@localhost/postgres?sslmode=disable +GOTRUE_JWT_ADMIN_ROLES=tealbase_admin,service_role +GOTRUE_JWT_AUD=authenticated +GOTRUE_JWT_SECRET=my_jwt_secret_which_is_not_so_secret +""" +walg_config_json_content = """ +{ + "AWS_REGION": "ap-southeast-1", + "WALG_S3_PREFIX": "", + "PGDATABASE": "postgres", + "PGUSER": "tealbase_admin", + "PGPORT": 5432, + "WALG_DELTA_MAX_STEPS": 6, + "WALG_COMPRESSION_METHOD": "lz4" +} +""" +anon_key = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6ImFhYWFhYWFhYWFhYWFhYWFhYWFhIiwicm9sZSI6ImFub24iLCJpYXQiOjE2OTYyMjQ5NjYsImV4cCI6MjAxMTgwMDk2Nn0.QW95aRPA-4QuLzuvaIeeoFKlJP9J2hvAIpJ3WJ6G5zo" +service_role_key = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6ImFhYWFhYWFhYWFhYWFhYWFhYWFhIiwicm9sZSI6InNlcnZpY2Vfcm9sZSIsImlhdCI6MTY5NjIyNDk2NiwiZXhwIjoyMDExODAwOTY2fQ.Om7yqv15gC3mLGitBmvFRB3M4IsLsX9fXzTQnFM7lu0" +tealbase_admin_key = 
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6ImFhYWFhYWFhYWFhYWFhYWFhYWFhIiwicm9sZSI6InN1cGFiYXNlX2FkbWluIiwiaWF0IjoxNjk2MjI0OTY2LCJleHAiOjIwMTE4MDA5NjZ9.jrD3j2rBWiIx0vhVZzd1CXFv7qkAP392nBMadvXxk1c" +init_json_content = f""" +{{ + "jwt_secret": "my_jwt_secret_which_is_not_so_secret", + "project_ref": "aaaaaaaaaaaaaaaaaaaa", + "logflare_api_key": "", + "logflare_pitr_errors_source": "", + "logflare_postgrest_source": "", + "logflare_pgbouncer_source": "", + "logflare_db_source": "", + "logflare_gotrue_source": "", + "anon_key": "{anon_key}", + "service_key": "{service_role_key}", + "tealbase_admin_key": "{tealbase_admin_key}", + "common_name": "db.aaaaaaaaaaaaaaaaaaaa.tealbase.red", + "region": "ap-southeast-1", + "init_database_only": false +}} +""" + +logger = logging.getLogger("ami-tests") +handler = logging.StreamHandler() +formatter = logging.Formatter( + '%(asctime)s %(name)-12s %(levelname)-8s %(message)s') +handler.setFormatter(formatter) +logger.addHandler(handler) +logger.setLevel(logging.DEBUG) + +# scope='session' uses the same container for all the tests; +# scope='function' uses a new container per test function. 
+@pytest.fixture(scope="session") +def host(): + ec2 = boto3.resource("ec2", region_name="ap-southeast-1") + images = list( + ec2.images.filter( + Filters=[{"Name": "name", "Values": [AMI_NAME]}], + ) + ) + assert len(images) == 1 + image = images[0] + + def gzip_then_base64_encode(s: str) -> str: + return base64.b64encode(gzip.compress(s.encode())).decode() + + instance = list( + ec2.create_instances( + BlockDeviceMappings=[ + { + "DeviceName": "/dev/sda1", + "Ebs": { + "VolumeSize": 8, # gb + "Encrypted": True, + "DeleteOnTermination": True, + "VolumeType": "gp3", + }, + }, + ], + MetadataOptions={ + "HttpTokens": "required", + "HttpEndpoint": "enabled", + }, + IamInstanceProfile={"Name": "pg-ap-southeast-1"}, + InstanceType="t4g.micro", + MinCount=1, + MaxCount=1, + ImageId=image.id, + NetworkInterfaces=[ + { + "DeviceIndex": 0, + "AssociatePublicIpAddress": True, + "Groups": ["sg-0a883ca614ebfbae0", "sg-014d326be5a1627dc"], + } + ], + UserData=f"""#cloud-config +hostname: db-aaaaaaaaaaaaaaaaaaaa +write_files: + - {{path: /etc/postgresql.schema.sql, content: {gzip_then_base64_encode(postgresql_schema_sql_content)}, permissions: '0600', encoding: gz+b64}} + - {{path: /etc/realtime.env, content: {gzip_then_base64_encode(realtime_env_content)}, permissions: '0664', encoding: gz+b64}} + - {{path: /etc/adminapi/adminapi.yaml, content: {gzip_then_base64_encode(adminapi_yaml_content)}, permissions: '0600', owner: 'adminapi:root', encoding: gz+b64}} + - {{path: /etc/postgresql-custom/pgsodium_root.key, content: {gzip_then_base64_encode(pgsodium_root_key_content)}, permissions: '0600', owner: 'postgres:postgres', encoding: gz+b64}} + - {{path: /etc/postgrest/base.conf, content: {gzip_then_base64_encode(postgrest_base_conf_content)}, permissions: '0664', encoding: gz+b64}} + - {{path: /etc/gotrue.env, content: {gzip_then_base64_encode(gotrue_env_content)}, permissions: '0664', encoding: gz+b64}} + - {{path: /etc/wal-g/config.json, content: 
{gzip_then_base64_encode(walg_config_json_content)}, permissions: '0664', owner: 'wal-g:wal-g', encoding: gz+b64}} + - {{path: /tmp/init.json, content: {gzip_then_base64_encode(init_json_content)}, permissions: '0600', encoding: gz+b64}} +runcmd: + - 'sudo echo \"pgbouncer\" \"postgres\" >> /etc/pgbouncer/userlist.txt' + - 'cd /tmp && aws s3 cp --region ap-southeast-1 s3://init-scripts-staging/project/init.sh .' + - 'bash init.sh "staging"' + - 'rm -rf /tmp/*' +""", + TagSpecifications=[ + { + "ResourceType": "instance", + "Tags": [ + {"Key": "Name", "Value": "ci-ami-test-nix"}, + {"Key": "creator", "Value": "testinfra-ci"}, + {"Key": "testinfra-run-id", "Value": RUN_ID} + ], + } + ], + ) + )[0] + instance.wait_until_running() + + ec2logger = EC2InstanceConnectLogger(debug=False) + temp_key = EC2InstanceConnectKey(ec2logger.get_logger()) + ec2ic = boto3.client("ec2-instance-connect", region_name="ap-southeast-1") + response = ec2ic.send_ssh_public_key( + InstanceId=instance.id, + InstanceOSUser="ubuntu", + SSHPublicKey=temp_key.get_pub_key(), + ) + assert response["Success"] + + # instance doesn't have public ip yet + while not instance.public_ip_address: + logger.warning("waiting for ip to be available") + sleep(5) + instance.reload() + + while True: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if sock.connect_ex((instance.public_ip_address, 22)) == 0: + break + else: + logger.warning("waiting for ssh to be available") + sleep(10) + + host = testinfra.get_host( + # paramiko is an ssh backend + f"paramiko://ubuntu@{instance.public_ip_address}?timeout=60", + ssh_identity_file=temp_key.get_priv_key_file(), + ) + + def is_healthy(host) -> bool: + cmd = host.run("sudo -u postgres /usr/bin/pg_isready -U postgres") + if cmd.failed is True: + logger.warning("pg not ready") + return False + + cmd = host.run(f"curl -sf -k --connect-timeout 30 --max-time 60 https://localhost:8085/health -H 'apikey: {tealbase_admin_key}'") + if cmd.failed is True: + 
logger.warning("adminapi not ready") + return False + + cmd = host.run("curl -sf --connect-timeout 30 --max-time 60 http://localhost:3001/ready") + if cmd.failed is True: + logger.warning("postgrest not ready") + return False + + cmd = host.run("curl -sf --connect-timeout 30 --max-time 60 http://localhost:8081/health") + if cmd.failed is True: + logger.warning("gotrue not ready") + return False + + # TODO(thebengeu): switch to checking Envoy once it's the default. + cmd = host.run("sudo kong health") + if cmd.failed is True: + logger.warning("kong not ready") + return False + + cmd = host.run("sudo fail2ban-client status") + if cmd.failed is True: + logger.warning("fail2ban not ready") + return False + + return True + + while True: + if is_healthy(host): + break + sleep(1) + + # return a testinfra connection to the instance + yield host + + # at the end of the test suite, destroy the instance + instance.terminate() + + +def test_postgrest_is_running(host): + postgrest = host.service("postgrest") + assert postgrest.is_running + + +def test_postgrest_responds_to_requests(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/", + headers={ + "apikey": anon_key, + "authorization": f"Bearer {anon_key}", + }, + ) + assert res.ok + + +def test_postgrest_can_connect_to_db(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "apikey": service_role_key, + "authorization": f"Bearer {service_role_key}", + "accept-profile": "storage", + }, + ) + assert res.ok + + +# There would be an error if the `apikey` query parameter isn't removed, +# since PostgREST treats query parameters as conditions. +# +# Worth testing since remove_apikey_query_parameters uses regexp instead +# of parsed query parameters. 
+def test_postgrest_starting_apikey_query_parameter_is_removed(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "apikey": service_role_key, + "id": "eq.absent", + "name": "eq.absent", + }, + ) + assert res.ok + + +def test_postgrest_middle_apikey_query_parameter_is_removed(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "id": "eq.absent", + "apikey": service_role_key, + "name": "eq.absent", + }, + ) + assert res.ok + + +def test_postgrest_ending_apikey_query_parameter_is_removed(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "id": "eq.absent", + "name": "eq.absent", + "apikey": service_role_key, + }, + ) + assert res.ok + +# There would be an error if the empty key query parameter isn't removed, +# since PostgREST treats empty key query parameters as malformed input. +# +# Worth testing since remove_apikey_and_empty_key_query_parameters uses regexp instead +# of parsed query parameters. 
+def test_postgrest_starting_empty_key_query_parameter_is_removed(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "": "empty_key", + "id": "eq.absent", + "apikey": service_role_key, + }, + ) + assert res.ok + + +def test_postgrest_middle_empty_key_query_parameter_is_removed(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "apikey": service_role_key, + "": "empty_key", + "id": "eq.absent", + }, + ) + assert res.ok + + +def test_postgrest_ending_empty_key_query_parameter_is_removed(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "id": "eq.absent", + "apikey": service_role_key, + "": "empty_key", + }, + ) + assert res.ok diff --git a/tests/pg_upgrade/.env b/tests/pg_upgrade/.env new file mode 100644 index 0000000..505503f --- /dev/null +++ b/tests/pg_upgrade/.env @@ -0,0 +1,6 @@ +POSTGRES_PASSWORD=postgres +POSTGRES_HOST=/var/run/postgresql +POSTGRES_INITDB_ARGS=--lc-ctype=C.UTF-8 +PG_MAJOR_VERSION=15 +IS_CI=true +SCRIPT_DIR=/tmp/upgrade diff --git a/tests/pg_upgrade/.gitignore b/tests/pg_upgrade/.gitignore new file mode 100644 index 0000000..c8ff8c3 --- /dev/null +++ b/tests/pg_upgrade/.gitignore @@ -0,0 +1,4 @@ +# excluding these since running debug.sh will download the files locally +pg_upgrade_bin*.tar.gz +pg_upgrade_scripts*.tar.gz +pg_upgrade_scripts/ diff --git a/tests/pg_upgrade/debug.sh b/tests/pg_upgrade/debug.sh new file mode 100755 index 0000000..eff9dbb --- /dev/null +++ b/tests/pg_upgrade/debug.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +set -eEuo pipefail + +export PGPASSWORD=postgres +export PGUSER=tealbase_admin +export PGHOST=localhost +export PGDATABASE=postgres + +ARTIFACTS_BUCKET_NAME=${1:-} +if [ -z "$ARTIFACTS_BUCKET_NAME" ]; then + echo "Usage: $0 
[INITIAL_PG_VERSION]" + exit 1 +fi + +INITIAL_PG_VERSION=${2:-15.1.1.60} +LATEST_PG_VERSION=$(sed -e 's/postgres-version = "\(.*\)"/\1/g' ../../common.vars.pkr.hcl) + +LATEST_VERSION_SCRIPTS="scripts/pg_upgrade_scripts-${LATEST_PG_VERSION}.tar.gz" +LATEST_VERSION_BIN="scripts/pg_upgrade_bin-${LATEST_PG_VERSION}.tar.gz" + +if [ ! -f "$LATEST_VERSION_SCRIPTS" ]; then + aws s3 cp "s3://${ARTIFACTS_BUCKET_NAME}/upgrades/postgres/tealbase-postgres-${LATEST_PG_VERSION}/pg_upgrade_scripts.tar.gz" "$LATEST_VERSION_SCRIPTS" +fi + +if [ ! -f "$LATEST_VERSION_BIN" ]; then + aws s3 cp "s3://${ARTIFACTS_BUCKET_NAME}/upgrades/postgres/tealbase-postgres-${LATEST_PG_VERSION}/20.04.tar.gz" "$LATEST_VERSION_BIN" +fi + +rm -rf scripts/pg_upgrade_scripts +cp "$LATEST_VERSION_SCRIPTS" scripts/pg_upgrade_scripts.tar.gz +cp "$LATEST_VERSION_BIN" scripts/pg_upgrade_bin.tar.gz + +docker rm -f pg_upgrade_test || true + +docker run -t --name pg_upgrade_test --env-file .env \ + -v "$(pwd)/scripts:/tmp/upgrade" \ + --entrypoint /tmp/upgrade/entrypoint.sh -d \ + -p 5432:5432 \ + "tealbase/postgres:${INITIAL_PG_VERSION}" + +sleep 3 +while ! docker exec -it pg_upgrade_test bash -c "pg_isready"; do + echo "Waiting for postgres to start..." + sleep 1 +done + +echo "Running migrations" +docker cp ../../migrations/db/migrations "pg_upgrade_test:/docker-entrypoint-initdb.d/" +docker exec -it pg_upgrade_test bash -c '/docker-entrypoint-initdb.d/migrate.sh > /tmp/migrate.log 2>&1; exit $?' +if [ $? -ne 0 ]; then + echo "Running migrations failed. Exiting." + exit 1 +fi + +echo "Running tests" +pg_prove "../../migrations/tests/test.sql" +psql -f "./tests/97-enable-extensions.sql" +psql -f "./tests/98-data-fixtures.sql" +psql -f "./tests/99-fixtures.sql" + +echo "Initiating pg_upgrade" +docker exec -it pg_upgrade_test bash -c '/tmp/upgrade/pg_upgrade_scripts/initiate.sh "$PG_MAJOR_VERSION"; exit $?' +if [ $? -ne 0 ]; then + echo "Initiating pg_upgrade failed. Exiting." 
+ exit 1 +fi + +sleep 3 +echo "Completing pg_upgrade" +docker exec -it pg_upgrade_test bash -c 'rm -f /tmp/pg-upgrade-status; /tmp/upgrade/pg_upgrade_scripts/complete.sh; exit $?' +if [ $? -ne 0 ]; then + echo "Completing pg_upgrade failed. Exiting." + exit 1 +fi + +pg_prove tests/01-schema.sql +pg_prove tests/02-data.sql +pg_prove tests/03-settings.sql + diff --git a/tests/pg_upgrade/scripts/entrypoint.sh b/tests/pg_upgrade/scripts/entrypoint.sh new file mode 100755 index 0000000..d9d80ac --- /dev/null +++ b/tests/pg_upgrade/scripts/entrypoint.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +set -e + +SCRIPT_DIR=$(dirname -- "$0";) + +ls -la "$SCRIPT_DIR" + +tar -xzf "${SCRIPT_DIR}/pg_upgrade_scripts.tar.gz" -C "${SCRIPT_DIR}" + +mkdir -p /tmp/persistent +cp "$SCRIPT_DIR/pg_upgrade_bin.tar.gz" /tmp/persistent + +export PATH="$(pg_config --bindir):$PATH" + +sed -i "s/|--version//g" /usr/local/bin/docker-entrypoint.sh +/usr/local/bin/docker-entrypoint.sh postgres --version || true + +su postgres -c "$(pg_config --bindir)/pg_ctl start -o '-c config_file=/etc/postgresql/postgresql.conf' -l /tmp/postgres.log" + +RECEIVED_EXIT_SIGNAL=false +trap 'RECEIVED_EXIT_SIGNAL=true' SIGINT SIGTERM SIGUSR1 +while ! 
((RECEIVED_EXIT_SIGNAL)); do + sleep 5 +done diff --git a/tests/pg_upgrade/tests/01-schema.sql b/tests/pg_upgrade/tests/01-schema.sql new file mode 100644 index 0000000..3cf3a83 --- /dev/null +++ b/tests/pg_upgrade/tests/01-schema.sql @@ -0,0 +1,26 @@ +CREATE EXTENSION IF NOT EXISTS pgtap; + +BEGIN; +SELECT plan(15); + +select has_schema('public'); +select has_schema('auth'); +select has_schema('storage'); +select has_schema('realtime'); +select has_schema('pgsodium'); +select has_schema('vault'); +select has_schema('extensions'); + +SELECT has_enum('public', 'continents', 'Enum continents should exist'); + +SELECT has_table('public', 'countries', 'Table countries should exist'); +SELECT has_column('public', 'countries', 'id', 'Column id should exist'); +SELECT has_column('public', 'countries', 'name', 'Column name should exist'); +SELECT has_column('public', 'countries', 'iso2', 'Column iso2 should exist'); +SELECT has_column('public', 'countries', 'iso3', 'Column iso3 should exist'); +SELECT has_column('public', 'countries', 'continent', 'Column continent should exist'); + +SELECT has_materialized_view('public', 'european_countries', 'Materialized view european_countries should exist'); + +SELECT * FROM finish(); +ROLLBACK; diff --git a/tests/pg_upgrade/tests/02-data.sql b/tests/pg_upgrade/tests/02-data.sql new file mode 100644 index 0000000..d83e346 --- /dev/null +++ b/tests/pg_upgrade/tests/02-data.sql @@ -0,0 +1,27 @@ +CREATE EXTENSION IF NOT EXISTS pgtap; + +BEGIN; +SELECT plan(4); + +SELECT results_eq( + 'SELECT count(*)::int FROM public.countries', + ARRAY[ 249 ] +); + +SELECT results_eq( + 'SELECT count(*)::int FROM public.countries where continent = ''Europe''', + ARRAY[ 45 ] +); + +SELECT results_eq( + 'SELECT count(*)::int FROM public.european_countries', + ARRAY[ 45 ] +); + +SELECT results_eq( + 'SELECT count(*) FROM public.countries where continent = ''Europe''', + 'SELECT count(*) FROM public.european_countries' +); + +SELECT * FROM finish(); 
+ROLLBACK; diff --git a/tests/pg_upgrade/tests/03-settings.sql b/tests/pg_upgrade/tests/03-settings.sql new file mode 100644 index 0000000..32fc71a --- /dev/null +++ b/tests/pg_upgrade/tests/03-settings.sql @@ -0,0 +1,17 @@ +CREATE EXTENSION IF NOT EXISTS pgtap; + +BEGIN; +SELECT plan(2); + +SELECT results_eq( + 'SELECT setting FROM pg_settings where name = ''jit''', + ARRAY[ 'off' ] +); + +select results_eq( + 'SELECT setting FROM pg_settings WHERE name = ''password_encryption''', + ARRAY[ 'scram-sha-256' ] +); + +SELECT * FROM finish(); +ROLLBACK; diff --git a/tests/pg_upgrade/tests/97-enable-extensions.sql b/tests/pg_upgrade/tests/97-enable-extensions.sql new file mode 100644 index 0000000..34c730b --- /dev/null +++ b/tests/pg_upgrade/tests/97-enable-extensions.sql @@ -0,0 +1,10 @@ +do $$ +declare + ext record; +begin + for ext in (select * from pg_available_extensions where name not in (select extname from pg_extension) order by name) + loop + execute 'create extension if not exists ' || ext.name || ' cascade'; + end loop; +end; +$$; diff --git a/tests/pg_upgrade/tests/98-data-fixtures.sql b/tests/pg_upgrade/tests/98-data-fixtures.sql new file mode 100644 index 0000000..1a675e2 --- /dev/null +++ b/tests/pg_upgrade/tests/98-data-fixtures.sql @@ -0,0 +1,273 @@ +create type public.continents as enum ( + 'Africa', + 'Antarctica', + 'Asia', + 'Europe', + 'Oceania', + 'North America', + 'South America' +); +create table public.countries ( + id bigint generated by default as identity primary key, + name text, + iso2 text not null, + iso3 text, + local_name text, + continent continents +); +comment on table countries is 'Full list of countries.'; +comment on column countries.name is 'Full country name.'; +comment on column countries.iso2 is 'ISO 3166-1 alpha-2 code.'; +comment on column countries.iso3 is 'ISO 3166-1 alpha-3 code.'; +comment on column countries.local_name is 'Local variation of the name.'; +insert into public.countries 
(name,iso2,iso3,local_name,continent) values + ('Bonaire, Sint Eustatius and Saba','BQ','BES',null,null), + ('Curaçao','CW','CUW',null,null), + ('Guernsey','GG','GGY',null,null), + ('Isle of Man','IM','IMN',null,null), + ('Jersey','JE','JEY',null,null), + ('Åland Islands','AX','ALA',null,null), + ('Montenegro','ME','MNE',null,null), + ('Saint Barthélemy','BL','BLM',null,null), + ('Saint Martin (French part)','MF','MAF',null,null), + ('Serbia','RS','SRB',null,null), + ('Sint Maarten (Dutch part)','SX','SXM',null,null), + ('South Sudan','SS','SSD',null,null), + ('Timor-Leste','TL','TLS',null,null), + ('American Samoa','as','ASM','Amerika Samoa','Oceania'), + ('Andorra','AD','AND','Andorra','Europe'), + ('Angola','AO','AGO','Angola','Africa'), + ('Anguilla','AI','AIA','Anguilla','North America'), + ('Antarctica','AQ','ATA','','Antarctica'), + ('Antigua and Barbuda','AG','ATG','Antigua and Barbuda','North America'), + ('Argentina','AR','ARG','Argentina','South America'), + ('Armenia','AM','ARM','Hajastan','Asia'), + ('Aruba','AW','ABW','Aruba','North America'), + ('Australia','AU','AUS','Australia','Oceania'), + ('Austria','AT','AUT','Österreich','Europe'), + ('Azerbaijan','AZ','AZE','Azerbaijan','Asia'), + ('Bahamas','BS','BHS','The Bahamas','North America'), + ('Bahrain','BH','BHR','Al-Bahrayn','Asia'), + ('Bangladesh','BD','BGD','Bangladesh','Asia'), + ('Barbados','BB','BRB','Barbados','North America'), + ('Belarus','BY','BLR','Belarus','Europe'), + ('Belgium','BE','BEL','Belgium/Belgique','Europe'), + ('Belize','BZ','BLZ','Belize','North America'), + ('Benin','BJ','BEN','Benin','Africa'), + ('Bermuda','BM','BMU','Bermuda','North America'), + ('Bhutan','BT','BTN','Druk-Yul','Asia'), + ('Bolivia','BO','BOL','Bolivia','South America'), + ('Bosnia and Herzegovina','BA','BIH','Bosna i Hercegovina','Europe'), + ('Botswana','BW','BWA','Botswana','Africa'), + ('Bouvet Island','BV','BVT','Bouvet Island','Antarctica'), + ('Brazil','BR','BRA','Brasil','South America'), + 
('British Indian Ocean Territory','IO','IOT','British Indian Ocean Territory','Africa'), + ('Brunei Darussalam','BN','BRN','Brunei Darussalam','Asia'), + ('Bulgaria','BG','BGR','Balgarija','Europe'), + ('Burkina Faso','BF','BFA','Burkina Faso','Africa'), + ('Burundi','BI','BDI','Burundi/Uburundi','Africa'), + ('Cambodia','KH','KHM','Cambodia','Asia'), + ('Cameroon','CM','CMR','Cameroun/Cameroon','Africa'), + ('Canada','CA','CAN','Canada','North America'), + ('Cape Verde','CV','CPV','Cabo Verde','Africa'), + ('Cayman Islands','KY','CYM','Cayman Islands','North America'), + ('Central African Republic','CF','CAF','Centrafrique','Africa'), + ('Chad','TD','TCD','Tchad/Tshad','Africa'), + ('Chile','CL','CHL','Chile','South America'), + ('China','CN','CHN','Zhongquo','Asia'), + ('Christmas Island','CX','CXR','Christmas Island','Oceania'), + ('Cocos (Keeling) Islands','CC','CCK','Cocos (Keeling) Islands','Oceania'), + ('Colombia','CO','COL','Colombia','South America'), + ('Comoros','KM','COM','Komori/Comores','Africa'), + ('Congo','CG','COG','Congo','Africa'), + ('Congo, the Democratic Republic of the','CD','COD','Republique Democratique du Congo','Africa'), + ('Cook Islands','CK','COK','The Cook Islands','Oceania'), + ('Costa Rica','CR','CRI','Costa Rica','North America'), + ('Cote DIvoire','CI','CIV','Côte dIvoire','Africa'), + ('Croatia','HR','HRV','Hrvatska','Europe'), + ('Cuba','CU','CUB','Cuba','North America'), + ('Cyprus','CY','CYP','Cyprus','Asia'), + ('Czech Republic','CZ','CZE','Czech','Europe'), + ('Denmark','DK','DNK','Danmark','Europe'), + ('Djibouti','DJ','DJI','Djibouti/Jibuti','Africa'), + ('Dominica','DM','DMA','Dominica','North America'), + ('Dominican Republic','DO','DOM','Republica Dominicana','North America'), + ('Ecuador','EC','ECU','Ecuador','South America'), + ('Egypt','EG','EGY','Misr','Africa'), + ('El Salvador','SV','SLV','El Salvador','North America'), + ('Equatorial Guinea','GQ','GNQ','Guinea Ecuatorial','Africa'), + 
('Eritrea','ER','ERI','Ertra','Africa'), + ('Estonia','EE','EST','Eesti','Europe'), + ('Ethiopia','ET','ETH','Yeityopiya','Africa'), + ('Falkland Islands (Malvinas)','FK','FLK','Falkland Islands','South America'), + ('Faroe Islands','FO','FRO','Faroe Islands','Europe'), + ('Fiji','FJ','FJI','Fiji Islands','Oceania'), + ('Finland','FI','FIN','Suomi','Europe'), + ('France','FR','FRA','France','Europe'), + ('French Guiana','GF','GUF','Guyane francaise','South America'), + ('French Polynesia','PF','PYF','Polynésie française','Oceania'), + ('French Southern Territories','TF','ATF','Terres australes françaises','Antarctica'), + ('Gabon','GA','GAB','Le Gabon','Africa'), + ('Gambia','GM','GMB','The Gambia','Africa'), + ('Georgia','GE','GEO','Sakartvelo','Asia'), + ('Germany','DE','DEU','Deutschland','Europe'), + ('Ghana','GH','GHA','Ghana','Africa'), + ('Gibraltar','GI','GIB','Gibraltar','Europe'), + ('Greece','GR','GRC','Greece','Europe'), + ('Greenland','GL','GRL','Kalaallit Nunaat','North America'), + ('Grenada','GD','GRD','Grenada','North America'), + ('Guadeloupe','GP','GLP','Guadeloupe','North America'), + ('Guam','GU','GUM','Guam','Oceania'), + ('Guatemala','GT','GTM','Guatemala','North America'), + ('Guinea','GN','GIN','Guinea','Africa'), + ('Guinea-Bissau','GW','GNB','Guinea-Bissau','Africa'), + ('Guyana','GY','GUY','Guyana','South America'), + ('Haiti','HT','HTI','Haiti/Dayti','North America'), + ('Heard Island and Mcdonald Islands','HM','HMD','Heard and McDonald Islands','Antarctica'), + ('Holy See (Vatican City State)','VA','VAT','Santa Sede/Città del Vaticano','Europe'), + ('Honduras','HN','HND','Honduras','North America'), + ('Hong Kong','HK','HKG','Xianggang/Hong Kong','Asia'), + ('Hungary','HU','HUN','Hungary','Europe'), + ('Iceland','IS','ISL','Iceland','Europe'), + ('India','IN','IND','Bharat/India','Asia'), + ('Indonesia','ID','IDN','Indonesia','Asia'), + ('Iran, Islamic Republic of','IR','IRN','Iran','Asia'), + ('Iraq','IQ','IRQ','Al-Irāq','Asia'), + 
('Ireland','IE','IRL','Ireland','Europe'), + ('Israel','IL','ISR','Yisrael','Asia'), + ('Italy','IT','ITA','Italia','Europe'), + ('Jamaica','JM','JAM','Jamaica','North America'), + ('Japan','JP','JPN','Nihon/Nippon','Asia'), + ('Jordan','JO','JOR','Al-Urdunn','Asia'), + ('Kazakhstan','KZ','KAZ','Qazaqstan','Asia'), + ('Kenya','KE','KEN','Kenya','Africa'), + ('Kiribati','KI','KIR','Kiribati','Oceania'), + ('Korea, Democratic People''s Republic of','KP','PRK','Choson Minjujuui Inmin Konghwaguk (Bukhan)','Asia'), + ('Korea, Republic of','KR','KOR','Taehan-minguk (Namhan)','Asia'), + ('Kuwait','KW','KWT','Al-Kuwayt','Asia'), + ('Kyrgyzstan','KG','KGZ','Kyrgyzstan','Asia'), + ('Lao People''s Democratic Republic','LA','LAO','Lao','Asia'), + ('Latvia','LV','LVA','Latvija','Europe'), + ('Lebanon','LB','LBN','Lubnan','Asia'), + ('Lesotho','LS','LSO','Lesotho','Africa'), + ('Liberia','LR','LBR','Liberia','Africa'), + ('Libya','LY','LBY','Libiya','Africa'), + ('Liechtenstein','LI','LIE','Liechtenstein','Europe'), + ('Lithuania','LT','LTU','Lietuva','Europe'), + ('Luxembourg','LU','LUX','Luxembourg','Europe'), + ('Macao','MO','MAC','Macau/Aomen','Asia'), + ('Macedonia, the Former Yugoslav Republic of','MK','MKD','Makedonija','Europe'), + ('Madagascar','MG','MDG','Madagasikara/Madagascar','Africa'), + ('Malawi','MW','MWI','Malawi','Africa'), + ('Malaysia','MY','MYS','Malaysia','Asia'), + ('Maldives','MV','MDV','Dhivehi Raajje/Maldives','Asia'), + ('Mali','ML','MLI','Mali','Africa'), + ('Malta','MT','MLT','Malta','Europe'), + ('Marshall Islands','MH','MHL','Marshall Islands/Majol','Oceania'), + ('Martinique','MQ','MTQ','Martinique','North America'), + ('Mauritania','MR','MRT','Muritaniya/Mauritanie','Africa'), + ('Mauritius','MU','MUS','Mauritius','Africa'), + ('Mayotte','YT','MYT','Mayotte','Africa'), + ('Mexico','MX','MEX','Mexico','North America'), + ('Micronesia, Federated States of','FM','FSM','Micronesia','Oceania'), + ('Moldova, Republic 
of','MD','MDA','Moldova','Europe'), + ('Monaco','MC','MCO','Monaco','Europe'), + ('Mongolia','MN','MNG','Mongol Uls','Asia'), + ('Albania','AL','ALB','Republika e Shqipërisë','Europe'), + ('Montserrat','MS','MSR','Montserrat','North America'), + ('Morocco','MA','MAR','Al-Maghrib','Africa'), + ('Mozambique','MZ','MOZ','Mozambique','Africa'), + ('Myanmar','MM','MMR','Myanma Pye','Asia'), + ('Namibia','NA','NAM','Namibia','Africa'), + ('Nauru','NR','NRU','Naoero/Nauru','Oceania'), + ('Nepal','NP','NPL','Nepal','Asia'), + ('Netherlands','NL','NLD','Nederland','Europe'), + ('New Caledonia','NC','NCL','Nouvelle-Calédonie','Oceania'), + ('New Zealand','NZ','NZL','New Zealand/Aotearoa','Oceania'), + ('Nicaragua','NI','NIC','Nicaragua','North America'), + ('Niger','NE','NER','Niger','Africa'), + ('Nigeria','NG','NGA','Nigeria','Africa'), + ('Niue','NU','NIU','Niue','Oceania'), + ('Norfolk Island','NF','NFK','Norfolk Island','Oceania'), + ('Northern Mariana Islands','MP','MNP','Northern Mariana Islands','Oceania'), + ('Norway','NO','NOR','Norge','Europe'), + ('Oman','OM','OMN','Oman','Asia'), + ('Pakistan','PK','PAK','Pakistan','Asia'), + ('Palau','PW','PLW','Belau/Palau','Oceania'), + ('Palestine, State of','PS','PSE','Filastin','Asia'), + ('Panama','PA','PAN','República de Panamá','North America'), + ('Papua New Guinea','PG','PNG','Papua New Guinea/Papua Niugini','Oceania'), + ('Paraguay','PY','PRY','Paraguay','South America'), + ('Peru','PE','PER','Perú/Piruw','South America'), + ('Philippines','PH','PHL','Pilipinas','Asia'), + ('Pitcairn','PN','PCN','Pitcairn','Oceania'), + ('Poland','PL','POL','Polska','Europe'), + ('Portugal','PT','PRT','Portugal','Europe'), + ('Puerto Rico','PR','PRI','Puerto Rico','North America'), + ('Qatar','QA','QAT','Qatar','Asia'), + ('Reunion','RE','REU','Reunion','Africa'), + ('Romania','RO','ROM','Romania','Europe'), + ('Russian Federation','RU','RUS','Rossija','Europe'), + ('Rwanda','RW','RWA','Rwanda/Urwanda','Africa'), + ('Saint Helena, 
Ascension and Tristan da Cunha','SH','SHN','Saint Helena','Africa'), + ('Saint Kitts and Nevis','KN','KNA','Saint Kitts and Nevis','North America'), + ('Saint Lucia','LC','LCA','Saint Lucia','North America'), + ('Saint Pierre and Miquelon','PM','SPM','Saint-Pierre-et-Miquelon','North America'), + ('Saint Vincent and the Grenadines','VC','VCT','Saint Vincent and the Grenadines','North America'), + ('Samoa','WS','WSM','Samoa','Oceania'), + ('San Marino','SM','SMR','San Marino','Europe'), + ('Sao Tome and Principe','ST','STP','São Tomé e Príncipe','Africa'), + ('Saudi Arabia','SA','SAU','Al-Mamlaka al-Arabiya as-Saudiya','Asia'), + ('Senegal','SN','SEN','Sénégal/Sounougal','Africa'), + ('Seychelles','SC','SYC','Sesel/Seychelles','Africa'), + ('Sierra Leone','SL','SLE','Sierra Leone','Africa'), + ('Singapore','SG','SGP','Singapore/Singapura/Xinjiapo/Singapur','Asia'), + ('Slovakia','SK','SVK','Slovensko','Europe'), + ('Slovenia','SI','SVN','Slovenija','Europe'), + ('Solomon Islands','SB','SLB','Solomon Islands','Oceania'), + ('Somalia','SO','SOM','Soomaaliya','Africa'), + ('South Africa','ZA','ZAF','South Africa','Africa'), + ('South Georgia and the South Sandwich Islands','GS','SGS','South Georgia and the South Sandwich Islands','Antarctica'), + ('Spain','ES','ESP','España','Europe'), + ('Sri Lanka','LK','LKA','Sri Lanka/Ilankai','Asia'), + ('Sudan','SD','SDN','As-Sudan','Africa'), + ('Suriname','SR','SUR','Suriname','South America'), + ('Svalbard and Jan Mayen','SJ','SJM','Svalbard og Jan Mayen','Europe'), + ('Swaziland','SZ','SWZ','kaNgwane','Africa'), + ('Sweden','SE','SWE','Sverige','Europe'), + ('Switzerland','CH','CHE','Schweiz/Suisse/Svizzera/Svizra','Europe'), + ('Syrian Arab Republic','SY','SYR','Suriya','Asia'), + ('Taiwan (Province of China)','TW','TWN','Tai-wan','Asia'), + ('Tajikistan','TJ','TJK','Tajikistan','Asia'), + ('Tanzania, United Republic of','TZ','TZA','Tanzania','Africa'), + ('Thailand','TH','THA','Prathet Thai','Asia'), + 
('Togo','TG','TGO','Togo','Africa'), + ('Tokelau','TK','TKL','Tokelau','Oceania'), + ('Tonga','TO','TON','Tonga','Oceania'), + ('Trinidad and Tobago','TT','TTO','Trinidad and Tobago','North America'), + ('Tunisia','TN','TUN','Tunis/Tunisie','Africa'), + ('Turkey','TR','TUR','Türkiye','Asia'), + ('Turkmenistan','TM','TKM','Türkmenistan','Asia'), + ('Turks and Caicos Islands','TC','TCA','The Turks and Caicos Islands','North America'), + ('Tuvalu','TV','TUV','Tuvalu','Oceania'), + ('Uganda','UG','UGA','Uganda','Africa'), + ('Ukraine','UA','UKR','Ukrajina','Europe'), + ('United Arab Emirates','AE','ARE','Al-Amirat al-Arabiya al-Muttahida','Asia'), + ('United Kingdom','GB','GBR','United Kingdom','Europe'), + ('United States','US','USA','United States','North America'), + ('United States Minor Outlying Islands','UM','UMI','United States Minor Outlying Islands','Oceania'), + ('Uruguay','UY','URY','Uruguay','South America'), + ('Uzbekistan','UZ','UZB','Uzbekiston','Asia'), + ('Vanuatu','VU','VUT','Vanuatu','Oceania'), + ('Venezuela','VE','VEN','Venezuela','South America'), + ('Viet Nam','VN','VNM','Viet Nam','Asia'), + ('Virgin Islands (British)','VG','VGB','British Virgin Islands','North America'), + ('Virgin Islands (U.S.)','VI','VIR','Virgin Islands of the United States','North America'), + ('Wallis and Futuna','WF','WLF','Wallis-et-Futuna','Oceania'), + ('Western Sahara','EH','ESH','As-Sahrawiya','Africa'), + ('Yemen','YE','YEM','Al-Yaman','Asia'), + ('Zambia','ZM','ZMB','Zambia','Africa'), + ('Zimbabwe','ZW','ZWE','Zimbabwe','Africa'), + ('Afghanistan','AF','AFG','Afganistan/Afqanestan','Asia'), + ('Algeria','DZ','DZA','Al-Jazair/Algerie','Africa'); + \ No newline at end of file diff --git a/tests/pg_upgrade/tests/99-fixtures.sql b/tests/pg_upgrade/tests/99-fixtures.sql new file mode 100644 index 0000000..2b93d45 --- /dev/null +++ b/tests/pg_upgrade/tests/99-fixtures.sql @@ -0,0 +1,12 @@ +-- enable JIT to ensure the upgrade process disables it +alter system set jit = 
on; +alter system set password_encryption = 'md5'; +select pg_reload_conf(); + +-- create materialized view +create materialized view public.european_countries as + select * from public.countries where continent = 'Europe' +with no data; +refresh materialized view public.european_countries; + +select count(*) from public.european_countries; From 326b0882ceb1d08c7aebc2fcfaf105ebe2d4711d Mon Sep 17 00:00:00 2001 From: Ekjot Singh Date: Mon, 28 Jul 2025 01:37:08 +0530 Subject: [PATCH 8/9] feat: 15.8.1.123 --- .envrc.recommended | 3 + .github/CODEOWNERS | 4 - .github/workflows/ami-release-nix.yml | 141 - .github/workflows/build-ccache.yml | 86 - .github/workflows/ci.yml | 21 - .github/workflows/dockerhub-release-15-6.yml | 104 - .github/workflows/dockerhub-release-aio.yml | 157 - .../workflows/dockerhub-release-orioledb.yml | 86 - .github/workflows/dockerhub-release.yml | 102 - .github/workflows/nix-build.yml | 82 - .github/workflows/package-plv8.yml | 78 - ...ublish-nix-pgupgrade-bin-flake-version.yml | 101 - .github/workflows/test-pg-upgrade.yml | 133 - .github/workflows/test.yml | 117 - .gitignore | 8 + Dockerfile | 998 --- Dockerfile-156 => Dockerfile-15 | 38 +- Dockerfile-17 | 232 + Dockerfile-kubernetes | 12 + Dockerfile-orioledb-17 | 238 + Makefile | 16 + README.md | 176 +- amazon-arm64-nix.pkr.hcl | 11 +- amazon-arm64.pkr.hcl | 278 - ansible/files/admin_api_scripts/grow_fs.sh | 25 +- .../pg_upgrade_scripts/common.sh | 10 + .../pg_upgrade_scripts/complete.sh | 140 +- .../pg_upgrade_scripts/initiate.sh | 64 +- ansible/files/adminapi.service.j2 | 12 + ansible/files/adminapi.sudoers.conf | 1 + ansible/files/envoy_config/lds.tealbase.yaml | 396 ++ ansible/files/envoy_config/lds.yaml | 4 + ansible/files/gotrue-optimizations.service.j2 | 1 + ansible/files/gotrue.service.j2 | 5 + ansible/files/permission_check.py | 335 +- .../pgbouncer_config/pgbouncer.service.j2 | 2 + ansible/files/postgres_exporter.service.j2 | 3 +- ansible/files/postgres_prestart.sh.j2 | 44 +- 
.../postgresql_config/postgresql.conf.j2 | 4 +- .../postgresql_config/postgresql.service.j2 | 5 +- .../files/postgresql_config/supautils.conf.j2 | 13 +- .../pg_repack/after-create.sql | 4 + .../pgmq/after-create.sql | 156 +- .../pgsodium/after-create.sql | 23 + .../pgsodium/before-create.sql | 9 + .../supabase_vault/after-create.sql | 8 + ansible/files/sodium_extension.sql | 6 - .../tealbase-admin-agent.sudoers.conf | 2 + .../tealbase-admin-agent_salt.service | 19 + .../tealbase-admin-agent_salt.timer.j2 | 13 + .../systemd-networkd-check-and-fix.service | 1 + ansible/manifest-playbook.yml | 18 + ansible/playbook.yml | 20 +- ...{fix_ipv6_ndisc.yml => fix-ipv6-ndisc.yml} | 6 +- ansible/tasks/internal/admin-api.yml | 2 +- .../tasks/internal/tealbase-admin-agent.yml | 87 + .../tasks/postgres-extensions/01-postgis.yml | 102 - .../postgres-extensions/02-pgrouting.yml | 52 - .../tasks/postgres-extensions/03-pgtap.yml | 25 - .../tasks/postgres-extensions/04-pg_cron.yml | 30 - .../tasks/postgres-extensions/05-pgaudit.yml | 43 - .../tasks/postgres-extensions/06-pgjwt.yml | 17 - .../postgres-extensions/07-pgsql-http.yml | 43 - .../postgres-extensions/08-plpgsql_check.yml | 38 - .../postgres-extensions/09-pg-safeupdate.yml | 30 - .../postgres-extensions/10-timescaledb.yml | 36 - .../tasks/postgres-extensions/11-wal2json.yml | 17 - .../tasks/postgres-extensions/12-pljava.yml | 84 - ansible/tasks/postgres-extensions/13-plv8.yml | 73 - .../postgres-extensions/14-pg_plan_filter.yml | 23 - .../tasks/postgres-extensions/15-pg_net.yml | 37 - ansible/tasks/postgres-extensions/16-rum.yml | 34 - .../postgres-extensions/17-pg_hashids.yml | 22 - .../tasks/postgres-extensions/18-pgsodium.yml | 80 - .../postgres-extensions/19-pg_graphql.yml | 3 - .../20-pg_stat_monitor.yml | 23 - .../postgres-extensions/22-pg_jsonschema.yml | 3 - .../tasks/postgres-extensions/23-vault.yml | 31 - .../tasks/postgres-extensions/24-pgroonga.yml | 85 - .../tasks/postgres-extensions/25-wrappers.yml | 3 - 
.../tasks/postgres-extensions/26-hypopg.yml | 17 - .../postgres-extensions/27-pg_repack.yml | 38 - .../tasks/postgres-extensions/28-pgvector.yml | 23 - .../tasks/postgres-extensions/29-pg_tle.yml | 12 - .../99-finish_async_tasks.yml | 19 - ansible/tasks/setup-extensions.yml | 91 - ansible/tasks/setup-fail2ban.yml | 9 +- ansible/tasks/setup-gotrue.yml | 7 + ansible/tasks/setup-postgres.yml | 85 +- ansible/tasks/setup-postgrest.yml | 31 +- ansible/tasks/setup-system.yml | 14 + ansible/tasks/setup-tealbase-internal.yml | 20 +- ansible/tasks/setup-wal-g.yml | 111 +- ansible/tasks/stage2-setup-postgres.yml | 189 +- ansible/tasks/test-image.yml | 58 +- ansible/vars.yml | 150 +- aogithub/CODEOWNERS | 4 + {.github => aogithub}/FUNDING.yml | 0 .../PULL_REQUEST_TEMPLATE/default.md | 0 .../extension_upgrade.md | 0 aogithub/actions/shared-checkout/action.yml | 12 + .../pull_request_template.md | 0 aogithub/workflows/ami-release-nix-single.yml | 156 + aogithub/workflows/ami-release-nix.yml | 177 + .../workflows/check-shellscripts.yml | 6 +- aogithub/workflows/ci.yml | 50 + .../workflows/dockerhub-release-matrix.yml | 253 + aogithub/workflows/manual-docker-release.yml | 262 + .../workflows/mirror-postgrest.yml | 8 +- {.github => aogithub}/workflows/mirror.yml | 3 + aogithub/workflows/nix-build.yml | 124 + .../workflows/publish-migrations-prod.yml | 41 + .../workflows/publish-migrations-staging.yml | 20 +- ...ublish-nix-pgupgrade-bin-flake-version.yml | 130 + .../publish-nix-pgupgrade-scripts.yml | 55 +- aogithub/workflows/qemu-image-build.yml | 155 + aogithub/workflows/test.yml | 79 + .../workflows/testinfra-ami-build.yml | 71 +- common-nix.vars.pkr.hcl | 1 - common.vars.pkr.hcl | 1 - digitalOcean.json | 45 - docker/Dockerfile | 4 +- docker/all-in-one/Dockerfile | 311 - docker/all-in-one/README.md | 59 - docker/all-in-one/configure-shim.sh | 16 - docker/all-in-one/entrypoint.sh | 366 - docker/all-in-one/etc/adminapi/adminapi.yaml | 76 - .../etc/fail2ban/filter.d/pgbouncer.conf | 
2 - .../etc/fail2ban/filter.d/postgresql.conf | 8 - .../all-in-one/etc/fail2ban/jail.d/jail.local | 4 - .../etc/fail2ban/jail.d/pgbouncer.conf | 7 - .../etc/fail2ban/jail.d/postgresql.conf | 8 - .../all-in-one/etc/fail2ban/jail.d/sshd.local | 3 - docker/all-in-one/etc/gotrue.env | 9 - docker/all-in-one/etc/kong/kong.conf | 37 - docker/all-in-one/etc/kong/kong.yml | 88 - .../etc/logrotate.d/postgresql.conf | 11 - docker/all-in-one/etc/logrotate.d/walg.conf | 9 - .../generated-optimizations.ini | 0 .../etc/pgbouncer-custom/ssl-config.ini | 4 - docker/all-in-one/etc/pgbouncer/pgbouncer.ini | 363 - docker/all-in-one/etc/pgbouncer/userlist.txt | 0 .../postgresql-custom/custom-overrides.conf | 0 .../generated-optimizations.conf | 0 .../postgresql-platform-defaults.conf | 9 - docker/all-in-one/etc/postgresql.schema.sql | 16 - docker/all-in-one/etc/postgresql/logging.conf | 33 - docker/all-in-one/etc/postgresql/pg_hba.conf | 94 - docker/all-in-one/etc/postgrest/base.conf | 7 - docker/all-in-one/etc/postgrest/bootstrap.sh | 8 - .../all-in-one/etc/postgrest/generated.conf | 0 docker/all-in-one/etc/salt/minion | 71 - docker/all-in-one/etc/sudoers.d/adminapi | 27 - .../etc/supa-shutdown/shutdown.conf | 1 - .../supervisor/base-services/adminapi.conf | 10 - .../supervisor/base-services/logrotate.conf | 11 - .../base-services/lsn-checkpoint-push.conf | 10 - .../base-services/pg_egress_collect.conf | 10 - .../supervisor/base-services/postgresql.conf | 13 - .../base-services/supa-shutdown.conf | 11 - .../etc/supervisor/services/envoy.conf | 10 - .../etc/supervisor/services/exporter.conf | 11 - .../etc/supervisor/services/fail2ban.conf | 9 - .../etc/supervisor/services/gotrue.conf | 10 - .../etc/supervisor/services/group.conf | 3 - .../etc/supervisor/services/kong.conf | 11 - .../etc/supervisor/services/pgbouncer.conf | 10 - .../etc/supervisor/services/postgrest.conf | 10 - .../etc/supervisor/supervisord.conf | 170 - .../all-in-one/etc/tmpfiles.d/pgbouncer.conf | 2 - 
docker/all-in-one/etc/vector/vector.yaml | 306 - docker/all-in-one/healthcheck.sh | 46 - docker/all-in-one/init/configure-admin-mgr.sh | 8 - docker/all-in-one/init/configure-adminapi.sh | 56 - .../all-in-one/init/configure-autoshutdown.sh | 21 - docker/all-in-one/init/configure-envoy.sh | 53 - docker/all-in-one/init/configure-exporter.sh | 5 - docker/all-in-one/init/configure-fail2ban.sh | 6 - docker/all-in-one/init/configure-gotrue.sh | 40 - docker/all-in-one/init/configure-kong.sh | 48 - .../init/configure-pg_egress_collect.sh | 14 - docker/all-in-one/init/configure-pgbouncer.sh | 46 - docker/all-in-one/init/configure-postgrest.sh | 41 - docker/all-in-one/init/configure-vector.sh | 56 - docker/all-in-one/init/start-kong.sh | 7 - .../pg_egress_collect/pg_egress_collect.pl | 126 - .../opt/postgres_exporter/queries.yml | 345 - docker/all-in-one/postgres-entrypoint.sh | 358 - docker/all-in-one/run-logrotate.sh | 8 - docker/all-in-one/shutdown.sh | 96 - docker/nix/build_nix.sh | 12 +- docker/orioledb/Dockerfile | 1059 --- docker/orioledb/entrypoint.sh | 36 - ebssurrogate/files/sources-arm64.cfg | 20 +- ebssurrogate/files/sources.cfg | 20 +- .../files/unit-tests/unit-test-01.sql | 33 - ebssurrogate/scripts/chroot-bootstrap-nix.sh | 19 +- ebssurrogate/scripts/chroot-bootstrap.sh | 204 - ebssurrogate/scripts/qemu-bootstrap-nix.sh | 160 + .../scripts/surrogate-bootstrap-nix.sh | 10 +- ebssurrogate/scripts/surrogate-bootstrap.sh | 324 - flake.lock | 235 +- flake.nix | 609 +- .../custom-overrides.ini => http/.gitkeep | 0 meta-data | 2 + migrations/Dockerfile.dbmate | 23 + migrations/README.md | 93 +- .../00000000000000-initial-schema.sql | 2 +- migrations/db/init-scripts/README.md | 7 + ...221207154255_create_pgsodium_and_vault.sql | 62 +- ...0529180330_alter_api_roles_for_inherit.sql | 7 +- ...evoke_writes_on_cron_job_from_postgres.sql | 1 + .../20241031003909_create_orioledb.sql | 11 + .../20241215003910_backfill_pgmq_metadata.sql | 79 + 
...isable_log_statement_on_internal_roles.sql | 6 + ...616_move_orioledb_to_extensions_schema.sql | 26 + .../20250218031949_pgsodium_mask_role.sql | 31 + .../20250220051611_pg_net_perms_fix.sql | 64 + .../20250312095419_pgbouncer_ownership.sql | 5 + ...event_triggers_owner_to_tealbase_admin.sql | 10 + ..._subscription_to_postgres_16_and_above.sql | 13 + ...250417190610_update_pgbouncer_get_auth.sql | 24 + ...84701_revoke_admin_roles_from_postgres.sql | 10 + ...nt_with_admin_to_postgres_16_and_above.sql | 13 + ...e_tables_to_postgres_with_grant_option.sql | 6 + ...e_schema_to_postgres_with_grant_option.sql | 4 + ...nly_user_default_transaction_read_only.sql | 4 + migrations/docker-compose.yaml | 4 +- migrations/schema-15.sql | 999 +++ migrations/schema-17.sql | 1000 +++ migrations/schema-orioledb-17.sql | 1014 +++ migrations/schema.sql | 69 +- migrations/tests/database/exists.sql | 1 + migrations/tests/database/privs.sql | 4 - migrations/tests/extensions/01-postgis.sql | 73 +- migrations/tests/extensions/02-pgrouting.sql | 12 +- migrations/tests/extensions/06-pgjwt.sql | 9 +- .../tests/extensions/10-timescaledb.sql | 12 +- migrations/tests/extensions/13-plv8.sql | 12 +- migrations/tests/test.sql | 10 + nix/do-not-use-vendored-libraries.patch | 15 - nix/docker/init.sh.in | 5 - nix/docs/README.md | 8 - nix/docs/adding-new-package.md | 160 - nix/docs/adding-tests.md | 100 - nix/docs/build-postgres.md | 124 - nix/docs/docker.md | 14 - nix/docs/migration-tests.md | 50 - nix/docs/new-major-postgres.md | 34 - nix/docs/nix-overlays.md | 36 - nix/docs/receipt-files.md | 155 - nix/docs/references.md | 31 - nix/docs/start-client-server.md | 93 - nix/docs/start-here.md | 70 - nix/docs/update-extension.md | 17 - nix/docs/use-direnv.md | 102 - ...001-build-Allow-using-V8-from-system.patch | 46 - nix/ext/hypopg.nix | 31 - nix/ext/index_advisor.nix | 30 - nix/ext/mecab-naist-jdic/default.nix | 41 - nix/ext/orioledb.nix | 32 - nix/ext/pg-safeupdate.nix | 29 - 
nix/ext/pg_backtrace.nix | 33 - nix/ext/pg_cron.nix | 32 - nix/ext/pg_graphql.nix | 39 - nix/ext/pg_hashids.nix | 31 - nix/ext/pg_jsonschema.nix | 66 - nix/ext/pg_net.nix | 33 - nix/ext/pg_plan_filter.nix | 30 - nix/ext/pg_regress.nix | 24 - nix/ext/pg_repack.nix | 66 - nix/ext/pg_stat_monitor.nix | 49 - nix/ext/pg_tle.nix | 36 - nix/ext/pgaudit.nix | 44 - nix/ext/pgjwt.nix | 31 - nix/ext/pgmq.nix | 33 - nix/ext/pgroonga.nix | 61 - nix/ext/pgrouting.nix | 31 - nix/ext/pgsodium.nix | 31 - nix/ext/pgsql-http.nix | 31 - nix/ext/pgtap.nix | 33 - nix/ext/pgvector.nix | 31 - nix/ext/pljava.nix | 51 - nix/ext/plpgsql-check.nix | 46 - nix/ext/plv8.nix | 194 - nix/ext/postgis.nix | 87 - nix/ext/rum.nix | 31 - nix/ext/sfcgal/sfcgal.nix | 31 - nix/ext/supautils.nix | 29 - nix/ext/timescaledb-2.9.1.nix | 51 - nix/ext/timescaledb.nix | 43 - nix/ext/use-system-groonga.patch | 21 - nix/ext/vault.nix | 30 - nix/ext/wal2json.nix | 31 - nix/ext/wrappers/default.nix | 121 - nix/fix-cmake-install-path.patch | 21 - nix/init.sh | 20 - nix/overlays/cargo-pgrx-0-11-3.nix | 7 - nix/overlays/gdal-small.nix | 14 - nix/overlays/psql_16-oriole.nix | 21 - nix/postgresql/15.nix | 4 - nix/postgresql/default.nix | 20 - nix/postgresql/generic.nix | 309 - nix/postgresql/patches/less-is-more.patch | 11 - .../patches/locale-binary-path.patch | 11 - .../patches/paths-for-split-outputs.patch | 11 - .../paths-with-postgresql-suffix.patch | 41 - .../patches/relative-to-symlinks-16+.patch | 13 - .../patches/relative-to-symlinks.patch | 13 - .../patches/socketdir-in-run-13+.patch | 11 - nix/postgresql/patches/socketdir-in-run.patch | 11 - .../specify_pkglibdir_at_runtime.patch | 28 - nix/tealbase-groonga.nix | 75 - .../expected/extensions_sql_interface.out | 6312 ----------------- nix/tests/expected/hypopg.out | 14 - nix/tests/expected/index_advisor.out | 16 - nix/tests/expected/pg-safeupdate.out | 12 - nix/tests/expected/pg_graphql.out | 259 - nix/tests/expected/pg_hashids.out | 36 - 
nix/tests/expected/pg_jsonschema.out | 73 - nix/tests/expected/pg_net.out | 11 - nix/tests/expected/pg_plan_filter.out | 16 - nix/tests/expected/pg_stat_monitor.out | 10 - nix/tests/expected/pg_tle.out | 91 - nix/tests/expected/pgaudit.out | 24 - nix/tests/expected/pgjwt.out | 22 - nix/tests/expected/pgmq.out | 141 - nix/tests/expected/pgroonga.out | 76 - nix/tests/expected/pgrouting.out | 31 - nix/tests/expected/pgsodium.out | 9 - nix/tests/expected/pgtap.out | 21 - nix/tests/expected/pgvector.out | 90 - nix/tests/expected/plpgsql-check.out | 35 - nix/tests/expected/plv8.out | 17 - nix/tests/expected/postgis.out | 59 - nix/tests/expected/rum.out | 38 - nix/tests/expected/timescale.out | 47 - nix/tests/expected/vault.out | 42 - nix/tests/expected/wal2json.out | 42 - nix/tests/migrations/data.sql | 21 - nix/tests/postgresql.conf.in | 800 --- nix/tests/prime.sql | 98 - nix/tests/smoke/0000-hello-world.sql | 10 - nix/tests/smoke/0001-pg_graphql.sql | 59 - nix/tests/smoke/0002-supautils.sql | 17 - nix/tests/smoke/0003-pgsodium-vault.sql | 40 - nix/tests/smoke/0004-index_advisor.sql | 19 - nix/tests/smoke/0005-test_pgroonga_mecab.sql | 36 - nix/tests/sql/extensions_sql_interface.sql | 101 - nix/tests/sql/hypopg.sql | 13 - nix/tests/sql/index_advisor.sql | 13 - nix/tests/sql/pg-safeupdate.sql | 15 - nix/tests/sql/pg_graphql.sql | 219 - nix/tests/sql/pg_hashids.sql | 6 - nix/tests/sql/pg_jsonschema.sql | 68 - nix/tests/sql/pg_net.sql | 7 - nix/tests/sql/pg_plan_filter.sql | 22 - nix/tests/sql/pg_stat_monitor.sql | 6 - nix/tests/sql/pg_tle.sql | 70 - nix/tests/sql/pgaudit.sql | 23 - nix/tests/sql/pgjwt.sql | 13 - nix/tests/sql/pgmq.sql | 90 - nix/tests/sql/pgroonga.sql | 48 - nix/tests/sql/pgrouting.sql | 27 - nix/tests/sql/pgsodium.sql | 4 - nix/tests/sql/pgtap.sql | 11 - nix/tests/sql/pgvector.sql | 72 - nix/tests/sql/plpgsql-check.sql | 26 - nix/tests/sql/plv8.sql | 14 - nix/tests/sql/postgis.sql | 52 - nix/tests/sql/rum.sql | 37 - nix/tests/sql/timescale.sql | 33 - 
nix/tests/sql/vault.sql | 30 - nix/tests/sql/wal2json.sql | 32 - nix/tests/util/pgsodium_getkey.sh | 10 - nix/tests/util/pgsodium_getkey_arb.sh | 1 - nix/tools/README.md | 2 - nix/tools/migrate-tool.sh.in | 123 - nix/tools/postgresql_schema.sql | 11 - nix/tools/run-client.sh.in | 54 - nix/tools/run-replica.sh.in | 43 - nix/tools/run-server.sh.in | 65 - nix/tools/sync-exts-versions.sh.in | 282 - qemu-arm64-nix.pkr.hcl | 137 + qemu_artifact.md | 52 + scripts/90-cleanup-qemu.sh | 62 + scripts/90-cleanup.sh | 6 +- scripts/99-img_check.sh | 2 +- scripts/nix-provision.sh | 22 +- stage2-nix-psql.pkr.hcl | 23 +- testinfra/README.md | 4 +- testinfra/test_all_in_one.py | 135 - testinfra/test_ami.py | 439 -- testinfra/test_ami_nix.py | 449 +- tests/pg_upgrade/debug.sh | 2 +- user-data-cloudimg | 16 + 396 files changed, 8482 insertions(+), 24781 deletions(-) create mode 100644 .envrc.recommended delete mode 100644 .github/CODEOWNERS delete mode 100644 .github/workflows/ami-release-nix.yml delete mode 100644 .github/workflows/build-ccache.yml delete mode 100644 .github/workflows/ci.yml delete mode 100644 .github/workflows/dockerhub-release-15-6.yml delete mode 100644 .github/workflows/dockerhub-release-aio.yml delete mode 100644 .github/workflows/dockerhub-release-orioledb.yml delete mode 100644 .github/workflows/dockerhub-release.yml delete mode 100644 .github/workflows/nix-build.yml delete mode 100644 .github/workflows/package-plv8.yml delete mode 100644 .github/workflows/publish-nix-pgupgrade-bin-flake-version.yml delete mode 100644 .github/workflows/test-pg-upgrade.yml delete mode 100644 .github/workflows/test.yml delete mode 100644 Dockerfile rename Dockerfile-156 => Dockerfile-15 (88%) create mode 100644 Dockerfile-17 create mode 100644 Dockerfile-kubernetes create mode 100644 Dockerfile-orioledb-17 create mode 100644 Makefile delete mode 100644 amazon-arm64.pkr.hcl create mode 100644 ansible/files/envoy_config/lds.tealbase.yaml create mode 100644 
ansible/files/postgresql_extension_custom_scripts/pg_repack/after-create.sql create mode 100644 ansible/files/postgresql_extension_custom_scripts/pgsodium/before-create.sql create mode 100644 ansible/files/postgresql_extension_custom_scripts/supabase_vault/after-create.sql delete mode 100644 ansible/files/sodium_extension.sql create mode 100644 ansible/files/supabase_admin_agent_config/tealbase-admin-agent.sudoers.conf create mode 100644 ansible/files/supabase_admin_agent_config/tealbase-admin-agent_salt.service create mode 100644 ansible/files/supabase_admin_agent_config/tealbase-admin-agent_salt.timer.j2 rename ansible/tasks/{fix_ipv6_ndisc.yml => fix-ipv6-ndisc.yml} (87%) create mode 100644 ansible/tasks/internal/tealbase-admin-agent.yml delete mode 100644 ansible/tasks/postgres-extensions/01-postgis.yml delete mode 100644 ansible/tasks/postgres-extensions/02-pgrouting.yml delete mode 100644 ansible/tasks/postgres-extensions/03-pgtap.yml delete mode 100644 ansible/tasks/postgres-extensions/04-pg_cron.yml delete mode 100644 ansible/tasks/postgres-extensions/05-pgaudit.yml delete mode 100644 ansible/tasks/postgres-extensions/06-pgjwt.yml delete mode 100644 ansible/tasks/postgres-extensions/07-pgsql-http.yml delete mode 100644 ansible/tasks/postgres-extensions/08-plpgsql_check.yml delete mode 100644 ansible/tasks/postgres-extensions/09-pg-safeupdate.yml delete mode 100644 ansible/tasks/postgres-extensions/10-timescaledb.yml delete mode 100644 ansible/tasks/postgres-extensions/11-wal2json.yml delete mode 100644 ansible/tasks/postgres-extensions/12-pljava.yml delete mode 100644 ansible/tasks/postgres-extensions/13-plv8.yml delete mode 100644 ansible/tasks/postgres-extensions/14-pg_plan_filter.yml delete mode 100644 ansible/tasks/postgres-extensions/15-pg_net.yml delete mode 100644 ansible/tasks/postgres-extensions/16-rum.yml delete mode 100644 ansible/tasks/postgres-extensions/17-pg_hashids.yml delete mode 100644 ansible/tasks/postgres-extensions/18-pgsodium.yml 
delete mode 100644 ansible/tasks/postgres-extensions/19-pg_graphql.yml delete mode 100644 ansible/tasks/postgres-extensions/20-pg_stat_monitor.yml delete mode 100644 ansible/tasks/postgres-extensions/22-pg_jsonschema.yml delete mode 100644 ansible/tasks/postgres-extensions/23-vault.yml delete mode 100644 ansible/tasks/postgres-extensions/24-pgroonga.yml delete mode 100644 ansible/tasks/postgres-extensions/25-wrappers.yml delete mode 100644 ansible/tasks/postgres-extensions/26-hypopg.yml delete mode 100644 ansible/tasks/postgres-extensions/27-pg_repack.yml delete mode 100644 ansible/tasks/postgres-extensions/28-pgvector.yml delete mode 100644 ansible/tasks/postgres-extensions/29-pg_tle.yml delete mode 100644 ansible/tasks/postgres-extensions/99-finish_async_tasks.yml delete mode 100644 ansible/tasks/setup-extensions.yml create mode 100644 aogithub/CODEOWNERS rename {.github => aogithub}/FUNDING.yml (100%) rename {.github => aogithub}/PULL_REQUEST_TEMPLATE/default.md (100%) rename {.github => aogithub}/PULL_REQUEST_TEMPLATE/extension_upgrade.md (100%) create mode 100644 aogithub/actions/shared-checkout/action.yml rename {.github => aogithub}/pull_request_template.md (100%) create mode 100644 aogithub/workflows/ami-release-nix-single.yml create mode 100644 aogithub/workflows/ami-release-nix.yml rename {.github => aogithub}/workflows/check-shellscripts.yml (83%) create mode 100644 aogithub/workflows/ci.yml create mode 100644 aogithub/workflows/dockerhub-release-matrix.yml create mode 100644 aogithub/workflows/manual-docker-release.yml rename {.github => aogithub}/workflows/mirror-postgrest.yml (76%) rename {.github => aogithub}/workflows/mirror.yml (97%) create mode 100644 aogithub/workflows/nix-build.yml create mode 100644 aogithub/workflows/publish-migrations-prod.yml rename .github/workflows/publish-migrations.yml => aogithub/workflows/publish-migrations-staging.yml (60%) create mode 100644 aogithub/workflows/publish-nix-pgupgrade-bin-flake-version.yml rename 
{.github => aogithub}/workflows/publish-nix-pgupgrade-scripts.yml (57%) create mode 100644 aogithub/workflows/qemu-image-build.yml create mode 100644 aogithub/workflows/test.yml rename .github/workflows/testinfra-nix.yml => aogithub/workflows/testinfra-ami-build.yml (55%) delete mode 100644 common-nix.vars.pkr.hcl delete mode 100644 common.vars.pkr.hcl delete mode 100644 digitalOcean.json delete mode 100644 docker/all-in-one/Dockerfile delete mode 100644 docker/all-in-one/README.md delete mode 100755 docker/all-in-one/configure-shim.sh delete mode 100755 docker/all-in-one/entrypoint.sh delete mode 100644 docker/all-in-one/etc/adminapi/adminapi.yaml delete mode 100644 docker/all-in-one/etc/fail2ban/filter.d/pgbouncer.conf delete mode 100644 docker/all-in-one/etc/fail2ban/filter.d/postgresql.conf delete mode 100644 docker/all-in-one/etc/fail2ban/jail.d/jail.local delete mode 100644 docker/all-in-one/etc/fail2ban/jail.d/pgbouncer.conf delete mode 100644 docker/all-in-one/etc/fail2ban/jail.d/postgresql.conf delete mode 100644 docker/all-in-one/etc/fail2ban/jail.d/sshd.local delete mode 100644 docker/all-in-one/etc/gotrue.env delete mode 100644 docker/all-in-one/etc/kong/kong.conf delete mode 100644 docker/all-in-one/etc/kong/kong.yml delete mode 100644 docker/all-in-one/etc/logrotate.d/postgresql.conf delete mode 100644 docker/all-in-one/etc/logrotate.d/walg.conf delete mode 100644 docker/all-in-one/etc/pgbouncer-custom/generated-optimizations.ini delete mode 100644 docker/all-in-one/etc/pgbouncer-custom/ssl-config.ini delete mode 100644 docker/all-in-one/etc/pgbouncer/pgbouncer.ini delete mode 100644 docker/all-in-one/etc/pgbouncer/userlist.txt delete mode 100644 docker/all-in-one/etc/postgresql-custom/custom-overrides.conf delete mode 100644 docker/all-in-one/etc/postgresql-custom/generated-optimizations.conf delete mode 100644 docker/all-in-one/etc/postgresql-custom/postgresql-platform-defaults.conf delete mode 100644 docker/all-in-one/etc/postgresql.schema.sql 
delete mode 100644 docker/all-in-one/etc/postgresql/logging.conf delete mode 100755 docker/all-in-one/etc/postgresql/pg_hba.conf delete mode 100644 docker/all-in-one/etc/postgrest/base.conf delete mode 100755 docker/all-in-one/etc/postgrest/bootstrap.sh delete mode 100644 docker/all-in-one/etc/postgrest/generated.conf delete mode 100644 docker/all-in-one/etc/salt/minion delete mode 100644 docker/all-in-one/etc/sudoers.d/adminapi delete mode 100644 docker/all-in-one/etc/supa-shutdown/shutdown.conf delete mode 100644 docker/all-in-one/etc/supervisor/base-services/adminapi.conf delete mode 100644 docker/all-in-one/etc/supervisor/base-services/logrotate.conf delete mode 100644 docker/all-in-one/etc/supervisor/base-services/lsn-checkpoint-push.conf delete mode 100644 docker/all-in-one/etc/supervisor/base-services/pg_egress_collect.conf delete mode 100644 docker/all-in-one/etc/supervisor/base-services/postgresql.conf delete mode 100644 docker/all-in-one/etc/supervisor/base-services/supa-shutdown.conf delete mode 100644 docker/all-in-one/etc/supervisor/services/envoy.conf delete mode 100644 docker/all-in-one/etc/supervisor/services/exporter.conf delete mode 100644 docker/all-in-one/etc/supervisor/services/fail2ban.conf delete mode 100644 docker/all-in-one/etc/supervisor/services/gotrue.conf delete mode 100644 docker/all-in-one/etc/supervisor/services/group.conf delete mode 100644 docker/all-in-one/etc/supervisor/services/kong.conf delete mode 100644 docker/all-in-one/etc/supervisor/services/pgbouncer.conf delete mode 100644 docker/all-in-one/etc/supervisor/services/postgrest.conf delete mode 100644 docker/all-in-one/etc/supervisor/supervisord.conf delete mode 100644 docker/all-in-one/etc/tmpfiles.d/pgbouncer.conf delete mode 100644 docker/all-in-one/etc/vector/vector.yaml delete mode 100755 docker/all-in-one/healthcheck.sh delete mode 100755 docker/all-in-one/init/configure-admin-mgr.sh delete mode 100755 docker/all-in-one/init/configure-adminapi.sh delete mode 100755 
docker/all-in-one/init/configure-autoshutdown.sh delete mode 100755 docker/all-in-one/init/configure-envoy.sh delete mode 100755 docker/all-in-one/init/configure-exporter.sh delete mode 100755 docker/all-in-one/init/configure-fail2ban.sh delete mode 100755 docker/all-in-one/init/configure-gotrue.sh delete mode 100755 docker/all-in-one/init/configure-kong.sh delete mode 100755 docker/all-in-one/init/configure-pg_egress_collect.sh delete mode 100755 docker/all-in-one/init/configure-pgbouncer.sh delete mode 100755 docker/all-in-one/init/configure-postgrest.sh delete mode 100755 docker/all-in-one/init/configure-vector.sh delete mode 100755 docker/all-in-one/init/start-kong.sh delete mode 100644 docker/all-in-one/opt/pg_egress_collect/pg_egress_collect.pl delete mode 100644 docker/all-in-one/opt/postgres_exporter/queries.yml delete mode 100755 docker/all-in-one/postgres-entrypoint.sh delete mode 100755 docker/all-in-one/run-logrotate.sh delete mode 100755 docker/all-in-one/shutdown.sh mode change 100644 => 100755 docker/nix/build_nix.sh delete mode 100644 docker/orioledb/Dockerfile delete mode 100755 docker/orioledb/entrypoint.sh delete mode 100644 ebssurrogate/files/unit-tests/unit-test-01.sql delete mode 100755 ebssurrogate/scripts/chroot-bootstrap.sh create mode 100755 ebssurrogate/scripts/qemu-bootstrap-nix.sh delete mode 100755 ebssurrogate/scripts/surrogate-bootstrap.sh rename docker/all-in-one/etc/pgbouncer-custom/custom-overrides.ini => http/.gitkeep (100%) create mode 100644 meta-data create mode 100644 migrations/Dockerfile.dbmate create mode 100644 migrations/db/init-scripts/README.md create mode 100644 migrations/db/migrations/20241031003909_create_orioledb.sql create mode 100644 migrations/db/migrations/20241215003910_backfill_pgmq_metadata.sql create mode 100644 migrations/db/migrations/20250205060043_disable_log_statement_on_internal_roles.sql create mode 100644 migrations/db/migrations/20250205144616_move_orioledb_to_extensions_schema.sql create mode 
100644 migrations/db/migrations/20250218031949_pgsodium_mask_role.sql create mode 100644 migrations/db/migrations/20250220051611_pg_net_perms_fix.sql create mode 100644 migrations/db/migrations/20250312095419_pgbouncer_ownership.sql create mode 100644 migrations/db/migrations/20250402065937_alter_internal_event_triggers_owner_to_tealbase_admin.sql create mode 100644 migrations/db/migrations/20250402093753_grant_subscription_to_postgres_16_and_above.sql create mode 100644 migrations/db/migrations/20250417190610_update_pgbouncer_get_auth.sql create mode 100644 migrations/db/migrations/20250421084701_revoke_admin_roles_from_postgres.sql create mode 100644 migrations/db/migrations/20250605172253_grant_with_admin_to_postgres_16_and_above.sql create mode 100644 migrations/db/migrations/20250623125453_tmp_grant_storage_tables_to_postgres_with_grant_option.sql create mode 100644 migrations/db/migrations/20250709135250_grant_storage_schema_to_postgres_with_grant_option.sql create mode 100644 migrations/db/migrations/20250710151649_tealbase_read_only_user_default_transaction_read_only.sql create mode 100644 migrations/schema-15.sql create mode 100644 migrations/schema-17.sql create mode 100644 migrations/schema-orioledb-17.sql delete mode 100644 nix/do-not-use-vendored-libraries.patch delete mode 100644 nix/docker/init.sh.in delete mode 100644 nix/docs/README.md delete mode 100644 nix/docs/adding-new-package.md delete mode 100644 nix/docs/adding-tests.md delete mode 100644 nix/docs/build-postgres.md delete mode 100644 nix/docs/docker.md delete mode 100644 nix/docs/migration-tests.md delete mode 100644 nix/docs/new-major-postgres.md delete mode 100644 nix/docs/nix-overlays.md delete mode 100644 nix/docs/receipt-files.md delete mode 100644 nix/docs/references.md delete mode 100644 nix/docs/start-client-server.md delete mode 100644 nix/docs/start-here.md delete mode 100644 nix/docs/update-extension.md delete mode 100644 nix/docs/use-direnv.md delete mode 100644 
nix/ext/0001-build-Allow-using-V8-from-system.patch delete mode 100644 nix/ext/hypopg.nix delete mode 100644 nix/ext/index_advisor.nix delete mode 100644 nix/ext/mecab-naist-jdic/default.nix delete mode 100644 nix/ext/orioledb.nix delete mode 100644 nix/ext/pg-safeupdate.nix delete mode 100644 nix/ext/pg_backtrace.nix delete mode 100644 nix/ext/pg_cron.nix delete mode 100644 nix/ext/pg_graphql.nix delete mode 100644 nix/ext/pg_hashids.nix delete mode 100644 nix/ext/pg_jsonschema.nix delete mode 100644 nix/ext/pg_net.nix delete mode 100644 nix/ext/pg_plan_filter.nix delete mode 100644 nix/ext/pg_regress.nix delete mode 100644 nix/ext/pg_repack.nix delete mode 100644 nix/ext/pg_stat_monitor.nix delete mode 100644 nix/ext/pg_tle.nix delete mode 100644 nix/ext/pgaudit.nix delete mode 100644 nix/ext/pgjwt.nix delete mode 100644 nix/ext/pgmq.nix delete mode 100644 nix/ext/pgroonga.nix delete mode 100644 nix/ext/pgrouting.nix delete mode 100644 nix/ext/pgsodium.nix delete mode 100644 nix/ext/pgsql-http.nix delete mode 100644 nix/ext/pgtap.nix delete mode 100644 nix/ext/pgvector.nix delete mode 100644 nix/ext/pljava.nix delete mode 100644 nix/ext/plpgsql-check.nix delete mode 100644 nix/ext/plv8.nix delete mode 100644 nix/ext/postgis.nix delete mode 100644 nix/ext/rum.nix delete mode 100644 nix/ext/sfcgal/sfcgal.nix delete mode 100644 nix/ext/supautils.nix delete mode 100644 nix/ext/timescaledb-2.9.1.nix delete mode 100644 nix/ext/timescaledb.nix delete mode 100644 nix/ext/use-system-groonga.patch delete mode 100644 nix/ext/vault.nix delete mode 100644 nix/ext/wal2json.nix delete mode 100644 nix/ext/wrappers/default.nix delete mode 100644 nix/fix-cmake-install-path.patch delete mode 100755 nix/init.sh delete mode 100644 nix/overlays/cargo-pgrx-0-11-3.nix delete mode 100644 nix/overlays/gdal-small.nix delete mode 100644 nix/overlays/psql_16-oriole.nix delete mode 100644 nix/postgresql/15.nix delete mode 100644 nix/postgresql/default.nix delete mode 100644 
nix/postgresql/generic.nix delete mode 100644 nix/postgresql/patches/less-is-more.patch delete mode 100644 nix/postgresql/patches/locale-binary-path.patch delete mode 100644 nix/postgresql/patches/paths-for-split-outputs.patch delete mode 100644 nix/postgresql/patches/paths-with-postgresql-suffix.patch delete mode 100644 nix/postgresql/patches/relative-to-symlinks-16+.patch delete mode 100644 nix/postgresql/patches/relative-to-symlinks.patch delete mode 100644 nix/postgresql/patches/socketdir-in-run-13+.patch delete mode 100644 nix/postgresql/patches/socketdir-in-run.patch delete mode 100644 nix/postgresql/patches/specify_pkglibdir_at_runtime.patch delete mode 100644 nix/tealbase-groonga.nix delete mode 100644 nix/tests/expected/extensions_sql_interface.out delete mode 100644 nix/tests/expected/hypopg.out delete mode 100644 nix/tests/expected/index_advisor.out delete mode 100644 nix/tests/expected/pg-safeupdate.out delete mode 100644 nix/tests/expected/pg_graphql.out delete mode 100644 nix/tests/expected/pg_hashids.out delete mode 100644 nix/tests/expected/pg_jsonschema.out delete mode 100644 nix/tests/expected/pg_net.out delete mode 100644 nix/tests/expected/pg_plan_filter.out delete mode 100644 nix/tests/expected/pg_stat_monitor.out delete mode 100644 nix/tests/expected/pg_tle.out delete mode 100644 nix/tests/expected/pgaudit.out delete mode 100644 nix/tests/expected/pgjwt.out delete mode 100644 nix/tests/expected/pgmq.out delete mode 100644 nix/tests/expected/pgroonga.out delete mode 100644 nix/tests/expected/pgrouting.out delete mode 100644 nix/tests/expected/pgsodium.out delete mode 100644 nix/tests/expected/pgtap.out delete mode 100644 nix/tests/expected/pgvector.out delete mode 100644 nix/tests/expected/plpgsql-check.out delete mode 100644 nix/tests/expected/plv8.out delete mode 100644 nix/tests/expected/postgis.out delete mode 100644 nix/tests/expected/rum.out delete mode 100644 nix/tests/expected/timescale.out delete mode 100644 
nix/tests/expected/vault.out delete mode 100644 nix/tests/expected/wal2json.out delete mode 100644 nix/tests/migrations/data.sql delete mode 100644 nix/tests/postgresql.conf.in delete mode 100644 nix/tests/prime.sql delete mode 100644 nix/tests/smoke/0000-hello-world.sql delete mode 100644 nix/tests/smoke/0001-pg_graphql.sql delete mode 100644 nix/tests/smoke/0002-supautils.sql delete mode 100644 nix/tests/smoke/0003-pgsodium-vault.sql delete mode 100644 nix/tests/smoke/0004-index_advisor.sql delete mode 100644 nix/tests/smoke/0005-test_pgroonga_mecab.sql delete mode 100644 nix/tests/sql/extensions_sql_interface.sql delete mode 100644 nix/tests/sql/hypopg.sql delete mode 100644 nix/tests/sql/index_advisor.sql delete mode 100644 nix/tests/sql/pg-safeupdate.sql delete mode 100644 nix/tests/sql/pg_graphql.sql delete mode 100644 nix/tests/sql/pg_hashids.sql delete mode 100644 nix/tests/sql/pg_jsonschema.sql delete mode 100644 nix/tests/sql/pg_net.sql delete mode 100644 nix/tests/sql/pg_plan_filter.sql delete mode 100644 nix/tests/sql/pg_stat_monitor.sql delete mode 100644 nix/tests/sql/pg_tle.sql delete mode 100644 nix/tests/sql/pgaudit.sql delete mode 100644 nix/tests/sql/pgjwt.sql delete mode 100644 nix/tests/sql/pgmq.sql delete mode 100644 nix/tests/sql/pgroonga.sql delete mode 100644 nix/tests/sql/pgrouting.sql delete mode 100644 nix/tests/sql/pgsodium.sql delete mode 100644 nix/tests/sql/pgtap.sql delete mode 100644 nix/tests/sql/pgvector.sql delete mode 100644 nix/tests/sql/plpgsql-check.sql delete mode 100644 nix/tests/sql/plv8.sql delete mode 100644 nix/tests/sql/postgis.sql delete mode 100644 nix/tests/sql/rum.sql delete mode 100644 nix/tests/sql/timescale.sql delete mode 100644 nix/tests/sql/vault.sql delete mode 100644 nix/tests/sql/wal2json.sql delete mode 100755 nix/tests/util/pgsodium_getkey.sh delete mode 100755 nix/tests/util/pgsodium_getkey_arb.sh delete mode 100644 nix/tools/README.md delete mode 100644 nix/tools/migrate-tool.sh.in delete mode 100644 
nix/tools/postgresql_schema.sql delete mode 100644 nix/tools/run-client.sh.in delete mode 100644 nix/tools/run-replica.sh.in delete mode 100644 nix/tools/run-server.sh.in delete mode 100644 nix/tools/sync-exts-versions.sh.in create mode 100644 qemu-arm64-nix.pkr.hcl create mode 100644 qemu_artifact.md create mode 100644 scripts/90-cleanup-qemu.sh delete mode 100644 testinfra/test_all_in_one.py delete mode 100644 testinfra/test_ami.py create mode 100644 user-data-cloudimg diff --git a/.envrc.recommended b/.envrc.recommended new file mode 100644 index 0000000..a7aaf82 --- /dev/null +++ b/.envrc.recommended @@ -0,0 +1,3 @@ +watch_file nix/devShells.nix + +use flake diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index 0576d01..0000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1,4 +0,0 @@ -* @supabase/backend -migrations/ @supabase/cli @supabase/backend -docker/orioledb @supabase/postgres @supabase/backend -common.vars.pkr.hcl @supabase/postgres @supabase/backend diff --git a/.github/workflows/ami-release-nix.yml b/.github/workflows/ami-release-nix.yml deleted file mode 100644 index 194c8de..0000000 --- a/.github/workflows/ami-release-nix.yml +++ /dev/null @@ -1,141 +0,0 @@ -name: Release AMI Nix - -on: - push: - branches: - - develop - - release/* - paths: - - '.github/workflows/ami-release-nix.yml' - - 'common-nix.vars.pkr.hcl' - workflow_dispatch: - -jobs: - build: - strategy: - matrix: - include: - - runner: arm-runner - arch: arm64 - ubuntu_release: focal - ubuntu_version: 20.04 - mcpu: neoverse-n1 - runs-on: ${{ matrix.runner }} - timeout-minutes: 150 - permissions: - contents: write - packages: write - id-token: write - - steps: - - name: Checkout Repo - uses: actions/checkout@v3 - - - name: Run checks if triggered manually - if: ${{ github.event_name == 'workflow_dispatch' }} - # Update `ci.yaml` too if changing constraints. 
- run: | - SUFFIX=$(sed -E 's/postgres-version = "[0-9\.]+(.*)"/\1/g' common-nix.vars.pkr.hcl) - if [[ -z $SUFFIX ]] ; then - echo "Version must include non-numeric characters if built manually." - exit 1 - fi - - # extensions are build in nix prior to this step - # so we can just use the binaries from the nix store - # for postgres, extensions and wrappers - - - name: Build AMI stage 1 - run: | - packer init amazon-arm64-nix.pkr.hcl - GIT_SHA=${{github.sha}} - packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "ansible_arguments=" amazon-arm64-nix.pkr.hcl - - - name: Build AMI stage 2 - run: | - packer init stage2-nix-psql.pkr.hcl - GIT_SHA=${{github.sha}} - packer build -var "git_sha=${GIT_SHA}" -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" stage2-nix-psql.pkr.hcl - - - name: Grab release version - id: process_release_version - run: | - VERSION=$(sed -e 's/postgres-version = "\(.*\)"/\1/g' common-nix.vars.pkr.hcl) - echo "version=$VERSION" >> "$GITHUB_OUTPUT" - - - name: Create nix flake revision tarball - run: | - GIT_SHA=${{github.sha}} - MAJOR_VERSION=$(echo "${{ steps.process_release_version.outputs.version }}" | cut -d. -f1) - - mkdir -p "/tmp/pg_upgrade_bin/${MAJOR_VERSION}" - echo "$GIT_SHA" >> "/tmp/pg_upgrade_bin/${MAJOR_VERSION}/nix_flake_version" - tar -czf "/tmp/pg_binaries.tar.gz" -C "/tmp/pg_upgrade_bin" . 
- - - name: configure aws credentials - staging - uses: aws-actions/configure-aws-credentials@v4 - with: - role-to-assume: ${{ secrets.DEV_AWS_ROLE }} - aws-region: "us-east-1" - - - name: Upload software manifest to s3 staging - run: | - cd ansible - ansible-playbook -i localhost \ - -e "ami_release_version=${{ steps.process_release_version.outputs.version }}" \ - -e "internal_artifacts_bucket=${{ secrets.ARTIFACTS_BUCKET }}" \ - manifest-playbook.yml - - - name: Upload nix flake revision to s3 staging - run: | - aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz - - #Our self hosted github runner already has permissions to publish images - #but they're limited to only that; - #so if we want s3 access we'll need to config credentials with the below steps - # (which overwrites existing perms) after the ami build - - - name: configure aws credentials - prod - uses: aws-actions/configure-aws-credentials@v4 - with: - role-to-assume: ${{ secrets.PROD_AWS_ROLE }} - aws-region: "us-east-1" - - - name: Upload software manifest to s3 prod - run: | - cd ansible - ansible-playbook -i localhost \ - -e "ami_release_version=${{ steps.process_release_version.outputs.version }}" \ - -e "internal_artifacts_bucket=${{ secrets.PROD_ARTIFACTS_BUCKET }}" \ - manifest-playbook.yml - - - name: Upload nix flake revision to s3 prod - run: | - aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz - - - name: Create release - uses: softprops/action-gh-release@v1 - with: - name: ${{ steps.process_release_version.outputs.version }} - tag_name: ${{ steps.process_release_version.outputs.version }} - target_commitish: ${{github.sha}} - - - name: Slack Notification on Failure - if: ${{ failure() }} - uses: rtCamp/action-slack-notify@v2 - env: - 
SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} - SLACK_USERNAME: 'gha-failures-notifier' - SLACK_COLOR: 'danger' - SLACK_MESSAGE: 'Building Postgres AMI failed' - SLACK_FOOTER: '' - - - name: Cleanup resources on build cancellation - if: ${{ always() }} - run: | - aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -n 1 -I {} aws ec2 terminate-instances --instance-ids {} - - - name: Cleanup resources on build cancellation - if: ${{ cancelled() }} - run: | - aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -n 1 -I {} aws ec2 terminate-instances --instance-ids {} diff --git a/.github/workflows/build-ccache.yml b/.github/workflows/build-ccache.yml deleted file mode 100644 index f296dd6..0000000 --- a/.github/workflows/build-ccache.yml +++ /dev/null @@ -1,86 +0,0 @@ -name: Update ccache - -on: - push: - branches: - - develop - paths: - - ".github/workflows/build-ccache.yml" - - "ansible/vars.yml" - - "Dockerfile" - workflow_dispatch: - -env: - image_tag: public.ecr.aws/supabase/postgres:ccache -permissions: - contents: read - packages: write - id-token: write - -jobs: - settings: - runs-on: ubuntu-latest - outputs: - build_args: ${{ steps.args.outputs.result }} - steps: - - uses: actions/checkout@v3 - - id: args - uses: mikefarah/yq@master - with: - cmd: yq 'to_entries | map(select(.value|type == "!!str")) | map(.key + "=" + .value) | join("\n")' 'ansible/vars.yml' - - build_image: - needs: settings - strategy: - matrix: - include: - - runner: [self-hosted, X64] - arch: amd64 - - runner: arm-runner - arch: arm64 - runs-on: ${{ matrix.runner }} - timeout-minutes: 180 - outputs: - image_digest: ${{ steps.build.outputs.digest }} - steps: - - run: docker context create builders - - uses: docker/setup-buildx-action@v3 - with: - endpoint: builders - 
- name: Configure AWS credentials - prod - uses: aws-actions/configure-aws-credentials@v1 - with: - role-to-assume: ${{ secrets.PROD_AWS_ROLE }} - aws-region: "us-east-1" - - uses: docker/login-action@v2 - with: - registry: public.ecr.aws - - id: build - uses: docker/build-push-action@v5 - with: - push: true - target: buildcache - build-args: | - CACHE_EPOCH=${{ github.event.repository.updated_at }} - ${{ needs.settings.outputs.build_args }} - tags: ${{ env.image_tag }}_${{ matrix.arch }} - platforms: linux/${{ matrix.arch }} - - merge_manifest: - needs: build_image - runs-on: ubuntu-latest - steps: - - uses: docker/setup-buildx-action@v3 - - name: Configure AWS credentials - prod - uses: aws-actions/configure-aws-credentials@v1 - with: - role-to-assume: ${{ secrets.PROD_AWS_ROLE }} - aws-region: "us-east-1" - - uses: docker/login-action@v2 - with: - registry: public.ecr.aws - - name: Merge multi-arch manifests - run: | - docker buildx imagetools create -t ${{ env.image_tag }} \ - ${{ env.image_tag }}_amd64 \ - ${{ env.image_tag }}_arm64 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml deleted file mode 100644 index 5999341..0000000 --- a/.github/workflows/ci.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: Check merge requirements - -on: - pull_request: - -jobs: - check-release-version: - timeout-minutes: 5 - runs-on: ubuntu-latest - steps: - - name: Checkout Repo - uses: actions/checkout@v3 - - - name: Run checks - # Update `ami-release.yaml` too if changing constraints. - run: | - SUFFIX=$(sed -E 's/postgres-version = "[0-9\.]+(.*)"/\1/g' common.vars.pkr.hcl) - if [[ -n $SUFFIX ]] ; then - echo "We no longer allow merging RC versions to develop." 
- exit 1 - fi diff --git a/.github/workflows/dockerhub-release-15-6.yml b/.github/workflows/dockerhub-release-15-6.yml deleted file mode 100644 index 4c7b5b9..0000000 --- a/.github/workflows/dockerhub-release-15-6.yml +++ /dev/null @@ -1,104 +0,0 @@ -name: Release 15.6 on Dockerhub - -on: - push: - branches: - - develop - - release/* - paths: - - ".github/workflows/dockerhub-release-15-6.yml" - - "common-nix.vars*" - workflow_dispatch: - -jobs: - settings: - runs-on: ubuntu-latest - outputs: - docker_version: ${{ steps.settings.outputs.postgres-version }} - image_tag: supabase/postgres:${{ steps.settings.outputs.postgres-version }} - build_args: ${{ steps.args.outputs.result }} - steps: - - uses: actions/checkout@v3 - - id: settings - # Remove spaces and quotes to get the raw version string - run: sed -r 's/(\s|\")+//g' common-nix.vars.pkr.hcl >> $GITHUB_OUTPUT - - id: args - uses: mikefarah/yq@master - with: - cmd: yq 'to_entries | map(select(.value|type == "!!str")) | map(.key + "=" + .value) | join("\n")' 'ansible/vars.yml' - - build_release_image: - needs: [settings] - strategy: - matrix: - include: - - runner: [self-hosted, X64] - arch: amd64 - - runner: arm-runner - arch: arm64 - runs-on: ${{ matrix.runner }} - timeout-minutes: 180 - outputs: - image_digest: ${{ steps.build.outputs.digest }} - steps: - - run: docker context create builders - - uses: docker/setup-buildx-action@v3 - with: - endpoint: builders - - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - id: build - uses: docker/build-push-action@v5 - with: - push: true - build-args: | - ${{ needs.settings.outputs.build_args }} - target: production - tags: ${{ needs.settings.outputs.image_tag }}_${{ matrix.arch }} - platforms: linux/${{ matrix.arch }} - cache-from: type=gha,scope=${{ github.ref_name }}-latest-${{ matrix.arch }} - cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-latest-${{ matrix.arch }} - file: 
"Dockerfile-156" - - name: Slack Notification - if: ${{ failure() }} - uses: rtCamp/action-slack-notify@v2 - env: - SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} - SLACK_USERNAME: "gha-failures-notifier" - SLACK_COLOR: "danger" - SLACK_MESSAGE: "Building Postgres ${{ matrix.arch }} image failed" - SLACK_FOOTER: "" - - merge_manifest: - needs: [settings, build_release_image] - runs-on: ubuntu-latest - steps: - - uses: docker/setup-buildx-action@v3 - - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - name: Merge multi-arch manifests - run: | - docker buildx imagetools create -t ${{ needs.settings.outputs.image_tag }} \ - ${{ needs.settings.outputs.image_tag }}_amd64 \ - ${{ needs.settings.outputs.image_tag }}_arm64 - - name: Slack Notification - if: ${{ failure() }} - uses: rtCamp/action-slack-notify@v2 - env: - SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} - SLACK_USERNAME: "gha-failures-notifier" - SLACK_COLOR: "danger" - SLACK_MESSAGE: "Building Postgres image failed" - SLACK_FOOTER: "" - - publish: - needs: [settings, merge_manifest] - # Call workflow explicitly because events from actions cannot trigger more actions - uses: ./.github/workflows/mirror.yml - with: - version: ${{ needs.settings.outputs.docker_version }} - secrets: inherit diff --git a/.github/workflows/dockerhub-release-aio.yml b/.github/workflows/dockerhub-release-aio.yml deleted file mode 100644 index e5a5a82..0000000 --- a/.github/workflows/dockerhub-release-aio.yml +++ /dev/null @@ -1,157 +0,0 @@ -name: Release AIO image - -on: - push: - branches: - - develop - paths: - - ".github/workflows/dockerhub-release-aio.yml" - - "docker/all-in-one/*" - workflow_run: - workflows: [Release on Dockerhub] - branches: - - develop - types: - - completed - workflow_dispatch: - inputs: - baseDockerVersion: - description: 'Base Docker Version. 
E.g., 15.1.1.27' - required: false - -jobs: - settings: - runs-on: ubuntu-latest - outputs: - base_docker_version: ${{ steps.base_docker.outputs.base-docker-version }} - docker_version: ${{ steps.settings.outputs.postgres-version }} - image_tag: supabase/postgres:aio-${{ steps.settings.outputs.postgres-version }} - fly_image_tag: supabase-postgres-image:aio-${{ steps.settings.outputs.postgres-version }} - build_args: ${{ steps.args.outputs.result }} - steps: - - uses: actions/checkout@v3 - - id: settings - # Remove spaces and quotes to get the raw version string - run: sed -r 's/(\s|\")+//g' common.vars.pkr.hcl >> $GITHUB_OUTPUT - - id: base_docker - run: | - if [[ "${{ inputs.baseDockerVersion }}" != "" ]]; then - echo "base-docker-version=${{ inputs.baseDockerVersion }}" >> $GITHUB_OUTPUT - else - echo "base-docker-version=${{ steps.settings.outputs.postgres-version }}" >> $GITHUB_OUTPUT - fi - - id: args - uses: mikefarah/yq@master - with: - cmd: yq 'to_entries | map(select(.value|type == "!!str")) | map(.key + "=" + .value) | join("\n")' 'ansible/vars.yml' - - build_image: - needs: settings - strategy: - matrix: - include: - - runner: [self-hosted, X64] - arch: amd64 - - runner: arm-runner - arch: arm64 - runs-on: ${{ matrix.runner }} - timeout-minutes: 180 - outputs: - image_digest: ${{ steps.build.outputs.digest }} - steps: - - run: docker context create builders - - uses: docker/setup-buildx-action@v3 - with: - endpoint: builders - - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - id: build - uses: docker/build-push-action@v5 - with: - file: docker/all-in-one/Dockerfile - push: true - build-args: | - postgres_version=${{ needs.settings.outputs.base_docker_version }} - ${{ needs.settings.outputs.build_args }} - target: production - tags: ${{ needs.settings.outputs.image_tag }}_${{ matrix.arch }} - platforms: linux/${{ matrix.arch }} - cache-from: type=gha,scope=${{ github.ref_name 
}}-aio-${{ matrix.arch }} - cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-aio-${{ matrix.arch }} - - name: Slack Notification - if: ${{ failure() }} - uses: rtCamp/action-slack-notify@v2 - env: - SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} - SLACK_USERNAME: "gha-failures-notifier" - SLACK_COLOR: "danger" - SLACK_MESSAGE: "Building Postgres AIO ${{ matrix.arch }} image failed" - SLACK_FOOTER: "" - - merge_manifest: - needs: [settings, build_image] - runs-on: ubuntu-latest - steps: - - uses: docker/setup-buildx-action@v3 - - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - name: Merge multi-arch manifests - run: | - docker buildx imagetools create -t ${{ needs.settings.outputs.image_tag }} \ - ${{ needs.settings.outputs.image_tag }}_amd64 \ - ${{ needs.settings.outputs.image_tag }}_arm64 - - name: Slack Notification - if: ${{ failure() }} - uses: rtCamp/action-slack-notify@v2 - env: - SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} - SLACK_USERNAME: "gha-failures-notifier" - SLACK_COLOR: "danger" - SLACK_MESSAGE: "Building Postgres image failed" - SLACK_FOOTER: "" - - publish: - needs: [settings, merge_manifest] - # Call workflow explicitly because events from actions cannot trigger more actions - uses: ./.github/workflows/mirror.yml - with: - version: aio-${{ needs.settings.outputs.docker_version }} - secrets: inherit - - publish_to_fly: - needs: [settings, build_image] - runs-on: ubuntu-latest - steps: - - uses: docker/setup-buildx-action@v3 - - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - name: Push to Fly - uses: superfly/flyctl-actions/setup-flyctl@dfdfedc86b296f5e5384f755a18bf400409a15d0 - with: - version: 0.1.64 - - run: | - docker pull ${{ needs.settings.outputs.image_tag }}_amd64 - docker tag ${{ needs.settings.outputs.image_tag }}_amd64 "registry.fly.io/staging-${{ 
needs.settings.outputs.fly_image_tag }}" - docker tag ${{ needs.settings.outputs.image_tag }}_amd64 "registry.fly.io/prod-${{ needs.settings.outputs.fly_image_tag }}" - - flyctl auth docker - docker push "registry.fly.io/staging-${{ needs.settings.outputs.fly_image_tag }}" - docker push "registry.fly.io/prod-${{ needs.settings.outputs.fly_image_tag }}" - env: - FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }} - - - name: Slack Notification - if: ${{ failure() }} - uses: rtCamp/action-slack-notify@v2 - env: - SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} - SLACK_USERNAME: "gha-failures-notifier" - SLACK_COLOR: "danger" - SLACK_MESSAGE: "Failed pushing AIO image to Fly.io" - SLACK_FOOTER: "" diff --git a/.github/workflows/dockerhub-release-orioledb.yml b/.github/workflows/dockerhub-release-orioledb.yml deleted file mode 100644 index 2c2f051..0000000 --- a/.github/workflows/dockerhub-release-orioledb.yml +++ /dev/null @@ -1,86 +0,0 @@ -name: Release OrioleDB on Dockerhub - -on: - push: - branches: - - develop - paths: - - ".github/workflows/dockerhub-release-orioledb.yml" - - "docker/orioledb/*" - - "common.vars*" - -jobs: - settings: - runs-on: ubuntu-latest - outputs: - docker_version: orioledb-${{ steps.settings.outputs.postgres-version }} - image_tag: supabase/postgres:orioledb-${{ steps.settings.outputs.postgres-version }} - build_args: ${{ steps.args.outputs.result }} - steps: - - uses: actions/checkout@v3 - - id: settings - # Remove spaces and quotes to get the raw version string - run: sed -r 's/(\s|\")+//g' common.vars.pkr.hcl >> $GITHUB_OUTPUT - - id: args - uses: mikefarah/yq@master - with: - cmd: yq 'to_entries | map(select(.value|type == "!!str")) | map(.key + "=" + .value) | join("\n")' 'ansible/vars.yml' - - build_image: - needs: settings - strategy: - fail-fast: false - matrix: - include: - - runner: [self-hosted, X64] - arch: amd64 - - runner: arm-runner - arch: arm64 - runs-on: ${{ matrix.runner }} - timeout-minutes: 180 - outputs: - 
image_digest: ${{ steps.build.outputs.digest }} - steps: - - run: docker context create builders - - uses: docker/setup-buildx-action@v3 - with: - endpoint: builders - - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - id: build - uses: docker/build-push-action@v5 - with: - file: docker/orioledb/Dockerfile - push: true - build-args: | - ${{ needs.settings.outputs.build_args }} - target: production - tags: ${{ needs.settings.outputs.image_tag }}_${{ matrix.arch }} - platforms: linux/${{ matrix.arch }} - cache-from: type=gha,scope=${{ github.ref_name }}-orioledb-${{ matrix.arch }} - cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-orioledb-${{ matrix.arch }} - - merge_manifest: - needs: [settings, build_image] - runs-on: ubuntu-latest - steps: - - uses: docker/setup-buildx-action@v3 - - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - name: Merge multi-arch manifests - run: | - docker buildx imagetools create -t ${{ needs.settings.outputs.image_tag }} \ - ${{ needs.settings.outputs.image_tag }}_amd64 \ - ${{ needs.settings.outputs.image_tag }}_arm64 - - publish: - needs: [settings, merge_manifest] - # Call workflow explicitly because events from actions cannot trigger more actions - uses: ./.github/workflows/mirror.yml - with: - version: ${{ needs.settings.outputs.docker_version }} - secrets: inherit diff --git a/.github/workflows/dockerhub-release.yml b/.github/workflows/dockerhub-release.yml deleted file mode 100644 index 7f4be55..0000000 --- a/.github/workflows/dockerhub-release.yml +++ /dev/null @@ -1,102 +0,0 @@ -name: Release on Dockerhub - -on: - push: - branches: - - develop - paths: - - ".github/workflows/dockerhub-release.yml" - - "common.vars*" - -jobs: - settings: - runs-on: ubuntu-latest - outputs: - docker_version: ${{ steps.settings.outputs.postgres-version }} - image_tag: 
supabase/postgres:${{ steps.settings.outputs.postgres-version }} - build_args: ${{ steps.args.outputs.result }} - steps: - - uses: actions/checkout@v3 - - id: settings - # Remove spaces and quotes to get the raw version string - run: sed -r 's/(\s|\")+//g' common.vars.pkr.hcl >> $GITHUB_OUTPUT - - id: args - uses: mikefarah/yq@master - with: - cmd: yq 'to_entries | map(select(.value|type == "!!str")) | map(.key + "=" + .value) | join("\n")' 'ansible/vars.yml' - - - build_image: - needs: settings - strategy: - matrix: - include: - - runner: [self-hosted, X64] - arch: amd64 - - runner: arm-runner - arch: arm64 - runs-on: ${{ matrix.runner }} - timeout-minutes: 180 - outputs: - image_digest: ${{ steps.build.outputs.digest }} - steps: - - run: docker context create builders - - uses: docker/setup-buildx-action@v3 - with: - endpoint: builders - - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - id: build - uses: docker/build-push-action@v5 - with: - push: true - build-args: | - ${{ needs.settings.outputs.build_args }} - target: production - tags: ${{ needs.settings.outputs.image_tag }}_${{ matrix.arch }} - platforms: linux/${{ matrix.arch }} - cache-from: type=gha,scope=${{ github.ref_name }}-latest-${{ matrix.arch }} - cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-latest-${{ matrix.arch }} - - name: Slack Notification - if: ${{ failure() }} - uses: rtCamp/action-slack-notify@v2 - env: - SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} - SLACK_USERNAME: "gha-failures-notifier" - SLACK_COLOR: "danger" - SLACK_MESSAGE: "Building Postgres ${{ matrix.arch }} image failed" - SLACK_FOOTER: "" - - merge_manifest: - needs: [settings, build_image] - runs-on: ubuntu-latest - steps: - - uses: docker/setup-buildx-action@v3 - - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - name: Merge multi-arch manifests - 
run: | - docker buildx imagetools create -t ${{ needs.settings.outputs.image_tag }} \ - ${{ needs.settings.outputs.image_tag }}_amd64 \ - ${{ needs.settings.outputs.image_tag }}_arm64 - - name: Slack Notification - if: ${{ failure() }} - uses: rtCamp/action-slack-notify@v2 - env: - SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} - SLACK_USERNAME: "gha-failures-notifier" - SLACK_COLOR: "danger" - SLACK_MESSAGE: "Building Postgres image failed" - SLACK_FOOTER: "" - - publish: - needs: [settings, merge_manifest] - # Call workflow explicitly because events from actions cannot trigger more actions - uses: ./.github/workflows/mirror.yml - with: - version: ${{ needs.settings.outputs.docker_version }} - secrets: inherit diff --git a/.github/workflows/nix-build.yml b/.github/workflows/nix-build.yml deleted file mode 100644 index 08c316b..0000000 --- a/.github/workflows/nix-build.yml +++ /dev/null @@ -1,82 +0,0 @@ -name: Nix CI - -on: - push: - branches: - - develop - - release/* - pull_request: - workflow_dispatch: - -permissions: - contents: read - id-token: write - -jobs: - build-run-image: - strategy: - fail-fast: false - matrix: - include: - - runner: larger-runner-4cpu - arch: amd64 - - runner: arm-runner - arch: arm64 - - runner: macos-latest - arch: arm64 - runs-on: ${{ matrix.runner }} - - steps: - - - name: Check out code - uses: actions/checkout@v4 - with: - ref: ${{ github.event.pull_request.head.ref || github.ref }} - fetch-depth: 0 - fetch-tags: true - - name: aws-creds - uses: aws-actions/configure-aws-credentials@v4 - with: - role-to-assume: ${{ secrets.DEV_AWS_ROLE }} - aws-region: "us-east-1" - output-credentials: true - - name: write secret key - # use python so we don't interpolate the secret into the workflow logs, in case of bugs - run: | - python -c "import os; file = open('nix-secret-key', 'w'); file.write(os.environ['NIX_SIGN_SECRET_KEY']); file.close()" - env: - NIX_SIGN_SECRET_KEY: ${{ secrets.NIX_SIGN_SECRET_KEY }} - - name: Log in to 
Docker Hub - if: matrix.runner != 'macos-latest' - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - name: Build psql bundle with nix - if: matrix.runner != 'macos-latest' - run: docker build -t base_nix -f docker/nix/Dockerfile . - - name: Run build psql bundle - if: matrix.runner != 'macos-latest' - run: | - docker run -e AWS_ACCESS_KEY_ID=${{ env.AWS_ACCESS_KEY_ID }} \ - -e AWS_SECRET_ACCESS_KEY=${{ env.AWS_SECRET_ACCESS_KEY }} \ - -e AWS_SESSION_TOKEN=${{ env.AWS_SESSION_TOKEN }} \ - base_nix bash -c "./workspace/docker/nix/build_nix.sh" - - name: Build psql bundle on macos - if: matrix.runner == 'macos-latest' - run: | - curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install --no-confirm \ - --extra-conf "substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com" \ - --extra-conf "trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" - . 
/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh - cp ./docker/nix/build_nix.sh ./build_nix.sh - sed -i '' '1s|^#!/bin/env bash|#!/usr/bin/env bash|' ./build_nix.sh - chmod +x ./build_nix.sh - ./build_nix.sh - env: - AWS_ACCESS_KEY_ID: ${{ env.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ env.AWS_SECRET_ACCESS_KEY }} - AWS_SESSION_TOKEN: ${{ env.AWS_SESSION_TOKEN }} - - name: build psql bundle on ${{ matrix.arch }} - diff --git a/.github/workflows/package-plv8.yml b/.github/workflows/package-plv8.yml deleted file mode 100644 index 09b2c4e..0000000 --- a/.github/workflows/package-plv8.yml +++ /dev/null @@ -1,78 +0,0 @@ -name: Package plv8 - -on: - push: - branches: - - develop - paths: - - ".github/workflows/package-plv8.yml" - - "Dockerfile" - workflow_dispatch: - -env: - image: ghcr.io/supabase/plv8 -permissions: - contents: read - packages: write - id-token: write - -jobs: - settings: - runs-on: ubuntu-latest - outputs: - image_tag: ${{ env.image }}:${{ steps.meta.outputs.image_tag }} - steps: - - uses: actions/checkout@v3 - - id: meta - run: | - plv8_release=$(grep -o 'plv8_release=.*' Dockerfile | head -1 | cut -d "=" -f 2) - postgresql_major=$(grep -o 'postgresql_major=.*' Dockerfile | head -1 | cut -d "=" -f 2) - echo "image_tag=${plv8_release}-pg${postgresql_major}" >> $GITHUB_OUTPUT - - build_image: - needs: settings - strategy: - matrix: - include: - - runner: [self-hosted, X64] - arch: amd64 - - runner: arm-runner - arch: arm64 - runs-on: ${{ matrix.runner }} - timeout-minutes: 180 - outputs: - image_digest: ${{ steps.build.outputs.digest }} - steps: - - run: docker context create builders - - uses: docker/setup-buildx-action@v3 - with: - endpoint: builders - - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - id: build - uses: docker/build-push-action@v5 - with: - push: true - target: plv8-deb - tags: ${{ needs.settings.outputs.image_tag }}_${{ matrix.arch }} 
- platforms: linux/${{ matrix.arch }} - no-cache: true - - merge_manifest: - needs: [settings, build_image] - runs-on: ubuntu-latest - steps: - - uses: docker/setup-buildx-action@v3 - - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Merge multi-arch manifests - run: | - docker buildx imagetools create -t ${{ needs.settings.outputs.image_tag }} \ - ${{ needs.settings.outputs.image_tag }}_amd64 \ - ${{ needs.settings.outputs.image_tag }}_arm64 diff --git a/.github/workflows/publish-nix-pgupgrade-bin-flake-version.yml b/.github/workflows/publish-nix-pgupgrade-bin-flake-version.yml deleted file mode 100644 index 5b985f4..0000000 --- a/.github/workflows/publish-nix-pgupgrade-bin-flake-version.yml +++ /dev/null @@ -1,101 +0,0 @@ -name: Publish nix pg_upgrade_bin flake version - -on: - workflow_dispatch: - inputs: - postgresVersion: - description: 'Optional. Postgres version to publish against, i.e. 15.1.1.78' - required: false - -permissions: - id-token: write - -jobs: - publish-staging: - runs-on: ubuntu-latest - - steps: - - name: Checkout Repo - uses: actions/checkout@v3 - - - name: Grab release version - id: process_release_version - run: | - VERSION=$(grep 'postgres-version' common-nix.vars.pkr.hcl | sed -e 's/postgres-version = "\(.*\)"/\1/g') - if [[ "${{ inputs.postgresVersion }}" != "" ]]; then - VERSION=${{ inputs.postgresVersion }} - fi - echo "version=$VERSION" >> "$GITHUB_OUTPUT" - echo "major_version=$(echo $VERSION | cut -d'.' 
-f1)" >> "$GITHUB_OUTPUT" - - - name: Create a tarball containing the latest nix flake version - working-directory: /tmp/ - run: | - mkdir -p ${{ steps.process_release_version.outputs.major_version }} - echo $GITHUB_SHA > ${{ steps.process_release_version.outputs.major_version }}/nix_flake_version - tar -czvf pg_upgrade_bin.tar.gz ${{ steps.process_release_version.outputs.major_version }} - - - name: configure aws credentials - staging - uses: aws-actions/configure-aws-credentials@v1 - with: - role-to-assume: ${{ secrets.DEV_AWS_ROLE }} - aws-region: "us-east-1" - - - name: Upload pg_upgrade scripts to s3 staging - run: | - aws s3 cp /tmp/pg_upgrade_bin.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz - - - name: Slack Notification on Failure - if: ${{ failure() }} - uses: rtCamp/action-slack-notify@v2 - env: - SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} - SLACK_USERNAME: 'gha-failures-notifier' - SLACK_COLOR: 'danger' - SLACK_MESSAGE: 'Publishing pg_upgrade binaries flake version failed' - SLACK_FOOTER: '' - - publish-prod: - runs-on: ubuntu-latest - if: github.ref_name == 'develop' || contains( github.ref, 'release' ) - - steps: - - name: Checkout Repo - uses: actions/checkout@v3 - - - name: Grab release version - id: process_release_version - run: | - VERSION=$(grep 'postgres-version' common-nix.vars.pkr.hcl | sed -e 's/postgres-version = "\(.*\)"/\1/g') - if [[ "${{ inputs.postgresVersion }}" != "" ]]; then - VERSION=${{ inputs.postgresVersion }} - fi - echo "version=$VERSION" >> "$GITHUB_OUTPUT" - echo "major_version=$(echo $VERSION | cut -d'.' 
-f1)" >> "$GITHUB_OUTPUT" - - - name: Create a tarball containing the latest nix flake version - working-directory: /tmp/ - run: | - mkdir -p ${{ steps.process_release_version.outputs.major_version }} - echo $GITHUB_SHA > ${{ steps.process_release_version.outputs.major_version }}/nix_flake_version - tar -czvf pg_upgrade_bin.tar.gz ${{ steps.process_release_version.outputs.major_version }} - - - name: configure aws credentials - prod - uses: aws-actions/configure-aws-credentials@v1 - with: - role-to-assume: ${{ secrets.PROD_AWS_ROLE }} - aws-region: "us-east-1" - - - name: Upload pg_upgrade scripts to s3 prod - run: | - aws s3 cp /tmp/pg_upgrade_bin.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz - - - name: Slack Notification on Failure - if: ${{ failure() }} - uses: rtCamp/action-slack-notify@v2 - env: - SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} - SLACK_USERNAME: 'gha-failures-notifier' - SLACK_COLOR: 'danger' - SLACK_MESSAGE: 'Publishing pg_upgrade binaries flake version failed' - SLACK_FOOTER: '' diff --git a/.github/workflows/test-pg-upgrade.yml b/.github/workflows/test-pg-upgrade.yml deleted file mode 100644 index b90791b..0000000 --- a/.github/workflows/test-pg-upgrade.yml +++ /dev/null @@ -1,133 +0,0 @@ -name: Test pg_upgrade - -on: - push: - branches: - - develop - - pcnc/pg_upgrade-test-extensions - workflow_dispatch: - -permissions: - id-token: write - -jobs: - test: - strategy: - matrix: - base_pg_version: - - 15.1.1.60 - - 15.1.1.70 - runs-on: arm-runner - timeout-minutes: 30 - defaults: - run: - working-directory: ./tests/pg_upgrade - env: - PGPORT: 5478 - PGPASSWORD: postgres - PGDATABASE: postgres - PGUSER: supabase_admin - PGHOST: localhost - PG_MAJOR_VERSION: 15 - IS_CI: true - container: pg_upgrade_test - steps: - - uses: actions/checkout@v3 - - - name: Grab release version - id: process_release_version - working-directory: ./ - 
run: | - VERSION=$(sed -e 's/postgres-version = "\(.*\)"/\1/g' common.vars.pkr.hcl) - echo "version=$VERSION" >> "$GITHUB_OUTPUT" - - - name: configure aws credentials - staging - uses: aws-actions/configure-aws-credentials@v1 - with: - role-to-assume: ${{ secrets.DEV_AWS_ROLE }} - aws-region: "us-east-1" - - - name: Download pg_upgrade_scripts and binaries - run: | - aws s3 cp s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/pg_upgrade_scripts.tar.gz scripts/pg_upgrade_scripts.tar.gz - aws s3 cp s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz scripts/pg_upgrade_bin.tar.gz - - - run: docker context create builders - - uses: docker/setup-buildx-action@v2 - with: - endpoint: builders - driver-opts: image=moby/buildkit:v0.11.6 - buildkitd-flags: --debug - - - name: Start Postgres - run: | - docker rm -f "$container" || true - docker run --name "$container" --env-file .env \ - -v "$(pwd)/scripts:/tmp/upgrade" \ - --entrypoint "/tmp/upgrade/entrypoint.sh" -d \ - -p ${PGPORT}:5432 \ - "supabase/postgres:${{ matrix.base_pg_version }}" - - - name: Install psql - run: | - sudo apt update - sudo apt install -y --no-install-recommends postgresql-client - - - name: Install pg_prove - run: | - sudo apt-get update - sudo apt-get install -y --no-install-recommends perl cpanminus - sudo cpanm -n App::cpanminus - sudo cpanm -n TAP::Parser::SourceHandler::pgTAP - env: - SHELL: /bin/bash - PERL_MM_USE_DEFAULT: 1 - PERL_MM_NONINTERACTIVE: 1 - - - name: Wait for healthy database - run: | - count=0 - while ! docker exec "$container" bash -c "pg_isready"; do - count=$((count + 1)) - if [ $count -ge "$retries" ]; then - echo "Retry $count/$retries exited $exit, no more retries left." 
- docker logs "$container" - docker rm -f "$container" - exit 1 - fi - done - env: - retries: 20 - - - name: Run migrations - run: | - docker cp ../../migrations/db/migrations "$container:/docker-entrypoint-initdb.d/" - docker exec "$container" bash -c "/docker-entrypoint-initdb.d/migrate.sh > /tmp/migrate.log 2>&1" - - - name: Run initial tests - run: pg_prove "../../migrations/tests/test.sql" - env: - PERL5LIB: /usr/local/lib/perl5 - - - name: Apply pre-upgrade fixtures - run: | - psql -f "./tests/97-enable-extensions.sql" - psql -f "./tests/98-data-fixtures.sql" - psql -f "./tests/99-fixtures.sql" - - - name: Initiate upgrade - run: docker exec "$container" bash -c '/tmp/upgrade/pg_upgrade_scripts/initiate.sh "$PG_MAJOR_VERSION"; exit $?' - - - name: Complete pg_upgrade - run: docker exec pg_upgrade_test bash -c '/tmp/upgrade/pg_upgrade_scripts/complete.sh; exit $?' - - - name: Run post-upgrade tests - run: | - pg_prove tests/01-schema.sql - pg_prove tests/02-data.sql - pg_prove tests/03-settings.sql - - - name: Clean up container - if: ${{ always() }} - continue-on-error: true - run: docker rm -f "$container" || true diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml deleted file mode 100644 index 27ae328..0000000 --- a/.github/workflows/test.yml +++ /dev/null @@ -1,117 +0,0 @@ -name: Test Database - -on: - push: - branches: - - develop - pull_request: - workflow_dispatch: - -jobs: - build: - strategy: - matrix: - include: - - runner: [self-hosted, X64] - arch: amd64 - - runner: arm-runner - arch: arm64 - runs-on: ${{ matrix.runner }} - timeout-minutes: 180 - env: - POSTGRES_PORT: 5478 - POSTGRES_PASSWORD: password - steps: - - uses: actions/checkout@v3 - - id: settings - # Remove spaces and quotes to get the raw version string - run: sed -r 's/(\s|\")+//g' common-nix.vars.pkr.hcl >> $GITHUB_OUTPUT - - - id: args - uses: mikefarah/yq@master - with: - cmd: yq 'to_entries | map(select(.value|type == "!!str")) | map(.key + "=" + .value) | 
join("\n")' 'ansible/vars.yml' - - - run: docker context create builders - - uses: docker/setup-buildx-action@v3 - with: - endpoint: builders - - uses: docker/build-push-action@v5 - with: - load: true - context: . - file: "Dockerfile-156" - target: production - build-args: | - ${{ steps.args.outputs.result }} - tags: supabase/postgres:${{ steps.settings.outputs.postgres-version }},supabase_postgres - cache-from: | - type=gha,scope=${{ github.ref_name }}-${{ steps.settings.outputs.postgres-version }}-${{ matrix.arch }} - type=gha,scope=${{ github.base_ref }}-${{ steps.settings.outputs.postgres-version }}-${{ matrix.arch }} - cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ steps.settings.outputs.postgres-version }}-${{ matrix.arch }} - - - name: Start Postgres - run: | - docker run --rm --pull=never \ - -e POSTGRES_PASSWORD=${{ env.POSTGRES_PASSWORD }} \ - -p ${{ env.POSTGRES_PORT }}:5432 \ - --name supabase_postgres \ - -d supabase/postgres:${{ steps.settings.outputs.postgres-version }} - - name: Install psql - run: | - sudo apt update - sudo apt install -y --no-install-recommends postgresql-client - - - name: Install pg_prove - run: sudo cpan -T TAP::Parser::SourceHandler::pgTAP - env: - SHELL: /bin/bash - - - name: Wait for healthy database - run: | - count=0 - until [ "$(docker inspect -f '{{.State.Health.Status}}' "$container")" == "healthy" ]; do - exit=$? - count=$((count + 1)) - if [ $count -ge "$retries" ]; then - echo "Retry $count/$retries exited $exit, no more retries left." 
- docker stop -t 2 "$container" - return $exit - fi - sleep 1; - done; - echo "$container container is healthy" - env: - retries: 20 - container: supabase_postgres - - - name: Run tests - run: pg_prove migrations/tests/test.sql - env: - PGHOST: localhost - PGPORT: ${{ env.POSTGRES_PORT }} - PGDATABASE: postgres - PGUSER: supabase_admin - PGPASSWORD: ${{ env.POSTGRES_PASSWORD }} - - - name: Check migrations are idempotent - run: | - for sql in ./migrations/db/migrations/*.sql; do - echo "$0: running $sql" - psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -f "$sql" - done - env: - PGHOST: localhost - PGPORT: ${{ env.POSTGRES_PORT }} - PGDATABASE: postgres - PGUSER: tealbase_admin - PGPASSWORD: ${{ env.POSTGRES_PASSWORD }} - - - name: verify schema.sql is committed - run: | - docker compose -f migrations/docker-compose.yaml up db dbmate --abort-on-container-exit - if ! git diff --ignore-space-at-eol --exit-code --quiet migrations/schema.sql; then - echo "Detected uncommitted changes after build. 
See status below:" - git diff - exit 1 - fi diff --git a/.gitignore b/.gitignore index a6950d2..5372bfd 100644 --- a/.gitignore +++ b/.gitignore @@ -17,8 +17,16 @@ __pycache__/ result* .env-local .history +.envrc +.direnv #IDE .idea/ .vscode/ + +db/schema.sql +common-nix.vars.pkr.hcl + +# pre-commit config is managed in nix +.pre-commit-config.yaml diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index fdd7c18..0000000 --- a/Dockerfile +++ /dev/null @@ -1,998 +0,0 @@ -# syntax=docker/dockerfile:1.6 -ARG postgresql_major=15 -ARG postgresql_release=${postgresql_major}.1 - -# Bump default build arg to build a package from source -# Bump vars.yml to specify runtime package version -ARG sfcgal_release=1.3.10 -ARG postgis_release=3.3.2 -ARG pgrouting_release=3.4.1 -ARG pgtap_release=1.2.0 -ARG pg_cron_release=1.6.2 -ARG pgaudit_release=1.7.0 -ARG pgjwt_release=9742dab1b2f297ad3811120db7b21451bca2d3c9 -ARG pgsql_http_release=1.5.0 -ARG plpgsql_check_release=2.2.5 -ARG pg_safeupdate_release=1.4 -ARG timescaledb_release=2.9.1 -ARG wal2json_release=2_5 -ARG pljava_release=1.6.4 -ARG plv8_release=3.1.5 -ARG pg_plan_filter_release=5081a7b5cb890876e67d8e7486b6a64c38c9a492 -ARG pg_net_release=0.9.2 -ARG rum_release=1.3.13 -ARG pg_hashids_release=cd0e1b31d52b394a0df64079406a14a4f7387cd6 -ARG libsodium_release=1.0.18 -ARG pgsodium_release=3.1.6 -ARG pg_graphql_release=1.5.1 -ARG pg_stat_monitor_release=1.1.1 -ARG pg_jsonschema_release=0.1.4 -ARG pg_repack_release=1.4.8 -ARG vault_release=0.2.8 -ARG groonga_release=12.0.8 -ARG pgroonga_release=2.4.0 -ARG wrappers_release=0.4.1 -ARG hypopg_release=1.3.1 -ARG pgvector_release=0.4.0 -ARG pg_tle_release=1.3.2 -ARG index_advisor_release=0.2.0 -ARG supautils_release=2.5.0 -ARG wal_g_release=2.0.1 - -#################### -# Setup Postgres PPA -#################### -FROM ubuntu:focal as ppa -# Redeclare args for use in subsequent stages -ARG postgresql_major -RUN apt-get update && apt-get install -y --no-install-recommends \ - 
gnupg \ - ca-certificates \ - && rm -rf /var/lib/apt/lists/* -# Add Postgres PPA -# In the off-chance that the key in the repository expires, it can be replaced by running the following in the repository's root: -# gpg --keyserver hkps://keyserver.ubuntu.com --recv-keys $NEW_POSTGRESQL_GPG_KEY -# gpg --export --armor $NEW_POSTGRESQL_GPG_KEY > postgresql.gpg.key -COPY postgresql.gpg.key /tmp/postgresql.gpg.key -RUN apt-key add /tmp/postgresql.gpg.key && \ - echo "deb https://apt-archive.postgresql.org/pub/repos/apt focal-pgdg-archive main" > /etc/apt/sources.list.d/pgdg.list - -#################### -# Download pre-built postgres -#################### -FROM ppa as pg -ARG postgresql_release -# Download .deb packages -RUN apt-get update && apt-get install -y --no-install-recommends --download-only \ - postgresql-${postgresql_major}=${postgresql_release}-1.pgdg20.04+1 \ - && rm -rf /var/lib/apt/lists/* -RUN mv /var/cache/apt/archives/*.deb /tmp/ - -FROM ppa as pg-dev -ARG postgresql_release -# Download .deb packages -RUN apt-get update && apt-get install -y --no-install-recommends --download-only \ - postgresql-server-dev-${postgresql_major}=${postgresql_release}-1.pgdg20.04+1 \ - && rm -rf /var/lib/apt/lists/* -RUN mv /var/cache/apt/archives/*.deb /tmp/ - -#################### -# Install postgres -#################### -FROM ubuntu:focal as base -# Redeclare args for use in subsequent stages -ARG TARGETARCH -ARG postgresql_major - -# Install postgres -COPY --from=pg /tmp /tmp -# Ref: https://github.com/docker-library/postgres/blob/master/15/bullseye/Dockerfile#L91 -ENV DEBIAN_FRONTEND=noninteractive -RUN set -ex; \ - export PYTHONDONTWRITEBYTECODE=1; \ - apt-get update; \ - apt-get install -y --no-install-recommends /tmp/postgresql-common_*.deb /tmp/postgresql-client-common_*.deb; \ - sed -ri 's/#(create_main_cluster) .*$/\1 = false/' /etc/postgresql-common/createcluster.conf; \ - apt-get install -y --no-install-recommends /tmp/*.deb; \ - rm -rf /var/lib/apt/lists/* 
/tmp/*; \ - find /usr -name '*.pyc' -type f -exec bash -c 'for pyc; do dpkg -S "$pyc" &> /dev/null || rm -vf "$pyc"; done' -- '{}' + - -ENV PATH=$PATH:/usr/lib/postgresql/${postgresql_major}/bin -ENV PGDATA=/var/lib/postgresql/data - -# Make the "en_US.UTF-8" locale so postgres will be utf-8 enabled by default -RUN localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 -ENV LANG=en_US.UTF-8 -ENV LC_CTYPE=C.UTF-8 -ENV LC_COLLATE=C.UTF-8 - -FROM base as builder -# Install build dependencies -COPY --from=pg-dev /tmp /tmp -RUN apt-get update && \ - rm -f /tmp/libssl-dev* && \ - apt-get install -y --no-install-recommends \ - /tmp/*.deb \ - build-essential \ - checkinstall \ - cmake \ - && rm -rf /var/lib/apt/lists/* /tmp/* - -FROM builder as ccache -# Cache large build artifacts -RUN apt-get update && apt-get install -y --no-install-recommends \ - clang \ - ccache \ - && rm -rf /var/lib/apt/lists/* -ENV CCACHE_DIR=/ccache -ENV PATH=/usr/lib/ccache:$PATH -# Used to update ccache -ARG CACHE_EPOCH - -#################### -# 01-postgis.yml -#################### -FROM ccache as sfcgal -# Download and extract -ARG sfcgal_release -ARG sfcgal_release_checksum -ADD --checksum=${sfcgal_release_checksum} \ - "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/sfcgal/SFCGAL-v${sfcgal_release}.tar.gz" \ - /tmp/sfcgal.tar.gz -RUN tar -xvf /tmp/sfcgal.tar.gz -C /tmp --one-top-level --strip-components 1 && \ - rm -rf /tmp/sfcgal.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - libcgal-dev \ - libboost-serialization1.71-dev \ - libmpfr-dev \ - libgmp-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/sfcgal/build -RUN cmake .. 
-RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=yes --fstrans=no --backup=no --pakdir=/tmp --pkgname=sfcgal --pkgversion=${sfcgal_release} --requires=libgmpxx4ldbl,libboost-serialization1.71.0,libmpfr6 --nodoc - -FROM sfcgal as postgis-source -# Download and extract -ARG postgis_release -ARG postgis_release_checksum -ADD --checksum=${postgis_release_checksum} \ - "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/postgis-${postgis_release}.tar.gz" \ - /tmp/postgis.tar.gz -RUN tar -xvf /tmp/postgis.tar.gz -C /tmp && \ - rm -rf /tmp/postgis.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - protobuf-c-compiler \ - libgeos-dev \ - libproj-dev \ - libgdal-dev \ - libjson-c-dev \ - libxml2-dev \ - libprotobuf-c-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/postgis-${postgis_release} -RUN ./configure --with-sfcgal -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --requires=libgeos-c1v5,libproj15,libjson-c4,libprotobuf-c1,libgdal26 --nodoc - -FROM ppa as postgis -# Latest available is 3.3.2 -ARG postgis_release -# Download pre-built packages -RUN apt-get update && apt-get install -y --no-install-recommends --download-only \ - postgresql-${postgresql_major}-postgis-3=${postgis_release}+dfsg-1.pgdg20.04+1 \ - && rm -rf /var/lib/apt/lists/* -RUN mv /var/cache/apt/archives/*.deb /tmp/ - -#################### -# 02-pgrouting.yml -#################### -FROM ccache as pgrouting-source -# Download and extract -ARG pgrouting_release -ARG pgrouting_release_checksum -ADD --checksum=${pgrouting_release_checksum} \ - "https://github.com/pgRouting/pgrouting/releases/download/v${pgrouting_release}/pgrouting-${pgrouting_release}.tar.gz" 
\ - /tmp/pgrouting.tar.gz -RUN tar -xvf /tmp/pgrouting.tar.gz -C /tmp && \ - rm -rf /tmp/pgrouting.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - libboost-all-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/pgrouting-${pgrouting_release}/build -RUN cmake -DBUILD_HTML=OFF -DBUILD_DOXY=OFF .. -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgname=pgrouting --pkgversion=${pgrouting_release} --nodoc - -FROM ppa as pgrouting -ARG pgrouting_release -# Download pre-built packages -RUN apt-get update && apt-get install -y --no-install-recommends --download-only \ - postgresql-${postgresql_major}-pgrouting=${pgrouting_release}-1.pgdg20.04+1 \ - && rm -rf /var/lib/apt/lists/* -RUN mv /var/cache/apt/archives/*.deb /tmp/ - -#################### -# 03-pgtap.yml -#################### -FROM builder as pgtap-source -# Download and extract -ARG pgtap_release -ARG pgtap_release_checksum -ADD --checksum=${pgtap_release_checksum} \ - "https://github.com/theory/pgtap/archive/v${pgtap_release}.tar.gz" \ - /tmp/pgtap.tar.gz -RUN tar -xvf /tmp/pgtap.tar.gz -C /tmp && \ - rm -rf /tmp/pgtap.tar.gz -# Build from source -WORKDIR /tmp/pgtap-${pgtap_release} -RUN make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -#################### -# 04-pg_cron.yml -#################### -FROM ccache as pg_cron-source -# Download and extract -ARG pg_cron_release -ARG pg_cron_release_checksum -ADD --checksum=${pg_cron_release_checksum} \ - "https://github.com/citusdata/pg_cron/archive/refs/tags/v${pg_cron_release}.tar.gz" \ - /tmp/pg_cron.tar.gz -RUN tar -xvf /tmp/pg_cron.tar.gz -C /tmp && \ - rm -rf /tmp/pg_cron.tar.gz -# Build from source -WORKDIR /tmp/pg_cron-${pg_cron_release} -RUN 
--mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -#################### -# 05-pgaudit.yml -#################### -FROM ccache as pgaudit-source -# Download and extract -ARG pgaudit_release -ARG pgaudit_release_checksum -ADD --checksum=${pgaudit_release_checksum} \ - "https://github.com/pgaudit/pgaudit/archive/refs/tags/${pgaudit_release}.tar.gz" \ - /tmp/pgaudit.tar.gz -RUN tar -xvf /tmp/pgaudit.tar.gz -C /tmp && \ - rm -rf /tmp/pgaudit.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - libssl-dev \ - libkrb5-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/pgaudit-${pgaudit_release} -ENV USE_PGXS=1 -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -#################### -# 06-pgjwt.yml -#################### -FROM builder as pgjwt-source -# Download and extract -ARG pgjwt_release -ADD "https://github.com/michelp/pgjwt.git#${pgjwt_release}" \ - /tmp/pgjwt-${pgjwt_release} -# Build from source -WORKDIR /tmp/pgjwt-${pgjwt_release} -RUN make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgversion=1 --nodoc - -#################### -# 07-pgsql-http.yml -#################### -FROM ccache as pgsql-http-source -# Download and extract -ARG pgsql_http_release -ARG pgsql_http_release_checksum -ADD --checksum=${pgsql_http_release_checksum} \ - "https://github.com/pramsey/pgsql-http/archive/refs/tags/v${pgsql_http_release}.tar.gz" \ - /tmp/pgsql-http.tar.gz -RUN tar -xvf /tmp/pgsql-http.tar.gz -C /tmp && \ - rm -rf /tmp/pgsql-http.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y 
--no-install-recommends \ - libcurl4-gnutls-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/pgsql-http-${pgsql_http_release} -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --requires=libcurl3-gnutls --nodoc - -#################### -# 08-plpgsql_check.yml -#################### -FROM ccache as plpgsql_check-source -# Download and extract -ARG plpgsql_check_release -ARG plpgsql_check_release_checksum -ADD --checksum=${plpgsql_check_release_checksum} \ - "https://github.com/okbob/plpgsql_check/archive/refs/tags/v${plpgsql_check_release}.tar.gz" \ - /tmp/plpgsql_check.tar.gz -RUN tar -xvf /tmp/plpgsql_check.tar.gz -C /tmp && \ - rm -rf /tmp/plpgsql_check.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - libicu-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/plpgsql_check-${plpgsql_check_release} -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -#################### -# 09-pg-safeupdate.yml -#################### -FROM ccache as pg-safeupdate-source -# Download and extract -ARG pg_safeupdate_release -ARG pg_safeupdate_release_checksum -ADD --checksum=${pg_safeupdate_release_checksum} \ - "https://github.com/eradman/pg-safeupdate/archive/refs/tags/${pg_safeupdate_release}.tar.gz" \ - /tmp/pg-safeupdate.tar.gz -RUN tar -xvf /tmp/pg-safeupdate.tar.gz -C /tmp && \ - rm -rf /tmp/pg-safeupdate.tar.gz -# Build from source -WORKDIR /tmp/pg-safeupdate-${pg_safeupdate_release} -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no 
--pakdir=/tmp --nodoc - -#################### -# 10-timescaledb.yml -#################### -FROM ccache as timescaledb-source -# Download and extract -ARG timescaledb_release -ARG timescaledb_release_checksum -ADD --checksum=${timescaledb_release_checksum} \ - "https://github.com/timescale/timescaledb/archive/refs/tags/${timescaledb_release}.tar.gz" \ - /tmp/timescaledb.tar.gz -RUN tar -xvf /tmp/timescaledb.tar.gz -C /tmp && \ - rm -rf /tmp/timescaledb.tar.gz -# Build from source -WORKDIR /tmp/timescaledb-${timescaledb_release}/build -RUN cmake -DAPACHE_ONLY=1 .. -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgname=timescaledb --pkgversion=${timescaledb_release} --nodoc - -#################### -# 11-wal2json.yml -#################### -FROM ccache as wal2json-source -# Download and extract -ARG wal2json_release -ARG wal2json_release_checksum -ADD --checksum=${wal2json_release_checksum} \ - "https://github.com/eulerto/wal2json/archive/refs/tags/wal2json_${wal2json_release}.tar.gz" \ - /tmp/wal2json.tar.gz -RUN tar -xvf /tmp/wal2json.tar.gz -C /tmp --one-top-level --strip-components 1 && \ - rm -rf /tmp/wal2json.tar.gz -# Build from source -WORKDIR /tmp/wal2json -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -ENV version=${wal2json_release} -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgversion="\${version/_/.}" --nodoc - -#################### -# 12-pljava.yml -#################### -FROM builder as pljava-source -# Download and extract -# TODO: revert to using main repo after PG15 support is merged: https://github.com/tada/pljava/pull/413 -ARG pljava_release=master -ARG pljava_release_checksum=sha256:e99b1c52f7b57f64c8986fe6ea4a6cc09d78e779c1643db060d0ac66c93be8b6 -ADD 
--checksum=${pljava_release_checksum} \ - "https://github.com/tealbase/pljava/archive/refs/heads/${pljava_release}.tar.gz" \ - /tmp/pljava.tar.gz -RUN tar -xvf /tmp/pljava.tar.gz -C /tmp && \ - rm -rf /tmp/pljava.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - maven \ - default-jdk \ - libssl-dev \ - libkrb5-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/pljava-${pljava_release} -RUN mvn -T 1C clean install -Dmaven.test.skip -DskipTests -Dmaven.javadoc.skip=true -# Create debian package -RUN cp pljava-packaging/target/pljava-pg${postgresql_major}.jar /tmp/ - -FROM base as pljava -# Download pre-built packages -RUN apt-get update && apt-get install -y --no-install-recommends --download-only \ - default-jdk-headless \ - postgresql-${postgresql_major}-pljava \ - && rm -rf /var/lib/apt/lists/* -RUN mv /var/cache/apt/archives/*.deb /tmp/ - -#################### -# 13-plv8.yml -#################### -FROM ccache as plv8-source -# Download and extract -ARG plv8_release -ARG plv8_release_checksum -ADD --checksum=${plv8_release_checksum} \ - "https://github.com/tealbase/plv8/archive/refs/tags/v${plv8_release}.tar.gz" \ - /tmp/plv8.tar.gz -RUN tar -xvf /tmp/plv8.tar.gz -C /tmp && \ - rm -rf /tmp/plv8.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - ca-certificates \ - pkg-config \ - ninja-build \ - git \ - libtinfo5 \ - libstdc++-10-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/plv8-${plv8_release} -ENV DOCKER=1 -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -FROM scratch as plv8-deb -COPY --from=plv8-source /tmp/*.deb /tmp/ - -FROM ghcr.io/tealbase/plv8:${plv8_release}-pg${postgresql_major} as plv8 - -#################### -# 14-pg_plan_filter.yml 
-#################### -FROM ccache as pg_plan_filter-source -# Download and extract -ARG pg_plan_filter_release -ADD "https://github.com/pgexperts/pg_plan_filter.git#${pg_plan_filter_release}" \ - /tmp/pg_plan_filter-${pg_plan_filter_release} -# Build from source -WORKDIR /tmp/pg_plan_filter-${pg_plan_filter_release} -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgversion=1 --nodoc - -#################### -# 15-pg_net.yml -#################### -FROM ccache as pg_net-source -# Download and extract -ARG pg_net_release -ARG pg_net_release_checksum -ADD --checksum=${pg_net_release_checksum} \ - "https://github.com/tealbase/pg_net/archive/refs/tags/v${pg_net_release}.tar.gz" \ - /tmp/pg_net.tar.gz -RUN tar -xvf /tmp/pg_net.tar.gz -C /tmp && \ - rm -rf /tmp/pg_net.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - libcurl4-gnutls-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/pg_net-${pg_net_release} -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --requires=libcurl3-gnutls --nodoc - -#################### -# 16-rum.yml -#################### -FROM ccache as rum-source -# Download and extract -ARG rum_release -ARG rum_release_checksum -ADD --checksum=${rum_release_checksum} \ - "https://github.com/postgrespro/rum/archive/refs/tags/${rum_release}.tar.gz" \ - /tmp/rum.tar.gz -RUN tar -xvf /tmp/rum.tar.gz -C /tmp && \ - rm -rf /tmp/rum.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - systemtap-sdt-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/rum-${rum_release} -ENV USE_PGXS=1 -RUN 
--mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -#################### -# 17-pg_hashids.yml -#################### -FROM ccache as pg_hashids-source -# Download and extract -ARG pg_hashids_release -ADD "https://github.com/iCyberon/pg_hashids.git#${pg_hashids_release}" \ - /tmp/pg_hashids-${pg_hashids_release} -# Build from source -WORKDIR /tmp/pg_hashids-${pg_hashids_release} -RUN make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgversion=1 --nodoc - -#################### -# 18-pgsodium.yml -#################### -FROM ccache as libsodium -# Download and extract -ARG libsodium_release -ARG libsodium_release_checksum -ADD --checksum=${libsodium_release_checksum} \ - "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/libsodium/libsodium-${libsodium_release}.tar.gz" \ - /tmp/libsodium.tar.gz -RUN tar -xvf /tmp/libsodium.tar.gz -C /tmp && \ - rm -rf /tmp/libsodium.tar.gz -# Build from source -WORKDIR /tmp/libsodium-${libsodium_release} -RUN ./configure -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -RUN make install - -FROM libsodium as pgsodium-source -# Download and extract -ARG pgsodium_release -ARG pgsodium_release_checksum -ADD --checksum=${pgsodium_release_checksum} \ - "https://github.com/michelp/pgsodium/archive/refs/tags/v${pgsodium_release}.tar.gz" \ - /tmp/pgsodium.tar.gz -RUN tar -xvf /tmp/pgsodium.tar.gz -C /tmp && \ - rm -rf /tmp/pgsodium.tar.gz -# Build from source -WORKDIR /tmp/pgsodium-${pgsodium_release} -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --requires=libsodium23 --nodoc - -#################### -# 
19-pg_graphql.yml -#################### -FROM base as pg_graphql -# Download package archive -ARG pg_graphql_release -ADD "https://github.com/tealbase/pg_graphql/releases/download/v${pg_graphql_release}/pg_graphql-v${pg_graphql_release}-pg${postgresql_major}-${TARGETARCH}-linux-gnu.deb" \ - /tmp/pg_graphql.deb - -#################### -# 20-pg_stat_monitor.yml -#################### -FROM ccache as pg_stat_monitor-source -# Download and extract -ARG pg_stat_monitor_release -ARG pg_stat_monitor_release_checksum -ADD --checksum=${pg_stat_monitor_release_checksum} \ - "https://github.com/percona/pg_stat_monitor/archive/refs/tags/${pg_stat_monitor_release}.tar.gz" \ - /tmp/pg_stat_monitor.tar.gz -RUN tar -xvf /tmp/pg_stat_monitor.tar.gz -C /tmp && \ - rm -rf /tmp/pg_stat_monitor.tar.gz -# Build from source -WORKDIR /tmp/pg_stat_monitor-${pg_stat_monitor_release} -ENV USE_PGXS=1 -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -#################### -# 22-pg_jsonschema.yml -#################### -FROM base as pg_jsonschema -# Download package archive -ARG pg_jsonschema_release -ADD "https://github.com/tealbase/pg_jsonschema/releases/download/v${pg_jsonschema_release}/pg_jsonschema-v${pg_jsonschema_release}-pg${postgresql_major}-${TARGETARCH}-linux-gnu.deb" \ - /tmp/pg_jsonschema.deb - -#################### -# 23-vault.yml -#################### -FROM builder as vault-source -# Download and extract -ARG vault_release -ARG vault_release_checksum -ADD --checksum=${vault_release_checksum} \ - "https://github.com/tealbase/vault/archive/refs/tags/v${vault_release}.tar.gz" \ - /tmp/vault.tar.gz -RUN tar -xvf /tmp/vault.tar.gz -C /tmp && \ - rm -rf /tmp/vault.tar.gz -# Build from source -WORKDIR /tmp/vault-${vault_release} -RUN make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no 
--backup=no --pakdir=/tmp --nodoc - -#################### -# 24-pgroonga.yml -#################### -FROM ccache as groonga -# Download and extract -ARG groonga_release -ARG groonga_release_checksum -ADD --checksum=${groonga_release_checksum} \ - "https://packages.groonga.org/source/groonga/groonga-${groonga_release}.tar.gz" \ - /tmp/groonga.tar.gz -RUN tar -xvf /tmp/groonga.tar.gz -C /tmp && \ - rm -rf /tmp/groonga.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - zlib1g-dev \ - liblz4-dev \ - libzstd-dev \ - libmsgpack-dev \ - libzmq3-dev \ - libevent-dev \ - libmecab-dev \ - rapidjson-dev \ - pkg-config \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/groonga-${groonga_release} -RUN ./configure -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=yes --fstrans=no --backup=no --pakdir=/tmp --requires=zlib1g,liblz4-1,libzstd1,libmsgpackc2,libzmq5,libevent-2.1-7,libmecab2 --nodoc - -FROM groonga as pgroonga-source -# Download and extract -ARG pgroonga_release -ARG pgroonga_release_checksum -ADD --checksum=${pgroonga_release_checksum} \ - "https://packages.groonga.org/source/pgroonga/pgroonga-${pgroonga_release}.tar.gz" \ - /tmp/pgroonga.tar.gz -RUN tar -xvf /tmp/pgroonga.tar.gz -C /tmp && \ - rm -rf /tmp/pgroonga.tar.gz -# Build from source -WORKDIR /tmp/pgroonga-${pgroonga_release} -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --requires=mecab-naist-jdic --nodoc - -FROM scratch as pgroonga-deb -COPY --from=pgroonga-source /tmp/*.deb /tmp/ - -FROM base as pgroonga -# Latest available is 3.0.3 -ARG pgroonga_release -# Download pre-built packages -ADD "https://packages.groonga.org/ubuntu/groonga-apt-source-latest-focal.deb" 
/tmp/source.deb -RUN apt-get update && apt-get install -y --no-install-recommends \ - ca-certificates \ - /tmp/source.deb \ - && rm -rf /var/lib/apt/lists/* -RUN rm /tmp/source.deb -RUN apt-get update && apt-get install -y --no-install-recommends --download-only \ - postgresql-${postgresql_major}-pgdg-pgroonga=${pgroonga_release}-1 \ - && rm -rf /var/lib/apt/lists/* -RUN mv /var/cache/apt/archives/*.deb /tmp/ - -#################### -# 25-wrappers.yml -#################### -FROM base as wrappers -# Download package archive -ARG wrappers_release -ADD "https://github.com/tealbase/wrappers/releases/download/v${wrappers_release}/wrappers-v${wrappers_release}-pg${postgresql_major}-${TARGETARCH}-linux-gnu.deb" \ - /tmp/wrappers.deb - -#################### -# 26-hypopg.yml -#################### -FROM ccache as hypopg-source -# Download and extract -ARG hypopg_release -ARG hypopg_release_checksum -ADD --checksum=${hypopg_release_checksum} \ - "https://github.com/HypoPG/hypopg/archive/refs/tags/${hypopg_release}.tar.gz" \ - /tmp/hypopg.tar.gz -RUN tar -xvf /tmp/hypopg.tar.gz -C /tmp && \ - rm -rf /tmp/hypopg.tar.gz -# Build from source -WORKDIR /tmp/hypopg-${hypopg_release} -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -#################### - # 27-pg_repack.yml - #################### - FROM ccache as pg_repack-source - ARG pg_repack_release - ARG pg_repack_release_checksum - ADD --checksum=${pg_repack_release_checksum} \ - "https://github.com/reorg/pg_repack/archive/refs/tags/ver_${pg_repack_release}.tar.gz" \ - /tmp/pg_repack.tar.gz - RUN tar -xvf /tmp/pg_repack.tar.gz -C /tmp && \ - rm -rf /tmp/pg_repack.tar.gz - # Install build dependencies - RUN apt-get update && apt-get install -y --no-install-recommends \ - liblz4-dev \ - libz-dev \ - libzstd-dev \ - libreadline-dev \ - && rm -rf /var/lib/apt/lists/* - # 
Build from source - WORKDIR /tmp/pg_repack-ver_${pg_repack_release} - ENV USE_PGXS=1 - RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) - # Create debian package - RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgversion=${pg_repack_release} --nodoc - -#################### -# 28-pgvector.yml -#################### -FROM ccache as pgvector-source -ARG pgvector_release -ARG pgvector_release_checksum -ADD --checksum=${pgvector_release_checksum} \ - "https://github.com/pgvector/pgvector/archive/refs/tags/v${pgvector_release}.tar.gz" \ - /tmp/pgvector.tar.gz -RUN tar -xvf /tmp/pgvector.tar.gz -C /tmp && \ - rm -rf /tmp/pgvector.tar.gz -# Build from source -WORKDIR /tmp/pgvector-${pgvector_release} -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -#################### -# 29-pg_tle.yml -#################### -FROM ccache as pg_tle-source -ARG pg_tle_release -ARG pg_tle_release_checksum -ADD --checksum=${pg_tle_release_checksum} \ - "https://github.com/aws/pg_tle/archive/refs/tags/v${pg_tle_release}.tar.gz" \ - /tmp/pg_tle.tar.gz -RUN tar -xvf /tmp/pg_tle.tar.gz -C /tmp && \ - rm -rf /tmp/pg_tle.tar.gz -RUN apt-get update && apt-get install -y --no-install-recommends \ - flex \ - libkrb5-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/pg_tle-${pg_tle_release} -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -###################### -# 30-index_advisor.yml -###################### -FROM ccache as index_advisor -ARG index_advisor_release -ARG index_advisor_release_checksum -ADD --checksum=${index_advisor_release_checksum} \ - 
"https://github.com/olirice/index_advisor/archive/refs/tags/v${index_advisor_release}.tar.gz" \ - /tmp/index_advisor.tar.gz -RUN tar -xvf /tmp/index_advisor.tar.gz -C /tmp && \ - rm -rf /tmp/index_advisor.tar.gz -# Build from source -WORKDIR /tmp/index_advisor-${index_advisor_release} -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -#################### -# internal/supautils.yml -#################### -FROM base as supautils -# Download package archive -ARG supautils_release -# Define checksums for different architectures -ARG supautils_release_arm64_deb_checksum -ARG supautils_release_amd64_deb_checksum - -RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/* - -# Set up a script to download the correct package -RUN echo '#!/bin/sh' > /tmp/download_supautils.sh && \ - echo 'set -e' >> /tmp/download_supautils.sh && \ - echo 'if [ "$TARGETARCH" = "amd64" ]; then' >> /tmp/download_supautils.sh && \ - echo ' CHECKSUM="${supautils_release_amd64_deb_checksum}"' >> /tmp/download_supautils.sh && \ - echo ' ARCH="amd64"' >> /tmp/download_supautils.sh && \ - echo 'elif [ "$TARGETARCH" = "arm64" ]; then' >> /tmp/download_supautils.sh && \ - echo ' CHECKSUM="${supautils_release_arm64_deb_checksum}"' >> /tmp/download_supautils.sh && \ - echo ' ARCH="arm64"' >> /tmp/download_supautils.sh && \ - echo 'else' >> /tmp/download_supautils.sh && \ - echo ' echo "Unsupported architecture: $TARGETARCH" >&2' >> /tmp/download_supautils.sh && \ - echo ' exit 1' >> /tmp/download_supautils.sh && \ - echo 'fi' >> /tmp/download_supautils.sh && \ - echo 'CHECKSUM=$(echo $CHECKSUM | sed "s/^sha256://")' >> /tmp/download_supautils.sh && \ - echo 'curl -fsSL -o /tmp/supautils.deb \\' >> /tmp/download_supautils.sh && \ - echo ' 
"https://github.com/tealbase/supautils/releases/download/v${supautils_release}/supautils-v${supautils_release}-pg${postgresql_major}-$ARCH-linux-gnu.deb"' >> /tmp/download_supautils.sh && \ - echo 'echo "$CHECKSUM /tmp/supautils.deb" | sha256sum -c -' >> /tmp/download_supautils.sh && \ - chmod +x /tmp/download_supautils.sh - -# Run the script to download and verify the package -RUN /tmp/download_supautils.sh && rm /tmp/download_supautils.sh - -#################### -# setup-wal-g.yml -#################### -FROM base as walg -ARG wal_g_release -# ADD "https://github.com/wal-g/wal-g/releases/download/v${wal_g_release}/wal-g-pg-ubuntu-20.04-${TARGETARCH}.tar.gz" /tmp/wal-g.tar.gz -RUN arch=$([ "$TARGETARCH" = "arm64" ] && echo "aarch64" || echo "$TARGETARCH") && \ - apt-get update && apt-get install -y --no-install-recommends curl && \ - curl -kL "https://github.com/wal-g/wal-g/releases/download/v${wal_g_release}/wal-g-pg-ubuntu-20.04-${arch}.tar.gz" -o /tmp/wal-g.tar.gz && \ - tar -xvf /tmp/wal-g.tar.gz -C /tmp && \ - rm -rf /tmp/wal-g.tar.gz && \ - mv /tmp/wal-g-pg-ubuntu*20.04-$arch /tmp/wal-g - -#################### -# Collect extension packages -#################### -FROM scratch as extensions -COPY --from=postgis-source /tmp/*.deb /tmp/ -COPY --from=pgrouting-source /tmp/*.deb /tmp/ -COPY --from=pgtap-source /tmp/*.deb /tmp/ -COPY --from=pg_cron-source /tmp/*.deb /tmp/ -COPY --from=pgaudit-source /tmp/*.deb /tmp/ -COPY --from=pgjwt-source /tmp/*.deb /tmp/ -COPY --from=pgsql-http-source /tmp/*.deb /tmp/ -COPY --from=plpgsql_check-source /tmp/*.deb /tmp/ -COPY --from=pg-safeupdate-source /tmp/*.deb /tmp/ -COPY --from=timescaledb-source /tmp/*.deb /tmp/ -COPY --from=wal2json-source /tmp/*.deb /tmp/ -# COPY --from=pljava /tmp/*.deb /tmp/ -COPY --from=plv8 /tmp/*.deb /tmp/ -COPY --from=pg_plan_filter-source /tmp/*.deb /tmp/ -COPY --from=pg_net-source /tmp/*.deb /tmp/ -COPY --from=rum-source /tmp/*.deb /tmp/ -COPY --from=pgsodium-source /tmp/*.deb /tmp/ -COPY 
--from=pg_hashids-source /tmp/*.deb /tmp/ -COPY --from=pg_graphql /tmp/*.deb /tmp/ -COPY --from=pg_stat_monitor-source /tmp/*.deb /tmp/ -COPY --from=pg_jsonschema /tmp/*.deb /tmp/ -COPY --from=vault-source /tmp/*.deb /tmp/ -COPY --from=pgroonga-source /tmp/*.deb /tmp/ -COPY --from=wrappers /tmp/*.deb /tmp/ -COPY --from=hypopg-source /tmp/*.deb /tmp/ -COPY --from=pg_repack-source /tmp/*.deb /tmp/ -COPY --from=pgvector-source /tmp/*.deb /tmp/ -COPY --from=pg_tle-source /tmp/*.deb /tmp/ -COPY --from=index_advisor /tmp/*.deb /tmp/ -COPY --from=supautils /tmp/*.deb /tmp/ - -#################### -# Download gosu for easy step-down from root -#################### -FROM ubuntu:focal as gosu -ARG TARGETARCH -# Install dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - gnupg \ - ca-certificates \ - && rm -rf /var/lib/apt/lists/* -# Download binary -ARG GOSU_VERSION=1.16 -ARG GOSU_GPG_KEY=B42F6819007F00F88E364FD4036A9C25BF357DD4 -ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH \ - /usr/local/bin/gosu -ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH.asc \ - /usr/local/bin/gosu.asc -# Verify checksum -RUN gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys $GOSU_GPG_KEY && \ - gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu && \ - gpgconf --kill all && \ - chmod +x /usr/local/bin/gosu - -#################### -# Build final image -#################### -FROM base as production - -# Setup extensions -COPY --from=extensions /tmp /tmp -COPY --from=walg /tmp/wal-g /usr/local/bin/ - -ENV DEBIAN_FRONTEND=noninteractive -RUN apt-get update && apt-get install -y --no-install-recommends \ - /tmp/*.deb \ - # Needed for anything using libcurl - # https://github.com/tealbase/postgres/issues/573 - ca-certificates \ - && rm -rf /var/lib/apt/lists/* /tmp/* - -# Initialise configs -COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql.conf.j2 
/etc/postgresql/postgresql.conf -COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_hba.conf.j2 /etc/postgresql/pg_hba.conf -COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_ident.conf.j2 /etc/postgresql/pg_ident.conf -COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql-stdout-log.conf /etc/postgresql/logging.conf -COPY --chown=postgres:postgres ansible/files/postgresql_config/supautils.conf.j2 /etc/postgresql-custom/supautils.conf -COPY --chown=postgres:postgres ansible/files/postgresql_extension_custom_scripts /etc/postgresql-custom/extension-custom-scripts -COPY --chown=postgres:postgres ansible/files/pgsodium_getkey_urandom.sh.j2 /usr/lib/postgresql/${postgresql_major}/bin/pgsodium_getkey.sh -COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_read_replica.conf.j2 /etc/postgresql-custom/read-replica.conf -COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_walg.conf.j2 /etc/postgresql-custom/wal-g.conf -COPY --chown=postgres:postgres ansible/files/walg_helper_scripts/wal_fetch.sh /home/postgres/wal_fetch.sh -COPY ansible/files/walg_helper_scripts/wal_change_ownership.sh /root/wal_change_ownership.sh - -RUN sed -i \ - -e "s|#unix_socket_directories = '/tmp'|unix_socket_directories = '/var/run/postgresql'|g" \ - -e "s|#session_preload_libraries = ''|session_preload_libraries = 'supautils'|g" \ - -e "s|#include = '/etc/postgresql-custom/supautils.conf'|include = '/etc/postgresql-custom/supautils.conf'|g" \ - -e "s|#include = '/etc/postgresql-custom/wal-g.conf'|include = '/etc/postgresql-custom/wal-g.conf'|g" /etc/postgresql/postgresql.conf && \ - echo "pljava.libjvm_location = '/usr/lib/jvm/java-11-openjdk-${TARGETARCH}/lib/server/libjvm.so'" >> /etc/postgresql/postgresql.conf && \ - echo "pgsodium.getkey_script= '/usr/lib/postgresql/${postgresql_major}/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ - useradd --create-home --shell /bin/bash wal-g -G postgres 
&& \ - mkdir -p /etc/postgresql-custom && \ - chown postgres:postgres /etc/postgresql-custom - -# Include schema migrations -COPY migrations/db /docker-entrypoint-initdb.d/ -COPY ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql /docker-entrypoint-initdb.d/init-scripts/00-schema.sql -COPY ansible/files/stat_extension.sql /docker-entrypoint-initdb.d/migrations/00-extension.sql - -# Add upstream entrypoint script -COPY --from=gosu /usr/local/bin/gosu /usr/local/bin/gosu -ADD --chmod=0755 \ - https://github.com/docker-library/postgres/raw/master/15/bullseye/docker-entrypoint.sh \ - /usr/local/bin/ -ENTRYPOINT ["docker-entrypoint.sh"] - -HEALTHCHECK --interval=2s --timeout=2s --retries=10 CMD pg_isready -U postgres -h localhost -STOPSIGNAL SIGINT -EXPOSE 5432 - -ENV POSTGRES_HOST=/var/run/postgresql -CMD ["postgres", "-D", "/etc/postgresql"] - -#################### -# Update build cache -#################### -FROM ccache as stats -COPY --from=extensions /tmp/*.deb /dev/null -# Additional packages that are separately built from source -# COPY --from=plv8-deb /tmp/*.deb /dev/null -# Cache mount is only populated by docker build --no-cache -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - ccache -s && \ - cp -r /ccache/* /tmp -FROM scratch as buildcache -COPY --from=stats /tmp / diff --git a/Dockerfile-156 b/Dockerfile-15 similarity index 88% rename from Dockerfile-156 rename to Dockerfile-15 index 473e157..6e12cdc 100644 --- a/Dockerfile-156 +++ b/Dockerfile-15 @@ -10,7 +10,6 @@ ARG pgrouting_release=3.4.1 ARG pgtap_release=1.2.0 ARG pg_cron_release=1.6.2 ARG pgaudit_release=1.7.0 -ARG pgjwt_release=9742dab1b2f297ad3811120db7b21451bca2d3c9 ARG pgsql_http_release=1.5.0 ARG plpgsql_check_release=2.2.5 ARG pg_safeupdate_release=1.4 @@ -24,14 +23,14 @@ ARG rum_release=1.3.13 ARG pg_hashids_release=cd0e1b31d52b394a0df64079406a14a4f7387cd6 ARG libsodium_release=1.0.18 ARG pgsodium_release=3.1.6 -ARG pg_graphql_release=1.5.1 +ARG 
pg_graphql_release=1.5.11 ARG pg_stat_monitor_release=1.1.1 ARG pg_jsonschema_release=0.1.4 ARG pg_repack_release=1.4.8 ARG vault_release=0.2.8 ARG groonga_release=12.0.8 ARG pgroonga_release=2.4.0 -ARG wrappers_release=0.3.0 +ARG wrappers_release=0.5.0 ARG hypopg_release=1.3.1 ARG pgvector_release=0.4.0 ARG pg_tle_release=1.3.2 @@ -39,7 +38,7 @@ ARG index_advisor_release=0.2.0 ARG supautils_release=2.2.0 ARG wal_g_release=2.0.1 -FROM ubuntu:focal as base +FROM ubuntu:noble as base RUN apt update -y && apt install -y \ curl \ @@ -67,6 +66,7 @@ WORKDIR /nixpg RUN nix profile install .#psql_15/bin +RUN nix store gc WORKDIR / @@ -75,11 +75,9 @@ WORKDIR / RUN mkdir -p /usr/lib/postgresql/bin \ /usr/lib/postgresql/share/postgresql \ /usr/share/postgresql \ - # /usr/lib/postgresql/share/postgresql/contrib \ - #/usr/lib/postgresql/share/postgresql/timezonesets \ - #/usr/lib/postgresql/share/postgresql/tsearch_data \ - # /usr/lib/postgresql/share/postgresql/extension \ + /var/lib/postgresql \ && chown -R postgres:postgres /usr/lib/postgresql \ + && chown -R postgres:postgres /var/lib/postgresql \ && chown -R postgres:postgres /usr/share/postgresql # Create symbolic links @@ -121,14 +119,14 @@ ENV PGDATA=/var/lib/postgresql/data #################### FROM base as walg ARG wal_g_release -# ADD "https://github.com/wal-g/wal-g/releases/download/v${wal_g_release}/wal-g-pg-ubuntu-20.04-${TARGETARCH}.tar.gz" /tmp/wal-g.tar.gz -RUN arch=$([ "$TARGETARCH" = "arm64" ] && echo "aarch64" || echo "$TARGETARCH") && \ - apt-get update && apt-get install -y --no-install-recommends curl && \ - curl -kL "https://github.com/wal-g/wal-g/releases/download/v${wal_g_release}/wal-g-pg-ubuntu-20.04-aarch64.tar.gz" -o /tmp/wal-g.tar.gz && \ - tar -xvf /tmp/wal-g.tar.gz -C /tmp && \ - rm -rf /tmp/wal-g.tar.gz && \ - mv /tmp/wal-g-pg-ubuntu*20.04-aarch64 /tmp/wal-g +WORKDIR /nixpg + +RUN nix profile install .#wal-g-3 && \ + ln -s /nix/var/nix/profiles/default/bin/wal-g-3 /tmp/wal-g +RUN nix store gc + 
+WORKDIR / # #################### # # Download gosu for easy step-down from root # #################### @@ -180,7 +178,8 @@ RUN sed -i \ -e "s|#include = '/etc/postgresql-custom/wal-g.conf'|include = '/etc/postgresql-custom/wal-g.conf'|g" /etc/postgresql/postgresql.conf && \ echo "cron.database_name = 'postgres'" >> /etc/postgresql/postgresql.conf && \ #echo "pljava.libjvm_location = '/usr/lib/jvm/java-11-openjdk-${TARGETARCH}/lib/server/libjvm.so'" >> /etc/postgresql/postgresql.conf && \ - echo "pgsodium.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ + echo "pgsodium.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ + echo "vault.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ echo 'auto_explain.log_min_duration = 10s' >> /etc/postgresql/postgresql.conf && \ usermod -aG postgres wal-g && \ mkdir -p /etc/postgresql-custom && \ @@ -195,7 +194,7 @@ COPY ansible/files/stat_extension.sql /docker-entrypoint-initdb.d/migrations/00- COPY --from=gosu /usr/local/bin/gosu /usr/local/bin/gosu ADD --chmod=0755 \ https://github.com/docker-library/postgres/raw/master/15/bullseye/docker-entrypoint.sh \ - /usr/local/bin/ + /usr/local/bin/docker-entrypoint.sh RUN mkdir -p /var/run/postgresql && chown postgres:postgres /var/run/postgresql @@ -217,7 +216,8 @@ RUN echo "C.UTF-8 UTF-8" > /etc/locale.gen && echo "en_US.UTF-8 UTF-8" >> /etc/l ENV LANG en_US.UTF-8 ENV LANGUAGE en_US:en ENV LC_ALL en_US.UTF-8 -ENV LC_CTYPE=C.UTF-8 -ENV LC_COLLATE=C.UTF-8 ENV LOCALE_ARCHIVE /usr/lib/locale/locale-archive +RUN mkdir -p /usr/share/postgresql/extension/ && \ + ln -s /usr/lib/postgresql/bin/pgsodium_getkey.sh /usr/share/postgresql/extension/pgsodium_getkey && \ + chmod +x /usr/lib/postgresql/bin/pgsodium_getkey.sh CMD ["postgres", "-D", "/etc/postgresql"] diff --git a/Dockerfile-17 b/Dockerfile-17 new file mode 100644 index 
0000000..806828f --- /dev/null +++ b/Dockerfile-17 @@ -0,0 +1,232 @@ +# syntax=docker/dockerfile:1.6 +ARG postgresql_major=17-orioledb +ARG postgresql_release=${postgresql_major}.1 + +# Bump default build arg to build a package from source +# Bump vars.yml to specify runtime package version +ARG sfcgal_release=1.3.10 +ARG postgis_release=3.3.2 +ARG pgrouting_release=3.4.1 +ARG pgtap_release=1.2.0 +ARG pg_cron_release=1.6.2 +ARG pgaudit_release=1.7.0 +ARG pgjwt_release=9742dab1b2f297ad3811120db7b21451bca2d3c9 +ARG pgsql_http_release=1.5.0 +ARG plpgsql_check_release=2.2.5 +ARG pg_safeupdate_release=1.4 +ARG timescaledb_release=2.9.1 +ARG wal2json_release=2_5 +ARG pljava_release=1.6.4 +ARG plv8_release=3.1.5 +ARG pg_plan_filter_release=5081a7b5cb890876e67d8e7486b6a64c38c9a492 +ARG pg_net_release=0.7.1 +ARG rum_release=1.3.13 +ARG pg_hashids_release=cd0e1b31d52b394a0df64079406a14a4f7387cd6 +ARG libsodium_release=1.0.18 +ARG pgsodium_release=3.1.6 +ARG pg_graphql_release=1.5.11 +ARG pg_stat_monitor_release=1.1.1 +ARG pg_jsonschema_release=0.1.4 +ARG pg_repack_release=1.4.8 +ARG vault_release=0.2.8 +ARG groonga_release=12.0.8 +ARG pgroonga_release=2.4.0 +ARG wrappers_release=0.5.0 +ARG hypopg_release=1.3.1 +ARG pgvector_release=0.4.0 +ARG pg_tle_release=1.3.2 +ARG index_advisor_release=0.2.0 +ARG supautils_release=2.2.0 +ARG wal_g_release=3.0.5 + +FROM ubuntu:noble as base + +RUN apt update -y && apt install -y \ + curl \ + gnupg \ + lsb-release \ + software-properties-common \ + wget \ + sudo \ + tree \ + && apt clean + + +RUN adduser --system --home /var/lib/postgresql --no-create-home --shell /bin/bash --group --gecos "PostgreSQL administrator" postgres +RUN adduser --system --no-create-home --shell /bin/bash --group wal-g +RUN curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install linux \ +--init none \ +--no-confirm \ +--extra-conf "substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com" \ 
+--extra-conf "trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" + +ENV PATH="${PATH}:/nix/var/nix/profiles/default/bin" + +COPY . /nixpg + +WORKDIR /nixpg + +RUN nix profile install .#psql_17/bin + +RUN nix store gc + +WORKDIR / + + +RUN mkdir -p /usr/lib/postgresql/bin \ + /usr/lib/postgresql/share/postgresql \ + /usr/share/postgresql \ + /var/lib/postgresql \ + && chown -R postgres:postgres /usr/lib/postgresql \ + && chown -R postgres:postgres /var/lib/postgresql \ + && chown -R postgres:postgres /usr/share/postgresql + +# Create symbolic links +RUN ln -s /nix/var/nix/profiles/default/bin/* /usr/lib/postgresql/bin/ \ + && ln -s /nix/var/nix/profiles/default/bin/* /usr/bin/ \ + && chown -R postgres:postgres /usr/bin + +# Create symbolic links for PostgreSQL shares +RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/lib/postgresql/share/postgresql/ +RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/share/postgresql/ +RUN chown -R postgres:postgres /usr/lib/postgresql/share/postgresql/ +RUN chown -R postgres:postgres /usr/share/postgresql/ +# Create symbolic links for contrib directory +RUN tree /nix > /tmp/tree.txt && cat /tmp/tree.txt && cat /tmp/tree.txt >&2 + +RUN chown -R postgres:postgres /usr/lib/postgresql + +RUN ln -sf /usr/lib/postgresql/share/postgresql/timezonesets /usr/share/postgresql/timezonesets + + +RUN apt-get update && \ + apt-get install -y --no-install-recommends tzdata + +RUN ln -fs /usr/share/zoneinfo/Etc/UTC /etc/localtime && \ + dpkg-reconfigure --frontend noninteractive tzdata + +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + build-essential \ + checkinstall \ + cmake + +ENV PGDATA=/var/lib/postgresql/data + +#################### +# setup-wal-g.yml +#################### +FROM base as walg +ARG wal_g_release + +WORKDIR /nixpg + +RUN nix profile install .#wal-g-3 && \ + ln -s 
/nix/var/nix/profiles/default/bin/wal-g-3 /tmp/wal-g + +RUN nix store gc + +WORKDIR / +# #################### +# # Download gosu for easy step-down from root +# #################### +FROM base as gosu +ARG TARGETARCH +# Install dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + gnupg \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* +# Download binary +ARG GOSU_VERSION=1.16 +ARG GOSU_GPG_KEY=B42F6819007F00F88E364FD4036A9C25BF357DD4 +ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH \ + /usr/local/bin/gosu +ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH.asc \ + /usr/local/bin/gosu.asc +# Verify checksum +RUN gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys $GOSU_GPG_KEY && \ + gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu && \ + gpgconf --kill all && \ + chmod +x /usr/local/bin/gosu + +# #################### +# # Build final image +# #################### +FROM gosu as production +RUN id postgres || (echo "postgres user does not exist" && exit 1) +# # Setup extensions +COPY --from=walg /tmp/wal-g /usr/local/bin/ + +# # Initialise configs +COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql.conf.j2 /etc/postgresql/postgresql.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_hba.conf.j2 /etc/postgresql/pg_hba.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_ident.conf.j2 /etc/postgresql/pg_ident.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql-stdout-log.conf /etc/postgresql/logging.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/supautils.conf.j2 /etc/postgresql-custom/supautils.conf +COPY --chown=postgres:postgres ansible/files/postgresql_extension_custom_scripts /etc/postgresql-custom/extension-custom-scripts +COPY --chown=postgres:postgres ansible/files/pgsodium_getkey_urandom.sh.j2 
/usr/lib/postgresql/bin/pgsodium_getkey.sh +COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_read_replica.conf.j2 /etc/postgresql-custom/read-replica.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_walg.conf.j2 /etc/postgresql-custom/wal-g.conf +COPY --chown=postgres:postgres ansible/files/walg_helper_scripts/wal_fetch.sh /home/postgres/wal_fetch.sh +COPY ansible/files/walg_helper_scripts/wal_change_ownership.sh /root/wal_change_ownership.sh + +RUN sed -i \ + -e "s|#unix_socket_directories = '/tmp'|unix_socket_directories = '/var/run/postgresql'|g" \ + -e "s|#session_preload_libraries = ''|session_preload_libraries = 'supautils'|g" \ + -e "s|#include = '/etc/postgresql-custom/supautils.conf'|include = '/etc/postgresql-custom/supautils.conf'|g" \ + -e "s|#include = '/etc/postgresql-custom/wal-g.conf'|include = '/etc/postgresql-custom/wal-g.conf'|g" /etc/postgresql/postgresql.conf && \ + echo "cron.database_name = 'postgres'" >> /etc/postgresql/postgresql.conf && \ + #echo "pljava.libjvm_location = '/usr/lib/jvm/java-11-openjdk-${TARGETARCH}/lib/server/libjvm.so'" >> /etc/postgresql/postgresql.conf && \ + echo "pgsodium.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ + echo "vault.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ + echo 'auto_explain.log_min_duration = 10s' >> /etc/postgresql/postgresql.conf && \ + usermod -aG postgres wal-g && \ + mkdir -p /etc/postgresql-custom && \ + chown postgres:postgres /etc/postgresql-custom + + # Remove items from postgresql.conf +RUN sed -i 's/ timescaledb,//g;' "/etc/postgresql/postgresql.conf" + #as of pg 16.4 + this db_user_namespace totally deprecated and will break the server if setting is present +RUN sed -i 's/db_user_namespace = off/#db_user_namespace = off/g;' "/etc/postgresql/postgresql.conf" +RUN sed -i 's/ timescaledb,//g; s/ plv8,//g' 
"/etc/postgresql-custom/supautils.conf" + + + +# # Include schema migrations +COPY migrations/db /docker-entrypoint-initdb.d/ +COPY ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql /docker-entrypoint-initdb.d/init-scripts/00-schema.sql +COPY ansible/files/stat_extension.sql /docker-entrypoint-initdb.d/migrations/00-extension.sql + +# # Add upstream entrypoint script +COPY --from=gosu /usr/local/bin/gosu /usr/local/bin/gosu +ADD --chmod=0755 \ + https://github.com/docker-library/postgres/raw/master/17/bullseye/docker-entrypoint.sh \ + /usr/local/bin/docker-entrypoint.sh + +RUN mkdir -p /var/run/postgresql && chown postgres:postgres /var/run/postgresql + +ENTRYPOINT ["docker-entrypoint.sh"] + +HEALTHCHECK --interval=2s --timeout=2s --retries=10 CMD pg_isready -U postgres -h localhost +STOPSIGNAL SIGINT +EXPOSE 5432 + +ENV POSTGRES_HOST=/var/run/postgresql +ENV POSTGRES_USER=tealbase_admin +ENV POSTGRES_DB=postgres +ENV POSTGRES_INITDB_ARGS="--allow-group-access --locale-provider=icu --encoding=UTF-8 --icu-locale=en_US.UTF-8" +RUN apt-get update && apt-get install -y --no-install-recommends \ + locales \ + && rm -rf /var/lib/apt/lists/* && \ + localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 \ + && localedef -i C -c -f UTF-8 -A /usr/share/locale/locale.alias C.UTF-8 +RUN echo "C.UTF-8 UTF-8" > /etc/locale.gen && echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen && locale-gen +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US:en +ENV LC_ALL en_US.UTF-8 +ENV LOCALE_ARCHIVE /usr/lib/locale/locale-archive +RUN mkdir -p /usr/share/postgresql/extension/ && \ + ln -s /usr/lib/postgresql/bin/pgsodium_getkey.sh /usr/share/postgresql/extension/pgsodium_getkey && \ + chmod +x /usr/lib/postgresql/bin/pgsodium_getkey.sh +CMD ["postgres", "-D", "/etc/postgresql"] diff --git a/Dockerfile-kubernetes b/Dockerfile-kubernetes new file mode 100644 index 0000000..e27ec2c --- /dev/null +++ b/Dockerfile-kubernetes @@ -0,0 +1,12 @@ +FROM alpine:3.22 + +ADD 
./output-cloudimg/packer-cloudimg /disk/image.qcow2 + +RUN apk add --no-cache qemu-system-aarch64 qemu-img openssh-client aavmf virtiofsd \ + && truncate -s 64M /root/varstore.img \ + && truncate -s 64M /root/efi.img \ + && dd if=/usr/share/AAVMF/QEMU_EFI.fd of=/root/efi.img conv=notrunc \ + && qemu-img create -f qcow2 /tmp/disk.qcow2 -b /disk/image.qcow2 -F qcow2 \ + && apk del --no-cache aavmf qemu-img + +CMD exec /bin/sh -c "trap : TERM INT; sleep 9999999999d & wait" diff --git a/Dockerfile-orioledb-17 b/Dockerfile-orioledb-17 new file mode 100644 index 0000000..1f92da2 --- /dev/null +++ b/Dockerfile-orioledb-17 @@ -0,0 +1,238 @@ +# syntax=docker/dockerfile:1.6 +ARG postgresql_major=17-orioledb +ARG postgresql_release=${postgresql_major}.1 + +# Bump default build arg to build a package from source +# Bump vars.yml to specify runtime package version +ARG sfcgal_release=1.3.10 +ARG postgis_release=3.3.2 +ARG pgrouting_release=3.4.1 +ARG pgtap_release=1.2.0 +ARG pg_cron_release=1.6.2 +ARG pgaudit_release=1.7.0 +ARG pgjwt_release=9742dab1b2f297ad3811120db7b21451bca2d3c9 +ARG pgsql_http_release=1.5.0 +ARG plpgsql_check_release=2.2.5 +ARG pg_safeupdate_release=1.4 +ARG timescaledb_release=2.9.1 +ARG wal2json_release=2_5 +ARG pljava_release=1.6.4 +ARG plv8_release=3.1.5 +ARG pg_plan_filter_release=5081a7b5cb890876e67d8e7486b6a64c38c9a492 +ARG pg_net_release=0.7.1 +ARG rum_release=1.3.13 +ARG pg_hashids_release=cd0e1b31d52b394a0df64079406a14a4f7387cd6 +ARG libsodium_release=1.0.18 +ARG pgsodium_release=3.1.6 +ARG pg_graphql_release=1.5.11 +ARG pg_stat_monitor_release=1.1.1 +ARG pg_jsonschema_release=0.1.4 +ARG pg_repack_release=1.4.8 +ARG vault_release=0.2.8 +ARG groonga_release=12.0.8 +ARG pgroonga_release=2.4.0 +ARG wrappers_release=0.5.0 +ARG hypopg_release=1.3.1 +ARG pgvector_release=0.4.0 +ARG pg_tle_release=1.3.2 +ARG index_advisor_release=0.2.0 +ARG supautils_release=2.2.0 +ARG wal_g_release=3.0.5 + +FROM ubuntu:noble as base + +RUN apt update -y && apt install 
-y \ + curl \ + gnupg \ + lsb-release \ + software-properties-common \ + wget \ + sudo \ + tree \ + && apt clean + + +RUN adduser --system --home /var/lib/postgresql --no-create-home --shell /bin/bash --group --gecos "PostgreSQL administrator" postgres +RUN adduser --system --no-create-home --shell /bin/bash --group wal-g +RUN curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install linux \ +--init none \ +--no-confirm \ +--extra-conf "substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com" \ +--extra-conf "trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" + +ENV PATH="${PATH}:/nix/var/nix/profiles/default/bin" + +COPY . /nixpg + +WORKDIR /nixpg + +RUN nix profile install .#psql_orioledb-17/bin + +RUN nix store gc + +WORKDIR / + + +RUN mkdir -p /usr/lib/postgresql/bin \ + /usr/lib/postgresql/share/postgresql \ + /usr/share/postgresql \ + /var/lib/postgresql \ + && chown -R postgres:postgres /usr/lib/postgresql \ + && chown -R postgres:postgres /var/lib/postgresql \ + && chown -R postgres:postgres /usr/share/postgresql + +# Create symbolic links +RUN ln -s /nix/var/nix/profiles/default/bin/* /usr/lib/postgresql/bin/ \ + && ln -s /nix/var/nix/profiles/default/bin/* /usr/bin/ \ + && chown -R postgres:postgres /usr/bin + +# Create symbolic links for PostgreSQL shares +RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/lib/postgresql/share/postgresql/ +RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/share/postgresql/ +RUN chown -R postgres:postgres /usr/lib/postgresql/share/postgresql/ +RUN chown -R postgres:postgres /usr/share/postgresql/ +# Create symbolic links for contrib directory +RUN tree /nix > /tmp/tree.txt && cat /tmp/tree.txt && cat /tmp/tree.txt >&2 + +RUN chown -R postgres:postgres /usr/lib/postgresql + +RUN ln -sf 
/usr/lib/postgresql/share/postgresql/timezonesets /usr/share/postgresql/timezonesets + + +RUN apt-get update && \ + apt-get install -y --no-install-recommends tzdata + +RUN ln -fs /usr/share/zoneinfo/Etc/UTC /etc/localtime && \ + dpkg-reconfigure --frontend noninteractive tzdata + +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + build-essential \ + checkinstall \ + cmake + +ENV PGDATA=/var/lib/postgresql/data + +#################### +# setup-wal-g.yml +#################### +FROM base as walg +ARG wal_g_release + +WORKDIR /nixpg + +RUN nix profile install .#wal-g-3 && \ + ln -s /nix/var/nix/profiles/default/bin/wal-g-3 /tmp/wal-g + +RUN nix store gc + +WORKDIR / +# #################### +# # Download gosu for easy step-down from root +# #################### +FROM base as gosu +ARG TARGETARCH +# Install dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + gnupg \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* +# Download binary +ARG GOSU_VERSION=1.16 +ARG GOSU_GPG_KEY=B42F6819007F00F88E364FD4036A9C25BF357DD4 +ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH \ + /usr/local/bin/gosu +ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH.asc \ + /usr/local/bin/gosu.asc +# Verify checksum +RUN gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys $GOSU_GPG_KEY && \ + gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu && \ + gpgconf --kill all && \ + chmod +x /usr/local/bin/gosu + +# #################### +# # Build final image +# #################### +FROM gosu as production +RUN id postgres || (echo "postgres user does not exist" && exit 1) +# # Setup extensions +COPY --from=walg /tmp/wal-g /usr/local/bin/ + +# # Initialise configs +COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql.conf.j2 /etc/postgresql/postgresql.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_hba.conf.j2 
/etc/postgresql/pg_hba.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_ident.conf.j2 /etc/postgresql/pg_ident.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql-stdout-log.conf /etc/postgresql/logging.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/supautils.conf.j2 /etc/postgresql-custom/supautils.conf +COPY --chown=postgres:postgres ansible/files/postgresql_extension_custom_scripts /etc/postgresql-custom/extension-custom-scripts +COPY --chown=postgres:postgres ansible/files/pgsodium_getkey_urandom.sh.j2 /usr/lib/postgresql/bin/pgsodium_getkey.sh +COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_read_replica.conf.j2 /etc/postgresql-custom/read-replica.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_walg.conf.j2 /etc/postgresql-custom/wal-g.conf +COPY --chown=postgres:postgres ansible/files/walg_helper_scripts/wal_fetch.sh /home/postgres/wal_fetch.sh +COPY ansible/files/walg_helper_scripts/wal_change_ownership.sh /root/wal_change_ownership.sh + +RUN sed -i \ + -e "s|#unix_socket_directories = '/tmp'|unix_socket_directories = '/var/run/postgresql'|g" \ + -e "s|#session_preload_libraries = ''|session_preload_libraries = 'supautils'|g" \ + -e "s|#include = '/etc/postgresql-custom/supautils.conf'|include = '/etc/postgresql-custom/supautils.conf'|g" \ + -e "s|#include = '/etc/postgresql-custom/wal-g.conf'|include = '/etc/postgresql-custom/wal-g.conf'|g" /etc/postgresql/postgresql.conf && \ + echo "cron.database_name = 'postgres'" >> /etc/postgresql/postgresql.conf && \ + #echo "pljava.libjvm_location = '/usr/lib/jvm/java-11-openjdk-${TARGETARCH}/lib/server/libjvm.so'" >> /etc/postgresql/postgresql.conf && \ + echo "pgsodium.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ + echo "vault.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ + echo 
'auto_explain.log_min_duration = 10s' >> /etc/postgresql/postgresql.conf && \ + usermod -aG postgres wal-g && \ + mkdir -p /etc/postgresql-custom && \ + chown postgres:postgres /etc/postgresql-custom + + # Remove items from postgresql.conf +RUN sed -i 's/ timescaledb,//g;' "/etc/postgresql/postgresql.conf" + #as of pg 16.4 + this db_user_namespace totally deprecated and will break the server if setting is present +RUN sed -i 's/db_user_namespace = off/#db_user_namespace = off/g;' "/etc/postgresql/postgresql.conf" +RUN sed -i 's/ timescaledb,//g; s/ plv8,//g; s/ postgis,//g; s/ pgrouting,//g' "/etc/postgresql-custom/supautils.conf" +RUN sed -i 's/\(shared_preload_libraries.*\)'\''\(.*\)$/\1, orioledb'\''\2/' "/etc/postgresql/postgresql.conf" +RUN echo "default_table_access_method = 'orioledb'" >> "/etc/postgresql/postgresql.conf" + + + +# # Include schema migrations +COPY migrations/db /docker-entrypoint-initdb.d/ +COPY ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql /docker-entrypoint-initdb.d/init-scripts/00-schema.sql +COPY ansible/files/stat_extension.sql /docker-entrypoint-initdb.d/migrations/00-extension.sql +# Enable orioledb extension first +RUN echo "CREATE EXTENSION orioledb;" > /docker-entrypoint-initdb.d/init-scripts/00-pre-init.sql && \ + chown postgres:postgres /docker-entrypoint-initdb.d/init-scripts/00-pre-init.sql + +# # Add upstream entrypoint script +COPY --from=gosu /usr/local/bin/gosu /usr/local/bin/gosu +ADD --chmod=0755 \ + https://github.com/docker-library/postgres/raw/master/17/bullseye/docker-entrypoint.sh \ + /usr/local/bin/docker-entrypoint.sh + +RUN mkdir -p /var/run/postgresql && chown postgres:postgres /var/run/postgresql + +ENTRYPOINT ["docker-entrypoint.sh"] + +HEALTHCHECK --interval=2s --timeout=2s --retries=10 CMD pg_isready -U postgres -h localhost +STOPSIGNAL SIGINT +EXPOSE 5432 + +ENV POSTGRES_HOST=/var/run/postgresql +ENV POSTGRES_USER=tealbase_admin +ENV POSTGRES_DB=postgres +ENV 
POSTGRES_INITDB_ARGS="--allow-group-access --locale-provider=icu --encoding=UTF-8 --icu-locale=en_US.UTF-8" +RUN apt-get update && apt-get install -y --no-install-recommends \ + locales \ + && rm -rf /var/lib/apt/lists/* && \ + localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 \ + && localedef -i C -c -f UTF-8 -A /usr/share/locale/locale.alias C.UTF-8 +RUN echo "C.UTF-8 UTF-8" > /etc/locale.gen && echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen && locale-gen +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US:en +ENV LC_ALL en_US.UTF-8 +ENV LOCALE_ARCHIVE /usr/lib/locale/locale-archive +RUN mkdir -p /usr/share/postgresql/extension/ && \ + ln -s /usr/lib/postgresql/bin/pgsodium_getkey.sh /usr/share/postgresql/extension/pgsodium_getkey && \ + chmod +x /usr/lib/postgresql/bin/pgsodium_getkey.sh + +CMD ["postgres", "-D", "/etc/postgresql"] diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..610322b --- /dev/null +++ b/Makefile @@ -0,0 +1,16 @@ +UPSTREAM_NIX_GIT_SHA := $(shell git rev-parse HEAD) +GIT_SHA := $(shell git describe --tags --always --dirty) + +init: qemu-arm64-nix.pkr.hcl + packer init qemu-arm64-nix.pkr.hcl + +output-cloudimg/packer-cloudimg: ansible qemu-arm64-nix.pkr.hcl + packer build -var "git_sha=$(UPSTREAM_NIX_GIT_SHA)" qemu-arm64-nix.pkr.hcl + +alpine-image: output-cloudimg/packer-cloudimg + sudo nerdctl build . -t tealbase-postgres-test:$(GIT_SHA) -f ./Dockerfile-kubernetes + +clean: + rm -rf output-cloudimg + +.PHONY: alpine-image init clean diff --git a/README.md b/README.md index 95fa829..413d08e 100644 --- a/README.md +++ b/README.md @@ -3,66 +3,130 @@ Unmodified Postgres with some useful plugins. Our goal with this repo is not to modify Postgres, but to provide some of the most common extensions with a one-click install. ## Primary Features -- ✅ Postgres [15](https://www.postgresql.org/about/news/postgresql-15-released-2526/). -- ✅ Ubuntu 20.04 (Focal Fossa). 
+- ✅ Postgres [postgresql-15.8](https://www.postgresql.org/docs/15/index.html) +- ✅ Postgres [postgresql-17.4](https://www.postgresql.org/docs/17/index.html) +- ✅ Postgres [orioledb-postgresql-17_6](https://github.com/orioledb/orioledb) +- ✅ Ubuntu 24.04 (Noble Numbat). - ✅ [wal_level](https://www.postgresql.org/docs/current/runtime-config-wal.html) = logical and [max_replication_slots](https://www.postgresql.org/docs/current/runtime-config-replication.html) = 5. Ready for replication. - ✅ [Large Systems Extensions](https://github.com/aws/aws-graviton-getting-started#building-for-graviton-and-graviton2). Enabled for ARM images. - ## Extensions + +### PostgreSQL 15 Extensions | Extension | Version | Description | | ------------- | :-------------: | ------------- | -| [Postgres contrib modules](https://www.postgresql.org/docs/current/contrib.html) | - | Because everyone should enable `pg_stat_statements`. | -| [PostGIS](https://postgis.net/) | [3.3.2](https://git.osgeo.org/gitea/postgis/postgis/raw/tag/3.3.2/NEWS) | Postgres' most popular extension - support for geographic objects. | -| [pgRouting](https://pgrouting.org/) | [v3.4.1](https://github.com/pgRouting/pgrouting/releases/tag/v3.4.1) | Extension of PostGIS - provides geospatial routing functionalities. | -| [pgTAP](https://pgtap.org/) | [v1.2.0](https://github.com/theory/pgtap/releases/tag/v1.2.0) | Unit Testing for Postgres. | -| [pg_cron](https://github.com/citusdata/pg_cron) | [v1.6.2](https://github.com/citusdata/pg_cron/releases/tag/v1.6.2) | Run CRON jobs inside Postgres. | -| [pgAudit](https://www.pgaudit.org/) | [1.7.0](https://github.com/pgaudit/pgaudit/releases/tag/1.7.0) | Generate highly compliant audit logs. | -| [pgjwt](https://github.com/michelp/pgjwt) | [commit](https://github.com/michelp/pgjwt/commit/9742dab1b2f297ad3811120db7b21451bca2d3c9) | Generate JSON Web Tokens (JWT) in Postgres. 
| -| [pgsql-http](https://github.com/pramsey/pgsql-http) | [1.5.0](https://github.com/pramsey/pgsql-http/releases/tag/v1.5.0) | HTTP client for Postgres. | -| [plpgsql_check](https://github.com/okbob/plpgsql_check) | [2.2.3](https://github.com/okbob/plpgsql_check/releases/tag/v2.2.3) | Linter tool for PL/pgSQL. | -| [pg-safeupdate](https://github.com/eradman/pg-safeupdate) | [1.4](https://github.com/eradman/pg-safeupdate/releases/tag/1.4) | Protect your data from accidental updates or deletes. | -| [wal2json](https://github.com/eulerto/wal2json) | [commit](https://github.com/eulerto/wal2json/commit/53b548a29ebd6119323b6eb2f6013d7c5fe807ec) | JSON output plugin for logical replication decoding. | -| [PL/Java](https://github.com/tada/pljava) | [1.6.4](https://github.com/tada/pljava/releases/tag/V1_6_4) | Write in Java functions in Postgres. | -| [plv8](https://github.com/plv8/plv8) | [commit](https://github.com/plv8/plv8/commit/bcddd92f71530e117f2f98b92d206dafe824f73a) | Write in Javascript functions in Postgres. | -| [pg_plan_filter](https://github.com/pgexperts/pg_plan_filter) | [commit](https://github.com/pgexperts/pg_plan_filter/commit/5081a7b5cb890876e67d8e7486b6a64c38c9a492) | Only allow statements that fulfill set criteria to be executed. | -| [pg_net](https://github.com/tealbase/pg_net) | [v0.6.1](https://github.com/tealbase/pg_net/releases/tag/v0.6.1) | Expose the SQL interface for async networking. | -| [pg_repack](https://github.com/reorg/pg_repack) | [ver_1.5.0](https://github.com/reorg/pg_repack/releases/tag/ver_1.5.0) | Tool to remove bloat from tables and indexes -| [rum](https://github.com/postgrespro/rum) | [1.3.13](https://github.com/postgrespro/rum/releases/tag/1.3.13) | An alternative to the GIN index. | -| [pg_hashids](https://github.com/iCyberon/pg_hashids) | [commit](https://github.com/iCyberon/pg_hashids/commit/83398bcbb616aac2970f5e77d93a3200f0f28e74) | Generate unique identifiers from numbers. 
| -| [pgsodium](https://github.com/michelp/pgsodium) | [3.1.0](https://github.com/michelp/pgsodium/releases/tag/2.0.0) | Modern encryption API using libsodium. | -| [pg_stat_monitor](https://github.com/percona/pg_stat_monitor) | [1.0.1](https://github.com/percona/pg_stat_monitor/releases/tag/1.0.1) | Query Performance Monitoring Tool for PostgreSQL -| [pgvector](https://github.com/pgvector/pgvector) | [v0.4.0](https://github.com/pgvector/pgvector/releases/tag/v0.4.0) | Open-source vector similarity search for Postgres - - -Can't find your favorite extension? Suggest for it to be added into future releases [here](https://github.com/tealbase/tealbase/discussions/679)! - -## Enhanced Security -*This is only available for our AWS EC2/ DO Droplet images* - -Aside from having [ufw](https://help.ubuntu.com/community/UFW),[fail2ban](https://www.fail2ban.org/wiki/index.php/Main_Page), and [unattended-upgrades](https://wiki.debian.org/UnattendedUpgrades) installed, we also have the following enhancements in place: -| Enhancement | Description | -| ------------- | ------------- | -| [fail2ban filter](https://github.com/tealbase/postgres/blob/develop/ansible/files/fail2ban_config/filter-postgresql.conf.j2) for PostgreSQL access | Monitors for brute force attempts over at port `5432`. | -| [fail2ban filter](https://github.com/tealbase/postgres/blob/develop/ansible/files/fail2ban_config/filter-pgbouncer.conf.j2) for PgBouncer access | Monitors for brute force attempts over at port `6543`. 
| - +| [hypopg](https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.1.tar.gz) | [1.4.1](https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.1.tar.gz) | Hypothetical Indexes for PostgreSQL | +| [index_advisor](https://github.com/olirice/index_advisor/archive/v0.2.0.tar.gz) | [0.2.0](https://github.com/olirice/index_advisor/archive/v0.2.0.tar.gz) | Recommend indexes to improve query performance in PostgreSQL | +| [pg-safeupdate](https://github.com/eradman/pg-safeupdate/archive/1.4.tar.gz) | [1.4](https://github.com/eradman/pg-safeupdate/archive/1.4.tar.gz) | A simple extension to PostgreSQL that requires criteria for UPDATE and DELETE | +| [pg_cron](https://github.com/citusdata/pg_cron/archive/v1.6.4.tar.gz) | [1.6.4](https://github.com/citusdata/pg_cron/archive/v1.6.4.tar.gz) | Run Cron jobs through PostgreSQL | +| [pg_graphql](https://github.com/tealbase/pg_graphql/archive/v1.5.11.tar.gz) | [1.5.11](https://github.com/tealbase/pg_graphql/archive/v1.5.11.tar.gz) | GraphQL support for PostgreSQL | +| [pg_hashids](https://github.com/iCyberon/pg_hashids/archive/cd0e1b31d52b394a0df64079406a14a4f7387cd6.tar.gz) | [cd0e1b31d52b394a0df64079406a14a4f7387cd6](https://github.com/iCyberon/pg_hashids/archive/cd0e1b31d52b394a0df64079406a14a4f7387cd6.tar.gz) | Generate short unique IDs in PostgreSQL | +| [pg_jsonschema](https://github.com/tealbase/pg_jsonschema/archive/v0.3.3.tar.gz) | [0.3.3](https://github.com/tealbase/pg_jsonschema/archive/v0.3.3.tar.gz) | JSON Schema Validation for PostgreSQL | +| [pg_net](https://github.com/tealbase/pg_net/archive/refs/tags/v0.14.0.tar.gz) | [0.14.0](https://github.com/tealbase/pg_net/archive/refs/tags/v0.14.0.tar.gz) | Async networking for Postgres | +| [pg_plan_filter](https://github.com/pgexperts/pg_plan_filter/archive/5081a7b5cb890876e67d8e7486b6a64c38c9a492.tar.gz) | [5081a7b5cb890876e67d8e7486b6a64c38c9a492](https://github.com/pgexperts/pg_plan_filter/archive/5081a7b5cb890876e67d8e7486b6a64c38c9a492.tar.gz) | Filter PostgreSQL 
statements by execution plans | +| [pg_repack](https://github.com/reorg/pg_repack/archive/ver_1.5.2.tar.gz) | [1.5.2](https://github.com/reorg/pg_repack/archive/ver_1.5.2.tar.gz) | Reorganize tables in PostgreSQL databases with minimal locks | +| [pg_stat_monitor](https://github.com/percona/pg_stat_monitor/archive/refs/tags/2.1.0.tar.gz) | [2.1.0](https://github.com/percona/pg_stat_monitor/archive/refs/tags/2.1.0.tar.gz) | Query Performance Monitoring Tool for PostgreSQL | +| [pg_tle](https://github.com/aws/pg_tle/archive/refs/tags/v1.4.0.tar.gz) | [1.4.0](https://github.com/aws/pg_tle/archive/refs/tags/v1.4.0.tar.gz) | Framework for 'Trusted Language Extensions' in PostgreSQL | +| [pgaudit](https://github.com/pgaudit/pgaudit/archive/1.7.0.tar.gz) | [1.7.0](https://github.com/pgaudit/pgaudit/archive/1.7.0.tar.gz) | Open Source PostgreSQL Audit Logging | +| [pgjwt](https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz) | [9742dab1b2f297ad3811120db7b21451bca2d3c9](https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz) | PostgreSQL implementation of JSON Web Tokens | +| [pgmq](https://github.com/tembo-io/pgmq/archive/v1.4.4.tar.gz) | [1.4.4](https://github.com/tembo-io/pgmq/archive/v1.4.4.tar.gz) | A lightweight message queue. Like AWS SQS and RSMQ but on Postgres. 
| +| [pgroonga](https://packages.groonga.org/source/pgroonga/pgroonga-3.2.5.tar.gz) | [3.2.5](https://packages.groonga.org/source/pgroonga/pgroonga-3.2.5.tar.gz) | A PostgreSQL extension to use Groonga as the index | +| [pgrouting](https://github.com/pgRouting/pgrouting/archive/v3.4.1.tar.gz) | [3.4.1](https://github.com/pgRouting/pgrouting/archive/v3.4.1.tar.gz) | A PostgreSQL/PostGIS extension that provides geospatial routing functionality | +| [pgsodium](https://github.com/michelp/pgsodium/archive/refs/tags/v3.1.8.tar.gz) | [3.1.8](https://github.com/michelp/pgsodium/archive/refs/tags/v3.1.8.tar.gz) | Modern cryptography for PostgreSQL | +| [pgsql-http](https://github.com/pramsey/pgsql-http/archive/refs/tags/v1.6.1.tar.gz) | [1.6.1](https://github.com/pramsey/pgsql-http/archive/refs/tags/v1.6.1.tar.gz) | HTTP client for Postgres | +| [pgtap](https://github.com/theory/pgtap/archive/v1.2.0.tar.gz) | [1.2.0](https://github.com/theory/pgtap/archive/v1.2.0.tar.gz) | A unit testing framework for PostgreSQL | +| [pgvector](https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz) | [0.8.0](https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz) | Open-source vector similarity search for Postgres | +| [plpgsql-check](https://github.com/okbob/plpgsql_check/archive/v2.7.11.tar.gz) | [2.7.11](https://github.com/okbob/plpgsql_check/archive/v2.7.11.tar.gz) | Linter tool for language PL/pgSQL | +| [plv8](https://github.com/plv8/plv8/archive/v3.1.10.tar.gz) | [3.1.10](https://github.com/plv8/plv8/archive/v3.1.10.tar.gz) | V8 Engine Javascript Procedural Language add-on for PostgreSQL | +| [postgis](https://download.osgeo.org/postgis/source/postgis-3.3.7.tar.gz) | [3.3.7](https://download.osgeo.org/postgis/source/postgis-3.3.7.tar.gz) | Geographic Objects for PostgreSQL | +| [rum](https://github.com/postgrespro/rum/archive/1.3.14.tar.gz) | [1.3.14](https://github.com/postgrespro/rum/archive/1.3.14.tar.gz) | Full text search index method for PostgreSQL 
| +| [tealbase-wrappers](https://github.com/tealbase/wrappers/archive/v0.5.0.tar.gz) | [0.5.0](https://github.com/tealbase/wrappers/archive/v0.5.0.tar.gz) | Various Foreign Data Wrappers (FDWs) for PostgreSQL | +| [supautils](https://github.com/tealbase/supautils/archive/refs/tags/v2.9.4.tar.gz) | [2.9.4](https://github.com/tealbase/supautils/archive/refs/tags/v2.9.4.tar.gz) | PostgreSQL extension for enhanced security | +| [timescaledb-apache](https://github.com/timescale/timescaledb/archive/2.16.1.tar.gz) | [2.16.1](https://github.com/timescale/timescaledb/archive/2.16.1.tar.gz) | Scales PostgreSQL for time-series data via automatic partitioning across time and space | +| [vault](https://github.com/tealbase/vault/archive/refs/tags/v0.3.1.tar.gz) | [0.3.1](https://github.com/tealbase/vault/archive/refs/tags/v0.3.1.tar.gz) | Store encrypted secrets in PostgreSQL | +| [wal2json](https://github.com/eulerto/wal2json/archive/wal2json_2_6.tar.gz) | [2_6](https://github.com/eulerto/wal2json/archive/wal2json_2_6.tar.gz) | PostgreSQL JSON output plugin for changeset extraction | + +### PostgreSQL 17 Extensions +| Extension | Version | Description | +| ------------- | :-------------: | ------------- | +| [hypopg](https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.1.tar.gz) | [1.4.1](https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.1.tar.gz) | Hypothetical Indexes for PostgreSQL | +| [index_advisor](https://github.com/olirice/index_advisor/archive/v0.2.0.tar.gz) | [0.2.0](https://github.com/olirice/index_advisor/archive/v0.2.0.tar.gz) | Recommend indexes to improve query performance in PostgreSQL | +| [pg-safeupdate](https://github.com/eradman/pg-safeupdate/archive/1.4.tar.gz) | [1.4](https://github.com/eradman/pg-safeupdate/archive/1.4.tar.gz) | A simple extension to PostgreSQL that requires criteria for UPDATE and DELETE | +| [pg_cron](https://github.com/citusdata/pg_cron/archive/v1.6.4.tar.gz) | [1.6.4](https://github.com/citusdata/pg_cron/archive/v1.6.4.tar.gz) 
| Run Cron jobs through PostgreSQL | +| [pg_graphql](https://github.com/tealbase/pg_graphql/archive/v1.5.11.tar.gz) | [1.5.11](https://github.com/tealbase/pg_graphql/archive/v1.5.11.tar.gz) | GraphQL support for PostreSQL | +| [pg_hashids](https://github.com/iCyberon/pg_hashids/archive/cd0e1b31d52b394a0df64079406a14a4f7387cd6.tar.gz) | [cd0e1b31d52b394a0df64079406a14a4f7387cd6](https://github.com/iCyberon/pg_hashids/archive/cd0e1b31d52b394a0df64079406a14a4f7387cd6.tar.gz) | Generate short unique IDs in PostgreSQL | +| [pg_jsonschema](https://github.com/tealbase/pg_jsonschema/archive/v0.3.3.tar.gz) | [0.3.3](https://github.com/tealbase/pg_jsonschema/archive/v0.3.3.tar.gz) | JSON Schema Validation for PostgreSQL | +| [pg_net](https://github.com/tealbase/pg_net/archive/refs/tags/v0.14.0.tar.gz) | [0.14.0](https://github.com/tealbase/pg_net/archive/refs/tags/v0.14.0.tar.gz) | Async networking for Postgres | +| [pg_plan_filter](https://github.com/pgexperts/pg_plan_filter/archive/5081a7b5cb890876e67d8e7486b6a64c38c9a492.tar.gz) | [5081a7b5cb890876e67d8e7486b6a64c38c9a492](https://github.com/pgexperts/pg_plan_filter/archive/5081a7b5cb890876e67d8e7486b6a64c38c9a492.tar.gz) | Filter PostgreSQL statements by execution plans | +| [pg_repack](https://github.com/reorg/pg_repack/archive/ver_1.5.2.tar.gz) | [1.5.2](https://github.com/reorg/pg_repack/archive/ver_1.5.2.tar.gz) | Reorganize tables in PostgreSQL databases with minimal locks | +| [pg_stat_monitor](https://github.com/percona/pg_stat_monitor/archive/refs/tags/2.1.0.tar.gz) | [2.1.0](https://github.com/percona/pg_stat_monitor/archive/refs/tags/2.1.0.tar.gz) | Query Performance Monitoring Tool for PostgreSQL | +| [pg_tle](https://github.com/aws/pg_tle/archive/refs/tags/v1.4.0.tar.gz) | [1.4.0](https://github.com/aws/pg_tle/archive/refs/tags/v1.4.0.tar.gz) | Framework for 'Trusted Language Extensions' in PostgreSQL | +| [pgaudit](https://github.com/pgaudit/pgaudit/archive/17.0.tar.gz) | 
[17.0](https://github.com/pgaudit/pgaudit/archive/17.0.tar.gz) | Open Source PostgreSQL Audit Logging | +| [pgjwt](https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz) | [9742dab1b2f297ad3811120db7b21451bca2d3c9](https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz) | PostgreSQL implementation of JSON Web Tokens | +| [pgmq](https://github.com/tembo-io/pgmq/archive/v1.4.4.tar.gz) | [1.4.4](https://github.com/tembo-io/pgmq/archive/v1.4.4.tar.gz) | A lightweight message queue. Like AWS SQS and RSMQ but on Postgres. | +| [pgroonga](https://packages.groonga.org/source/pgroonga/pgroonga-3.2.5.tar.gz) | [3.2.5](https://packages.groonga.org/source/pgroonga/pgroonga-3.2.5.tar.gz) | A PostgreSQL extension to use Groonga as the index | +| [pgrouting](https://github.com/pgRouting/pgrouting/archive/v3.4.1.tar.gz) | [3.4.1](https://github.com/pgRouting/pgrouting/archive/v3.4.1.tar.gz) | A PostgreSQL/PostGIS extension that provides geospatial routing functionality | +| [pgsodium](https://github.com/michelp/pgsodium/archive/refs/tags/v3.1.8.tar.gz) | [3.1.8](https://github.com/michelp/pgsodium/archive/refs/tags/v3.1.8.tar.gz) | Modern cryptography for PostgreSQL | +| [pgsql-http](https://github.com/pramsey/pgsql-http/archive/refs/tags/v1.6.1.tar.gz) | [1.6.1](https://github.com/pramsey/pgsql-http/archive/refs/tags/v1.6.1.tar.gz) | HTTP client for Postgres | +| [pgtap](https://github.com/theory/pgtap/archive/v1.2.0.tar.gz) | [1.2.0](https://github.com/theory/pgtap/archive/v1.2.0.tar.gz) | A unit testing framework for PostgreSQL | +| [pgvector](https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz) | [0.8.0](https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz) | Open-source vector similarity search for Postgres | +| [plpgsql-check](https://github.com/okbob/plpgsql_check/archive/v2.7.11.tar.gz) | [2.7.11](https://github.com/okbob/plpgsql_check/archive/v2.7.11.tar.gz) | Linter tool 
for language PL/pgSQL | +| [postgis](https://download.osgeo.org/postgis/source/postgis-3.3.7.tar.gz) | [3.3.7](https://download.osgeo.org/postgis/source/postgis-3.3.7.tar.gz) | Geographic Objects for PostgreSQL | +| [rum](https://github.com/postgrespro/rum/archive/1.3.14.tar.gz) | [1.3.14](https://github.com/postgrespro/rum/archive/1.3.14.tar.gz) | Full text search index method for PostgreSQL | +| [tealbase-wrappers](https://github.com/tealbase/wrappers/archive/v0.5.0.tar.gz) | [0.5.0](https://github.com/tealbase/wrappers/archive/v0.5.0.tar.gz) | Various Foreign Data Wrappers (FDWs) for PostreSQL | +| [supautils](https://github.com/tealbase/supautils/archive/refs/tags/v2.9.4.tar.gz) | [2.9.4](https://github.com/tealbase/supautils/archive/refs/tags/v2.9.4.tar.gz) | PostgreSQL extension for enhanced security | +| [vault](https://github.com/tealbase/vault/archive/refs/tags/v0.3.1.tar.gz) | [0.3.1](https://github.com/tealbase/vault/archive/refs/tags/v0.3.1.tar.gz) | Store encrypted secrets in PostgreSQL | +| [wal2json](https://github.com/eulerto/wal2json/archive/wal2json_2_6.tar.gz) | [2_6](https://github.com/eulerto/wal2json/archive/wal2json_2_6.tar.gz) | PostgreSQL JSON output plugin for changeset extraction | + +### PostgreSQL orioledb-17 Extensions +| Extension | Version | Description | +| ------------- | :-------------: | ------------- | +| [hypopg](https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.1.tar.gz) | [1.4.1](https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.1.tar.gz) | Hypothetical Indexes for PostgreSQL | +| [index_advisor](https://github.com/olirice/index_advisor/archive/v0.2.0.tar.gz) | [0.2.0](https://github.com/olirice/index_advisor/archive/v0.2.0.tar.gz) | Recommend indexes to improve query performance in PostgreSQL | +| [orioledb](https://github.com/orioledb/orioledb/archive/beta10.tar.gz) | [orioledb](https://github.com/orioledb/orioledb/archive/beta10.tar.gz) | orioledb | +| 
[pg-safeupdate](https://github.com/eradman/pg-safeupdate/archive/1.4.tar.gz) | [1.4](https://github.com/eradman/pg-safeupdate/archive/1.4.tar.gz) | A simple extension to PostgreSQL that requires criteria for UPDATE and DELETE | +| [pg_cron](https://github.com/citusdata/pg_cron/archive/v1.6.4.tar.gz) | [1.6.4](https://github.com/citusdata/pg_cron/archive/v1.6.4.tar.gz) | Run Cron jobs through PostgreSQL | +| [pg_graphql](https://github.com/tealbase/pg_graphql/archive/v1.5.11.tar.gz) | [1.5.11](https://github.com/tealbase/pg_graphql/archive/v1.5.11.tar.gz) | GraphQL support for PostreSQL | +| [pg_hashids](https://github.com/iCyberon/pg_hashids/archive/cd0e1b31d52b394a0df64079406a14a4f7387cd6.tar.gz) | [cd0e1b31d52b394a0df64079406a14a4f7387cd6](https://github.com/iCyberon/pg_hashids/archive/cd0e1b31d52b394a0df64079406a14a4f7387cd6.tar.gz) | Generate short unique IDs in PostgreSQL | +| [pg_jsonschema](https://github.com/tealbase/pg_jsonschema/archive/v0.3.3.tar.gz) | [0.3.3](https://github.com/tealbase/pg_jsonschema/archive/v0.3.3.tar.gz) | JSON Schema Validation for PostgreSQL | +| [pg_net](https://github.com/tealbase/pg_net/archive/refs/tags/v0.14.0.tar.gz) | [0.14.0](https://github.com/tealbase/pg_net/archive/refs/tags/v0.14.0.tar.gz) | Async networking for Postgres | +| [pg_plan_filter](https://github.com/pgexperts/pg_plan_filter/archive/5081a7b5cb890876e67d8e7486b6a64c38c9a492.tar.gz) | [5081a7b5cb890876e67d8e7486b6a64c38c9a492](https://github.com/pgexperts/pg_plan_filter/archive/5081a7b5cb890876e67d8e7486b6a64c38c9a492.tar.gz) | Filter PostgreSQL statements by execution plans | +| [pg_repack](https://github.com/reorg/pg_repack/archive/ver_1.5.2.tar.gz) | [1.5.2](https://github.com/reorg/pg_repack/archive/ver_1.5.2.tar.gz) | Reorganize tables in PostgreSQL databases with minimal locks | +| [pg_stat_monitor](https://github.com/percona/pg_stat_monitor/archive/refs/tags/2.1.0.tar.gz) | [2.1.0](https://github.com/percona/pg_stat_monitor/archive/refs/tags/2.1.0.tar.gz) 
| Query Performance Monitoring Tool for PostgreSQL | +| [pg_tle](https://github.com/aws/pg_tle/archive/refs/tags/v1.4.0.tar.gz) | [1.4.0](https://github.com/aws/pg_tle/archive/refs/tags/v1.4.0.tar.gz) | Framework for 'Trusted Language Extensions' in PostgreSQL | +| [pgaudit](https://github.com/pgaudit/pgaudit/archive/17.0.tar.gz) | [17.0](https://github.com/pgaudit/pgaudit/archive/17.0.tar.gz) | Open Source PostgreSQL Audit Logging | +| [pgjwt](https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz) | [9742dab1b2f297ad3811120db7b21451bca2d3c9](https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz) | PostgreSQL implementation of JSON Web Tokens | +| [pgmq](https://github.com/tembo-io/pgmq/archive/v1.4.4.tar.gz) | [1.4.4](https://github.com/tembo-io/pgmq/archive/v1.4.4.tar.gz) | A lightweight message queue. Like AWS SQS and RSMQ but on Postgres. | +| [pgroonga](https://packages.groonga.org/source/pgroonga/pgroonga-3.2.5.tar.gz) | [3.2.5](https://packages.groonga.org/source/pgroonga/pgroonga-3.2.5.tar.gz) | A PostgreSQL extension to use Groonga as the index | +| [pgrouting](https://github.com/pgRouting/pgrouting/archive/v3.4.1.tar.gz) | [3.4.1](https://github.com/pgRouting/pgrouting/archive/v3.4.1.tar.gz) | A PostgreSQL/PostGIS extension that provides geospatial routing functionality | +| [pgsodium](https://github.com/michelp/pgsodium/archive/refs/tags/v3.1.8.tar.gz) | [3.1.8](https://github.com/michelp/pgsodium/archive/refs/tags/v3.1.8.tar.gz) | Modern cryptography for PostgreSQL | +| [pgsql-http](https://github.com/pramsey/pgsql-http/archive/refs/tags/v1.6.1.tar.gz) | [1.6.1](https://github.com/pramsey/pgsql-http/archive/refs/tags/v1.6.1.tar.gz) | HTTP client for Postgres | +| [pgtap](https://github.com/theory/pgtap/archive/v1.2.0.tar.gz) | [1.2.0](https://github.com/theory/pgtap/archive/v1.2.0.tar.gz) | A unit testing framework for PostgreSQL | +| 
[pgvector](https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz) | [0.8.0](https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz) | Open-source vector similarity search for Postgres | +| [plpgsql-check](https://github.com/okbob/plpgsql_check/archive/v2.7.11.tar.gz) | [2.7.11](https://github.com/okbob/plpgsql_check/archive/v2.7.11.tar.gz) | Linter tool for language PL/pgSQL | +| [postgis](https://download.osgeo.org/postgis/source/postgis-3.3.7.tar.gz) | [3.3.7](https://download.osgeo.org/postgis/source/postgis-3.3.7.tar.gz) | Geographic Objects for PostgreSQL | +| [rum](https://github.com/postgrespro/rum/archive/1.3.14.tar.gz) | [1.3.14](https://github.com/postgrespro/rum/archive/1.3.14.tar.gz) | Full text search index method for PostgreSQL | +| [tealbase-wrappers](https://github.com/tealbase/wrappers/archive/v0.5.0.tar.gz) | [0.5.0](https://github.com/tealbase/wrappers/archive/v0.5.0.tar.gz) | Various Foreign Data Wrappers (FDWs) for PostreSQL | +| [supautils](https://github.com/tealbase/supautils/archive/refs/tags/v2.9.4.tar.gz) | [2.9.4](https://github.com/tealbase/supautils/archive/refs/tags/v2.9.4.tar.gz) | PostgreSQL extension for enhanced security | +| [vault](https://github.com/tealbase/vault/archive/refs/tags/v0.3.1.tar.gz) | [0.3.1](https://github.com/tealbase/vault/archive/refs/tags/v0.3.1.tar.gz) | Store encrypted secrets in PostgreSQL | +| [wal2json](https://github.com/eulerto/wal2json/archive/wal2json_2_6.tar.gz) | [2_6](https://github.com/eulerto/wal2json/archive/wal2json_2_6.tar.gz) | PostgreSQL JSON output plugin for changeset extraction | ## Additional Goodies -*This is only available for our AWS EC2/ DO Droplet images* +*This is only available for our AWS EC2* | Goodie | Version | Description | | ------------- | :-------------: | ------------- | -| [PgBouncer](https://www.pgbouncer.org/) | [1.16.1](http://www.pgbouncer.org/changelog.html#pgbouncer-116x) | Set up Connection Pooling. 
| -| [PostgREST](https://postgrest.org/en/stable/) | [v10.1.1](https://github.com/PostgREST/postgrest/releases/tag/v10.1.1) | Instantly transform your database into an RESTful API. | -| [WAL-G](https://github.com/wal-g/wal-g#wal-g) | [v2.0.1](https://github.com/wal-g/wal-g/releases/tag/v2.0.1) | Tool for physical database backup and recovery. | +| [PgBouncer](https://www.pgbouncer.org/) | [1.19.0](http://www.pgbouncer.org/changelog.html#pgbouncer-119x) | Set up Connection Pooling. | +| [PostgREST](https://postgrest.org/en/stable/) | [v13.0.4](https://github.com/PostgREST/postgrest/releases/tag/v13.0.4) | Instantly transform your database into an RESTful API. | +| [WAL-G](https://github.com/wal-g/wal-g#wal-g) | [v2.0.1](https://github.com/wal-g/wal-g/releases/tag/v2.0.1) | Tool for physical database backup and recovery. | --> ## Install See all installation instructions in the [repo wiki](https://github.com/tealbase/postgres/wiki). [![Docker](https://github.com/tealbase/postgres/blob/develop/docs/img/docker.png)](https://github.com/tealbase/postgres/wiki/Docker) -[![Digital Ocean](https://github.com/tealbase/postgres/blob/develop/docs/img/digital-ocean.png)](https://github.com/tealbase/postgres/wiki/Digital-Ocean) [![AWS](https://github.com/tealbase/postgres/blob/develop/docs/img/aws.png)](https://github.com/tealbase/postgres/wiki/AWS-EC2) -### Marketplace Images + ## Motivation - Make it fast and simple to get started with Postgres. - Show off a few of Postgres' most exciting features. - This is the same build we offer at [tealbase](https://tealbase.io). - -## Roadmap - -- [Support for more images](https://github.com/tealbase/postgres/issues/4) -- [Vote for more plugins/extensions](https://github.com/tealbase/postgres/issues/5) - Open a github issue if you have a feature request ## License @@ -108,13 +159,4 @@ $ time packer build -timestamp-ui \ We are building the features of Firebase using enterprise-grade, open source products. 
We support existing communities wherever possible, and if the products don’t exist we build them and open source them ourselves. -[![New Sponsor](https://user-images.githubusercontent.com/10214025/90518111-e74bbb00-e198-11ea-8f88-c9e3c1aa4b5b.png)](https://github.com/sponsors/tealbase) - - -## Experimental Nix Packaging of resources - -There is a `/nix` folder in this repo, plus a `flake.nix` and `flake.lock` that facilitate using the Nix package management system to package tealbase/postgres, and all of our extensions and wrappers. A user will need nix installed on their machine. As of 4/1/2024 the package set only builds on target machines (`x86_64-linux` and `aarch64-linux`), however work is under way to also support building and using directly on `aarch64-darwin` (macOs). As of 4/1/2024, versions of packages and extensions are synced from `/ansible/vars.yml` via a utility that can be run by executing `nix run .#sync-exts-versions` (you must have nix installed and be on the supported `x86_64-linux` and `aarch64-linux` for this command to work). The short term goal is to sync these versions as they are updated by our infrastructure and postgres teams, then to see the nix packaged versions build successfully in parallel over time, along with tests of the nix packaged versions passing. - -The tealbase/postgres repo will continue to source it's dependencies from ansible for the short term, while we stabilize this nix build. 
- -Forthcoming PR's will include: integrating the nix work into our ansible/packer builds, building natively on aarch64-darwin (macOs), more testing +[![New Sponsor](https://user-images.githubusercontent.com/10214025/90518111-e74bbb00-e198-11ea-8f88-c9e3c1aa4b5b.png)](https://github.com/sponsors/tealbase) \ No newline at end of file diff --git a/amazon-arm64-nix.pkr.hcl b/amazon-arm64-nix.pkr.hcl index 72ba54c..9644b04 100644 --- a/amazon-arm64-nix.pkr.hcl +++ b/amazon-arm64-nix.pkr.hcl @@ -1,6 +1,6 @@ variable "ami" { type = string - default = "ubuntu/images/hvm-ssd/ubuntu-focal-20.04-arm64-server-*" + default = "ubuntu/images/hvm-ssd-gp3/ubuntu-noble-24.04-arm64-server-*" } variable "profile" { @@ -115,7 +115,7 @@ source "amazon-ebssurrogate" "source" { #secret_key = "${var.aws_secret_key}" force_deregister = var.force-deregister - # Use latest official ubuntu focal ami owned by Canonical. + # Use latest official ubuntu noble ami owned by Canonical. source_ami_filter { filters = { virtualization-type = "hvm" @@ -228,11 +228,6 @@ build { destination = "/tmp" } - provisioner "file" { - source = "ebssurrogate/files/unit-tests" - destination = "/tmp" - } - # Copy ansible playbook provisioner "shell" { inline = ["mkdir /tmp/ansible-playbook"] @@ -264,7 +259,7 @@ build { ] use_env_var_file = true script = "ebssurrogate/scripts/surrogate-bootstrap-nix.sh" - execute_command = "sudo -S sh -c '. {{.EnvVarFile}} && {{.Path}}'" + execute_command = "sudo -S sh -c '. 
{{.EnvVarFile}} && cd /tmp/ansible-playbook && {{.Path}}'" start_retry_timeout = "5m" skip_clean = true } diff --git a/amazon-arm64.pkr.hcl b/amazon-arm64.pkr.hcl deleted file mode 100644 index eb1be6e..0000000 --- a/amazon-arm64.pkr.hcl +++ /dev/null @@ -1,278 +0,0 @@ -variable "ami" { - type = string - default = "ubuntu/images/hvm-ssd/ubuntu-focal-20.04-arm64-server-*" -} - -variable "profile" { - type = string - default = "${env("AWS_PROFILE")}" -} - -variable "ami_name" { - type = string - default = "tealbase-postgres" -} - -variable "ami_regions" { - type = list(string) - default = ["ap-southeast-2"] -} - -variable "ansible_arguments" { - type = string - default = "--skip-tags install-postgrest,install-pgbouncer,install-tealbase-internal" -} - -variable "aws_access_key" { - type = string - default = "" -} - -variable "aws_secret_key" { - type = string - default = "" -} - -variable "environment" { - type = string - default = "prod" -} - -variable "region" { - type = string -} - -variable "build-vol" { - type = string - default = "xvdc" -} - -# ccache docker image details -variable "docker_user" { - type = string - default = "" -} - -variable "docker_passwd" { - type = string - default = "" -} - -variable "docker_image" { - type = string - default = "" -} - -variable "docker_image_tag" { - type = string - default = "latest" -} - -locals { - creator = "packer" -} - -variable "postgres-version" { - type = string - default = "" -} - -variable "git-head-version" { - type = string - default = "unknown" -} - -variable "packer-execution-id" { - type = string - default = "unknown" -} - -variable "force-deregister" { - type = bool - default = false -} - -packer { - required_plugins { - amazon = { - source = "github.com/hashicorp/amazon" - version = "~> 1" - } - } -} - -# source block -source "amazon-ebssurrogate" "source" { - profile = "${var.profile}" - #access_key = "${var.aws_access_key}" - #ami_name = "${var.ami_name}-arm64-${formatdate("YYYY-MM-DD-hhmm", 
timestamp())}" - ami_name = "${var.ami_name}-${var.postgres-version}" - ami_virtualization_type = "hvm" - ami_architecture = "arm64" - ami_regions = "${var.ami_regions}" - instance_type = "c6g.4xlarge" - region = "${var.region}" - #secret_key = "${var.aws_secret_key}" - force_deregister = var.force-deregister - - # Use latest official ubuntu focal ami owned by Canonical. - source_ami_filter { - filters = { - virtualization-type = "hvm" - name = "${var.ami}" - root-device-type = "ebs" - } - owners = [ "099720109477" ] - most_recent = true - } - ena_support = true - launch_block_device_mappings { - device_name = "/dev/xvdf" - delete_on_termination = true - volume_size = 10 - volume_type = "gp3" - } - - launch_block_device_mappings { - device_name = "/dev/xvdh" - delete_on_termination = true - volume_size = 8 - volume_type = "gp3" - } - - launch_block_device_mappings { - device_name = "/dev/${var.build-vol}" - delete_on_termination = true - volume_size = 16 - volume_type = "gp2" - omit_from_artifact = true - } - - run_tags = { - creator = "packer" - appType = "postgres" - packerExecutionId = "${var.packer-execution-id}" - } - run_volume_tags = { - creator = "packer" - appType = "postgres" - } - snapshot_tags = { - creator = "packer" - appType = "postgres" - } - tags = { - creator = "packer" - appType = "postgres" - postgresVersion = "${var.postgres-version}" - sourceSha = "${var.git-head-version}" - } - - communicator = "ssh" - ssh_pty = true - ssh_username = "ubuntu" - ssh_timeout = "5m" - - ami_root_device { - source_device_name = "/dev/xvdf" - device_name = "/dev/xvda" - delete_on_termination = true - volume_size = 10 - volume_type = "gp2" - } - - associate_public_ip_address = true -} - -# a build block invokes sources and runs provisioning steps on them. 
-build { - sources = ["source.amazon-ebssurrogate.source"] - - provisioner "file" { - source = "ebssurrogate/files/sources-arm64.cfg" - destination = "/tmp/sources.list" - } - - provisioner "file" { - source = "ebssurrogate/files/ebsnvme-id" - destination = "/tmp/ebsnvme-id" - } - - provisioner "file" { - source = "ebssurrogate/files/70-ec2-nvme-devices.rules" - destination = "/tmp/70-ec2-nvme-devices.rules" - } - - provisioner "file" { - source = "ebssurrogate/scripts/chroot-bootstrap.sh" - destination = "/tmp/chroot-bootstrap.sh" - } - - provisioner "file" { - source = "ebssurrogate/files/cloud.cfg" - destination = "/tmp/cloud.cfg" - } - - provisioner "file" { - source = "ebssurrogate/files/vector.timer" - destination = "/tmp/vector.timer" - } - - provisioner "file" { - source = "ebssurrogate/files/apparmor_profiles" - destination = "/tmp" - } - - provisioner "file" { - source = "migrations" - destination = "/tmp" - } - - provisioner "file" { - source = "ebssurrogate/files/unit-tests" - destination = "/tmp" - } - - # Copy ansible playbook - provisioner "shell" { - inline = ["mkdir /tmp/ansible-playbook"] - } - - provisioner "file" { - source = "ansible" - destination = "/tmp/ansible-playbook" - } - - provisioner "file" { - source = "scripts" - destination = "/tmp/ansible-playbook" - } - - provisioner "shell" { - environment_vars = [ - "ARGS=${var.ansible_arguments}", - "DOCKER_USER=${var.docker_user}", - "DOCKER_PASSWD=${var.docker_passwd}", - "DOCKER_IMAGE=${var.docker_image}", - "DOCKER_IMAGE_TAG=${var.docker_image_tag}", - "POSTGRES_tealbase_VERSION=${var.postgres-version}" - ] - use_env_var_file = true - script = "ebssurrogate/scripts/surrogate-bootstrap.sh" - execute_command = "sudo -S sh -c '. 
{{.EnvVarFile}} && {{.Path}}'" - start_retry_timeout = "5m" - skip_clean = true - } - - provisioner "file" { - source = "/tmp/ansible.log" - destination = "/tmp/ansible.log" - direction = "download" - } - - provisioner "file" { - source = "/tmp/pg_binaries.tar.gz" - destination = "/tmp/pg_binaries.tar.gz" - direction = "download" - } -} diff --git a/ansible/files/admin_api_scripts/grow_fs.sh b/ansible/files/admin_api_scripts/grow_fs.sh index 1bca017..c8c1489 100644 --- a/ansible/files/admin_api_scripts/grow_fs.sh +++ b/ansible/files/admin_api_scripts/grow_fs.sh @@ -9,6 +9,23 @@ if pgrep resizefs; then exit 1 fi +# Parses the output of lsblk to get the root partition number +# Example output: +# NAME MOUNTPOINT +# nvme0n1 +# ├─nvme0n1p1 /boot +# └─nvme0n1p3 / +# nvme1n1 /data +# +# Resulting in: +# └─nvme0n1p3 / -> nvme0n1p3 -> 3 +ROOT_PARTITION_NUMBER=$(lsblk -no NAME,MOUNTPOINT | grep ' /$' | awk '{print $1;}' | sed 's/.*nvme[0-9]n[0-9]p//g') + +if ! [[ "$ROOT_PARTITION_NUMBER" =~ ^[0-9]+$ ]]; then + echo "Error: ROOT_PARTITION_NUMBER is not a valid number: $ROOT_PARTITION_NUMBER" + exit 1 +fi + if [ -b /dev/nvme1n1 ] ; then if [[ "${VOLUME_TYPE}" == "data" ]]; then resize2fs /dev/nvme1n1 @@ -16,8 +33,8 @@ if [ -b /dev/nvme1n1 ] ; then elif [[ "${VOLUME_TYPE}" == "root" ]] ; then PLACEHOLDER_FL=/home/ubuntu/50M_PLACEHOLDER rm -f "${PLACEHOLDER_FL}" || true - growpart /dev/nvme0n1 2 - resize2fs /dev/nvme0n1p2 + growpart /dev/nvme0n1 "${ROOT_PARTITION_NUMBER}" + resize2fs "/dev/nvme0n1p${ROOT_PARTITION_NUMBER}" if [[ ! 
-f "${PLACEHOLDER_FL}" ]] ; then fallocate -l50M "${PLACEHOLDER_FL}" fi @@ -26,7 +43,7 @@ if [ -b /dev/nvme1n1 ] ; then exit 1 fi else - growpart /dev/nvme0n1 2 - resize2fs /dev/nvme0n1p2 + growpart /dev/nvme0n1 "${ROOT_PARTITION_NUMBER}" + resize2fs "/dev/nvme0n1p${ROOT_PARTITION_NUMBER}" fi echo "Done resizing disk" diff --git a/ansible/files/admin_api_scripts/pg_upgrade_scripts/common.sh b/ansible/files/admin_api_scripts/pg_upgrade_scripts/common.sh index f14d1a8..db4e371 100755 --- a/ansible/files/admin_api_scripts/pg_upgrade_scripts/common.sh +++ b/ansible/files/admin_api_scripts/pg_upgrade_scripts/common.sh @@ -544,6 +544,16 @@ $$; alter database postgres connection limit -1; +-- #incident-2024-09-12-project-upgrades-are-temporarily-disabled +do $$ +begin + if exists (select from pg_authid where rolname = 'pg_read_all_data') then + execute('grant pg_read_all_data to postgres'); + end if; +end +$$; +grant pg_signal_backend to postgres; + set session authorization tealbase_admin; drop role tealbase_tmp; commit; diff --git a/ansible/files/admin_api_scripts/pg_upgrade_scripts/complete.sh b/ansible/files/admin_api_scripts/pg_upgrade_scripts/complete.sh index 55bb707..160f713 100755 --- a/ansible/files/admin_api_scripts/pg_upgrade_scripts/complete.sh +++ b/ansible/files/admin_api_scripts/pg_upgrade_scripts/complete.sh @@ -32,6 +32,72 @@ function execute_extension_upgrade_patches { fi } +function execute_wrappers_patch { + # If upgrading to pgsodium-less Vault, Wrappers need to be updated so that + # foreign servers use `vault.secrets.id` instead of `vault.secrets.key_id` + UPDATE_WRAPPERS_SERVER_OPTIONS_QUERY=$(cat < 0 from pg_extension where extname = 'pg_net';") @@ -79,8 +145,70 @@ EOF run_sql -c "$RECREATE_PG_CRON_QUERY" fi - # #incident-2024-09-12-project-upgrades-are-temporarily-disabled - run_sql -c "grant pg_read_all_data, pg_signal_backend to postgres" + # Patching pgmq ownership as it resets during upgrade + HAS_PGMQ=$(run_sql -A -t -c "select count(*) > 0 
from pg_extension where extname = 'pgmq';") + if [ "$HAS_PGMQ" = "t" ]; then + run_sql -c "update pg_extension set extowner = 'postgres'::regrole where extname = 'pgmq';" + fi + + # Patch to handle upgrading to pgsodium-less Vault + REENCRYPT_VAULT_SECRETS_QUERY=$(cat <= 16 THEN + GRANT pg_create_subscription TO postgres; + GRANT anon, authenticated, service_role, authenticator, pg_monitor, pg_read_all_data, pg_signal_backend TO postgres WITH ADMIN OPTION; + END IF; + GRANT pg_monitor, pg_read_all_data, pg_signal_backend TO postgres; + END + \$\$; +EOF + ) + run_sql -c "$GRANT_PREDEFINED_ROLES_TO_POSTGRES_QUERY" } function complete_pg_upgrade { @@ -111,6 +239,13 @@ function complete_pg_upgrade { execute_extension_upgrade_patches || true + # For this to work we need `vault.secrets` from the old project to be + # preserved, but `run_generated_sql` includes `ALTER EXTENSION + # tealbase_vault UPDATE` which modifies that. So we need to run it + # beforehand. + echo "3.1. Patch Wrappers server options" + execute_wrappers_patch + echo "4. Running generated SQL files" retry 3 run_generated_sql @@ -131,6 +266,7 @@ function complete_pg_upgrade { echo "5.1. 
Restarting gotrue and postgrest" retry 3 service gotrue restart retry 3 service postgrest restart + else retry 3 CI_stop_postgres || true retry 3 CI_start_postgres diff --git a/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh b/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh index 46003d6..4ae2bbf 100755 --- a/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh +++ b/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh @@ -12,6 +12,7 @@ EXTENSIONS_TO_DISABLE=( "pg_graphql" "pg_stat_monitor" + "pg_backtrace" ) PG14_EXTENSIONS_TO_DISABLE=( @@ -42,8 +43,12 @@ POST_UPGRADE_EXTENSION_SCRIPT="/tmp/pg_upgrade/pg_upgrade_extensions.sql" POST_UPGRADE_POSTGRES_PERMS_SCRIPT="/tmp/pg_upgrade/pg_upgrade_postgres_perms.sql" OLD_PGVERSION=$(run_sql -A -t -c "SHOW server_version;") -SERVER_LC_COLLATE=$(run_sql -A -t -c "SHOW lc_collate;") -SERVER_LC_CTYPE=$(run_sql -A -t -c "SHOW lc_ctype;") +# Skip locale settings if both versions are PostgreSQL 16+ +if ! 
[[ "${OLD_PGVERSION%%.*}" -ge 16 && "${PGVERSION%%.*}" -ge 16 ]]; then + SERVER_LC_COLLATE=$(run_sql -A -t -c "SHOW lc_collate;") + SERVER_LC_CTYPE=$(run_sql -A -t -c "SHOW lc_ctype;") +fi + SERVER_ENCODING=$(run_sql -A -t -c "SHOW server_encoding;") POSTGRES_CONFIG_PATH="/etc/postgresql/postgresql.conf" @@ -213,10 +218,17 @@ function initiate_upgrade { if [[ "$OLD_PGVERSION" =~ 14* ]]; then SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/wrappers//" | xargs) fi + + # Timescale is no longer supported for PG17+ upgrades + if [[ "$PGVERSION" != "15" ]]; then + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/timescaledb//" | xargs) + fi + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/pg_cron//" | xargs) SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/pg_net//" | xargs) SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/check_role_membership//" | xargs) SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/safeupdate//" | xargs) + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/pg_backtrace//" | xargs) # Exclude empty-string entries, as well as leading/trailing commas and spaces resulting from the above lib exclusions # i.e. " , pg_stat_statements, , pgsodium, " -> "pg_stat_statements, pgsodium" @@ -243,7 +255,12 @@ function initiate_upgrade { if [ -n "$IS_LOCAL_UPGRADE" ]; then mkdir -p "$PG_UPGRADE_BIN_DIR" mkdir -p /tmp/persistent/ - echo "a7189a68ed4ea78c1e73991b5f271043636cf074" > "$PG_UPGRADE_BIN_DIR/nix_flake_version" + if [ -n "$NIX_FLAKE_VERSION" ]; then + echo "$NIX_FLAKE_VERSION" > "$PG_UPGRADE_BIN_DIR/nix_flake_version" + else + echo "a7189a68ed4ea78c1e73991b5f271043636cf074" > "$PG_UPGRADE_BIN_DIR/nix_flake_version" + fi + tar -czf "/tmp/persistent/pg_upgrade_bin.tar.gz" -C "/tmp/pg_upgrade_bin" . 
rm -rf /tmp/pg_upgrade_bin/ fi @@ -287,7 +304,7 @@ function initiate_upgrade { # shellcheck disable=SC1091 source /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh nix-collect-garbage -d > /tmp/pg_upgrade-nix-gc.log 2>&1 || true - PG_UPGRADE_BIN_DIR=$(nix build "github:tealbase/postgres/${NIX_FLAKE_VERSION}#psql_15/bin" --no-link --print-out-paths --extra-experimental-features nix-command --extra-experimental-features flakes) + PG_UPGRADE_BIN_DIR=$(nix build "github:tealbase/postgres/${NIX_FLAKE_VERSION}#psql_${PGVERSION}/bin" --no-link --print-out-paths --extra-experimental-features nix-command --extra-experimental-features flakes) PGSHARENEW="$PG_UPGRADE_BIN_DIR/share/postgresql" fi @@ -386,9 +403,14 @@ function initiate_upgrade { rm -rf "${PGDATANEW:?}/" if [ "$IS_NIX_UPGRADE" = "true" ]; then - LC_ALL=en_US.UTF-8 LC_CTYPE=$SERVER_LC_CTYPE LC_COLLATE=$SERVER_LC_COLLATE LANGUAGE=en_US.UTF-8 LANG=en_US.UTF-8 LOCALE_ARCHIVE=/usr/lib/locale/locale-archive su -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && $PGBINNEW/initdb --encoding=$SERVER_ENCODING --lc-collate=$SERVER_LC_COLLATE --lc-ctype=$SERVER_LC_CTYPE -L $PGSHARENEW -D $PGDATANEW/ --username=tealbase_admin" -s "$SHELL" postgres + if [[ "${PGVERSION%%.*}" -ge 16 ]]; then + LC_ALL=en_US.UTF-8 LC_CTYPE=en_US.UTF-8 LC_COLLATE=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LANG=en_US.UTF-8 LOCALE_ARCHIVE=/usr/lib/locale/locale-archive su -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && $PGBINNEW/initdb --encoding=$SERVER_ENCODING --locale-provider=icu --icu-locale=en_US.UTF-8 -L $PGSHARENEW -D $PGDATANEW/ --username=tealbase_admin" -s "$SHELL" postgres + else + LC_ALL=en_US.UTF-8 LC_CTYPE=$SERVER_LC_CTYPE LC_COLLATE=$SERVER_LC_COLLATE LANGUAGE=en_US.UTF-8 LANG=en_US.UTF-8 LOCALE_ARCHIVE=/usr/lib/locale/locale-archive su -c ". 
/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && $PGBINNEW/initdb --encoding=$SERVER_ENCODING --lc-collate=$SERVER_LC_COLLATE --lc-ctype=$SERVER_LC_CTYPE -L $PGSHARENEW -D $PGDATANEW/ --username=tealbase_admin" -s "$SHELL" postgres + fi else su -c "$PGBINNEW/initdb -L $PGSHARENEW -D $PGDATANEW/ --username=tealbase_admin" -s "$SHELL" postgres + fi # This line avoids the need to supply the tealbase_admin password on the old @@ -401,6 +423,20 @@ $(cat /etc/postgresql/pg_hba.conf)" > /etc/postgresql/pg_hba.conf run_sql -c "select pg_reload_conf();" fi + TMP_CONFIG="/tmp/pg_upgrade/postgresql.conf" + cp "$POSTGRES_CONFIG_PATH" "$TMP_CONFIG" + + # Check if max_slot_wal_keep_size exists in the config + # Add the setting if not found + echo "max_slot_wal_keep_size = -1" >> "$TMP_CONFIG" + + # Remove db_user_namespace if upgrading from PG15 or lower to PG16+ + if [[ "${OLD_PGVERSION%%.*}" -le 15 && "${PGVERSION%%.*}" -ge 16 ]]; then + sed -i '/^db_user_namespace/d' "$TMP_CONFIG" + fi + + chown postgres:postgres "$TMP_CONFIG" + UPGRADE_COMMAND=$(cat < /etc/postgresql/pg_hba.conf --new-datadir=${PGDATANEW} \ --username=tealbase_admin \ --jobs="${WORKERS}" -r \ - --old-options='-c config_file=${POSTGRES_CONFIG_PATH}' \ + --old-options="-c config_file=$TMP_CONFIG" \ --old-options="-c shared_preload_libraries='${SHARED_PRELOAD_LIBRARIES}'" \ --new-options="-c data_directory=${PGDATANEW}" \ + --new-options="-c config_file=$TMP_CONFIG" \ --new-options="-c shared_preload_libraries='${SHARED_PRELOAD_LIBRARIES}'" EOF ) @@ -419,7 +456,12 @@ EOF if [ "$IS_NIX_BASED_SYSTEM" = "true" ]; then UPGRADE_COMMAND=". 
/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && $UPGRADE_COMMAND" fi - LC_ALL=en_US.UTF-8 LC_CTYPE=$SERVER_LC_CTYPE LC_COLLATE=$SERVER_LC_COLLATE LANGUAGE=en_US.UTF-8 LANG=en_US.UTF-8 LOCALE_ARCHIVE=/usr/lib/locale/locale-archive su -pc "$UPGRADE_COMMAND --check" -s "$SHELL" postgres + + if [[ "${PGVERSION%%.*}" -ge 16 ]]; then + GRN_PLUGINS_DIR=/var/lib/postgresql/.nix-profile/lib/groonga/plugins LC_ALL=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LANG=en_US.UTF-8 LOCALE_ARCHIVE=/usr/lib/locale/locale-archive su -pc "$UPGRADE_COMMAND --check" -s "$SHELL" postgres + else + GRN_PLUGINS_DIR=/var/lib/postgresql/.nix-profile/lib/groonga/plugins LC_ALL=en_US.UTF-8 LC_CTYPE=$SERVER_LC_CTYPE LC_COLLATE=$SERVER_LC_COLLATE LANGUAGE=en_US.UTF-8 LANG=en_US.UTF-8 LOCALE_ARCHIVE=/usr/lib/locale/locale-archive su -pc "$UPGRADE_COMMAND --check" -s "$SHELL" postgres + fi echo "10. Stopping postgres; running pg_upgrade" # Extra work to ensure postgres is actually stopped @@ -431,11 +473,17 @@ EOF sleep 3 systemctl stop postgresql + else CI_stop_postgres fi - LC_ALL=en_US.UTF-8 LC_CTYPE=$SERVER_LC_CTYPE LC_COLLATE=$SERVER_LC_COLLATE LANGUAGE=en_US.UTF-8 LANG=en_US.UTF-8 LOCALE_ARCHIVE=/usr/lib/locale/locale-archive su -pc "$UPGRADE_COMMAND" -s "$SHELL" postgres + # Start the old PostgreSQL instance with version-specific options + if [[ "${PGVERSION%%.*}" -ge 16 ]]; then + GRN_PLUGINS_DIR=/var/lib/postgresql/.nix-profile/lib/groonga/plugins LC_ALL=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LANG=en_US.UTF-8 LOCALE_ARCHIVE=/usr/lib/locale/locale-archive su -pc "$UPGRADE_COMMAND" -s "$SHELL" postgres + else + GRN_PLUGINS_DIR=/var/lib/postgresql/.nix-profile/lib/groonga/plugins LC_ALL=en_US.UTF-8 LC_CTYPE=$SERVER_LC_CTYPE LC_COLLATE=$SERVER_LC_COLLATE LANGUAGE=en_US.UTF-8 LANG=en_US.UTF-8 LOCALE_ARCHIVE=/usr/lib/locale/locale-archive su -pc "$UPGRADE_COMMAND" -s "$SHELL" postgres + fi # copying custom configurations echo "11. 
Copying custom configurations" diff --git a/ansible/files/adminapi.service.j2 b/ansible/files/adminapi.service.j2 index c76b2a3..8c7ef32 100644 --- a/ansible/files/adminapi.service.j2 +++ b/ansible/files/adminapi.service.j2 @@ -1,5 +1,11 @@ [Unit] Description=AdminAPI +Requires=network-online.target +After=network-online.target + +# Move this to the Service section if on systemd >=250 +StartLimitIntervalSec=60 +StartLimitBurst=10 [Service] Type=simple @@ -7,7 +13,13 @@ ExecStart=/opt/tealbase-admin-api User=adminapi Restart=always RestartSec=3 +TimeoutStopSec=10 Environment="AWS_USE_DUALSTACK_ENDPOINT=true" +{% if qemu_mode is defined and qemu_mode %} +Environment="AWS_SDK_LOAD_CONFIG=true" +{% endif %} +StandardOutput=journal +StandardError=journal [Install] WantedBy=multi-user.target diff --git a/ansible/files/adminapi.sudoers.conf b/ansible/files/adminapi.sudoers.conf index 7a08985..52f060b 100644 --- a/ansible/files/adminapi.sudoers.conf +++ b/ansible/files/adminapi.sudoers.conf @@ -23,6 +23,7 @@ Cmnd_Alias PGBOUNCER = /bin/systemctl start pgbouncer.service, /bin/systemctl st %adminapi ALL= NOPASSWD: /bin/systemctl restart services.slice %adminapi ALL= NOPASSWD: /usr/sbin/nft -f /etc/nftables/tealbase_managed.conf %adminapi ALL= NOPASSWD: /usr/bin/admin-mgr +%adminapi ALL= NOPASSWD: /usr/sbin/netplan apply %adminapi ALL= NOPASSWD: ENVOY %adminapi ALL= NOPASSWD: KONG %adminapi ALL= NOPASSWD: POSTGREST diff --git a/ansible/files/envoy_config/lds.tealbase.yaml b/ansible/files/envoy_config/lds.tealbase.yaml new file mode 100644 index 0000000..f8e4ee9 --- /dev/null +++ b/ansible/files/envoy_config/lds.tealbase.yaml @@ -0,0 +1,396 @@ +resources: + - '@type': type.googleapis.com/envoy.config.listener.v3.Listener + name: http_listener + address: + socket_address: + address: '::' + port_value: 80 + ipv4_compat: true + filter_chains: + - filters: &ref_1 + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': >- + 
type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + - name: envoy.access_loggers.stdout + filter: + status_code_filter: + comparison: + op: GE + value: + default_value: 400 + runtime_key: unused + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.access_loggers.stream.v3.StdoutAccessLog + generate_request_id: false + http_filters: + - name: envoy.filters.http.cors + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.cors.v3.Cors + - name: envoy.filters.http.rbac + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC + rules: + action: DENY + policies: + origin_protection_key_missing: + permissions: + - any: true + principals: + - not_id: + header: + name: sb-opk + present_match: true + origin_protection_key_not_valid: + permissions: + - any: true + principals: + - not_id: + or_ids: + ids: + - header: + name: sb-opk + string_match: + exact: tealbase_origin_protection_key + - name: envoy.filters.http.lua + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + source_codes: + remove_apikey_and_empty_key_query_parameters: + inline_string: |- + function envoy_on_request(request_handle) + local path = request_handle:headers():get(":path") + request_handle + :headers() + :replace(":path", path:gsub("&=[^&]*", ""):gsub("?=[^&]*$", ""):gsub("?=[^&]*&", "?"):gsub("&apikey=[^&]*", ""):gsub("?apikey=[^&]*$", ""):gsub("?apikey=[^&]*&", "?")) + end + remove_empty_key_query_parameters: + inline_string: |- + function envoy_on_request(request_handle) + local path = request_handle:headers():get(":path") + request_handle + :headers() + :replace(":path", path:gsub("&=[^&]*", ""):gsub("?=[^&]*$", ""):gsub("?=[^&]*&", "?")) + end + - name: envoy.filters.http.compressor.brotli + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + 
response_direction_config: + common_config: + min_content_length: 100 + content_type: + - application/vnd.pgrst.object+json + - application/vnd.pgrst.array+json + - application/openapi+json + - application/geo+json + - text/csv + - application/vnd.pgrst.plan + - application/vnd.pgrst.object + - application/vnd.pgrst.array + - application/javascript + - application/json + - application/xhtml+xml + - image/svg+xml + - text/css + - text/html + - text/plain + - text/xml + disable_on_etag_header: true + request_direction_config: + common_config: + enabled: + default_value: false + runtime_key: request_compressor_enabled + compressor_library: + name: text_optimized + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.compression.brotli.compressor.v3.Brotli + - name: envoy.filters.http.compressor.gzip + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + response_direction_config: + common_config: + min_content_length: 100 + content_type: + - application/vnd.pgrst.object+json + - application/vnd.pgrst.array+json + - application/openapi+json + - application/geo+json + - text/csv + - application/vnd.pgrst.plan + - application/vnd.pgrst.object + - application/vnd.pgrst.array + - application/javascript + - application/json + - application/xhtml+xml + - image/svg+xml + - text/css + - text/html + - text/plain + - text/xml + disable_on_etag_header: true + request_direction_config: + common_config: + enabled: + default_value: false + runtime_key: request_compressor_enabled + compressor_library: + name: text_optimized + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip + - name: envoy.filters.http.router + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + dynamic_stats: false + local_reply_config: + mappers: + - filter: + and_filter: + filters: + - status_code_filter: + comparison: + value: + default_value: 403 
+ runtime_key: unused + - header_filter: + header: + name: ':path' + string_match: + prefix: /customer/v1/privileged/ + status_code: 401 + body: + inline_string: Unauthorized + headers_to_add: + - header: + key: WWW-Authenticate + value: Basic realm="Unknown" + - filter: + and_filter: + filters: + - status_code_filter: + comparison: + value: + default_value: 403 + runtime_key: unused + - header_filter: + header: + name: ':path' + string_match: + prefix: /metrics/aggregated + invert_match: true + status_code: 401 + headers_to_add: + - header: + key: x-sb-error-code + value: '%RESPONSE_CODE_DETAILS%' + body_format_override: + json_format: + message: >- + `apikey` request header or query parameter is either + missing or invalid. Double check your tealbase `anon` + or `service_role` API key. + hint: '%RESPONSE_CODE_DETAILS%' + json_format_options: + sort_properties: false + merge_slashes: true + route_config: + name: route_config_0 + virtual_hosts: + - name: virtual_host_0 + domains: + - '*' + typed_per_filter_config: + envoy.filters.http.cors: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.cors.v3.CorsPolicy + allow_origin_string_match: + - safe_regex: + regex: \* + allow_methods: GET,HEAD,PUT,PATCH,POST,DELETE,OPTIONS,TRACE,CONNECT + allow_headers: apikey,authorization,x-client-info + max_age: '3600' + routes: + - match: + path: /health + direct_response: + status: 200 + body: + inline_string: Healthy + typed_per_filter_config: &ref_0 + envoy.filters.http.rbac: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute + - match: + safe_regex: + google_re2: + max_program_size: 150 + regex: >- + /auth/v1/(verify|callback|authorize|sso/saml/(acs|metadata|slo)|\.well-known/(openid-configuration|jwks\.json)) + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: gotrue + regex_rewrite: + pattern: + regex: ^/auth/v1 + substitution: '' + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 35s + 
typed_per_filter_config: *ref_0 + - match: + prefix: /auth/v1/ + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: gotrue + prefix_rewrite: / + timeout: 35s + - match: + prefix: /rest/v1/ + query_parameters: + - name: apikey + present_match: true + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest + prefix_rewrite: / + timeout: 125s + typed_per_filter_config: + envoy.filters.http.lua: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: remove_apikey_and_empty_key_query_parameters + - match: + prefix: /rest/v1/ + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest + prefix_rewrite: / + timeout: 125s + typed_per_filter_config: + envoy.filters.http.lua: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: remove_empty_key_query_parameters + - match: + prefix: /rest-admin/v1/ + query_parameters: + - name: apikey + present_match: true + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest_admin + prefix_rewrite: / + typed_per_filter_config: + envoy.filters.http.lua: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: remove_apikey_and_empty_key_query_parameters + - match: + prefix: /rest-admin/v1/ + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest_admin + prefix_rewrite: / + - match: + path: /graphql/v1 + request_headers_to_add: + header: + key: Content-Profile + value: graphql_public + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest + prefix_rewrite: /rpc/graphql + timeout: 125s + - match: + prefix: /admin/v1/ + request_headers_to_remove: + - sb-opk + route: + cluster: admin_api + prefix_rewrite: / + timeout: 600s + - match: + prefix: /customer/v1/privileged/ + request_headers_to_remove: + - sb-opk + route: + cluster: admin_api + prefix_rewrite: /privileged/ + - match: + prefix: 
/metrics/aggregated + request_headers_to_remove: + - sb-opk + route: + cluster: admin_api + prefix_rewrite: /tealbase-internal/metrics + typed_per_filter_config: + envoy.filters.http.rbac: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute + rbac: + rules: + action: DENY + policies: + not_private_ip: + permissions: + - any: true + principals: + - not_id: + direct_remote_ip: + address_prefix: 10.0.0.0 + prefix_len: 8 + include_attempt_count_in_response: true + retry_policy: + num_retries: 5 + retry_back_off: + base_interval: 0.1s + max_interval: 1s + retry_on: gateway-error + stat_prefix: ingress_http + - '@type': type.googleapis.com/envoy.config.listener.v3.Listener + name: https_listener + address: + socket_address: + address: '::' + port_value: 443 + ipv4_compat: true + filter_chains: + - filters: *ref_1 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: + filename: /etc/envoy/fullChain.pem + private_key: + filename: /etc/envoy/privKey.pem + diff --git a/ansible/files/envoy_config/lds.yaml b/ansible/files/envoy_config/lds.yaml index 05798f6..6cc1e8a 100644 --- a/ansible/files/envoy_config/lds.yaml +++ b/ansible/files/envoy_config/lds.yaml @@ -215,6 +215,10 @@ resources: prefix: /metrics/aggregated invert_match: true status_code: 401 + headers_to_add: + - header: + key: x-sb-error-code + value: '%RESPONSE_CODE_DETAILS%' body_format_override: json_format: message: >- diff --git a/ansible/files/gotrue-optimizations.service.j2 b/ansible/files/gotrue-optimizations.service.j2 index b483c58..4cd8256 100644 --- a/ansible/files/gotrue-optimizations.service.j2 +++ b/ansible/files/gotrue-optimizations.service.j2 @@ -5,6 +5,7 @@ Description=GoTrue (Auth) optimizations Type=oneshot # we don't want failures from this command to cause PG startup to fail 
ExecStart=/bin/bash -c "/opt/tealbase-admin-api optimize auth --destination-config-file-path /etc/gotrue/gotrue.generated.env ; exit 0" +ExecStartPost=/bin/bash -c "cp -a /etc/gotrue/gotrue.generated.env /etc/auth.d/20_generated.env ; exit 0" User=postgrest [Install] diff --git a/ansible/files/gotrue.service.j2 b/ansible/files/gotrue.service.j2 index c1f7f58..2478e99 100644 --- a/ansible/files/gotrue.service.j2 +++ b/ansible/files/gotrue.service.j2 @@ -4,7 +4,12 @@ Description=Gotrue [Service] Type=simple WorkingDirectory=/opt/gotrue +{% if qemu_mode is defined and qemu_mode %} ExecStart=/opt/gotrue/gotrue +{% else %} +ExecStart=/opt/gotrue/gotrue --config-dir /etc/auth.d +{% endif %} + User=gotrue Restart=always RestartSec=3 diff --git a/ansible/files/permission_check.py b/ansible/files/permission_check.py index 724acb1..a1153d1 100644 --- a/ansible/files/permission_check.py +++ b/ansible/files/permission_check.py @@ -1,155 +1,118 @@ import subprocess import json import sys +import argparse + # Expected groups for each user expected_results = { "postgres": [ {"groupname": "postgres", "username": "postgres"}, - {"groupname": "ssl-cert", "username": "postgres"} + {"groupname": "ssl-cert", "username": "postgres"}, ], "ubuntu": [ - {"groupname":"ubuntu","username":"ubuntu"}, - {"groupname":"adm","username":"ubuntu"}, - {"groupname":"dialout","username":"ubuntu"}, - {"groupname":"cdrom","username":"ubuntu"}, - {"groupname":"floppy","username":"ubuntu"}, - {"groupname":"sudo","username":"ubuntu"}, - {"groupname":"audio","username":"ubuntu"}, - {"groupname":"dip","username":"ubuntu"}, - {"groupname":"video","username":"ubuntu"}, - {"groupname":"plugdev","username":"ubuntu"}, - {"groupname":"lxd","username":"ubuntu"}, - {"groupname":"netdev","username":"ubuntu"} - ], - "root": [ - {"groupname":"root","username":"root"} - ], - "daemon": [ - {"groupname":"daemon","username":"daemon"} - ], - "bin": [ - {"groupname":"bin","username":"bin"} - ], - "sys": [ - 
{"groupname":"sys","username":"sys"} - ], - "sync": [ - {"groupname":"nogroup","username":"sync"} - ], - "games": [ - {"groupname":"games","username":"games"} - ], - "man": [ - {"groupname":"man","username":"man"} - ], - "lp": [ - {"groupname":"lp","username":"lp"} - ], - "mail": [ - {"groupname":"mail","username":"mail"} - ], - "news": [ - {"groupname":"news","username":"news"} - ], - "uucp": [ - {"groupname":"uucp","username":"uucp"} - ], - "proxy": [ - {"groupname":"proxy","username":"proxy"} - ], - "www-data": [ - {"groupname":"www-data","username":"www-data"} - ], - "backup": [ - {"groupname":"backup","username":"backup"} - ], - "list": [ - {"groupname":"list","username":"list"} - ], - "irc": [ - {"groupname":"irc","username":"irc"} - ], - "gnats": [ - {"groupname":"gnats","username":"gnats"} - ], - "nobody": [ - {"groupname":"nogroup","username":"nobody"} - ], + {"groupname": "adm", "username": "ubuntu"}, + {"groupname": "audio", "username": "ubuntu"}, + {"groupname": "cdrom", "username": "ubuntu"}, + {"groupname": "dialout", "username": "ubuntu"}, + {"groupname": "dip", "username": "ubuntu"}, + {"groupname": "floppy", "username": "ubuntu"}, + {"groupname": "lxd", "username": "ubuntu"}, + {"groupname": "netdev", "username": "ubuntu"}, + {"groupname": "plugdev", "username": "ubuntu"}, + {"groupname": "sudo", "username": "ubuntu"}, + {"groupname": "ubuntu", "username": "ubuntu"}, + {"groupname": "video", "username": "ubuntu"}, + ], + "root": [{"groupname": "root", "username": "root"}], + "daemon": [{"groupname": "daemon", "username": "daemon"}], + "bin": [{"groupname": "bin", "username": "bin"}], + "sys": [{"groupname": "sys", "username": "sys"}], + "sync": [{"groupname": "nogroup", "username": "sync"}], + "games": [{"groupname": "games", "username": "games"}], + "man": [{"groupname": "man", "username": "man"}], + "lp": [{"groupname": "lp", "username": "lp"}], + "mail": [{"groupname": "mail", "username": "mail"}], + "news": [{"groupname": "news", "username": 
"news"}], + "uucp": [{"groupname": "uucp", "username": "uucp"}], + "proxy": [{"groupname": "proxy", "username": "proxy"}], + "www-data": [{"groupname": "www-data", "username": "www-data"}], + "backup": [{"groupname": "backup", "username": "backup"}], + "list": [{"groupname": "list", "username": "list"}], + "irc": [{"groupname": "irc", "username": "irc"}], + "nobody": [{"groupname": "nogroup", "username": "nobody"}], "systemd-network": [ - {"groupname":"systemd-network","username":"systemd-network"} + {"groupname": "systemd-network", "username": "systemd-network"} ], "systemd-resolve": [ - {"groupname":"systemd-resolve","username":"systemd-resolve"} + {"groupname": "systemd-resolve", "username": "systemd-resolve"} ], "systemd-timesync": [ - {"groupname":"systemd-timesync","username":"systemd-timesync"} - ], - "messagebus": [ - {"groupname":"messagebus","username":"messagebus"} + {"groupname": "systemd-timesync", "username": "systemd-timesync"} ], + "messagebus": [{"groupname": "messagebus", "username": "messagebus"}], "ec2-instance-connect": [ - {"groupname":"nogroup","username":"ec2-instance-connect"} - ], - "sshd": [ - {"groupname":"nogroup","username":"sshd"} + {"groupname": "nogroup", "username": "ec2-instance-connect"} ], + "sshd": [{"groupname": "nogroup", "username": "sshd"}], "wal-g": [ - {"groupname":"wal-g","username":"wal-g"}, - {"groupname":"postgres","username":"wal-g"} + {"groupname": "postgres", "username": "wal-g"}, + {"groupname": "wal-g", "username": "wal-g"}, ], "pgbouncer": [ - {"groupname":"pgbouncer","username":"pgbouncer"}, - {"groupname":"ssl-cert","username":"pgbouncer"}, - {"groupname":"postgres","username":"pgbouncer"} - ], - "gotrue": [ - {"groupname":"gotrue","username":"gotrue"} - ], - "envoy": [ - {"groupname":"envoy","username":"envoy"} - ], - "kong": [ - {"groupname":"kong","username":"kong"} - ], - "nginx": [ - {"groupname":"nginx","username":"nginx"} - ], + {"groupname": "pgbouncer", "username": "pgbouncer"}, + {"groupname": 
"postgres", "username": "pgbouncer"}, + {"groupname": "ssl-cert", "username": "pgbouncer"}, + ], + "gotrue": [{"groupname": "gotrue", "username": "gotrue"}], + "envoy": [{"groupname": "envoy", "username": "envoy"}], + "kong": [{"groupname": "kong", "username": "kong"}], + "nginx": [{"groupname": "nginx", "username": "nginx"}], "vector": [ - {"groupname":"vector","username":"vector"}, - {"groupname":"adm","username":"vector"}, - {"groupname":"systemd-journal","username":"vector"}, - {"groupname":"postgres","username":"vector"} + {"groupname": "adm", "username": "vector"}, + {"groupname": "postgres", "username": "vector"}, + {"groupname": "systemd-journal", "username": "vector"}, + {"groupname": "vector", "username": "vector"}, ], "adminapi": [ - {"groupname":"adminapi","username":"adminapi"}, - {"groupname":"root","username":"adminapi"}, - {"groupname":"systemd-journal","username":"adminapi"}, - {"groupname":"admin","username":"adminapi"}, - {"groupname":"postgres","username":"adminapi"}, - {"groupname":"pgbouncer","username":"adminapi"}, - {"groupname":"wal-g","username":"adminapi"}, - {"groupname":"postgrest","username":"adminapi"}, - {"groupname":"envoy","username":"adminapi"}, - {"groupname":"kong","username":"adminapi"}, - {"groupname":"vector","username":"adminapi"} - ], - "postgrest": [ - {"groupname":"postgrest","username":"postgrest"} + {"groupname": "admin", "username": "adminapi"}, + {"groupname": "adminapi", "username": "adminapi"}, + {"groupname": "envoy", "username": "adminapi"}, + {"groupname": "gotrue", "username": "adminapi"}, + {"groupname": "kong", "username": "adminapi"}, + {"groupname": "pgbouncer", "username": "adminapi"}, + {"groupname": "postgres", "username": "adminapi"}, + {"groupname": "postgrest", "username": "adminapi"}, + {"groupname": "root", "username": "adminapi"}, + {"groupname": "systemd-journal", "username": "adminapi"}, + {"groupname": "vector", "username": "adminapi"}, + {"groupname": "wal-g", "username": "adminapi"}, + ], + 
"postgrest": [{"groupname": "postgrest", "username": "postgrest"}], + "tcpdump": [{"groupname": "tcpdump", "username": "tcpdump"}], + "systemd-coredump": [ + {"groupname": "systemd-coredump", "username": "systemd-coredump"} ], - "tcpdump": [ - {"groupname":"tcpdump","username":"tcpdump"} + "tealbase-admin-agent": [ + {"groupname": "tealbase-admin-agent", "username": "tealbase-admin-agent"}, + {"groupname": "admin", "username": "tealbase-admin-agent"}, + {"groupname": "salt", "username": "tealbase-admin-agent"}, ], - "systemd-coredump": [ - {"groupname":"systemd-coredump","username":"systemd-coredump"} - ] } + +# postgresql.service is expected to mount /etc as read-only +expected_mount = "/etc ro" + + # This program depends on osquery being installed on the system # Function to run osquery def run_osquery(query): - process = subprocess.Popen(['osqueryi', '--json', query], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + process = subprocess.Popen( + ["osqueryi", "--json", query], stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) output, error = process.communicate() - return output.decode('utf-8') + return output.decode("utf-8") + def parse_json(json_str): try: @@ -158,6 +121,7 @@ def parse_json(json_str): print("Error decoding JSON:", e) sys.exit(1) + def compare_results(username, query_result): expected_result = expected_results.get(username) if expected_result is None: @@ -167,38 +131,133 @@ def compare_results(username, query_result): if query_result == expected_result: print(f"The query result for user '{username}' matches the expected result.") else: - print(f"The query result for user '{username}' does not match the expected result.") + print( + f"The query result for user '{username}' does not match the expected result." 
+ ) print("Expected:", expected_result) print("Got:", query_result) sys.exit(1) + def check_nixbld_users(): query = """ - SELECT u.username, g.groupname - FROM users u - JOIN user_groups ug ON u.uid = ug.uid - JOIN groups g ON ug.gid = g.gid + SELECT u.username, g.groupname + FROM users u + JOIN user_groups ug ON u.uid = ug.uid + JOIN groups g ON ug.gid = g.gid WHERE u.username LIKE 'nixbld%'; """ query_result = run_osquery(query) parsed_result = parse_json(query_result) - + for user in parsed_result: - if user['groupname'] != 'nixbld': - print(f"User '{user['username']}' is in group '{user['groupname']}' instead of 'nixbld'.") + if user["groupname"] != "nixbld": + print( + f"User '{user['username']}' is in group '{user['groupname']}' instead of 'nixbld'." + ) sys.exit(1) - + print("All nixbld users are in the 'nixbld' group.") -# Define usernames for which you want to compare results -usernames = ["postgres", "ubuntu", "root", "daemon", "bin", "sys", "sync", "games","man","lp","mail","news","uucp","proxy","www-data","backup","list","irc","gnats","nobody","systemd-network","systemd-resolve","systemd-timesync","messagebus","ec2-instance-connect","sshd","wal-g","pgbouncer","gotrue","envoy","kong","nginx","vector","adminapi","postgrest","tcpdump","systemd-coredump"] -# Iterate over usernames, run the query, and compare results -for username in usernames: - query = f"SELECT u.username, g.groupname FROM users u JOIN user_groups ug ON u.uid = ug.uid JOIN groups g ON ug.gid = g.gid WHERE u.username = '{username}';" +def check_postgresql_mount(): + # processes table has the nix .postgres-wrapped path as the + # binary path, rather than /usr/lib/postgresql/bin/postgres which + # is a symlink to /var/lib/postgresql/.nix-profile/bin/postgres, a script + # that ultimately calls /nix/store/...-postgresql-and-plugins-15.8/bin/.postgres-wrapped + query = """ + SELECT pid + FROM processes + WHERE path LIKE '%.postgres-wrapped%' + AND cmdline LIKE '%-D /etc/postgresql%'; + """ 
query_result = run_osquery(query) parsed_result = parse_json(query_result) - compare_results(username, parsed_result) -# Check if all nixbld users are in the nixbld group -check_nixbld_users() + pid = parsed_result[0].get("pid") + + # get the mounts for the process + with open(f"/proc/{pid}/mounts", "r") as o: + lines = [line for line in o if "/etc" in line and "ro," in line] + if len(lines) == 0: + print(f"Expected exactly 1 match, got 0") + sys.exit(1) + if len(lines) != 1: + print(f"Expected exactly 1 match, got {len(lines)}: {';'.join(lines)}") + sys.exit(1) + + print("postgresql.service mounts /etc as read-only.") + + +def main(): + parser = argparse.ArgumentParser( + prog="tealbase Postgres Artifact Permissions Checker", + description="Checks the Postgres Artifact for the appropriate users and group memberships", + ) + parser.add_argument( + "-q", + "--qemu", + action="store_true", + help="Whether we are checking a QEMU artifact", + ) + args = parser.parse_args() + qemu_artifact = args.qemu or False + + # Define usernames for which you want to compare results + usernames = [ + "postgres", + "ubuntu", + "root", + "daemon", + "bin", + "sys", + "sync", + "games", + "man", + "lp", + "mail", + "news", + "uucp", + "proxy", + "www-data", + "backup", + "list", + "irc", + "nobody", + "systemd-network", + "systemd-resolve", + "systemd-timesync", + "messagebus", + "sshd", + "wal-g", + "pgbouncer", + "gotrue", + "envoy", + "kong", + "nginx", + "vector", + "adminapi", + "postgrest", + "tcpdump", + "systemd-coredump", + "tealbase-admin-agent", + ] + if not qemu_artifact: + usernames.append("ec2-instance-connect") + + # Iterate over usernames, run the query, and compare results + for username in usernames: + query = f"SELECT u.username, g.groupname FROM users u JOIN user_groups ug ON u.uid = ug.uid JOIN groups g ON ug.gid = g.gid WHERE u.username = '{username}' ORDER BY g.groupname;" + query_result = run_osquery(query) + parsed_result = parse_json(query_result) + 
compare_results(username, parsed_result) + + # Check if all nixbld users are in the nixbld group + check_nixbld_users() + + # Check if postgresql.service is using a read-only mount for /etc + check_postgresql_mount() + + +if __name__ == "__main__": + main() diff --git a/ansible/files/pgbouncer_config/pgbouncer.service.j2 b/ansible/files/pgbouncer_config/pgbouncer.service.j2 index 5a0447b..c696255 100644 --- a/ansible/files/pgbouncer_config/pgbouncer.service.j2 +++ b/ansible/files/pgbouncer_config/pgbouncer.service.j2 @@ -15,6 +15,8 @@ ExecStart=/usr/local/bin/pgbouncer /etc/pgbouncer/pgbouncer.ini ExecReload=/bin/kill -HUP $MAINPID KillSignal=SIGINT LimitNOFILE=65536 +Restart=always +RestartSec=5 [Install] WantedBy=multi-user.target diff --git a/ansible/files/postgres_exporter.service.j2 b/ansible/files/postgres_exporter.service.j2 index 649ea75..2af6a45 100644 --- a/ansible/files/postgres_exporter.service.j2 +++ b/ansible/files/postgres_exporter.service.j2 @@ -3,7 +3,8 @@ Description=Postgres Exporter [Service] Type=simple -ExecStart=/opt/postgres_exporter/postgres_exporter --disable-settings-metrics --extend.query-path="/opt/postgres_exporter/queries.yml" --disable-default-metrics --no-collector.locks --no-collector.replication --no-collector.replication_slot --no-collector.stat_bgwriter --no-collector.stat_database --no-collector.stat_user_tables --no-collector.statio_user_tables --no-collector.wal +ExecStart=/opt/postgres_exporter/postgres_exporter --disable-settings-metrics --extend.query-path="/opt/postgres_exporter/queries.yml" --disable-default-metrics --no-collector.locks --no-collector.replication --no-collector.replication_slot --no-collector.stat_bgwriter --no-collector.stat_database --no-collector.stat_user_tables --no-collector.statio_user_tables --no-collector.wal {% if qemu_mode is defined and qemu_mode %}--no-collector.database {% endif %} + User=postgres Group=postgres Restart=always diff --git a/ansible/files/postgres_prestart.sh.j2 
b/ansible/files/postgres_prestart.sh.j2 index ae5aa1a..3ffe54c 100644 --- a/ansible/files/postgres_prestart.sh.j2 +++ b/ansible/files/postgres_prestart.sh.j2 @@ -1,9 +1,49 @@ #!/bin/bash +check_orioledb_enabled() { + local pg_conf="/etc/postgresql/postgresql.conf" + if [ ! -f "$pg_conf" ]; then + echo 0; return + fi + grep "^shared_preload_libraries" "$pg_conf" | grep -c "orioledb" || return 0 +} + +get_shared_buffers() { + local opt_conf="/etc/postgresql-custom/generated-optimizations.conf" + if [ ! -f "$opt_conf" ]; then + return 0 + fi + grep "^shared_buffers = " "$opt_conf" | cut -d "=" -f2 | tr -d ' ' || return 0 +} + +update_orioledb_buffers() { + local pg_conf="/etc/postgresql/postgresql.conf" + local value="$1" + if grep -q "^orioledb.main_buffers = " "$pg_conf"; then + sed -i "s/^orioledb.main_buffers = .*/orioledb.main_buffers = $value/" "$pg_conf" + else + echo "orioledb.main_buffers = $value" >> "$pg_conf" + fi +} + +main() { + local has_orioledb=$(check_orioledb_enabled) + if [ "$has_orioledb" -lt 1 ]; then + return 0 + fi + local shared_buffers_value=$(get_shared_buffers) + if [ ! 
-z "$shared_buffers_value" ]; then + update_orioledb_buffers "$shared_buffers_value" + fi +} + +# Initial locale setup if [ $(cat /etc/locale.gen | grep -c en_US.UTF-8) -eq 0 ]; then - echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen + echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen fi if [ $(locale -a | grep -c en_US.utf8) -eq 0 ]; then - locale-gen + locale-gen fi + +main diff --git a/ansible/files/postgresql_config/postgresql.conf.j2 b/ansible/files/postgresql_config/postgresql.conf.j2 index 1604d94..f133be8 100644 --- a/ansible/files/postgresql_config/postgresql.conf.j2 +++ b/ansible/files/postgresql_config/postgresql.conf.j2 @@ -540,7 +540,7 @@ log_line_prefix = '%h %m [%p] %q%u@%d ' # special values: #log_parameter_max_length_on_error = 0 # when logging an error, limit logged # bind-parameter values to N bytes; # -1 means print in full, 0 disables -log_statement = 'none' # none, ddl, mod, all +log_statement = 'ddl' # none, ddl, mod, all #log_replication_commands = off #log_temp_files = -1 # log temporary files equal or larger # than the specified size in kilobytes; @@ -688,7 +688,7 @@ default_text_search_config = 'pg_catalog.english' #local_preload_libraries = '' #session_preload_libraries = '' -shared_preload_libraries = 'pg_stat_statements, pgaudit, plpgsql, plpgsql_check, pg_cron, pg_net, pgsodium, timescaledb, auto_explain, pg_tle, plan_filter' # (change requires restart) +shared_preload_libraries = 'pg_stat_statements, pgaudit, plpgsql, plpgsql_check, pg_cron, pg_net, pgsodium, timescaledb, auto_explain, pg_tle, plan_filter, tealbase_vault' # (change requires restart) jit_provider = 'llvmjit' # JIT library to use # - Other Defaults - diff --git a/ansible/files/postgresql_config/postgresql.service.j2 b/ansible/files/postgresql_config/postgresql.service.j2 index be219f5..c09d38b 100644 --- a/ansible/files/postgresql_config/postgresql.service.j2 +++ b/ansible/files/postgresql_config/postgresql.service.j2 @@ -20,6 +20,9 @@ Restart=always RestartSec=5 
OOMScoreAdjust=-1000 EnvironmentFile=-/etc/environment.d/postgresql.env - +LimitNOFILE=16384 +{% if tealbase_internal is defined %} +ReadOnlyPaths=/etc +{% endif %} [Install] WantedBy=multi-user.target diff --git a/ansible/files/postgresql_config/supautils.conf.j2 b/ansible/files/postgresql_config/supautils.conf.j2 index 9ef30ab..9e54450 100644 --- a/ansible/files/postgresql_config/supautils.conf.j2 +++ b/ansible/files/postgresql_config/supautils.conf.j2 @@ -1,14 +1,15 @@ supautils.extensions_parameter_overrides = '{"pg_cron":{"schema":"pg_catalog"}}' -supautils.policy_grants = '{"postgres":["auth.audit_log_entries","auth.identities","auth.refresh_tokens","auth.sessions","auth.users","realtime.messages","storage.buckets","storage.migrations","storage.objects","storage.s3_multipart_uploads","storage.s3_multipart_uploads_parts"]}' -supautils.drop_trigger_grants = '{"postgres":["auth.audit_log_entries","auth.identities","auth.refresh_tokens","auth.sessions","auth.users","realtime.messages","storage.buckets","storage.migrations","storage.objects","storage.s3_multipart_uploads","storage.s3_multipart_uploads_parts"]}' +supautils.policy_grants = '{"postgres":["auth.audit_log_entries","auth.identities","auth.mfa_factors","auth.refresh_tokens","auth.sessions","auth.users","realtime.messages","storage.buckets","storage.migrations","storage.objects","storage.s3_multipart_uploads","storage.s3_multipart_uploads_parts"]}' +supautils.drop_trigger_grants = '{"postgres":["auth.audit_log_entries","auth.identities","auth.mfa_factors","auth.refresh_tokens","auth.sessions","auth.users","realtime.messages","storage.buckets","storage.migrations","storage.objects","storage.s3_multipart_uploads","storage.s3_multipart_uploads_parts"]}' # full list: address_standardizer, address_standardizer_data_us, adminpack, amcheck, autoinc, bloom, btree_gin, btree_gist, citext, cube, dblink, dict_int, dict_xsyn, earthdistance, file_fdw, fuzzystrmatch, hstore, http, hypopg, index_advisor, 
insert_username, intagg, intarray, isn, lo, ltree, moddatetime, old_snapshot, orioledb, pageinspect, pg_buffercache, pg_cron, pg_freespacemap, pg_graphql, pg_hashids, pg_jsonschema, pg_net, pg_prewarm, pg_repack, pg_stat_monitor, pg_stat_statements, pg_surgery, pg_tle, pg_trgm, pg_visibility, pg_walinspect, pgaudit, pgcrypto, pgjwt, pgmq, pgroonga, pgroonga_database, pgrouting, pgrowlocks, pgsodium, pgstattuple, pgtap, plcoffee, pljava, plls, plpgsql, plpgsql_check, plv8, postgis, postgis_raster, postgis_sfcgal, postgis_tiger_geocoder, postgis_topology, postgres_fdw, refint, rum, seg, sslinfo, tealbase_vault, supautils, tablefunc, tcn, timescaledb, tsm_system_rows, tsm_system_time, unaccent, uuid-ossp, vector, wrappers, xml2 -# omitted because may be unsafe: adminpack, amcheck, file_fdw, lo, old_snapshot, pageinspect, pg_buffercache, pg_freespacemap, pg_surgery, pg_visibility +# omitted because may be unsafe: adminpack, amcheck, file_fdw, lo, old_snapshot, pageinspect, pg_freespacemap, pg_surgery, pg_visibility # omitted because deprecated: intagg, xml2 # omitted because doesn't require superuser: pgmq -supautils.privileged_extensions = 'address_standardizer, address_standardizer_data_us, autoinc, bloom, btree_gin, btree_gist, citext, cube, dblink, dict_int, dict_xsyn, earthdistance, fuzzystrmatch, hstore, http, hypopg, index_advisor, insert_username, intarray, isn, ltree, moddatetime, orioledb, pg_cron, pg_graphql, pg_hashids, pg_jsonschema, pg_net, pg_repack, pg_stat_monitor, pg_stat_statements, pg_tle, pg_trgm, pg_walinspect, pgaudit, pgcrypto, pgjwt, pg_prewarm, pgroonga, pgroonga_database, pgrouting, pgrowlocks, pgstattuple, pgsodium, pgtap, plcoffee, pljava, plls, plpgsql, plpgsql_check, plv8, postgis, postgis_raster, postgis_sfcgal, postgis_tiger_geocoder, postgis_topology, postgres_fdw, refint, rum, seg, sslinfo, tealbase_vault, supautils, tablefunc, tcn, timescaledb, tsm_system_rows, tsm_system_time, unaccent, uuid-ossp, vector, wrappers' 
-supautils.privileged_extensions_custom_scripts_path = '/etc/postgresql-custom/extension-custom-scripts' +# omitted because protected: plpgsql +supautils.privileged_extensions = 'address_standardizer, address_standardizer_data_us, autoinc, bloom, btree_gin, btree_gist, citext, cube, dblink, dict_int, dict_xsyn, earthdistance, fuzzystrmatch, hstore, http, hypopg, index_advisor, insert_username, intarray, isn, ltree, moddatetime, orioledb, pg_buffercache, pg_cron, pg_graphql, pg_hashids, pg_jsonschema, pg_net, pg_prewarm, pg_repack, pg_stat_monitor, pg_stat_statements, pg_tle, pg_trgm, pg_walinspect, pgaudit, pgcrypto, pgjwt, pgroonga, pgroonga_database, pgrouting, pgrowlocks, pgsodium, pgstattuple, pgtap, plcoffee, pljava, plls, plpgsql_check, plv8, postgis, postgis_raster, postgis_sfcgal, postgis_tiger_geocoder, postgis_topology, postgres_fdw, refint, rum, seg, sslinfo, tealbase_vault, supautils, tablefunc, tcn, timescaledb, tsm_system_rows, tsm_system_time, unaccent, uuid-ossp, vector, wrappers' +supautils.extension_custom_scripts_path = '/etc/postgresql-custom/extension-custom-scripts' supautils.privileged_extensions_superuser = 'tealbase_admin' supautils.privileged_role = 'postgres' -supautils.privileged_role_allowed_configs = 'auto_explain.log_min_duration, auto_explain.log_nested_statements, log_lock_waits, log_min_messages, pg_net.batch_size, pg_net.ttl, pgaudit.log, pgaudit.log_catalog, pgaudit.log_client, pgaudit.log_level, pgaudit.log_relation, pgaudit.log_rows, pgaudit.log_statement, pgaudit.log_statement_once, pgaudit.role, pgrst.*, plan_filter.*, safeupdate.enabled, session_replication_role, track_io_timing' +supautils.privileged_role_allowed_configs = 'auto_explain.*, log_lock_waits, log_min_duration_statement, log_min_messages, log_replication_commands, log_statement, log_temp_files, pg_net.batch_size, pg_net.ttl, pg_stat_statements.*, pgaudit.log, pgaudit.log_catalog, pgaudit.log_client, pgaudit.log_level, pgaudit.log_relation, pgaudit.log_rows, 
pgaudit.log_statement, pgaudit.log_statement_once, pgaudit.role, pgrst.*, plan_filter.*, safeupdate.enabled, session_replication_role, track_io_timing, wal_compression' supautils.reserved_memberships = 'pg_read_server_files, pg_write_server_files, pg_execute_server_program, tealbase_admin, tealbase_auth_admin, tealbase_storage_admin, tealbase_read_only_user, tealbase_realtime_admin, tealbase_replication_admin, dashboard_user, pgbouncer, authenticator' supautils.reserved_roles = 'tealbase_admin, tealbase_auth_admin, tealbase_storage_admin, tealbase_read_only_user, tealbase_realtime_admin, tealbase_replication_admin, dashboard_user, pgbouncer, service_role*, authenticator*, authenticated*, anon*' diff --git a/ansible/files/postgresql_extension_custom_scripts/pg_repack/after-create.sql b/ansible/files/postgresql_extension_custom_scripts/pg_repack/after-create.sql new file mode 100644 index 0000000..b0ec306 --- /dev/null +++ b/ansible/files/postgresql_extension_custom_scripts/pg_repack/after-create.sql @@ -0,0 +1,4 @@ +grant all on all tables in schema repack to postgres; +grant all on schema repack to postgres; +alter default privileges in schema repack grant all on tables to postgres; +alter default privileges in schema repack grant all on sequences to postgres; diff --git a/ansible/files/postgresql_extension_custom_scripts/pgmq/after-create.sql b/ansible/files/postgresql_extension_custom_scripts/pgmq/after-create.sql index 8b126d4..050e07d 100644 --- a/ansible/files/postgresql_extension_custom_scripts/pgmq/after-create.sql +++ b/ansible/files/postgresql_extension_custom_scripts/pgmq/after-create.sql @@ -2,18 +2,172 @@ do $$ declare extoid oid := (select oid from pg_extension where extname = 'pgmq'); r record; + cls pg_class%rowtype; begin + set local search_path = ''; + +/* + Override the pgmq.drop_queue to check if relevant tables are owned + by the pgmq extension before attempting to run + `alter extension pgmq drop table ...` + this is necessary becasue, to 
enable nightly logical backups to include user queues + we automatically detach them from pgmq. + + this update is backwards compatible with version 1.4.4 but should be removed once we're on + physical backups everywhere +*/ +-- Detach and delete the official function +alter extension pgmq drop function pgmq.drop_queue; +drop function pgmq.drop_queue; + +-- Create and reattach the patched function +CREATE FUNCTION pgmq.drop_queue(queue_name TEXT) +RETURNS BOOLEAN AS $func$ +DECLARE + qtable TEXT := pgmq.format_table_name(queue_name, 'q'); + qtable_seq TEXT := qtable || '_msg_id_seq'; + fq_qtable TEXT := 'pgmq.' || qtable; + atable TEXT := pgmq.format_table_name(queue_name, 'a'); + fq_atable TEXT := 'pgmq.' || atable; + partitioned BOOLEAN; +BEGIN + EXECUTE FORMAT( + $QUERY$ + SELECT is_partitioned FROM pgmq.meta WHERE queue_name = %L + $QUERY$, + queue_name + ) INTO partitioned; + + -- NEW CONDITIONAL CHECK + if exists ( + select 1 + from pg_class c + join pg_depend d on c.oid = d.objid + join pg_extension e on d.refobjid = e.oid + where c.relname = qtable and e.extname = 'pgmq' + ) then + + EXECUTE FORMAT( + $QUERY$ + ALTER EXTENSION pgmq DROP TABLE pgmq.%I + $QUERY$, + qtable + ); + + end if; + + -- NEW CONDITIONAL CHECK + if exists ( + select 1 + from pg_class c + join pg_depend d on c.oid = d.objid + join pg_extension e on d.refobjid = e.oid + where c.relname = qtable_seq and e.extname = 'pgmq' + ) then + EXECUTE FORMAT( + $QUERY$ + ALTER EXTENSION pgmq DROP SEQUENCE pgmq.%I + $QUERY$, + qtable_seq + ); + + end if; + + -- NEW CONDITIONAL CHECK + if exists ( + select 1 + from pg_class c + join pg_depend d on c.oid = d.objid + join pg_extension e on d.refobjid = e.oid + where c.relname = atable and e.extname = 'pgmq' + ) then + + EXECUTE FORMAT( + $QUERY$ + ALTER EXTENSION pgmq DROP TABLE pgmq.%I + $QUERY$, + atable + ); + + end if; + + -- NO CHANGES PAST THIS POINT + + EXECUTE FORMAT( + $QUERY$ + DROP TABLE IF EXISTS pgmq.%I + $QUERY$, + qtable + ); + + EXECUTE 
FORMAT( + $QUERY$ + DROP TABLE IF EXISTS pgmq.%I + $QUERY$, + atable + ); + + IF EXISTS ( + SELECT 1 + FROM information_schema.tables + WHERE table_name = 'meta' and table_schema = 'pgmq' + ) THEN + EXECUTE FORMAT( + $QUERY$ + DELETE FROM pgmq.meta WHERE queue_name = %L + $QUERY$, + queue_name + ); + END IF; + + IF partitioned THEN + EXECUTE FORMAT( + $QUERY$ + DELETE FROM %I.part_config where parent_table in (%L, %L) + $QUERY$, + pgmq._get_pg_partman_schema(), fq_qtable, fq_atable + ); + END IF; + + RETURN TRUE; +END; +$func$ LANGUAGE plpgsql; + +alter extension pgmq add function pgmq.drop_queue; + + update pg_extension set extowner = 'postgres'::regrole where extname = 'pgmq'; + for r in (select * from pg_depend where refobjid = extoid) loop + + if r.classid = 'pg_type'::regclass then - execute(format('alter type %s owner to postgres;', r.objid::regtype)); + + -- store the type's relkind + select * into cls from pg_class c where c.reltype = r.objid; + + if r.objid::regtype::text like '%[]' then + -- do nothing (skipping array type) + + elsif cls.relkind in ('r', 'p', 'f', 'm') then + -- table-like objects (regular table, partitioned, foreign, materialized view) + execute format('alter table pgmq.%I owner to postgres;', cls.relname); + + else + execute(format('alter type %s owner to postgres;', r.objid::regtype)); + + end if; + elsif r.classid = 'pg_proc'::regclass then execute(format('alter function %s(%s) owner to postgres;', r.objid::regproc, pg_get_function_identity_arguments(r.objid))); + elsif r.classid = 'pg_class'::regclass then execute(format('alter table %s owner to postgres;', r.objid::regclass)); + else raise exception 'error on pgmq after-create script: unexpected object type %', r.classid; + end if; end loop; end $$; diff --git a/ansible/files/postgresql_extension_custom_scripts/pgsodium/after-create.sql b/ansible/files/postgresql_extension_custom_scripts/pgsodium/after-create.sql index 907c67e..38242ab 100644 --- 
a/ansible/files/postgresql_extension_custom_scripts/pgsodium/after-create.sql +++ b/ansible/files/postgresql_extension_custom_scripts/pgsodium/after-create.sql @@ -1,3 +1,26 @@ grant execute on function pgsodium.crypto_aead_det_decrypt(bytea, bytea, uuid, bytea) to service_role; grant execute on function pgsodium.crypto_aead_det_encrypt(bytea, bytea, uuid, bytea) to service_role; grant execute on function pgsodium.crypto_aead_det_keygen to service_role; + +CREATE OR REPLACE FUNCTION pgsodium.mask_role(masked_role regrole, source_name text, view_name text) +RETURNS void +LANGUAGE plpgsql +SECURITY DEFINER +SET search_path TO '' +AS $function$ +BEGIN + EXECUTE format( + 'GRANT SELECT ON pgsodium.key TO %s', + masked_role); + + EXECUTE format( + 'GRANT pgsodium_keyiduser, pgsodium_keyholder TO %s', + masked_role); + + EXECUTE format( + 'GRANT ALL ON %I TO %s', + view_name, + masked_role); + RETURN; +END +$function$; diff --git a/ansible/files/postgresql_extension_custom_scripts/pgsodium/before-create.sql b/ansible/files/postgresql_extension_custom_scripts/pgsodium/before-create.sql new file mode 100644 index 0000000..fb82a46 --- /dev/null +++ b/ansible/files/postgresql_extension_custom_scripts/pgsodium/before-create.sql @@ -0,0 +1,9 @@ +do $$ +declare + _extversion text := @extversion@; + _r record; +begin + if _extversion is not null and _extversion != '3.1.8' then + raise exception 'only pgsodium 3.1.8 is supported'; + end if; +end $$; diff --git a/ansible/files/postgresql_extension_custom_scripts/supabase_vault/after-create.sql b/ansible/files/postgresql_extension_custom_scripts/supabase_vault/after-create.sql new file mode 100644 index 0000000..f5c7284 --- /dev/null +++ b/ansible/files/postgresql_extension_custom_scripts/supabase_vault/after-create.sql @@ -0,0 +1,8 @@ +grant usage on schema vault to postgres with grant option; +grant select, delete, truncate, references on vault.secrets, vault.decrypted_secrets to postgres with grant option; +grant execute on 
function vault.create_secret, vault.update_secret, vault._crypto_aead_det_decrypt to postgres with grant option; + +-- service_role used to be able to manage secrets in Vault <=0.2.8 because it had privileges to pgsodium functions +grant usage on schema vault to service_role; +grant select, delete on vault.secrets, vault.decrypted_secrets to service_role; +grant execute on function vault.create_secret, vault.update_secret, vault._crypto_aead_det_decrypt to service_role; diff --git a/ansible/files/sodium_extension.sql b/ansible/files/sodium_extension.sql deleted file mode 100644 index a19cabf..0000000 --- a/ansible/files/sodium_extension.sql +++ /dev/null @@ -1,6 +0,0 @@ -create schema if not exists pgsodium; -create extension if not exists pgsodium with schema pgsodium cascade; - -grant pgsodium_keyiduser to postgres with admin option; -grant pgsodium_keyholder to postgres with admin option; -grant pgsodium_keymaker to postgres with admin option; diff --git a/ansible/files/supabase_admin_agent_config/tealbase-admin-agent.sudoers.conf b/ansible/files/supabase_admin_agent_config/tealbase-admin-agent.sudoers.conf new file mode 100644 index 0000000..ce18a39 --- /dev/null +++ b/ansible/files/supabase_admin_agent_config/tealbase-admin-agent.sudoers.conf @@ -0,0 +1,2 @@ +%tealbase-admin-agent ALL= NOPASSWD: /usr/bin/salt-call +%tealbase-admin-agent ALL= NOPASSWD: /usr/bin/gpg --homedir /etc/salt/gpgkeys --import, /usr/bin/gpg --homedir /etc/salt/gpgkeys --list-secret-keys * diff --git a/ansible/files/supabase_admin_agent_config/tealbase-admin-agent_salt.service b/ansible/files/supabase_admin_agent_config/tealbase-admin-agent_salt.service new file mode 100644 index 0000000..335d231 --- /dev/null +++ b/ansible/files/supabase_admin_agent_config/tealbase-admin-agent_salt.service @@ -0,0 +1,19 @@ +[Unit] +Description=Configuration management via tealbase-admin-agent salt +After=network.target + +[Service] +Type=oneshot +ExecStart=/opt/tealbase-admin-agent/tealbase-admin-agent 
--config /opt/tealbase-admin-agent/config.yaml salt --apply --store-result +User=tealbase-admin-agent +Group=tealbase-admin-agent +StandardOutput=journal +StandardError=journal +StateDirectory=tealbase-admin-agent +CacheDirectory=tealbase-admin-agent + +# Security hardening +PrivateTmp=true + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/supabase_admin_agent_config/tealbase-admin-agent_salt.timer.j2 b/ansible/files/supabase_admin_agent_config/tealbase-admin-agent_salt.timer.j2 new file mode 100644 index 0000000..cc1cd71 --- /dev/null +++ b/ansible/files/supabase_admin_agent_config/tealbase-admin-agent_salt.timer.j2 @@ -0,0 +1,13 @@ +[Unit] +Description=Run tealbase tealbase-admin-agent salt on a schedule +Requires=tealbase-admin-agent_salt.service + +[Timer] +OnCalendar=*:0/10 +# Random delay up to {{ tealbase_admin_agent_splay }} seconds splay +RandomizedDelaySec={{ tealbase_admin_agent_splay }} +AccuracySec=1s +Persistent=true + +[Install] +WantedBy=timers.target diff --git a/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.service b/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.service index 5e70943..939edc9 100644 --- a/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.service +++ b/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.service @@ -1,5 +1,6 @@ [Unit] Description=Check if systemd-networkd has broken NDisc routes and fix +# Remove Requisite to prevent being killed when restarting networkd Requisite=systemd-networkd.service After=systemd-networkd.service diff --git a/ansible/manifest-playbook.yml b/ansible/manifest-playbook.yml index da79c58..6de56e6 100644 --- a/ansible/manifest-playbook.yml +++ b/ansible/manifest-playbook.yml @@ -61,6 +61,22 @@ shell: | cd /tmp && tar -cJf admin-mgr-{{ adminmgr_release }}-arm64.tar.xz admin-mgr + - name: Download tealbase-admin-agent archive + get_url: + url: "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/tealbase-admin-agent/v{{ 
tealbase_admin_agent_release }}/tealbase-admin-agent-{{ tealbase_admin_agent_release }}-linux-arm64.tar.gz" + dest: "/tmp/tealbase-admin-agent.tar.gz" + timeout: 90 + + - name: tealbase-admin-agent - unpack archive in /tmp + unarchive: + remote_src: yes + src: /tmp/tealbase-admin-agent.tar.gz + dest: /tmp + + - name: tealbase-admin-agent - pack archive + shell: | + cd /tmp && tar -cJf tealbase-admin-agent-{{ tealbase_admin_agent_release }}-arm64.tar.xz tealbase-admin-agent-{{ tealbase_admin_agent_release }}-linux-arm64 + - name: upload archives shell: | aws s3 cp /tmp/{{ item.file }} s3://{{ internal_artifacts_bucket }}/upgrades/{{ item.service }}/{{ item.file }} @@ -73,3 +89,5 @@ file: tealbase-admin-api-{{ adminapi_release }}-arm64.tar.xz - service: admin-mgr file: admin-mgr-{{ adminmgr_release }}-arm64.tar.xz + - service: tealbase-admin-agent + file: tealbase-admin-agent-{{ tealbase_admin_agent_release }}-arm64.tar.xz diff --git a/ansible/playbook.yml b/ansible/playbook.yml index aba045c..97775b9 100644 --- a/ansible/playbook.yml +++ b/ansible/playbook.yml @@ -38,7 +38,7 @@ - name: Install WAL-G import_tasks: tasks/setup-wal-g.yml - when: debpkg_mode or nixpkg_mode + when: debpkg_mode or nixpkg_mode or stage2_nix - name: Install Gotrue import_tasks: tasks/setup-gotrue.yml @@ -78,11 +78,11 @@ - install-tealbase-internal when: debpkg_mode or nixpkg_mode - - name: Fix IPv6 NDisc issues - import_tasks: tasks/fix_ipv6_ndisc.yml + - name: Fix IPv6 NDisc issues (disabled) + import_tasks: tasks/fix-ipv6-ndisc.yml tags: - install-tealbase-internal - when: debpkg_mode or nixpkg_mode + when: (debpkg_mode or nixpkg_mode) and (qemu_mode is undefined) - name: Start Postgres Database without Systemd become: yes @@ -142,6 +142,7 @@ import_tasks: tasks/setup-fail2ban.yml when: debpkg_mode or nixpkg_mode + # Install EC2 instance connect # Only for AWS images - name: install EC2 instance connect @@ -151,6 +152,7 @@ - ec2-instance-connect tags: - aws-only + when: qemu_mode is 
undefined # Install this at the end to prevent it from kicking in during the apt process, causing conflicts - name: Install security tools @@ -196,6 +198,12 @@ - collect-binaries when: debpkg_mode + - name: Install osquery from nixpkgs binary cache + become: yes + shell: | + apt autoremove -y --purge snapd + when: stage2_nix + - name: Install osquery from nixpkgs binary cache become: yes shell: | @@ -205,7 +213,9 @@ - name: Run osquery permission checks become: yes shell: | - sudo -u ubuntu bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && /usr/bin/python3 /tmp/ansible-playbook/ansible/files/permission_check.py" + systemctl start postgresql.service + sudo -u ubuntu bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && /usr/bin/python3 /tmp/ansible-playbook/ansible/files/permission_check.py {{ '--qemu' if qemu_mode is defined else '' }}" + systemctl stop postgresql.service when: stage2_nix - name: Remove osquery diff --git a/ansible/tasks/fix_ipv6_ndisc.yml b/ansible/tasks/fix-ipv6-ndisc.yml similarity index 87% rename from ansible/tasks/fix_ipv6_ndisc.yml rename to ansible/tasks/fix-ipv6-ndisc.yml index 7489a2f..8953fd8 100644 --- a/ansible/tasks/fix_ipv6_ndisc.yml +++ b/ansible/tasks/fix-ipv6-ndisc.yml @@ -25,9 +25,9 @@ - name: fix Network - reload systemd systemd: - daemon_reload: yes + daemon_reload: false -- name: fix Network - enable systemd timer +- name: fix Network - ensure systemd timer is installed but disabled systemd: name: systemd-networkd-check-and-fix.timer - enabled: true + enabled: false diff --git a/ansible/tasks/internal/admin-api.yml b/ansible/tasks/internal/admin-api.yml index d050d54..c3456d0 100644 --- a/ansible/tasks/internal/admin-api.yml +++ b/ansible/tasks/internal/admin-api.yml @@ -1,7 +1,7 @@ - name: adminapi - system user user: name: adminapi - groups: root,admin,envoy,kong,pgbouncer,postgres,postgrest,systemd-journal,vector,wal-g + groups: 
root,admin,envoy,gotrue,kong,pgbouncer,postgres,postgrest,systemd-journal,vector,wal-g append: yes - name: Move shell scripts to /root dir diff --git a/ansible/tasks/internal/tealbase-admin-agent.yml b/ansible/tasks/internal/tealbase-admin-agent.yml new file mode 100644 index 0000000..e130f49 --- /dev/null +++ b/ansible/tasks/internal/tealbase-admin-agent.yml @@ -0,0 +1,87 @@ +- name: tealbase-admin-agent - system group + group: + name: tealbase-admin-agent + system: yes + +- name: tealbase-admin-agent - system user + user: + name: tealbase-admin-agent + group: tealbase-admin-agent + groups: admin,salt + append: yes + system: yes + shell: /bin/sh + +- name: tealbase-admin-agent - config dir + file: + path: /opt/tealbase-admin-agent + owner: tealbase-admin-agent + state: directory + +- name: tealbase-admin-agent - gpg dir + file: + path: /etc/salt/gpgkeys + owner: root + group: salt + state: directory + +- name: give tealbase-admin-agent user permissions + copy: + src: files/tealbase_admin_agent_config/tealbase-admin-agent.sudoers.conf + dest: /etc/sudoers.d/tealbase-admin-agent + mode: "0644" + +- name: Setting arch (x86) + set_fact: + arch: "x86" + when: platform == "amd64" + +- name: Setting arch (arm) + set_fact: + arch: "arm64" + when: platform == "arm64" + +- name: Download tealbase-admin-agent archive + get_url: + url: "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/tealbase-admin-agent/v{{ tealbase_admin_agent_release }}/tealbase-admin-agent-{{ tealbase_admin_agent_release }}-linux-{{ arch }}.tar.gz" + dest: "/tmp/tealbase-admin-agent.tar.gz" + timeout: 90 + +- name: tealbase-admin-agent - unpack archive in /opt + unarchive: + remote_src: yes + src: /tmp/tealbase-admin-agent.tar.gz + dest: /opt/tealbase-admin-agent/ + owner: tealbase-admin-agent + extra_opts: + - --strip-components=1 + +- name: tealbase-admin-agent - create symlink + ansible.builtin.file: + path: /opt/tealbase-admin-agent/tealbase-admin-agent + src: 
"/opt/tealbase-admin-agent/tealbase-admin-agent-linux-{{ arch }}" + state: link + owner: tealbase-admin-agent + mode: "0755" + force: yes + +- name: tealbase-admin-agent - create salt systemd timer file + template: + src: files/tealbase_admin_agent_config/tealbase-admin-agent_salt.timer.j2 + dest: /etc/systemd/system/tealbase-admin-agent_salt.timer + +- name: tealbase-admin-agent - create salt service file + copy: + src: files/tealbase_admin_agent_config/tealbase-admin-agent_salt.service + dest: /etc/systemd/system/tealbase-admin-agent_salt.service + +- name: tealbase-admin-agent - reload systemd + systemd: + daemon_reload: yes + +# Initially ensure tealbase-admin-agent is installed but not started +- name: tealbase-admin-agent - DISABLE service + systemd: + name: tealbase-admin-agent_salt + enabled: no + state: stopped diff --git a/ansible/tasks/postgres-extensions/01-postgis.yml b/ansible/tasks/postgres-extensions/01-postgis.yml deleted file mode 100644 index ae3d4e5..0000000 --- a/ansible/tasks/postgres-extensions/01-postgis.yml +++ /dev/null @@ -1,102 +0,0 @@ -# postgis -- name: postgis - download & install dependencies - apt: - pkg: - - libgeos-dev - - libproj-dev - - libgdal-dev - - libjson-c-dev - - libxml2-dev - - libboost-all-dev - - libcgal-dev - - libmpfr-dev - - libgmp-dev - - cmake - - libprotobuf-c-dev - - protobuf-c-compiler - update_cache: yes - cache_valid_time: 3600 - install_recommends: no - -- name: postgis - ensure dependencies do not get autoremoved - shell: | - set -e - apt-mark manual libgeos* libproj* libgdal* libjson-c* libxml2* libboost* libcgal* libmpfr* libgmp* - apt-mark auto libgeos*-dev libproj*-dev libgdal*-dev libjson-c*-dev libxml2*-dev libboost*-dev libcgal*-dev libmpfr*-dev libgmp*-dev - - become: yes - args: - executable: /bin/bash - -- name: postgis - download SFCGAL dependency - get_url: - url: "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/sfcgal/SFCGAL-v{{ sfcgal_release }}.tar.gz" - dest: /tmp/SFCGAL-v{{ 
sfcgal_release }}.tar.gz - checksum: "{{ sfcgal_release_checksum }}" - timeout: 60 - -- name: postgis - unpack SFCGAL - unarchive: - remote_src: yes - src: /tmp/SFCGAL-v{{ sfcgal_release }}.tar.gz - dest: /tmp - become: yes - -- name: postgis - compile SFCGAL - shell: - cmd: "cmake ." - chdir: /tmp/SFCGAL-v{{ sfcgal_release }} - become: yes - -- name: postgis - build SFCGAL - community.general.make: - target: all - chdir: /tmp/SFCGAL-v{{ sfcgal_release }} - jobs: "{{ parallel_jobs | default(omit) }}" - become: yes - -- name: postgis - install SFCGAL - make: - chdir: /tmp/SFCGAL-v{{ sfcgal_release }} - target: install - become: yes - -- name: postgis - download latest release - shell: - cmd: "curl -sf -L https://tealbase-public-artifacts-bucket.s3.amazonaws.com/postgis-{{ postgis_release }}.tar.gz -o /tmp/postgis-{{ postgis_release }}.tar.gz" - -- name: postgis - unpack archive - unarchive: - remote_src: yes - src: /tmp/postgis-{{ postgis_release }}.tar.gz - dest: /tmp - become: yes - -- name: postgis - configure - shell: - cmd: "./configure --with-sfcgal" - chdir: /tmp/postgis-{{ postgis_release }} - become: yes - -- name: postgis - build - community.general.make: - target: all - chdir: /tmp/postgis-{{ postgis_release }} - jobs: "{{ parallel_jobs | default(omit) }}" - become: yes - -- name: postgis - install - make: - chdir: /tmp/postgis-{{ postgis_release }} - target: install - become: yes - -- name: postgis - SFCGAL cleanup - file: - state: absent - path: /tmp/SFCGAL-v{{ sfcgal_release }} - -- name: postgis - cleanup - file: - state: absent - path: /tmp/postgis-{{ postgis_release }} diff --git a/ansible/tasks/postgres-extensions/02-pgrouting.yml b/ansible/tasks/postgres-extensions/02-pgrouting.yml deleted file mode 100644 index 746870a..0000000 --- a/ansible/tasks/postgres-extensions/02-pgrouting.yml +++ /dev/null @@ -1,52 +0,0 @@ -# pgRouting -- name: pgRouting - download & install dependencies - apt: - pkg: - - libboost-all-dev - update_cache: yes - 
cache_valid_time: 3600 - install_recommends: no - -- name: pgRouting - download latest release - get_url: - url: "https://github.com/pgRouting/pgrouting/releases/download/v{{ pgrouting_release }}/pgrouting-{{ pgrouting_release }}.tar.gz" - dest: /tmp/pgrouting-{{ pgrouting_release }}.tar.gz - checksum: "{{ pgrouting_release_checksum }}" - timeout: 60 - -- name: pgRouting - unpack archive - unarchive: - remote_src: yes - src: /tmp/pgrouting-{{ pgrouting_release }}.tar.gz - dest: /tmp - become: yes - -- name: pgRouting - create build directory - file: - path: /tmp/pgrouting-{{ pgrouting_release }}/build - state: directory - become: yes - -- name: pgRouting - compile - shell: - cmd: "cmake -DBUILD_HTML=OFF -DBUILD_DOXY=OFF .." - chdir: /tmp/pgrouting-{{ pgrouting_release }}/build - become: yes - -- name: pgRouting - build - community.general.make: - target: all - chdir: /tmp/pgrouting-{{ pgrouting_release }}/build - jobs: "{{ parallel_jobs | default(omit) }}" - become: yes - -- name: pgRouting - install - make: - chdir: /tmp/pgrouting-{{ pgrouting_release }}/build - target: install - become: yes - -- name: pgRouting - cleanup - file: - state: absent - path: /tmp/pgrouting-{{ pgrouting_release }} diff --git a/ansible/tasks/postgres-extensions/03-pgtap.yml b/ansible/tasks/postgres-extensions/03-pgtap.yml deleted file mode 100644 index 9b818b9..0000000 --- a/ansible/tasks/postgres-extensions/03-pgtap.yml +++ /dev/null @@ -1,25 +0,0 @@ -# pgTAP -- name: pgTAP - download latest release - get_url: - url: "https://github.com/theory/pgtap/archive/v{{ pgtap_release }}.tar.gz" - dest: /tmp/pgtap-{{ pgtap_release }}.tar.gz - checksum: "{{ pgtap_release_checksum }}" - timeout: 60 - -- name: pgTAP - unpack archive - unarchive: - remote_src: yes - src: /tmp/pgtap-{{ pgtap_release }}.tar.gz - dest: /tmp - become: yes - -- name: pgTAP - install - make: - chdir: /tmp/pgtap-{{ pgtap_release }} - target: install - become: yes - -- name: pgTAP - cleanup - file: - state: absent - path: 
/tmp/pgtap-{{ pgtap_release }} diff --git a/ansible/tasks/postgres-extensions/04-pg_cron.yml b/ansible/tasks/postgres-extensions/04-pg_cron.yml deleted file mode 100644 index d9a11c0..0000000 --- a/ansible/tasks/postgres-extensions/04-pg_cron.yml +++ /dev/null @@ -1,30 +0,0 @@ -# pg_cron -- name: pg_cron - download latest release - get_url: - url: "https://github.com/citusdata/pg_cron/archive/refs/tags/v{{ pg_cron_release }}.tar.gz" - dest: /tmp/pg_cron-{{ pg_cron_release }}.tar.gz - checksum: "{{ pg_cron_release_checksum }}" - timeout: 60 - -- name: pg_cron - unpack archive - unarchive: - remote_src: yes - src: /tmp/pg_cron-{{ pg_cron_release }}.tar.gz - dest: /tmp - become: yes - -- name: pg_cron - build - make: - chdir: /tmp/pg_cron-{{ pg_cron_release }} - become: yes - -- name: pg_cron - install - make: - chdir: /tmp/pg_cron-{{ pg_cron_release }} - target: install - become: yes - -- name: pg_cron - cleanup - file: - state: absent - path: /tmp/pg_cron-{{ pg_cron_release }} diff --git a/ansible/tasks/postgres-extensions/05-pgaudit.yml b/ansible/tasks/postgres-extensions/05-pgaudit.yml deleted file mode 100644 index 5f88c84..0000000 --- a/ansible/tasks/postgres-extensions/05-pgaudit.yml +++ /dev/null @@ -1,43 +0,0 @@ -# pgAudit -- name: pgAudit - download & install dependencies - apt: - pkg: - - libssl-dev - - libkrb5-dev - update_cache: yes - install_recommends: no - -- name: pgAudit - download latest release - get_url: - url: "https://github.com/pgaudit/pgaudit/archive/refs/tags/{{ pgaudit_release }}.tar.gz" - dest: /tmp/pgaudit-{{ pgaudit_release }}.tar.gz - checksum: "{{ pgaudit_release_checksum }}" - timeout: 60 - -- name: pgAudit - unpack archive - unarchive: - remote_src: yes - src: /tmp/pgaudit-{{ pgaudit_release }}.tar.gz - dest: /tmp - become: yes - -- name: pgAudit - build - make: - chdir: /tmp/pgaudit-{{ pgaudit_release }} - target: check - params: - USE_PGXS: 1 - become: yes - -- name: pgAudit - install - make: - chdir: /tmp/pgaudit-{{ pgaudit_release 
}} - target: install - params: - USE_PGXS: 1 - become: yes - -- name: pgAudit - cleanup - file: - state: absent - path: /tmp/pgaudit-{{ pgaudit_release }} diff --git a/ansible/tasks/postgres-extensions/06-pgjwt.yml b/ansible/tasks/postgres-extensions/06-pgjwt.yml deleted file mode 100644 index 61890bf..0000000 --- a/ansible/tasks/postgres-extensions/06-pgjwt.yml +++ /dev/null @@ -1,17 +0,0 @@ -# pgjwt -- name: pgjwt - download from master branch - git: - repo: https://github.com/michelp/pgjwt.git - dest: /tmp/pgjwt - version: "{{ pgjwt_release }}" - -- name: pgjwt - install - make: - chdir: /tmp/pgjwt - target: install - become: yes - -- name: pgjwt - cleanup - file: - state: absent - path: /tmp/pgjwt diff --git a/ansible/tasks/postgres-extensions/07-pgsql-http.yml b/ansible/tasks/postgres-extensions/07-pgsql-http.yml deleted file mode 100644 index 73044d2..0000000 --- a/ansible/tasks/postgres-extensions/07-pgsql-http.yml +++ /dev/null @@ -1,43 +0,0 @@ -# pgsql-http -- name: pgsql-http - libcurl4 package - apt: - pkg: - - libcurl4 - state: absent - -- name: pgsql-http - download & install dependencies - apt: - pkg: - - libcurl4-gnutls-dev - update_cache: yes - install_recommends: no - -- name: pgsql-http - download latest release - get_url: - url: "https://github.com/pramsey/pgsql-http/archive/refs/tags/v{{ pgsql_http_release }}.tar.gz" - dest: /tmp/pgsql_http-{{ pgsql_http_release }}.tar.gz - checksum: "{{ pgsql_http_release_checksum }}" - timeout: 60 - -- name: pgsql-http - unpack archive - unarchive: - remote_src: yes - src: /tmp/pgsql_http-{{ pgsql_http_release }}.tar.gz - dest: /tmp - become: yes - -- name: pgsql-http - build - make: - chdir: /tmp/pgsql-http-{{ pgsql_http_release }} - become: yes - -- name: pgsql-http - install - make: - chdir: /tmp/pgsql-http-{{ pgsql_http_release }} - target: install - become: yes - -- name: pgsql-http - cleanup - file: - state: absent - path: /tmp/pgsql-http-{{ pgsql_http_release }} diff --git 
a/ansible/tasks/postgres-extensions/08-plpgsql_check.yml b/ansible/tasks/postgres-extensions/08-plpgsql_check.yml deleted file mode 100644 index 75bb041..0000000 --- a/ansible/tasks/postgres-extensions/08-plpgsql_check.yml +++ /dev/null @@ -1,38 +0,0 @@ -# plpgsql_check -- name: plpgsql_check - download & install dependencies - apt: - pkg: - - libicu-dev - update_cache: yes - install_recommends: no - -- name: plpgsql_check - download latest release - get_url: - url: "https://github.com/okbob/plpgsql_check/archive/refs/tags/v{{ plpgsql_check_release }}.tar.gz" - dest: /tmp/plpgsql_check-{{ plpgsql_check_release }}.tar.gz - checksum: "{{ plpgsql_check_release_checksum }}" - timeout: 60 - -- name: plpgsql_check - unpack archive - unarchive: - remote_src: yes - src: /tmp/plpgsql_check-{{ plpgsql_check_release }}.tar.gz - dest: /tmp - become: yes - -- name: plpgsql_check - clean - make: - chdir: /tmp/plpgsql_check-{{ plpgsql_check_release }} - target: clean - become: yes - -- name: plpgsql_check - install - make: - chdir: /tmp/plpgsql_check-{{ plpgsql_check_release }} - target: install - become: yes - -- name: plpgsql_check - cleanup - file: - state: absent - path: /tmp/plpgsql_check-{{ plpgsql_check_release }} diff --git a/ansible/tasks/postgres-extensions/09-pg-safeupdate.yml b/ansible/tasks/postgres-extensions/09-pg-safeupdate.yml deleted file mode 100644 index 36ae41c..0000000 --- a/ansible/tasks/postgres-extensions/09-pg-safeupdate.yml +++ /dev/null @@ -1,30 +0,0 @@ -# pg-safeupdate -- name: pg-safeupdate - download latest release - get_url: - url: "https://github.com/eradman/pg-safeupdate/archive/refs/tags/{{ pg_safeupdate_release }}.tar.gz" - dest: /tmp/pg_safeupdate-{{ pg_safeupdate_release }}.tar.gz - checksum: "{{ pg_safeupdate_release_checksum }}" - timeout: 60 - -- name: pg-safeupdate - unpack archive - unarchive: - remote_src: yes - src: /tmp/pg_safeupdate-{{ pg_safeupdate_release }}.tar.gz - dest: /tmp - become: yes - -- name: pg-safeupdate - build - make: 
- chdir: /tmp/pg-safeupdate-{{ pg_safeupdate_release }} - become: yes - -- name: pg-safeupdate - install - make: - chdir: /tmp/pg-safeupdate-{{ pg_safeupdate_release }} - target: install - become: yes - -- name: pg-safeupdate - cleanup - file: - state: absent - path: /tmp/pg-safeupdate-{{ pg_safeupdate_release }} diff --git a/ansible/tasks/postgres-extensions/10-timescaledb.yml b/ansible/tasks/postgres-extensions/10-timescaledb.yml deleted file mode 100644 index cb4b842..0000000 --- a/ansible/tasks/postgres-extensions/10-timescaledb.yml +++ /dev/null @@ -1,36 +0,0 @@ -# timescaledb -- name: timescaledb - download & install dependencies - apt: - pkg: - - cmake - update_cache: yes - install_recommends: no - -- name: timescaledb - download latest release - git: - repo: https://github.com/timescale/timescaledb.git - dest: /tmp/timescaledb - version: "{{ timescaledb_release }}" - become: yes - -- name: timescaledb - bootstrap - shell: - cmd: "./bootstrap -DAPACHE_ONLY=1" - chdir: /tmp/timescaledb - become: yes - -- name: timescaledb - build - make: - chdir: /tmp/timescaledb/build - become: yes - -- name: timescaledb - install - make: - chdir: /tmp/timescaledb/build - target: install - become: yes - -- name: timescaledb - cleanup - file: - state: absent - path: /tmp/timescaledb diff --git a/ansible/tasks/postgres-extensions/11-wal2json.yml b/ansible/tasks/postgres-extensions/11-wal2json.yml deleted file mode 100644 index c5abde9..0000000 --- a/ansible/tasks/postgres-extensions/11-wal2json.yml +++ /dev/null @@ -1,17 +0,0 @@ -# wal2json -- name: wal2json - download by commit sha - git: - repo: https://github.com/eulerto/wal2json.git - dest: /tmp/wal2json - version: "wal2json_{{ wal2json_release }}" - -- name: wal2json - install - make: - chdir: /tmp/wal2json - target: install - become: yes - -- name: wal2json - cleanup - file: - state: absent - path: /tmp/wal2json diff --git a/ansible/tasks/postgres-extensions/12-pljava.yml b/ansible/tasks/postgres-extensions/12-pljava.yml 
deleted file mode 100644 index 309c7a2..0000000 --- a/ansible/tasks/postgres-extensions/12-pljava.yml +++ /dev/null @@ -1,84 +0,0 @@ -# pljava -- name: pljava - download & install dependencies - apt: - pkg: - - maven - - default-jre - - default-jdk - - libssl-dev - update_cache: yes - install_recommends: no - -#TODO: revert to using main repo after PG15 support is merged: https://github.com/tada/pljava/pull/413 -# - name: pljava - download latest release -# get_url: -# url: https://github.com/tada/pljava/archive/V{{ pljava_release }}.tar.gz -# dest: /tmp/pljava-{{ pljava_release }}.tar.gz -# checksum: "{{ pljava_release_checksum }}" -# timeout: 60 - -# - name: pljava - unpack archive -# unarchive: -# remote_src: yes -# src: /tmp/pljava-{{ pljava_release }}.tar.gz -# dest: /tmp -# become: yes - -- name: pljava - download latest release - become: yes - git: - repo: https://github.com/tealbase/pljava.git - dest: /tmp/pljava-{{ pljava_release }} - version: "{{ pljava_release }}" - -- name: pljava - build - become: yes - shell: - cmd: mvn -T 1C clean install -Dmaven.test.skip -DskipTests -Dmaven.javadoc.skip=true - chdir: /tmp/pljava-{{ pljava_release }} - -- name: pljava - install - become: yes - shell: - cmd: java -jar pljava-packaging/target/pljava-pg{{ postgresql_major }}.jar - chdir: /tmp/pljava-{{ pljava_release }} - -- name: pljava - remove build dependencies - apt: - pkg: - - maven - - default-jre - - default-jdk - state: absent - -- name: pljava - install headless jdk - apt: - pkg: - - default-jdk-headless - update_cache: yes - install_recommends: no - -- name: Hold jre package - dpkg_selections: - name: default-jre-headless - selection: hold - when: async_mode - -- name: pljava - set pljava.libjvm_location - become: yes - lineinfile: - path: /etc/postgresql/postgresql.conf - state: present - line: pljava.libjvm_location = '/usr/lib/jvm/java-11-openjdk-{{ platform }}/lib/server/libjvm.so' - -- name: pljava - remove ~/.m2 directory - become: yes - file: - path: 
~/.m2 - state: absent - -- name: pljava - cleanup - become: yes - file: - state: absent - path: /tmp/pljava-{{ pljava_release }} diff --git a/ansible/tasks/postgres-extensions/13-plv8.yml b/ansible/tasks/postgres-extensions/13-plv8.yml deleted file mode 100644 index 9f11735..0000000 --- a/ansible/tasks/postgres-extensions/13-plv8.yml +++ /dev/null @@ -1,73 +0,0 @@ -# plv8 -- name: plv8 - download & install dependencies - apt: - pkg: - - build-essential - - ca-certificates - - curl - - git-core - - gpp - - cpp - - pkg-config - - apt-transport-https - - cmake - - libc++-dev - - libc++abi-dev - - libc++1 - - libglib2.0-dev - - libtinfo5 - - libc++abi1 - - ninja-build - - python - update_cache: yes - install_recommends: no - -- name: plv8 - download latest release - git: - repo: https://github.com/plv8/plv8.git - dest: /tmp/plv8 - version: "v{{ plv8_release }}" - become: yes - -- name: Create a symbolic link - file: - src: /lib/aarch64-linux-gnu/libc++.so.1 - dest: /lib/aarch64-linux-gnu/libc++.so - state: link - when: platform == "arm64" - ignore_errors: yes # not needed for docker build - -- name: plv8 - enable ccache - become: yes - replace: - path: /tmp/plv8/Makefiles/Makefile.docker - regexp: "^GN_ARGS =" - replace: GN_ARGS = cc_wrapper=\"env CCACHE_SLOPPINESS=time_macros ccache\" - -- name: plv8 - build - make: - chdir: /tmp/plv8 - become: yes - when: not async_mode - -- name: plv8 - install - make: - chdir: /tmp/plv8 - target: install - become: yes - when: not async_mode - -- name: plv8 - cleanup - file: - state: absent - path: /tmp/plv8 - when: not async_mode - -- name: plv8 - build - make: - chdir: /tmp/plv8 - become: yes - async: 2000 - poll: 0 - register: plv8_build - when: async_mode diff --git a/ansible/tasks/postgres-extensions/14-pg_plan_filter.yml b/ansible/tasks/postgres-extensions/14-pg_plan_filter.yml deleted file mode 100644 index 0fa0990..0000000 --- a/ansible/tasks/postgres-extensions/14-pg_plan_filter.yml +++ /dev/null @@ -1,23 +0,0 @@ -# 
pg_plan_filter -- name: pg_plan_filter - download latest release - git: - repo: https://github.com/pgexperts/pg_plan_filter.git - dest: /tmp/pg_plan_filter - version: "{{ pg_plan_filter_release }}" - become: yes - -- name: pg_plan_filter - build - make: - chdir: /tmp/pg_plan_filter - become: yes - -- name: pg_plan_filter - install - make: - chdir: /tmp/pg_plan_filter - target: install - become: yes - -- name: pg_plan_filter - cleanup - file: - state: absent - path: /tmp/pg_plan_filter diff --git a/ansible/tasks/postgres-extensions/15-pg_net.yml b/ansible/tasks/postgres-extensions/15-pg_net.yml deleted file mode 100644 index 3f4419e..0000000 --- a/ansible/tasks/postgres-extensions/15-pg_net.yml +++ /dev/null @@ -1,37 +0,0 @@ -# pg_net -- name: pg_net - download & install dependencies - apt: - pkg: - - libcurl4-gnutls-dev - update_cache: yes - install_recommends: no - -- name: pg_net - download latest release - get_url: - url: "https://github.com/tealbase/pg_net/archive/refs/tags/v{{pg_net_release}}.tar.gz" - dest: /tmp/pg_net-{{ pg_net_release }}.tar.gz - checksum: "{{ pg_net_release_checksum }}" - timeout: 60 - -- name: pg_net - unpack archive - unarchive: - remote_src: yes - src: /tmp/pg_net-{{ pg_net_release }}.tar.gz - dest: /tmp - become: yes - -- name: pg_net - build - make: - chdir: /tmp/pg_net-{{ pg_net_release }} - become: yes - -- name: pg_net - install - make: - chdir: /tmp/pg_net-{{ pg_net_release }} - target: install - become: yes - -- name: pg_net - cleanup - file: - state: absent - path: /tmp/pg_net-{{ pg_net_release }} diff --git a/ansible/tasks/postgres-extensions/16-rum.yml b/ansible/tasks/postgres-extensions/16-rum.yml deleted file mode 100644 index f8cca16..0000000 --- a/ansible/tasks/postgres-extensions/16-rum.yml +++ /dev/null @@ -1,34 +0,0 @@ -# rum -- name: rum - download latest release - get_url: - url: "https://github.com/postgrespro/rum/archive/refs/tags/{{rum_release}}.tar.gz" - dest: /tmp/rum-{{ rum_release }}.tar.gz - checksum: "{{ 
rum_release_checksum }}" - timeout: 60 - -- name: rum - unpack archive - unarchive: - remote_src: yes - src: /tmp/rum-{{ rum_release }}.tar.gz - dest: /tmp - become: yes - -- name: rum - build - make: - chdir: /tmp/rum-{{ rum_release }} - params: - USE_PGXS: 1 - become: yes - -- name: rum - install - make: - chdir: /tmp/rum-{{ rum_release }} - target: install - params: - USE_PGXS: 1 - become: yes - -- name: rum - cleanup - file: - state: absent - path: /tmp/rum-{{ rum_release }} diff --git a/ansible/tasks/postgres-extensions/17-pg_hashids.yml b/ansible/tasks/postgres-extensions/17-pg_hashids.yml deleted file mode 100644 index 8bd0291..0000000 --- a/ansible/tasks/postgres-extensions/17-pg_hashids.yml +++ /dev/null @@ -1,22 +0,0 @@ -# pg_hashids -- name: pg_hashids - download from master branch - git: - repo: https://github.com/iCyberon/pg_hashids.git - dest: /tmp/pg_hashids - version: "{{ pg_hashids_release }}" - -- name: pg_hashids - build - make: - chdir: /tmp/pg_hashids - become: yes - -- name: pg_hashids - install - make: - chdir: /tmp/pg_hashids - target: install - become: yes - -- name: pg_hashids - cleanup - file: - state: absent - path: /tmp/pg_hashids diff --git a/ansible/tasks/postgres-extensions/18-pgsodium.yml b/ansible/tasks/postgres-extensions/18-pgsodium.yml deleted file mode 100644 index 95bc9a6..0000000 --- a/ansible/tasks/postgres-extensions/18-pgsodium.yml +++ /dev/null @@ -1,80 +0,0 @@ -# libsodium and pgsodium -- name: determine postgres bin directory - shell: pg_config --bindir - register: pg_bindir_output -- set_fact: - pg_bindir: "{{ pg_bindir_output.stdout }}" - -- name: libsodium - download libsodium - get_url: - url: "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/libsodium/libsodium-{{ libsodium_release }}.tar.gz" - dest: /tmp/libsodium-{{ libsodium_release }}.tar.gz - checksum: "{{ libsodium_release_checksum }}" - timeout: 60 - -- name: libsodium - unpack archive - unarchive: - remote_src: yes - src: /tmp/libsodium-{{ 
libsodium_release }}.tar.gz - dest: /tmp - become: yes - -- name: libsodium - configure - shell: - cmd: ./configure - chdir: /tmp/libsodium-{{ libsodium_release }} - become: yes - -- name: libsodium - build - make: - chdir: /tmp/libsodium-{{ libsodium_release }} - become: yes - -- name: libsodium - install - make: - chdir: /tmp/libsodium-{{ libsodium_release }} - target: install - become: yes - -- name: pgsodium - download pgsodium - get_url: - url: "https://github.com/michelp/pgsodium/archive/refs/tags/v{{ pgsodium_release }}.tar.gz" - dest: /tmp/pgsodium-{{ pgsodium_release }}.tar.gz - checksum: "{{ pgsodium_release_checksum }}" - timeout: 60 - -- name: pgsodium - unpack archive - unarchive: - remote_src: yes - src: /tmp/pgsodium-{{ pgsodium_release }}.tar.gz - dest: /tmp - become: yes - -- name: pgsodium - build - make: - chdir: /tmp/pgsodium-{{ pgsodium_release }} - become: yes - -- name: pgsodium - install - make: - chdir: /tmp/pgsodium-{{ pgsodium_release }} - target: install - become: yes - -- name: pgsodium - set pgsodium.getkey_script - become: yes - lineinfile: - path: /etc/postgresql/postgresql.conf - state: present - # script is expected to be placed by finalization tasks for different target platforms - line: pgsodium.getkey_script= '{{ pg_bindir }}/pgsodium_getkey.sh' - -- name: libsodium - cleanup - file: - state: absent - path: /tmp/libsodium-{{ libsodium_release }} - -- name: pgsodium - cleanup - file: - state: absent - path: /tmp/pgsodium-{{ pgsodium_release }} diff --git a/ansible/tasks/postgres-extensions/19-pg_graphql.yml b/ansible/tasks/postgres-extensions/19-pg_graphql.yml deleted file mode 100644 index 2a2c113..0000000 --- a/ansible/tasks/postgres-extensions/19-pg_graphql.yml +++ /dev/null @@ -1,3 +0,0 @@ -- name: install pg_graphql - ansible.builtin.apt: - deb: "https://github.com/tealbase/pg_graphql/releases/download/v{{ pg_graphql_release }}/pg_graphql-v{{ pg_graphql_release }}-pg{{ postgresql_major }}-{{ platform }}-linux-gnu.deb" diff 
--git a/ansible/tasks/postgres-extensions/20-pg_stat_monitor.yml b/ansible/tasks/postgres-extensions/20-pg_stat_monitor.yml deleted file mode 100644 index bffddef..0000000 --- a/ansible/tasks/postgres-extensions/20-pg_stat_monitor.yml +++ /dev/null @@ -1,23 +0,0 @@ -# pg_stat_monitor -- name: pg_stat_monitor - download and install dependencies - git: - repo: https://github.com/percona/pg_stat_monitor.git - dest: /tmp/pg_stat_monitor - version: "{{ pg_stat_monitor_release }}" - become: yes - -- name: pg_stat_monitor build - make: - chdir: /tmp/pg_stat_monitor - params: USE_PGXS=1 - -- name: pg_stat_monitor install - make: - chdir: /tmp/pg_stat_monitor - target: install - params: USE_PGXS=1 - -- name: pg_stat_monitor cleanup - file: - state: absent - path: /tmp/pg_stat_monitor diff --git a/ansible/tasks/postgres-extensions/22-pg_jsonschema.yml b/ansible/tasks/postgres-extensions/22-pg_jsonschema.yml deleted file mode 100644 index fe5824f..0000000 --- a/ansible/tasks/postgres-extensions/22-pg_jsonschema.yml +++ /dev/null @@ -1,3 +0,0 @@ -- name: install pg_jsonschema - ansible.builtin.apt: - deb: "https://github.com/tealbase/pg_jsonschema/releases/download/v{{ pg_jsonschema_release }}/pg_jsonschema-v{{ pg_jsonschema_release }}-pg{{ postgresql_major }}-{{ platform }}-linux-gnu.deb" diff --git a/ansible/tasks/postgres-extensions/23-vault.yml b/ansible/tasks/postgres-extensions/23-vault.yml deleted file mode 100644 index dbb2db4..0000000 --- a/ansible/tasks/postgres-extensions/23-vault.yml +++ /dev/null @@ -1,31 +0,0 @@ -# vault - -- name: vault - download vault - get_url: - url: "https://github.com/tealbase/vault/archive/refs/tags/v{{ vault_release }}.tar.gz" - dest: /tmp/vault-{{ vault_release }}.tar.gz - checksum: "{{ vault_release_checksum }}" - timeout: 60 - -- name: vault - unpack archive - unarchive: - remote_src: yes - src: /tmp/vault-{{ vault_release }}.tar.gz - dest: /tmp - become: yes - -- name: vault - build - make: - chdir: /tmp/vault-{{ vault_release }} - 
become: yes - -- name: vault - install - make: - chdir: /tmp/vault-{{ vault_release }} - target: install - become: yes - -- name: vault - cleanup - file: - state: absent - path: /tmp/vault-{{ vault_release }} diff --git a/ansible/tasks/postgres-extensions/24-pgroonga.yml b/ansible/tasks/postgres-extensions/24-pgroonga.yml deleted file mode 100644 index f8baaa6..0000000 --- a/ansible/tasks/postgres-extensions/24-pgroonga.yml +++ /dev/null @@ -1,85 +0,0 @@ -# groonga and pgroonga -- name: groonga - download & install dependencies - apt: - pkg: - - zlib1g-dev - - liblzo2-dev - - libmsgpack-dev - - libzmq3-dev - - libevent-dev - - libmecab-dev - - mecab-naist-jdic - update_cache: yes - install_recommends: no - -- name: groonga - download groonga - get_url: - url: "https://packages.groonga.org/source/groonga/groonga-{{ groonga_release }}.tar.gz" - dest: /tmp/groonga-{{ groonga_release }}.tar.gz - checksum: "{{ groonga_release_checksum }}" - timeout: 60 - -- name: groonga - unpack archive - unarchive: - remote_src: yes - src: /tmp/groonga-{{ groonga_release }}.tar.gz - dest: /tmp - become: yes - -- name: groonga - configure - shell: - cmd: ./configure - chdir: /tmp/groonga-{{ groonga_release }} - become: yes - -- name: groonga - build - community.general.make: - target: all - chdir: /tmp/groonga-{{ groonga_release }} - jobs: "{{ parallel_jobs | default(omit) }}" - become: yes - -- name: groonga - install - make: - chdir: /tmp/groonga-{{ groonga_release }} - target: install - become: yes - -- name: pgroonga - download pgroonga - get_url: - url: "https://packages.groonga.org/source/pgroonga/pgroonga-{{ pgroonga_release }}.tar.gz" - dest: /tmp/pgroonga-{{ pgroonga_release }}.tar.gz - checksum: "{{ pgroonga_release_checksum }}" - timeout: 60 - -- name: pgroonga - unpack archive - unarchive: - remote_src: yes - src: /tmp/pgroonga-{{ pgroonga_release }}.tar.gz - dest: /tmp - become: yes - -- name: pgroonga - build - community.general.make: - target: all - chdir: 
/tmp/pgroonga-{{ pgroonga_release }} - jobs: "{{ parallel_jobs | default(omit) }}" - become: yes - -- name: pgroonga - install - make: - chdir: /tmp/pgroonga-{{ pgroonga_release }} - target: install - become: yes - -- name: groonga - cleanup - file: - state: absent - path: /tmp/groonga-{{ groonga_release }} - become: yes - -- name: pgroonga - cleanup - file: - state: absent - path: /tmp/pgroonga-{{ pgroonga_release }} - become: yes diff --git a/ansible/tasks/postgres-extensions/25-wrappers.yml b/ansible/tasks/postgres-extensions/25-wrappers.yml deleted file mode 100644 index 1aada50..0000000 --- a/ansible/tasks/postgres-extensions/25-wrappers.yml +++ /dev/null @@ -1,3 +0,0 @@ -- name: install wrappers - ansible.builtin.apt: - deb: "https://github.com/tealbase/wrappers/releases/download/v{{ wrappers_release }}/wrappers-v{{ wrappers_release }}-pg{{ postgresql_major }}-{{ platform }}-linux-gnu.deb" diff --git a/ansible/tasks/postgres-extensions/26-hypopg.yml b/ansible/tasks/postgres-extensions/26-hypopg.yml deleted file mode 100644 index 4a9afcf..0000000 --- a/ansible/tasks/postgres-extensions/26-hypopg.yml +++ /dev/null @@ -1,17 +0,0 @@ -# hypopg -- name: hypopg - download by commit sha - git: - repo: https://github.com/HypoPG/hypopg.git - dest: /tmp/hypopg - version: "{{ hypopg_release }}" - -- name: hypopg - install - make: - chdir: /tmp/hypopg - target: install - become: yes - -- name: hypopg - cleanup - file: - state: absent - path: /tmp/hypopg diff --git a/ansible/tasks/postgres-extensions/27-pg_repack.yml b/ansible/tasks/postgres-extensions/27-pg_repack.yml deleted file mode 100644 index 81ca801..0000000 --- a/ansible/tasks/postgres-extensions/27-pg_repack.yml +++ /dev/null @@ -1,38 +0,0 @@ -# pg_repack - - name: pg_repack - download & install dependencies - apt: - pkg: - - liblz4-dev - - libz-dev - - libzstd-dev - - libreadline-dev - update_cache: yes - install_recommends: no - - - name: pg_repack - download latest release - git: - repo: 
https://github.com/reorg/pg_repack.git - dest: /tmp/pg_repack - version: "ver_{{ pg_repack_release }}" - become: yes - - - name: pg_repack - build - make: - chdir: /tmp/pg_repack - params: - USE_PGXS: 1 - become: yes - - - name: pg_repack - install - make: - chdir: /tmp/pg_repack - target: install - params: - USE_PGXS: 1 - become: yes - - - name: pg_repack - cleanup - file: - state: absent - path: /tmp/pg_repack - diff --git a/ansible/tasks/postgres-extensions/28-pgvector.yml b/ansible/tasks/postgres-extensions/28-pgvector.yml deleted file mode 100644 index a673ab2..0000000 --- a/ansible/tasks/postgres-extensions/28-pgvector.yml +++ /dev/null @@ -1,23 +0,0 @@ -# pgvector -- name: pgvector - download latest release - git: - repo: https://github.com/pgvector/pgvector.git - dest: /tmp/pgvector - version: 'v{{ pgvector_release }}' - become: yes - -- name: pgvector - build - make: - chdir: /tmp/pgvector - become: yes - -- name: pgvector - install - make: - chdir: /tmp/pgvector - target: install - become: yes - -- name: pgvector - cleanup - file: - state: absent - path: /tmp/pgvector diff --git a/ansible/tasks/postgres-extensions/29-pg_tle.yml b/ansible/tasks/postgres-extensions/29-pg_tle.yml deleted file mode 100644 index ea0b199..0000000 --- a/ansible/tasks/postgres-extensions/29-pg_tle.yml +++ /dev/null @@ -1,12 +0,0 @@ -# pg_tle -- name: pg_tle - download - git: - repo: https://github.com/aws/pg_tle.git - dest: /tmp/pg_tle - version: v{{ pg_tle_release }} - -- name: pg_tle - install - make: - chdir: /tmp/pg_tle - target: install - become: yes diff --git a/ansible/tasks/postgres-extensions/99-finish_async_tasks.yml b/ansible/tasks/postgres-extensions/99-finish_async_tasks.yml deleted file mode 100644 index 2e0609b..0000000 --- a/ansible/tasks/postgres-extensions/99-finish_async_tasks.yml +++ /dev/null @@ -1,19 +0,0 @@ -## Verify plv8 status and complete plv8-install -- name: Check if plv8 is complete - async_status: - jid: "{{ plv8_build.ansible_job_id }}" - register: 
job_result - until: job_result.finished - delay: 60 - retries: 60 - -- name: plv8 - install - make: - chdir: /tmp/plv8 - target: install - become: yes - -- name: plv8 - cleanup - file: - state: absent - path: /tmp/plv8 diff --git a/ansible/tasks/setup-extensions.yml b/ansible/tasks/setup-extensions.yml deleted file mode 100644 index a560ae8..0000000 --- a/ansible/tasks/setup-extensions.yml +++ /dev/null @@ -1,91 +0,0 @@ -- name: Install plv8 - import_tasks: tasks/postgres-extensions/13-plv8.yml - -- name: Install pg_jsonschema - import_tasks: tasks/postgres-extensions/22-pg_jsonschema.yml - -- name: Install postgis - import_tasks: tasks/postgres-extensions/01-postgis.yml - -- name: Install pgrouting - import_tasks: tasks/postgres-extensions/02-pgrouting.yml - -- name: Install pgtap - import_tasks: tasks/postgres-extensions/03-pgtap.yml - -- name: Install pg_cron - import_tasks: tasks/postgres-extensions/04-pg_cron.yml - -- name: Install pgaudit - import_tasks: tasks/postgres-extensions/05-pgaudit.yml - -- name: Install pgjwt - import_tasks: tasks/postgres-extensions/06-pgjwt.yml - -- name: Install pgsql-http - import_tasks: tasks/postgres-extensions/07-pgsql-http.yml - -- name: Install plpgsql_check - import_tasks: tasks/postgres-extensions/08-plpgsql_check.yml - -- name: Install pg-safeupdate - import_tasks: tasks/postgres-extensions/09-pg-safeupdate.yml - -- name: Install timescaledb - import_tasks: tasks/postgres-extensions/10-timescaledb.yml - -- name: Install wal2json - import_tasks: tasks/postgres-extensions/11-wal2json.yml - -- name: Install pljava - import_tasks: tasks/postgres-extensions/12-pljava.yml - tags: - - legacy-incompatible - -- name: Install pg_plan_filter - import_tasks: tasks/postgres-extensions/14-pg_plan_filter.yml - -- name: Install pg_net - import_tasks: tasks/postgres-extensions/15-pg_net.yml - -- name: Install rum - import_tasks: tasks/postgres-extensions/16-rum.yml - -- name: Install pg_hashids - import_tasks: 
tasks/postgres-extensions/17-pg_hashids.yml - -- name: Install pgsodium - import_tasks: tasks/postgres-extensions/18-pgsodium.yml - -- name: Install pg_graphql - import_tasks: tasks/postgres-extensions/19-pg_graphql.yml - tags: - - legacy-incompatible - -- name: Install pg_stat_monitor - import_tasks: tasks/postgres-extensions/20-pg_stat_monitor.yml - -- name: Install vault - import_tasks: tasks/postgres-extensions/23-vault.yml - -- name: Install PGroonga - import_tasks: tasks/postgres-extensions/24-pgroonga.yml - -- name: Install wrappers - import_tasks: tasks/postgres-extensions/25-wrappers.yml - -- name: Install hypopg - import_tasks: tasks/postgres-extensions/26-hypopg.yml - - - name: Install pg_repack - import_tasks: tasks/postgres-extensions/27-pg_repack.yml - -- name: Install pgvector - import_tasks: tasks/postgres-extensions/28-pgvector.yml - -- name: Install Trusted Language Extensions - import_tasks: tasks/postgres-extensions/29-pg_tle.yml - -- name: Verify async task status - import_tasks: tasks/postgres-extensions/99-finish_async_tasks.yml - when: async_mode diff --git a/ansible/tasks/setup-fail2ban.yml b/ansible/tasks/setup-fail2ban.yml index ee0029d..38245e8 100644 --- a/ansible/tasks/setup-fail2ban.yml +++ b/ansible/tasks/setup-fail2ban.yml @@ -65,16 +65,9 @@ - install-tealbase-internal when: debpkg_mode or nixpkg_mode -# Restart -- name: fail2ban - restart - systemd: - name: fail2ban - state: restarted - when: debpkg_mode or nixpkg_mode - - name: fail2ban - disable service systemd: name: fail2ban enabled: no daemon_reload: yes - when: debpkg_mode or nixpkg_mode \ No newline at end of file + when: debpkg_mode or nixpkg_mode diff --git a/ansible/tasks/setup-gotrue.yml b/ansible/tasks/setup-gotrue.yml index 19f733a..d102ed4 100644 --- a/ansible/tasks/setup-gotrue.yml +++ b/ansible/tasks/setup-gotrue.yml @@ -30,6 +30,13 @@ owner: gotrue mode: 0775 +- name: gotrue - create /etc/auth.d + file: + path: /etc/auth.d + state: directory + owner: gotrue + mode: 
0775 + - name: gotrue - unpack archive in /opt/gotrue unarchive: remote_src: yes diff --git a/ansible/tasks/setup-postgres.yml b/ansible/tasks/setup-postgres.yml index cbd7424..95536f6 100644 --- a/ansible/tasks/setup-postgres.yml +++ b/ansible/tasks/setup-postgres.yml @@ -23,7 +23,7 @@ - name: Postgres - install server apt: - name: postgresql-{{ postgresql_major }}={{ postgresql_release }}-1.pgdg20.04+1 + name: postgresql-{{ postgresql_major }}={{ postgresql_release }}-1.pgdg24.04+1 install_recommends: no when: debpkg_mode @@ -204,23 +204,58 @@ ansible_command_timeout: 60 when: debpkg_mode -- name: Initialize the database stage2_nix - become: yes - become_user: postgres - shell: source /var/lib/postgresql/.bashrc && /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data initdb -o "--allow-group-access" -o "--username=tealbase_admin" - args: - executable: /bin/bash - environment: - LANG: en_US.UTF-8 - LANGUAGE: en_US.UTF-8 - LC_ALL: en_US.UTF-8 - LC_CTYPE: en_US.UTF-8 - LOCALE_ARCHIVE: /usr/lib/locale/locale-archive - vars: - ansible_command_timeout: 60 - # Circumvents the following error: - # "Timeout (12s) waiting for privilege escalation prompt" - when: stage2_nix +- name: Make sure .bashrc exists + file: + path: /var/lib/postgresql/.bashrc + state: touch + owner: postgres + group: postgres + when: nixpkg_mode + +- name: Check psql_version and modify supautils.conf and postgresql.conf if necessary + block: + - name: Check if psql_version is psql_orioledb + set_fact: + is_psql_oriole: "{{ psql_version in ['psql_orioledb-17'] }}" + is_psql_17: "{{ psql_version in ['psql_17'] }}" + + - name: Initialize the database stage2_nix (non-orioledb) + become: yes + become_user: postgres + shell: source /var/lib/postgresql/.bashrc && /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data initdb -o "--allow-group-access" -o "--username=tealbase_admin" + args: + executable: /bin/bash + environment: + LANG: en_US.UTF-8 + LANGUAGE: en_US.UTF-8 + LC_ALL: en_US.UTF-8 + 
LC_CTYPE: en_US.UTF-8 + LOCALE_ARCHIVE: /usr/lib/locale/locale-archive + vars: + ansible_command_timeout: 60 + when: stage2_nix and not is_psql_oriole and not is_psql_17 + + - name: Initialize the database stage2_nix (orioledb) + become: yes + become_user: postgres + shell: > + source /var/lib/postgresql/.bashrc && /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data initdb + -o "--allow-group-access" + -o "--username=tealbase_admin" + -o "--locale-provider=icu" + -o "--encoding=UTF-8" + -o "--icu-locale=en_US.UTF-8" + args: + executable: /bin/bash + environment: + LANG: en_US.UTF-8 + LANGUAGE: en_US.UTF-8 + LC_ALL: en_US.UTF-8 + LC_CTYPE: en_US.UTF-8 + LOCALE_ARCHIVE: /usr/lib/locale/locale-archive + vars: + ansible_command_timeout: 60 + when: stage2_nix and (is_psql_oriole or is_psql_17) - name: copy PG systemd unit template: @@ -234,6 +269,13 @@ dest: /etc/systemd/system/database-optimizations.service when: debpkg_mode or stage2_nix +- name: initialize pg required state + become: yes + shell: | + mkdir -p /run/postgresql + chown -R postgres:postgres /run/postgresql + when: stage2_nix and qemu_mode is defined + - name: Restart Postgres Database without Systemd become: yes become_user: postgres @@ -257,13 +299,6 @@ daemon_reload: yes when: debpkg_mode or stage2_nix -- name: Make sure .bashrc exists - file: - path: /var/lib/postgresql/.bashrc - state: touch - owner: postgres - group: postgres - when: nixpkg_mode - name: Add LOCALE_ARCHIVE to .bashrc lineinfile: diff --git a/ansible/tasks/setup-postgrest.yml b/ansible/tasks/setup-postgrest.yml index a98d199..df9fecb 100644 --- a/ansible/tasks/setup-postgrest.yml +++ b/ansible/tasks/setup-postgrest.yml @@ -6,22 +6,28 @@ url: https://www.postgresql.org/media/keys/ACCC4CF8.asc state: present -- name: PostgREST - add Postgres PPA +- name: PostgREST - add Postgres PPA main apt_repository: - repo: "deb http://apt.postgresql.org/pub/repos/apt/ focal-pgdg {{ postgresql_major }}" + repo: "deb 
http://apt.postgresql.org/pub/repos/apt/ noble-pgdg {{ postgresql_major }}" state: present + filename: postgresql-pgdg -- name: PostgREST - update apt cache +- name: PostgREST - install system dependencies apt: - update_cache: yes - -# libpq is a C library that enables user programs to communicate with -# the PostgreSQL database server. -- name: PostgREST - system dependencies - apt: - pkg: + package: - libpq5 - libnuma-dev + update_cache: yes + state: present + +- name: PostgREST - verify libpq5 version + shell: dpkg -l libpq5 | grep '^ii' | awk '{print $3}' + register: libpq5_version + changed_when: false + +- name: Show installed libpq5 version + debug: + msg: "Installed libpq5 version: {{ libpq5_version.stdout }}" - name: PostgREST - remove Postgres PPA gpg key apt_key: @@ -30,7 +36,7 @@ - name: PostgREST - remove Postgres PPA apt_repository: - repo: "deb http://apt.postgresql.org/pub/repos/apt/ focal-pgdg {{ postgresql_major }}" + repo: "deb http://apt.postgresql.org/pub/repos/apt/ noble-pgdg {{ postgresql_major }}" state: absent - name: postgis - ensure dependencies do not get autoremoved @@ -50,7 +56,7 @@ - name: PostgREST - download ubuntu binary archive (x86) get_url: - url: "https://github.com/PostgREST/postgrest/releases/download/v{{ postgrest_release }}/postgrest-v{{ postgrest_release }}-linux-static-x64.tar.xz" + url: "https://github.com/PostgREST/postgrest/releases/download/v{{ postgrest_release }}/postgrest-v{{ postgrest_release }}-linux-static-x86-64.tar.xz" dest: /tmp/postgrest.tar.xz checksum: "{{ postgrest_x86_release_checksum }}" timeout: 60 @@ -88,7 +94,6 @@ #! 
/usr/bin/env bash set -euo pipefail set -x - cd "$(dirname "$0")" cat $@ > merged.conf dest: /etc/postgrest/merge.sh diff --git a/ansible/tasks/setup-system.yml b/ansible/tasks/setup-system.yml index 0783813..c1285bf 100644 --- a/ansible/tasks/setup-system.yml +++ b/ansible/tasks/setup-system.yml @@ -183,3 +183,17 @@ ansible.posix.sysctl: name: 'net.ipv4.ip_local_port_range' value: '1025 65000' + +#Set Sysctl params specific to keepalives +- name: Set net.ipv4.tcp_keepalive_time=1800 + ansible.builtin.sysctl: + name: net.ipv4.tcp_keepalive_time + value: 1800 + state: present + when: debpkg_mode or nixpkg_mode +- name: Set net.ipv4.tcp_keepalive_intvl=60 + ansible.builtin.sysctl: + name: net.ipv4.tcp_keepalive_intvl + value: 60 + state: present + when: debpkg_mode or nixpkg_mode diff --git a/ansible/tasks/setup-tealbase-internal.yml b/ansible/tasks/setup-tealbase-internal.yml index aea3a78..92f54d2 100644 --- a/ansible/tasks/setup-tealbase-internal.yml +++ b/ansible/tasks/setup-tealbase-internal.yml @@ -34,19 +34,19 @@ aws configure set default.s3.use_dualstack_endpoint true - name: install Vector for logging - become: yes + become: true apt: deb: "{{ vector_x86_deb }}" when: platform == "amd64" - name: install Vector for logging - become: yes + become: true apt: deb: "{{ vector_arm_deb }}" when: platform == "arm64" - name: add Vector to postgres group - become: yes + become: true shell: cmd: | usermod -a -G postgres vector @@ -72,21 +72,21 @@ daemon_reload: yes - name: Create checkpoints dir - become: yes + become: true file: path: /var/lib/vector state: directory owner: vector - name: Include file for generated optimizations in postgresql.conf - become: yes + become: true replace: path: /etc/postgresql/postgresql.conf regexp: "#include = '/etc/postgresql-custom/generated-optimizations.conf'" replace: "include = '/etc/postgresql-custom/generated-optimizations.conf'" - name: Include file for custom overrides in postgresql.conf - become: yes + become: true replace: 
path: /etc/postgresql/postgresql.conf regexp: "#include = '/etc/postgresql-custom/custom-overrides.conf'" @@ -114,3 +114,11 @@ import_tasks: internal/install-salt.yml tags: - aws-only + +- name: Install tealbase-admin-agent + import_tasks: internal/tealbase-admin-agent.yml + tags: + - aws-only + +- name: Envoy - use lds.tealbase.yaml for /etc/envoy/lds.yaml + command: mv /etc/envoy/lds.tealbase.yaml /etc/envoy/lds.yaml diff --git a/ansible/tasks/setup-wal-g.yml b/ansible/tasks/setup-wal-g.yml index bbc64cd..aa56fae 100644 --- a/ansible/tasks/setup-wal-g.yml +++ b/ansible/tasks/setup-wal-g.yml @@ -1,72 +1,8 @@ -# Downloading dependencies -- name: wal-g dependencies - become: yes - apt: - pkg: - - libbrotli-dev - - liblzo2-dev - - libsodium-dev - - cmake - -# install go dependency for WAL-G -- name: wal-g go dependency - get_url: - url: "https://golang.org/dl/go{{ golang_version }}.linux-{{ platform }}.tar.gz" - dest: /tmp - checksum: "{{ golang_version_checksum[platform] }}" - timeout: 60 - -- name: unpack go archive - unarchive: - remote_src: yes - src: "/tmp/go{{ golang_version }}.linux-{{ platform }}.tar.gz" - dest: /usr/local - -# Download WAL-G -- name: wal-g - download latest version - git: - repo: https://github.com/wal-g/wal-g.git - dest: /tmp/wal-g - version: "v{{ wal_g_release }}" - become: yes - -- name: wal-g - pg_clean - make: - chdir: /tmp/wal-g - target: pg_clean - params: - GOBIN: "/usr/local/bin" - PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin" - USE_LIBSODIUM: true - become: yes - ignore_errors: yes - -- name: wal-g - deps - make: - chdir: /tmp/wal-g - target: deps - params: - GOBIN: "/usr/local/bin" - PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin" - USE_LIBSODIUM: true - become: yes - ignore_errors: yes - -- name: wal-g - build and install - community.general.make: - chdir: /tmp/wal-g - target: pg_install - jobs: "{{ parallel_jobs | default(omit) }}" - params: - GOBIN: "/usr/local/bin" - PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin" - 
USE_LIBSODIUM: true - become: yes - - name: Create wal-g group group: name: wal-g state: present + when: nixpkg_mode - name: Create wal-g user user: @@ -75,7 +11,7 @@ comment: WAL-G user group: wal-g groups: wal-g, postgres - + when: nixpkg_mode - name: Create a config directory owned by wal-g file: path: /etc/wal-g @@ -83,6 +19,37 @@ owner: wal-g group: wal-g mode: '0770' + when: nixpkg_mode + +- name: Install wal-g 2 from nix binary cache + become: yes + shell: | + sudo -u wal-g bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:tealbase/postgres/{{ git_commit_sha }}#wal-g-2" + when: stage2_nix + +- name: Install wal-g 3 from nix binary cache + become: yes + shell: | + sudo -u wal-g bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:tealbase/postgres/{{ git_commit_sha }}#wal-g-3" + when: stage2_nix + +- name: Create symlink for wal-g-3 from Nix profile to /usr/local/bin + ansible.builtin.file: + src: /home/wal-g/.nix-profile/bin/wal-g-3 + dest: /usr/local/bin/wal-g-v3 + state: link + force: yes # This will replace existing file/symlink if it exists + become: yes # Need sudo to write to /usr/local/bin + when: stage2_nix + +- name: Create symlink to make wal-g-v2 the default wal-g + ansible.builtin.file: + src: /home/wal-g/.nix-profile/bin/wal-g-2 + dest: /usr/local/bin/wal-g + state: link + force: yes + become: yes + when: stage2_nix - name: Create /etc/wal-g/config.json file: @@ -91,6 +58,7 @@ owner: wal-g group: wal-g mode: '0664' + when: stage2_nix - name: Move custom wal-g.conf file to /etc/postgresql-custom/wal-g.conf template: @@ -99,6 +67,7 @@ mode: 0664 owner: postgres group: postgres + when: stage2_nix - name: Add script to be run for restore_command template: @@ -107,6 +76,7 @@ mode: 0500 owner: postgres group: postgres + when: stage2_nix - name: Add helper script for wal_fetch.sh template: @@ -114,6 +84,7 @@ dest: /root/wal_change_ownership.sh mode: 0700 
owner: root + when: stage2_nix - name: Include /etc/postgresql-custom/wal-g.conf in postgresql.conf become: yes @@ -121,10 +92,4 @@ path: /etc/postgresql/postgresql.conf regexp: "#include = '/etc/postgresql-custom/wal-g.conf'" replace: "include = '/etc/postgresql-custom/wal-g.conf'" - -# Clean up Go -- name: Uninstall Go - become: yes - file: - path: /usr/local/go - state: absent + when: stage2_nix diff --git a/ansible/tasks/stage2-setup-postgres.yml b/ansible/tasks/stage2-setup-postgres.yml index df7b7f0..0ed2066 100644 --- a/ansible/tasks/stage2-setup-postgres.yml +++ b/ansible/tasks/stage2-setup-postgres.yml @@ -3,11 +3,72 @@ # shell: | # sudo -u postgres bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install nixpkgs#openjdk11" # It was decided to leave pljava disabled at https://github.com/tealbase/postgres/pull/690 therefore removing this task + +- name: Check psql_version and modify supautils.conf and postgresql.conf if necessary + block: + - name: Check if psql_version is psql_orioledb-17 + set_fact: + is_psql_oriole: "{{ psql_version in ['psql_orioledb-17'] }}" + + - name: Check if psql_version is psql_17 + set_fact: + is_psql_17: "{{ psql_version in ['psql_17'] }}" + + - name: Check if psql_version is psql_15 + set_fact: + is_psql_15: "{{ psql_version in ['psql_15'] }}" + + - name: Remove specified extensions from postgresql.conf if orioledb-17 or 17 build + ansible.builtin.command: + cmd: > + sed -i 's/ timescaledb,//g' + /etc/postgresql/postgresql.conf + when: is_psql_oriole or is_psql_17 and stage2_nix + become: yes + + - name: Remove specified extensions from supautils.conf if orioledb-17 or 17 build + ansible.builtin.command: + cmd: > + sed -i 's/ timescaledb,//g; s/ plv8,//g' + /etc/postgresql-custom/supautils.conf + when: is_psql_oriole or is_psql_17 and stage2_nix + become: yes + + - name: Remove db_user_namespace from postgresql.conf if orioledb-17 or 17 build + ansible.builtin.command: + cmd: > + sed -i 
's/db_user_namespace = off/#db_user_namespace = off/g;' + /etc/postgresql/postgresql.conf + when: is_psql_oriole or is_psql_17 and stage2_nix + become: yes + + - name: Append orioledb to shared_preload_libraries append within closing quote + ansible.builtin.command: + cmd: > + sed -i 's/\(shared_preload_libraries.*\)'\''\(.*\)$/\1, orioledb'\''\2/' + /etc/postgresql/postgresql.conf + when: is_psql_oriole and stage2_nix + become: yes + + - name: Add default_table_access_method setting + ansible.builtin.lineinfile: + path: /etc/postgresql/postgresql.conf + line: "default_table_access_method = 'orioledb'" + state: present + when: is_psql_oriole and stage2_nix + become: yes + + - name: Add ORIOLEDB_ENABLED environment variable + ansible.builtin.lineinfile: + path: /etc/environment + line: 'ORIOLEDB_ENABLED=true' + when: is_psql_oriole and stage2_nix + become: yes + - name: Install Postgres from nix binary cache become: yes shell: | - sudo -u postgres bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:tealbase/postgres/{{ git_commit_sha }}#psql_15/bin" -#TODO (samrose) switch pg_prove sourcing to develop branch once PR is merged + sudo -u postgres bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:tealbase/postgres/{{ git_commit_sha }}#{{psql_version}}/bin" when: stage2_nix - name: Install pg_prove from nix binary cache @@ -22,6 +83,18 @@ sudo -u postgres bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:tealbase/postgres/{{ git_commit_sha }}#tealbase-groonga" when: stage2_nix +- name: Install debug symbols for postgres version + become: yes + shell: | + sudo -u postgres bash -c ". 
/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:tealbase/postgres/{{ git_commit_sha }}#{{postgresql_version}}_debug" + when: stage2_nix + +- name: Install source files for postgresql version + become: yes + shell: | + sudo -u postgres bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:tealbase/postgres/{{ git_commit_sha }}#{{postgresql_version}}_src" + when: stage2_nix + - name: Set ownership and permissions for /etc/ssl/private become: yes file: @@ -88,18 +161,9 @@ group: postgres when: stage2_nix -# - name: Ensure /usr/lib/postgresql/share/postgresql/pljava directory exists -# file: -# path: /usr/lib/postgresql/share/postgresql/pljava -# state: directory -# owner: postgres -# group: postgres -# when: stage2_nix -# It was decided to leave pljava disabled at https://github.com/tealbase/postgres/pull/690 therefore removing this task - - name: import pgsodium_getkey script template: - src: /tmp/ansible-playbook/ansible/files/pgsodium_getkey_readonly.sh.j2 + src: files/pgsodium_getkey_readonly.sh.j2 dest: "/usr/lib/postgresql/bin/pgsodium_getkey.sh" owner: postgres group: postgres @@ -107,12 +171,11 @@ when: stage2_nix - name: Create symbolic links from /var/lib/postgresql/.nix-profile/bin to /usr/lib/postgresql/bin - file: - src: "{{ item }}" - dest: "/usr/lib/postgresql/bin/{{ item | basename }}" - state: link - with_fileglob: - - "/var/lib/postgresql/.nix-profile/bin/*" + shell: >- + find /var/lib/postgresql/.nix-profile/bin/ -maxdepth 1 -type f,l -exec sh -c 'ln -s "$0" "{{ item }}/$(basename $0)"' {} \; + loop: + - /usr/lib/postgresql/bin + - /usr/bin become: yes when: stage2_nix @@ -129,53 +192,21 @@ when: pg_config_stat.stat.exists and not pg_config_stat.stat.islnk and stage2_nix become: yes -- name: Create symbolic links from /var/lib/postgresql/.nix-profile/bin to /usr/bin - file: - src: "{{ item }}" - dest: "/usr/bin/{{ item | basename }}" - state: link - with_fileglob: - 
- "/var/lib/postgresql/.nix-profile/bin/*" - become: yes - when: stage2_nix - - name: Ensure postgres user has ownership of symlink - file: - path: "/usr/bin/{{ item | basename }}" - owner: postgres - group: postgres - with_fileglob: - - "/var/lib/postgresql/.nix-profile/bin/*" + shell: >- + find /var/lib/postgresql/.nix-profile/bin/ -maxdepth 1 -type f,l -exec chown postgres:postgres "/usr/bin/$(basename {})" \; become: yes when: stage2_nix -# - name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/pljava to /usr/lib/postgresql/share/postgresql/pljava -# file: -# src: "{{ item }}" -# dest: "/usr/lib/postgresql/share/postgresql/pljava/{{ item | basename }}" -# state: link -# with_fileglob: -# - "/var/lib/postgresql/.nix-profile/share/pljava/*" -# become: yes -# It was decided to leave pljava disabled at https://github.com/tealbase/postgres/pull/690 therefore removing this task - - name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql to /usr/lib/postgresql/share/postgresql - file: - src: "{{ item }}" - dest: "/usr/lib/postgresql/share/postgresql/{{ item | basename }}" - state: link - with_fileglob: - - "/var/lib/postgresql/.nix-profile/share/postgresql/*" + shell: >- + find /var/lib/postgresql/.nix-profile/share/postgresql/ -maxdepth 1 -type f,l -exec sh -c 'ln -s "$0" "/usr/lib/postgresql/share/postgresql/$(basename $0)"' {} \; become: yes when: stage2_nix - name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql/extension to /usr/lib/postgresql/share/postgresql/extension - file: - src: "{{ item }}" - dest: "/usr/lib/postgresql/share/postgresql/extension/{{ item | basename }}" - state: link - with_fileglob: - - "/var/lib/postgresql/.nix-profile/share/postgresql/extension/*" + shell: >- + find /var/lib/postgresql/.nix-profile/share/postgresql/extension/ -maxdepth 1 -type f,l -exec sh -c 'ln -s "$0" "/usr/lib/postgresql/share/postgresql/extension/$(basename $0)"' {} \; become: yes when: 
stage2_nix @@ -186,31 +217,29 @@ recurse: yes when: stage2_nix -- name: Recursively create symbolic links and set permissions for the contrib/postgis-* dir - shell: > - sudo mkdir -p /usr/lib/postgresql/share/postgresql/contrib && \ - sudo find /var/lib/postgresql/.nix-profile/share/postgresql/contrib/ -mindepth 1 -type d -exec sh -c 'for dir do sudo ln -s "$dir" "/usr/lib/postgresql/share/postgresql/contrib/$(basename "$dir")"; done' sh {} + \ - && chown -R postgres:postgres "/usr/lib/postgresql/share/postgresql/contrib/" - become: yes - when: stage2_nix +- name: Check psql_version and run postgis linking if not oriole-xx + block: + - name: Check if psql_version is psql_orioledb-17 + set_fact: + is_psql_oriole: "{{ psql_version == 'psql_orioledb-17' }}" + + - name: Recursively create symbolic links and set permissions for the contrib/postgis-* dir + shell: > + sudo mkdir -p /usr/lib/postgresql/share/postgresql/contrib && \ + sudo find /var/lib/postgresql/.nix-profile/share/postgresql/contrib/ -mindepth 1 -type d -exec sh -c 'for dir do sudo ln -s "$dir" "/usr/lib/postgresql/share/postgresql/contrib/$(basename "$dir")"; done' sh {} + \ + && chown -R postgres:postgres "/usr/lib/postgresql/share/postgresql/contrib/" + become: yes + when: stage2_nix and not is_psql_oriole - name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql/timezonesets to /usr/lib/postgresql/share/postgresql/timeszonesets - file: - src: "{{ item }}" - dest: "/usr/lib/postgresql/share/postgresql/timezonesets/{{ item | basename }}" - state: link - with_fileglob: - - "/var/lib/postgresql/.nix-profile/share/postgresql/timezonesets/*" + shell: >- + find /var/lib/postgresql/.nix-profile/share/postgresql/timezonesets/ -maxdepth 1 -type f,l -exec sh -c 'ln -s "$0" "/usr/lib/postgresql/share/postgresql/timezonesets/$(basename $0)"' {} \; become: yes when: stage2_nix - name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql/tsearch_data to 
/usr/lib/postgresql/share/postgresql/tsearch_data - file: - src: "{{ item }}" - dest: "/usr/lib/postgresql/share/postgresql/tsearch_data/{{ item | basename }}" - state: link - with_fileglob: - - "/var/lib/postgresql/.nix-profile/share/postgresql/tsearch_data/*" + shell: >- + find /var/lib/postgresql/.nix-profile/share/postgresql/tsearch_data/ -maxdepth 1 -type f,l -exec sh -c 'ln -s "$0" "/usr/lib/postgresql/share/postgresql/tsearch_data/$(basename $0)"' {} \; become: yes when: stage2_nix @@ -227,8 +256,16 @@ line: pgsodium.getkey_script= '{{ pg_bindir }}/pgsodium_getkey.sh' when: stage2_nix +- name: Create symbolic link for pgsodium_getkey script + file: + src: "/usr/lib/postgresql/bin/pgsodium_getkey.sh" + dest: "/usr/lib/postgresql/share/postgresql/extension/pgsodium_getkey" + state: link + become: yes + when: stage2_nix + - name: Append GRN_PLUGINS_DIR to /etc/environment.d/postgresql.env ansible.builtin.lineinfile: path: /etc/environment.d/postgresql.env line: 'GRN_PLUGINS_DIR=/var/lib/postgresql/.nix-profile/lib/groonga/plugins' - become: yes \ No newline at end of file + become: yes diff --git a/ansible/tasks/test-image.yml b/ansible/tasks/test-image.yml index 3b3edc2..b152971 100644 --- a/ansible/tasks/test-image.yml +++ b/ansible/tasks/test-image.yml @@ -1,14 +1,36 @@ -- name: install pg_prove - apt: - pkg: - - libtap-parser-sourcehandler-pgtap-perl - when: debpkg_mode +# - name: Temporarily disable PG Sodium references in config +# become: yes +# become_user: postgres +# shell: +# cmd: sed -i.bak -e "s/pg_net,\ pgsodium,\ timescaledb/pg_net,\ timescaledb/g" -e "s/pgsodium.getkey_script=/#pgsodium.getkey_script=/g" /etc/postgresql/postgresql.conf +# when: debpkg_mode or stage2_nix -- name: Temporarily disable PG Sodium references in config +- name: Temporarily disable PG Sodium and tealbase Vault references in config become: yes become_user: postgres shell: - cmd: sed -i.bak -e "s/pg_net,\ pgsodium,\ timescaledb/pg_net,\ timescaledb/g" -e 
"s/pgsodium.getkey_script=/#pgsodium.getkey_script=/g" /etc/postgresql/postgresql.conf + cmd: > + sed -i.bak + -e 's/\(shared_preload_libraries = '\''.*\)pgsodium,\(.*'\''\)/\1\2/' + -e 's/\(shared_preload_libraries = '\''.*\)tealbase_vault,\(.*'\''\)/\1\2/' + -e 's/\(shared_preload_libraries = '\''.*\), *tealbase_vault'\''/\1'\''/' + -e 's/pgsodium.getkey_script=/#pgsodium.getkey_script=/' + /etc/postgresql/postgresql.conf + when: debpkg_mode or stage2_nix + +- name: Verify pgsodium and vault removal from config + become: yes + become_user: postgres + shell: + cmd: | + FOUND=$(grep -E "shared_preload_libraries.*pgsodium|shared_preload_libraries.*tealbase_vault|^pgsodium\.getkey_script" /etc/postgresql/postgresql.conf) + if [ ! -z "$FOUND" ]; then + echo "Found unremoved references:" + echo "$FOUND" + exit 1 + fi + register: verify_result + failed_when: verify_result.rc != 0 when: debpkg_mode or stage2_nix - name: Start Postgres Database to load all extensions. @@ -46,20 +68,6 @@ LOCALE_ARCHIVE: /usr/lib/locale/locale-archive when: stage2_nix -- name: Run Unit tests (with filename unit-test-*) on Postgres Database - shell: /usr/bin/pg_prove -U postgres -h localhost -d postgres -v /tmp/unit-tests/unit-test-*.sql - register: retval - failed_when: retval.rc != 0 - when: debpkg_mode or stage2_nix - -- name: Run migrations tests - shell: /usr/bin/pg_prove -U tealbase_admin -h localhost -d postgres -v tests/test.sql - register: retval - failed_when: retval.rc != 0 - when: debpkg_mode or stage2_nix - args: - chdir: /tmp/migrations - - name: Re-enable PG Sodium references in config become: yes become_user: postgres @@ -71,14 +79,6 @@ shell: /usr/lib/postgresql/bin/psql --no-password --no-psqlrc -d postgres -h localhost -U tealbase_admin -c 'SELECT pg_stat_statements_reset(); SELECT pg_stat_reset();' when: debpkg_mode or stage2_nix -- name: remove pg_prove - apt: - pkg: - - libtap-parser-sourcehandler-pgtap-perl - state: absent - autoremove: yes - when: debpkg_mode - - 
name: Stop Postgres Database become: yes become_user: postgres diff --git a/ansible/vars.yml b/ansible/vars.yml index 09d0125..96c136d 100644 --- a/ansible/vars.yml +++ b/ansible/vars.yml @@ -2,152 +2,60 @@ tealbase_internal: true ebssurrogate_mode: true async_mode: true -postgresql_major: "15" -postgresql_release: "15.1" -postgresql_release_checksum: sha256:ea2cf059a85882654b989acd07edc121833164a30340faee0d3615cf7058e66c +postgres_major: + - "15" + - "17" + - "orioledb-17" + +# Full version strings for each major version +postgres_release: + postgresorioledb-17: "17.5.1.009-orioledb" + postgres17: "17.4.1.066" + postgres15: "15.8.1.123" # Non Postgres Extensions pgbouncer_release: "1.19.0" pgbouncer_release_checksum: sha256:af0b05e97d0e1fd9ad45fe00ea6d2a934c63075f67f7e2ccef2ca59e3d8ce682 -# to get these use -# wget https://github.com/PostgREST/postgrest/releases/download/v12.2.3/postgrest-v12.2.3-ubuntu-aarch64.tar.xz -q -O- | sha1sum -# wget https://github.com/PostgREST/postgrest/releases/download/v12.2.3/postgrest-v12.2.3-linux-static-x64.tar.xz -q -O- | sha1sum -postgrest_release: "12.2.3" -postgrest_arm_release_checksum: sha1:fbfd6613d711ce1afa25c42d5df8f1b017f396f9 -postgrest_x86_release_checksum: sha1:61c513f91a8931be4062587b9d4a18b42acf5c05 +# The checksum can be found under "Assets", in the GitHub release page for each version. +# The binaries used are: ubuntu-aarch64 and linux-static. 
+# https://github.com/PostgREST/postgrest/releases +postgrest_release: "13.0.4" +postgrest_arm_release_checksum: sha256:2b400200fb15eb5849267e4375fbbc516dd727afadd8786815b48074ed8c03e1 +postgrest_x86_release_checksum: sha256:a0052c8d4726f52349e0298f98da51140ef4941855548590ee88331afa617811 -gotrue_release: 2.165.1 -gotrue_release_checksum: sha1:bbd62327d8612ac756177dde81d5368b660ca4c8 +gotrue_release: 2.177.0 +gotrue_release_checksum: sha1:664a26237618c4bfb1e33e4f03a540c3cef3e3c8 -aws_cli_release: "2.2.7" +aws_cli_release: "2.23.11" salt_minion_version: 3007 -golang_version: "1.19.3" +golang_version: "1.22.11" golang_version_checksum: - arm64: sha256:99de2fe112a52ab748fb175edea64b313a0c8d51d6157dba683a6be163fd5eab - amd64: sha256:74b9640724fd4e6bb0ed2a1bc44ae813a03f1e72a4c76253e2d5c015494430ba + arm64: sha256:0fc88d966d33896384fbde56e9a8d80a305dc17a9f48f1832e061724b1719991 + amd64: sha256:9ebfcab26801fa4cf0627c6439db7a4da4d3c6766142a3dd83508240e4f21031 envoy_release: 1.28.0 envoy_release_checksum: sha1:b0a06e9cfb170f1993f369beaa5aa9d7ec679ce5 envoy_hot_restarter_release_checksum: sha1:6d43b89d266fb2427a4b51756b649883b0617eda -kong_release_target: focal # if it works, it works +kong_release_target: focal kong_deb: kong_2.8.1_arm64.deb kong_deb_checksum: sha1:2086f6ccf8454fe64435252fea4d29d736d7ec61 nginx_release: 1.22.0 nginx_release_checksum: sha1:419efb77b80f165666e2ee406ad8ae9b845aba93 -wal_g_release: "2.0.1" - -sfcgal_release: "1.3.10" -sfcgal_release_checksum: sha256:4e39b3b2adada6254a7bdba6d297bb28e1a9835a9f879b74f37e2dab70203232 - postgres_exporter_release: "0.15.0" postgres_exporter_release_checksum: arm64: sha256:29ba62d538b92d39952afe12ee2e1f4401250d678ff4b354ff2752f4321c87a0 amd64: sha256:cb89fc5bf4485fb554e0d640d9684fae143a4b2d5fa443009bd29c59f9129e84 -adminapi_release: 0.71.1 -adminmgr_release: 0.24.0 - -# Postgres Extensions -postgis_release: "3.3.2" -postgis_release_checksum: sha256:9a2a219da005a1730a39d1959a1c7cec619b1efb009b65be80ffc25bad299068 - 
-pgrouting_release: "3.4.1" -pgrouting_release_checksum: sha256:a4e034efee8cf67582b67033d9c3ff714a09d8f5425339624879df50aff3f642 - -pgtap_release: "1.2.0" -pgtap_release_checksum: sha256:9c7c3de67ea41638e14f06da5da57bac6f5bd03fea05c165a0ec862205a5c052 - -pg_cron_release: "1.6.2" -pg_cron_release_checksum: sha256:9f4eb3193733c6fa93a6591406659aac54b82c24a5d91ffaf4ec243f717d94a0 - -pgaudit_release: "1.7.0" -pgaudit_release_checksum: sha256:8f4a73e451c88c567e516e6cba7dc1e23bc91686bb6f1f77f8f3126d428a8bd8 - -pgjwt_release: 9742dab1b2f297ad3811120db7b21451bca2d3c9 - -pgsql_http_release: "1.5.0" -pgsql_http_release_checksum: sha256:43efc9e82afcd110f205b86b8d28d1355d39b6b134161e9661a33a1346818f5d - -plpgsql_check_release: "2.2.5" -plpgsql_check_release_checksum: sha256:6c3a3c5faf3f9689425c6db8a6b20bf4cd5e7144a055e29538eae980c7232573 - -pg_safeupdate_release: "1.4" -pg_safeupdate_release_checksum: sha256:ff01d3d444d35924bd3d745c5695696292e2855042da4c30fe728fb3b6648122 - -timescaledb_release: "2.9.1" -timescaledb_release_checksum: sha256:883638f2e79d25ec88ee58f603f3c81c999b6364cb4c799919d363f04089b47b - -wal2json_release: "2_5" -wal2json_release_checksum: sha256:b516653575541cf221b99cf3f8be9b6821f6dbcfc125675c85f35090f824f00e - -supautils_release: "2.5.0" -supautils_release_arm64_deb_checksum: sha256:406e4a816f719bd6c4b2143e9bb38078fbe60d7e85018ec0aed5d76924e28000 -supautils_release_amd64_deb_checksum: sha256:71f182b478d8aaf167609dd382875cdce3fbe992e888988b3d51cdad39e08202 -supautils_release_tar_checksum: sha256:07c41244e4374248da9c2df2822152f3ae8f1e74c8a92d361300480193219b63 - -pljava_release: master -pljava_release_checksum: sha256:e99b1c52f7b57f64c8986fe6ea4a6cc09d78e779c1643db060d0ac66c93be8b6 - -plv8_release: "3.1.5" -plv8_release_checksum: sha256:2edf9a219844b2b6abae09c0bdb840c5b0d6e3dd418631744c7326c0b107cc10 - -pg_plan_filter_release: 5081a7b5cb890876e67d8e7486b6a64c38c9a492 - -pg_net_release: "0.9.2" -# To obtain the checksum use `wget 
https://github.com/tealbase/pg_net/archive/refs/tags/v0.9.2.tar.gz -q -O- | sha256sum` -pg_net_release_checksum: sha256:268c87c09ccd26e6566d2522cb02ba7918b4cbda37eb5076d2e790bbd994a087 - -rum_release: "1.3.13" -rum_release_checksum: sha256:6ab370532c965568df6210bd844ac6ba649f53055e48243525b0b7e5c4d69a7d - -pg_hashids_release: cd0e1b31d52b394a0df64079406a14a4f7387cd6 - -vector_x86_deb: "https://packages.timber.io/vector/0.22.3/vector_0.22.3-1_amd64.deb" -vector_arm_deb: "https://packages.timber.io/vector/0.22.3/vector_0.22.3-1_arm64.deb" - -libsodium_release: "1.0.18" -libsodium_release_checksum: sha256:6f504490b342a4f8a4c4a02fc9b866cbef8622d5df4e5452b46be121e46636c1 - -pgsodium_release: "3.1.8" -pgsodium_release_checksum: sha256:4d027aeee5163f3f33740d269938a120d1593a41c3701c920d2a1de80aa97486 - -pg_graphql_release: "1.5.7" - -pg_jsonschema_release: "0.2.0" - -pg_stat_monitor_release: "1.1.1" -pg_stat_monitor_release_checksum: sha256:1756a02d5a6dd66b892d15920257c69a17a67d48d3d4e2f189b681b83001ec2a - -vault_release: "0.2.9" -vault_release_checksum: sha256:1e813216395c59bb94c92be47ce8b70ba19ccc0efbcdb1fb14ed6d34a42c6cdb - -groonga_release: "13.0.1" -groonga_release_checksum: sha256:1c2d1a6981c1ad3f02a11aff202b15ba30cb1c6147f1fa9195b519a2b728f8ba - -pgroonga_release: "3.0.7" -pgroonga_release_checksum: sha256:885ff3878cc30e9030e5fc56d561bc8b66df3ede1562c9d802bc0ea04fe5c203 - -wrappers_release: "0.4.2" - -hypopg_release: "1.4.1" -hypopg_release_checksum: sha256:9afe6357fd389d8d33fad81703038ce520b09275ec00153c6c89282bcdedd6bc - -pg_repack_release: "1.5.0" -pg_repack_release_checksum: sha256:9a14d6a95bfa29f856aa10538238622c1f351d38eb350b196c06720a878ccc52 - -pgvector_release: "0.8.0" -pgvector_release_checksum: sha256:867a2c328d4928a5a9d6f052cd3bc78c7d60228a9b914ad32aa3db88e9de27b0 - -pg_tle_release: "1.3.2" -pg_tle_release_checksum: sha256:d04f72d88b21b954656609743560684ac42645b64a36c800d4d2f84d1f180de1 - -index_advisor_release: "0.2.0" -index_advisor_checksum: 
sha256:2d3642012a9185cda51f1e82ba43d64a81b24a2655a3ac3afdcbbd95d46a1a27 +adminapi_release: 0.84.1 +adminmgr_release: 0.25.1 +tealbase_admin_agent_release: 1.4.37 +tealbase_admin_agent_splay: 30 -pg_backtrace_release: "1.1" +vector_x86_deb: "https://packages.timber.io/vector/0.48.X/vector_0.48.0-1_amd64.deb" +vector_arm_deb: "https://packages.timber.io/vector/0.48.X/vector_0.48.0-1_arm64.deb" diff --git a/aogithub/CODEOWNERS b/aogithub/CODEOWNERS new file mode 100644 index 0000000..9cdd01d --- /dev/null +++ b/aogithub/CODEOWNERS @@ -0,0 +1,4 @@ +* @tealbase/backend @tealbase/postgres +migrations/ @tealbase/dev-workflows @tealbase/postgres @tealbase/backend +docker/orioledb @tealbase/postgres @tealbase/backend +common.vars.pkr.hcl @tealbase/postgres @tealbase/backend diff --git a/.github/FUNDING.yml b/aogithub/FUNDING.yml similarity index 100% rename from .github/FUNDING.yml rename to aogithub/FUNDING.yml diff --git a/.github/PULL_REQUEST_TEMPLATE/default.md b/aogithub/PULL_REQUEST_TEMPLATE/default.md similarity index 100% rename from .github/PULL_REQUEST_TEMPLATE/default.md rename to aogithub/PULL_REQUEST_TEMPLATE/default.md diff --git a/.github/PULL_REQUEST_TEMPLATE/extension_upgrade.md b/aogithub/PULL_REQUEST_TEMPLATE/extension_upgrade.md similarity index 100% rename from .github/PULL_REQUEST_TEMPLATE/extension_upgrade.md rename to aogithub/PULL_REQUEST_TEMPLATE/extension_upgrade.md diff --git a/aogithub/actions/shared-checkout/action.yml b/aogithub/actions/shared-checkout/action.yml new file mode 100644 index 0000000..59a236d --- /dev/null +++ b/aogithub/actions/shared-checkout/action.yml @@ -0,0 +1,12 @@ +name: Checkout +description: Checkout repository for pull requests and branches +runs: + using: "composite" + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + repository: ${{ github.event.pull_request.head.repo.full_name || github.repository }} + ref: ${{ github.event.pull_request.head.sha || github.sha }} + fetch-depth: 0 + 
fetch-tags: true diff --git a/.github/pull_request_template.md b/aogithub/pull_request_template.md similarity index 100% rename from .github/pull_request_template.md rename to aogithub/pull_request_template.md diff --git a/aogithub/workflows/ami-release-nix-single.yml b/aogithub/workflows/ami-release-nix-single.yml new file mode 100644 index 0000000..8117bd8 --- /dev/null +++ b/aogithub/workflows/ami-release-nix-single.yml @@ -0,0 +1,156 @@ +name: Release Single AMI Nix + +on: + workflow_dispatch: + inputs: + postgres_version: + description: 'PostgreSQL major version to build (e.g. 15)' + required: true + type: string + branch: + description: 'Branch to run the workflow from' + required: true + type: string + default: 'main' + +permissions: + contents: write + id-token: write + +jobs: + build: + runs-on: large-linux-arm + timeout-minutes: 150 + + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + with: + ref: ${{ github.event.inputs.branch }} + - name: aws-creds + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "us-east-1" + output-credentials: true + role-duration-seconds: 7200 + + - name: Get current branch SHA + id: get_sha + run: | + echo "sha=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT + + - uses: DeterminateSystems/nix-installer-action@main + + - name: Set PostgreSQL version environment variable + run: echo "POSTGRES_MAJOR_VERSION=${{ github.event.inputs.postgres_version }}" >> $GITHUB_ENV + + - name: Generate common-nix.vars.pkr.hcl + run: | + PG_VERSION=$(nix run nixpkgs#yq -- '.postgres_release["postgres'${{ env.POSTGRES_MAJOR_VERSION }}'"]' ansible/vars.yml) + PG_VERSION=$(echo "$PG_VERSION" | tr -d '"') # Remove any surrounding quotes + echo 'postgres-version = "'$PG_VERSION'"' > common-nix.vars.pkr.hcl + # Ensure there's a newline at the end of the file + echo "" >> common-nix.vars.pkr.hcl + + - name: Build AMI stage 1 + env: + 
POSTGRES_MAJOR_VERSION: ${{ env.POSTGRES_MAJOR_VERSION }} + run: | + packer init amazon-arm64-nix.pkr.hcl + GIT_SHA=${{ steps.get_sha.outputs.sha }} + packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "ansible_arguments=-e postgresql_major=${POSTGRES_MAJOR_VERSION}" amazon-arm64-nix.pkr.hcl + + - name: Build AMI stage 2 + env: + POSTGRES_MAJOR_VERSION: ${{ env.POSTGRES_MAJOR_VERSION }} + run: | + packer init stage2-nix-psql.pkr.hcl + GIT_SHA=${{ steps.get_sha.outputs.sha }} + POSTGRES_MAJOR_VERSION=${{ env.POSTGRES_MAJOR_VERSION }} + packer build -var "git_sha=${GIT_SHA}" -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var "postgres_major_version=${POSTGRES_MAJOR_VERSION}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" stage2-nix-psql.pkr.hcl + + - name: Grab release version + id: process_release_version + run: | + VERSION=$(cat common-nix.vars.pkr.hcl | sed -e 's/postgres-version = "\(.*\)"/\1/g') + echo "version=$VERSION" >> $GITHUB_OUTPUT + + - name: Create nix flake revision tarball + run: | + GIT_SHA=${{ steps.get_sha.outputs.sha }} + MAJOR_VERSION=${{ env.POSTGRES_MAJOR_VERSION }} + + mkdir -p "/tmp/pg_upgrade_bin/${MAJOR_VERSION}" + echo "$GIT_SHA" >> "/tmp/pg_upgrade_bin/${MAJOR_VERSION}/nix_flake_version" + tar -czf "/tmp/pg_binaries.tar.gz" -C "/tmp/pg_upgrade_bin" . 
+ + - name: configure aws credentials - staging + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload software manifest to s3 staging + run: | + cd ansible + ansible-playbook -i localhost \ + -e "ami_release_version=${{ steps.process_release_version.outputs.version }}" \ + -e "internal_artifacts_bucket=${{ secrets.ARTIFACTS_BUCKET }}" \ + -e "postgres_major_version=${{ env.POSTGRES_MAJOR_VERSION }}" \ + manifest-playbook.yml + + - name: Upload nix flake revision to s3 staging + run: | + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/24.04.tar.gz + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/upgrade_bundle.tar.gz + + - name: configure aws credentials - prod + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.PROD_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload software manifest to s3 prod + run: | + cd ansible + ansible-playbook -i localhost \ + -e "ami_release_version=${{ steps.process_release_version.outputs.version }}" \ + -e "internal_artifacts_bucket=${{ secrets.PROD_ARTIFACTS_BUCKET }}" \ + -e "postgres_major_version=${{ env.POSTGRES_MAJOR_VERSION }}" \ + manifest-playbook.yml + + - name: Upload nix flake revision to s3 prod + run: | + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ 
steps.process_release_version.outputs.version }}/24.04.tar.gz + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/upgrade_bundle.tar.gz + + - name: Create release + uses: softprops/action-gh-release@v2 + with: + name: ${{ steps.process_release_version.outputs.version }} + tag_name: ${{ steps.process_release_version.outputs.version }} + target_commitish: ${{ steps.get_sha.outputs.sha }} + + - name: Slack Notification on Failure + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: 'gha-failures-notifier' + SLACK_COLOR: 'danger' + SLACK_MESSAGE: 'Building Postgres AMI failed' + SLACK_FOOTER: '' + + - name: Cleanup resources after build + if: ${{ always() }} + run: | + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids + + - name: Cleanup resources on build cancellation + if: ${{ cancelled() }} + run: | + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids + diff --git a/aogithub/workflows/ami-release-nix.yml b/aogithub/workflows/ami-release-nix.yml new file mode 100644 index 0000000..1256bda --- /dev/null +++ b/aogithub/workflows/ami-release-nix.yml @@ -0,0 +1,177 @@ +name: Release AMI Nix + +on: + push: + branches: + - develop + - release/* + paths: + - '.github/workflows/ami-release-nix.yml' + - 'common-nix.vars.pkr.hcl' + - 'ansible/vars.yml' + workflow_dispatch: + +permissions: + contents: write + id-token: write + +jobs: + prepare: + runs-on: large-linux-x86 + outputs: + postgres_versions: ${{ steps.set-versions.outputs.postgres_versions }} + steps: + - 
name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - uses: DeterminateSystems/nix-installer-action@main + + - name: Set PostgreSQL versions + id: set-versions + run: | + VERSIONS=$(nix run nixpkgs#yq -- '.postgres_major[]' ansible/vars.yml | nix run nixpkgs#jq -- -R -s -c 'split("\n")[:-1]') + echo "postgres_versions=$VERSIONS" >> $GITHUB_OUTPUT + + build: + needs: prepare + strategy: + matrix: + postgres_version: ${{ fromJson(needs.prepare.outputs.postgres_versions) }} + include: + - runner: large-linux-arm + runs-on: ${{ matrix.runner }} + timeout-minutes: 150 + + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - name: aws-creds + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "us-east-1" + output-credentials: true + role-duration-seconds: 7200 + - uses: DeterminateSystems/nix-installer-action@main + + - name: Run checks if triggered manually + if: ${{ github.event_name == 'workflow_dispatch' }} + run: | + SUFFIX=$(nix run nixpkgs#yq -- ".postgres_release[\"postgres${{ matrix.postgres_version }}\"]" ansible/vars.yml | sed -E 's/[0-9\.]+(.*)$/\1/') + if [[ -z "$SUFFIX" ]] ; then + echo "Version must include non-numeric characters if built manually." 
+ exit 1 + fi + + - name: Set PostgreSQL version environment variable + run: echo "POSTGRES_MAJOR_VERSION=${{ matrix.postgres_version }}" >> $GITHUB_ENV + + - name: Generate common-nix.vars.pkr.hcl + run: | + PG_VERSION=$(nix run nixpkgs#yq -- '.postgres_release["postgres'${{ matrix.postgres_version }}'"]' ansible/vars.yml) + PG_VERSION=$(echo "$PG_VERSION" | tr -d '"') # Remove any surrounding quotes + echo 'postgres-version = "'$PG_VERSION'"' > common-nix.vars.pkr.hcl + # Ensure there's a newline at the end of the file + echo "" >> common-nix.vars.pkr.hcl + + - name: Build AMI stage 1 + env: + POSTGRES_MAJOR_VERSION: ${{ env.POSTGRES_MAJOR_VERSION }} + run: | + packer init amazon-arm64-nix.pkr.hcl + GIT_SHA=${{github.sha}} + # why is postgresql_major defined here instead of where the _three_ other postgresql_* variables are defined? + packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "ansible_arguments=-e postgresql_major=${POSTGRES_MAJOR_VERSION}" amazon-arm64-nix.pkr.hcl + + - name: Build AMI stage 2 + env: + POSTGRES_MAJOR_VERSION: ${{ env.POSTGRES_MAJOR_VERSION }} + run: | + packer init stage2-nix-psql.pkr.hcl + GIT_SHA=${{github.sha}} + POSTGRES_MAJOR_VERSION=${{ env.POSTGRES_MAJOR_VERSION }} + packer build -var "git_sha=${GIT_SHA}" -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var "postgres_major_version=${POSTGRES_MAJOR_VERSION}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" stage2-nix-psql.pkr.hcl + + - name: Grab release version + id: process_release_version + run: | + VERSION=$(cat common-nix.vars.pkr.hcl | sed -e 's/postgres-version = "\(.*\)"/\1/g') + echo "version=$VERSION" >> $GITHUB_OUTPUT + + - name: Create nix flake revision tarball + run: | + GIT_SHA=${{github.sha}} + MAJOR_VERSION=${{ env.POSTGRES_MAJOR_VERSION }} + + mkdir -p 
"/tmp/pg_upgrade_bin/${MAJOR_VERSION}" + echo "$GIT_SHA" >> "/tmp/pg_upgrade_bin/${MAJOR_VERSION}/nix_flake_version" + tar -czf "/tmp/pg_binaries.tar.gz" -C "/tmp/pg_upgrade_bin" . + + - name: configure aws credentials - staging + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload software manifest to s3 staging + run: | + cd ansible + ansible-playbook -i localhost \ + -e "ami_release_version=${{ steps.process_release_version.outputs.version }}" \ + -e "internal_artifacts_bucket=${{ secrets.ARTIFACTS_BUCKET }}" \ + -e "postgres_major_version=${{ env.POSTGRES_MAJOR_VERSION }}" \ + manifest-playbook.yml + + - name: Upload nix flake revision to s3 staging + run: | + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/24.04.tar.gz + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/upgrade_bundle.tar.gz + + - name: configure aws credentials - prod + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.PROD_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload software manifest to s3 prod + run: | + cd ansible + ansible-playbook -i localhost \ + -e "ami_release_version=${{ steps.process_release_version.outputs.version }}" \ + -e "internal_artifacts_bucket=${{ secrets.PROD_ARTIFACTS_BUCKET }}" \ + -e "postgres_major_version=${{ env.POSTGRES_MAJOR_VERSION }}" \ + manifest-playbook.yml + + - name: Upload nix flake revision to s3 prod + run: | + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ 
steps.process_release_version.outputs.version }}/20.04.tar.gz + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/24.04.tar.gz + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/upgrade_bundle.tar.gz + + - name: Create release + uses: softprops/action-gh-release@v2 + with: + name: ${{ steps.process_release_version.outputs.version }} + tag_name: ${{ steps.process_release_version.outputs.version }} + target_commitish: ${{github.sha}} + + - name: Slack Notification on Failure + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: 'gha-failures-notifier' + SLACK_COLOR: 'danger' + SLACK_MESSAGE: 'Building Postgres AMI failed' + SLACK_FOOTER: '' + + - name: Cleanup resources after build + if: ${{ always() }} + run: | + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids + + - name: Cleanup resources on build cancellation + if: ${{ cancelled() }} + run: | + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids diff --git a/.github/workflows/check-shellscripts.yml b/aogithub/workflows/check-shellscripts.yml similarity index 83% rename from .github/workflows/check-shellscripts.yml rename to aogithub/workflows/check-shellscripts.yml index b796bdb..1eb15ee 100644 --- a/.github/workflows/check-shellscripts.yml +++ b/aogithub/workflows/check-shellscripts.yml @@ -7,11 +7,15 @@ on: pull_request: workflow_dispatch: +permissions: + contents: read + jobs: build: 
runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD - name: Run ShellCheck uses: ludeeus/action-shellcheck@master env: diff --git a/aogithub/workflows/ci.yml b/aogithub/workflows/ci.yml new file mode 100644 index 0000000..b6ac1cf --- /dev/null +++ b/aogithub/workflows/ci.yml @@ -0,0 +1,50 @@ +name: Check merge requirements + +on: + pull_request: + +permissions: + contents: read + +jobs: + check-release-version: + timeout-minutes: 5 + runs-on: ubuntu-latest + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - name: Load postgres_release values + id: load_postgres_release + uses: mikefarah/yq@master + with: + args: eval '.postgres_release' ansible/vars.yml + # The output will be available as steps.load_postgres_release.outputs.stdout + + - name: Run checks + run: | + POSTGRES_RELEASES="${{ steps.load_postgres_release.outputs.stdout }}" + + # Iterate through each release + for release in $(echo "$POSTGRES_RELEASES" | yq eval 'keys | .[]' -); do + VERSION=$(echo "$POSTGRES_RELEASES" | yq eval ".\"$release\"" -) + if [[ "$release" == "postgresorioledb-17" ]]; then + # Check for suffix after -orioledb + if [[ "$VERSION" =~ -orioledb(.*) ]]; then + SUFFIX="${BASH_REMATCH[1]}" + if [[ -n "$SUFFIX" ]]; then + echo "We no longer allow merging versions with suffixes after -orioledb." + exit 1 + fi + fi + else + # Check for suffix after version digits + if [[ "$VERSION" =~ ([0-9]+\.[0-9]+\.[0-9]+)(.*) ]]; then + SUFFIX="${BASH_REMATCH[2]}" + if [[ -n "$SUFFIX" ]]; then + echo "We no longer allow merging versions with suffixes after version $VERSION." 
+ exit 1 + fi + fi + fi + done diff --git a/aogithub/workflows/dockerhub-release-matrix.yml b/aogithub/workflows/dockerhub-release-matrix.yml new file mode 100644 index 0000000..e41d126 --- /dev/null +++ b/aogithub/workflows/dockerhub-release-matrix.yml @@ -0,0 +1,253 @@ +name: Release all major versions on Dockerhub + +on: + push: + branches: + - develop + - release/* + paths: + - ".github/workflows/dockerhub-release-matrix.yml" + - "ansible/vars.yml" + workflow_dispatch: + +permissions: + contents: read + id-token: write + +jobs: + prepare: + runs-on: large-linux-x86 + outputs: + matrix_config: ${{ steps.set-matrix.outputs.matrix_config }} + steps: + - uses: DeterminateSystems/nix-installer-action@main + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - name: Generate build matrix + id: set-matrix + run: | + nix run nixpkgs#nushell -- -c 'let versions = (open ansible/vars.yml | get postgres_major) + let matrix = ($versions | each { |ver| + let version = ($ver | str trim) + let dockerfile = $"Dockerfile-($version)" + if ($dockerfile | path exists) { + { + version: $version, + dockerfile: $dockerfile + } + } else { + null + } + } | compact) + + let matrix_config = { + include: $matrix + } + + $"matrix_config=($matrix_config | to json -r)" | save --append $env.GITHUB_OUTPUT' + build: + needs: prepare + strategy: + matrix: ${{ fromJson(needs.prepare.outputs.matrix_config) }} + runs-on: large-linux-x86 + outputs: + build_args: ${{ steps.args.outputs.result }} + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - uses: DeterminateSystems/nix-installer-action@main + - name: Set PostgreSQL version environment variable + run: echo "POSTGRES_MAJOR_VERSION=${{ matrix.version }}" >> $GITHUB_ENV + + - id: args + run: | + nix run nixpkgs#nushell -- -c ' + open ansible/vars.yml + | items { |key value| {name: $key, item: $value} } + | where { |it| ($it.item | describe) == "string" } + | each { 
|it| $"($it.name)=($it.item)" } + | str join "\n" + | save --append $env.GITHUB_OUTPUT + ' + build_release_image: + needs: [prepare, build] + strategy: + matrix: + postgres: ${{ fromJson(needs.prepare.outputs.matrix_config).include }} + arch: [amd64, arm64] + runs-on: ${{ matrix.arch == 'amd64' && 'large-linux-x86' || 'large-linux-arm' }} + timeout-minutes: 180 + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - uses: DeterminateSystems/nix-installer-action@main + - run: docker context create builders + - uses: docker/setup-buildx-action@v3 + with: + endpoint: builders + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - name: Get image tag + id: image + run: | + if [[ "${{ matrix.arch }}" == "arm64" ]]; then + pg_version=$(nix run nixpkgs#nushell -- -c ' + let version = "${{ matrix.postgres.version }}" + let release_key = if ($version | str contains "orioledb") { + $"postgresorioledb-17" + } else { + $"postgres($version)" + } + open ansible/vars.yml | get postgres_release | get $release_key | str trim + ') + echo "pg_version=tealbase/postgres:$pg_version" >> $GITHUB_OUTPUT + else + pg_version=$(nix run nixpkgs#nushell -- -c ' + let version = "${{ matrix.postgres.version }}" + let release_key = if ($version | str contains "orioledb") { + $"postgresorioledb-17" + } else { + $"postgres($version)" + } + open ansible/vars.yml | get postgres_release | get $release_key | str trim + ') + echo "pg_version=tealbase/postgres:$pg_version" >> $GITHUB_OUTPUT + fi + - id: build + uses: docker/build-push-action@v5 + with: + push: true + build-args: | + ${{ needs.build.outputs.build_args }} + target: production + tags: ${{ steps.image.outputs.pg_version }}_${{ matrix.arch }} + platforms: linux/${{ matrix.arch }} + cache-from: type=gha,scope=${{ github.ref_name }}-latest-${{ matrix.arch }} + cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-latest-${{ 
matrix.arch }} + file: ${{ matrix.postgres.dockerfile }} + merge_manifest: + needs: [prepare, build, build_release_image] + strategy: + matrix: + include: ${{ fromJson(needs.prepare.outputs.matrix_config).include }} + runs-on: large-linux-x86 + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - uses: DeterminateSystems/nix-installer-action@main + - uses: docker/setup-buildx-action@v3 + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - name: Get image tag + id: get_version + run: | + nix run nixpkgs#nushell -- -c ' + let version = "${{ matrix.version }}" + let release_key = if ($version | str contains "orioledb") { + $"postgresorioledb-17" + } else { + $"postgres($version)" + } + let pg_version = (open ansible/vars.yml | get postgres_release | get $release_key | str trim) + $"pg_version=tealbase/postgres:($pg_version)" | save --append $env.GITHUB_OUTPUT + ' + - name: Output version + id: output_version + run: | + echo "result=${{ steps.get_version.outputs.pg_version }}" >> $GITHUB_OUTPUT + - name: Collect versions + id: collect_versions + run: | + echo "${{ steps.output_version.outputs.result }}" >> results.txt # Append results + - name: Upload Results Artifact + uses: actions/upload-artifact@v4 + with: + name: merge_results-${{ matrix.version }} + path: results.txt + if-no-files-found: warn + - name: Merge multi-arch manifests + run: | + docker buildx imagetools create -t ${{ steps.get_version.outputs.pg_version }} \ + ${{ steps.get_version.outputs.pg_version }}_amd64 \ + ${{ steps.get_version.outputs.pg_version }}_arm64 + combine_results: + needs: [prepare, merge_manifest] + runs-on: large-linux-x86 + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - uses: DeterminateSystems/nix-installer-action@main + + - name: Debug Input from Prepare + run: | + echo "Raw matrix_config output:" + echo "${{ 
needs.prepare.outputs.matrix_config }}" + - name: Get Versions from Matrix Config + id: get_versions + run: | + nix run nixpkgs#nushell -- -c ' + # Parse the matrix configuration directly + let matrix_config = (${{ toJson(needs.prepare.outputs.matrix_config) }} | from json) + + # Get versions directly from include array + let versions = ($matrix_config.include | get version) + + echo "Versions: $versions" + + # Convert the versions to a comma-separated string + let versions_str = ($versions | str join ",") + $"versions=$versions_str" | save --append $env.GITHUB_ENV + ' + - name: Download Results Artifacts + uses: actions/download-artifact@v4 + with: + pattern: merge_results-* + - name: Combine Results + id: combine + run: | + nix run nixpkgs#nushell -- -c ' + # Get all results files and process them in one go + let files = (ls **/results.txt | get name) + echo $"Found files: ($files)" + + let matrix = { + include: ( + $files + | each { |file| open $file } # Open each file + | each { |content| $content | lines } # Split into lines + | flatten # Flatten the nested lists + | where { |line| $line != "" } # Filter empty lines + | each { |line| + # Extract just the version part after the last colon + let version = ($line | parse "tealbase/postgres:{version}" | get version.0) + {version: $version} + } + ) + } + + let json_output = ($matrix | to json -r) # -r for raw output + echo $"Debug output: ($json_output)" + + $"matrix=($json_output)" | save --append $env.GITHUB_OUTPUT + ' + - name: Debug Combined Results + run: | + echo "Combined Results: '${{ steps.combine.outputs.matrix }}'" + outputs: + matrix: ${{ steps.combine.outputs.matrix }} + publish: + needs: combine_results + permissions: + contents: read + packages: write + id-token: write + strategy: + matrix: ${{ fromJson(needs.combine_results.outputs.matrix) }} + uses: ./.github/workflows/mirror.yml + with: + version: ${{ matrix.version }} + secrets: inherit diff --git a/aogithub/workflows/manual-docker-release.yml 
b/aogithub/workflows/manual-docker-release.yml new file mode 100644 index 0000000..783b4d2 --- /dev/null +++ b/aogithub/workflows/manual-docker-release.yml @@ -0,0 +1,262 @@ +name: Manual Docker Artifacts Release + +on: + workflow_dispatch: + inputs: + postgresVersion: + description: 'Optional. Postgres version to publish against, i.e. 15.1.1.78' + required: false + +permissions: + id-token: write + contents: read + +jobs: + prepare: + runs-on: large-linux-x86 + outputs: + matrix_config: ${{ steps.set-matrix.outputs.matrix_config }} + steps: + - uses: DeterminateSystems/nix-installer-action@main + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - name: Generate build matrix + id: set-matrix + run: | + nix run nixpkgs#nushell -- -c 'let versions = (open ansible/vars.yml | get postgres_major) + let matrix = ($versions | each { |ver| + let version = ($ver | str trim) + let dockerfile = $"Dockerfile-($version)" + if ($dockerfile | path exists) { + { + version: $version, + dockerfile: $dockerfile + } + } else { + null + } + } | compact) + + let matrix_config = { + include: $matrix + } + + $"matrix_config=($matrix_config | to json -r)" | save --append $env.GITHUB_OUTPUT' + build: + needs: prepare + strategy: + matrix: ${{ fromJson(needs.prepare.outputs.matrix_config) }} + runs-on: large-linux-x86 + outputs: + build_args: ${{ steps.args.outputs.result }} + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - uses: DeterminateSystems/nix-installer-action@main + - name: Set PostgreSQL version environment variable + run: echo "POSTGRES_MAJOR_VERSION=${{ matrix.version }}" >> $GITHUB_ENV + + - id: args + run: | + nix run nixpkgs#nushell -- -c ' + open ansible/vars.yml + | items { |key value| {name: $key, item: $value} } + | where { |it| ($it.item | describe) == "string" } + | each { |it| $"($it.name)=($it.item)" } + | str join "\n" + | save --append $env.GITHUB_OUTPUT + ' + 
build_release_image: + needs: [prepare, build] + strategy: + matrix: + postgres: ${{ fromJson(needs.prepare.outputs.matrix_config).include }} + arch: [amd64, arm64] + runs-on: ${{ matrix.arch == 'amd64' && 'large-linux-x86' || 'large-linux-arm' }} + timeout-minutes: 180 + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - uses: DeterminateSystems/nix-installer-action@main + - run: docker context create builders + - uses: docker/setup-buildx-action@v3 + with: + endpoint: builders + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - name: Get image tag + id: image + run: | + if [[ "${{ matrix.arch }}" == "arm64" ]]; then + pg_version=$(nix run nixpkgs#nushell -- -c ' + let version = "${{ matrix.postgres.version }}" + let release_key = if ($version | str contains "orioledb") { + $"postgresorioledb-17" + } else { + $"postgres($version)" + } + let base_version = (open ansible/vars.yml | get postgres_release | get $release_key | str trim) + let final_version = if "${{ inputs.postgresVersion }}" != "" { + "${{ inputs.postgresVersion }}" + } else { + $base_version + } + $final_version | str trim + ') + echo "pg_version=tealbase/postgres:$pg_version" >> $GITHUB_OUTPUT + else + pg_version=$(nix run nixpkgs#nushell -- -c ' + let version = "${{ matrix.postgres.version }}" + let release_key = if ($version | str contains "orioledb") { + $"postgresorioledb-17" + } else { + $"postgres($version)" + } + let base_version = (open ansible/vars.yml | get postgres_release | get $release_key | str trim) + let final_version = if "${{ inputs.postgresVersion }}" != "" { + "${{ inputs.postgresVersion }}" + } else { + $base_version + } + $final_version | str trim + ') + echo "pg_version=tealbase/postgres:$pg_version" >> $GITHUB_OUTPUT + fi + - id: build + uses: docker/build-push-action@v5 + with: + push: true + build-args: | + ${{ needs.build.outputs.build_args }} + 
target: production + tags: ${{ steps.image.outputs.pg_version }}_${{ matrix.arch }} + platforms: linux/${{ matrix.arch }} + cache-from: type=gha,scope=${{ github.ref_name }}-latest-${{ matrix.arch }} + cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-latest-${{ matrix.arch }} + file: ${{ matrix.postgres.dockerfile }} + merge_manifest: + needs: [prepare, build, build_release_image] + strategy: + matrix: + include: ${{ fromJson(needs.prepare.outputs.matrix_config).include }} + runs-on: large-linux-x86 + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - uses: DeterminateSystems/nix-installer-action@main + - uses: docker/setup-buildx-action@v3 + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - name: Get image tag + id: get_version + run: | + nix run nixpkgs#nushell -- -c ' + let version = "${{ matrix.version }}" + let release_key = if ($version | str contains "orioledb") { + $"postgresorioledb-17" + } else { + $"postgres($version)" + } + let pg_version = (open ansible/vars.yml | get postgres_release | get $release_key | str trim) + $"pg_version=tealbase/postgres:($pg_version)" | save --append $env.GITHUB_OUTPUT + ' + - name: Output version + id: output_version + run: | + echo "result=${{ steps.get_version.outputs.pg_version }}" >> $GITHUB_OUTPUT + - name: Collect versions + id: collect_versions + run: | + echo "${{ steps.output_version.outputs.result }}" >> results.txt # Append results + - name: Upload Results Artifact + uses: actions/upload-artifact@v4 + with: + name: merge_results-${{ matrix.version }} + path: results.txt + if-no-files-found: warn + - name: Merge multi-arch manifests + run: | + docker buildx imagetools create -t ${{ steps.get_version.outputs.pg_version }} \ + ${{ steps.get_version.outputs.pg_version }}_amd64 \ + ${{ steps.get_version.outputs.pg_version }}_arm64 + combine_results: + needs: [prepare, merge_manifest] + 
runs-on: large-linux-x86 + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - uses: DeterminateSystems/nix-installer-action@main + + - name: Debug Input from Prepare + run: | + echo "Raw matrix_config output:" + echo "${{ needs.prepare.outputs.matrix_config }}" + - name: Get Versions from Matrix Config + id: get_versions + run: | + nix run nixpkgs#nushell -- -c ' + # Parse the matrix configuration directly + let matrix_config = (${{ toJson(needs.prepare.outputs.matrix_config) }} | from json) + + # Get versions directly from include array + let versions = ($matrix_config.include | get version) + + echo "Versions: $versions" + + # Convert the versions to a comma-separated string + let versions_str = ($versions | str join ",") + $"versions=$versions_str" | save --append $env.GITHUB_ENV + ' + - name: Download Results Artifacts + uses: actions/download-artifact@v4 + with: + pattern: merge_results-* + - name: Combine Results + id: combine + run: | + nix run nixpkgs#nushell -- -c ' + # Get all results files and process them in one go + let files = (ls **/results.txt | get name) + echo $"Found files: ($files)" + + let matrix = { + include: ( + $files + | each { |file| open $file } # Open each file + | each { |content| $content | lines } # Split into lines + | flatten # Flatten the nested lists + | where { |line| $line != "" } # Filter empty lines + | each { |line| + # Extract just the version part after the last colon + let version = ($line | parse "tealbase/postgres:{version}" | get version.0) + {version: $version} + } + ) + } + + let json_output = ($matrix | to json -r) # -r for raw output + echo $"Debug output: ($json_output)" + + $"matrix=($json_output)" | save --append $env.GITHUB_OUTPUT + ' + - name: Debug Combined Results + run: | + echo "Combined Results: '${{ steps.combine.outputs.matrix }}'" + outputs: + matrix: ${{ steps.combine.outputs.matrix }} + publish: + permissions: + contents: read + packages: write + 
id-token: write + needs: combine_results + strategy: + matrix: ${{ fromJson(needs.combine_results.outputs.matrix) }} + uses: ./.github/workflows/mirror.yml + with: + version: ${{ inputs.postgresVersion != '' && inputs.postgresVersion || matrix.version }} + secrets: inherit diff --git a/.github/workflows/mirror-postgrest.yml b/aogithub/workflows/mirror-postgrest.yml similarity index 76% rename from .github/workflows/mirror-postgrest.yml rename to aogithub/workflows/mirror-postgrest.yml index c84647c..c0d9838 100644 --- a/.github/workflows/mirror-postgrest.yml +++ b/aogithub/workflows/mirror-postgrest.yml @@ -8,13 +8,17 @@ on: - ".github/workflows/mirror-postgrest.yml" - "common.vars*" +permissions: + contents: read + jobs: version: runs-on: ubuntu-latest outputs: postgrest_release: ${{ steps.args.outputs.result }} steps: - - uses: actions/checkout@v4 + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD - id: args uses: mikefarah/yq@master with: @@ -27,7 +31,7 @@ jobs: contents: read packages: write id-token: write - uses: supabase/cli/.github/workflows/mirror-image.yml@main + uses: tealbase/cli/.github/workflows/mirror-image.yml@main with: image: postgrest/postgrest:v${{ needs.version.outputs.postgrest_release }} secrets: inherit diff --git a/.github/workflows/mirror.yml b/aogithub/workflows/mirror.yml similarity index 97% rename from .github/workflows/mirror.yml rename to aogithub/workflows/mirror.yml index 268a397..2411ab7 100644 --- a/.github/workflows/mirror.yml +++ b/aogithub/workflows/mirror.yml @@ -13,6 +13,9 @@ on: required: true type: string +permissions: + contents: read + jobs: mirror: runs-on: ubuntu-latest diff --git a/aogithub/workflows/nix-build.yml b/aogithub/workflows/nix-build.yml new file mode 100644 index 0000000..3e092c9 --- /dev/null +++ b/aogithub/workflows/nix-build.yml @@ -0,0 +1,124 @@ +name: Nix CI + +on: + push: + branches: + - develop + - release/* + pull_request: + workflow_dispatch: + +permissions: + 
id-token: write + # required by testinfra-ami-build dependent workflows + contents: write + packages: write + +jobs: + build-run-image: + strategy: + fail-fast: false + matrix: + include: + - runner: large-linux-x86 + arch: amd64 + - runner: large-linux-arm + arch: arm64 + - runner: macos-latest-xlarge + arch: arm64 + runs-on: ${{ matrix.runner }} + timeout-minutes: 180 + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - name: aws-creds + uses: aws-actions/configure-aws-credentials@v4 + if: ${{ github.secret_source == 'Actions' }} + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "us-east-1" + output-credentials: true + role-duration-seconds: 7200 + - name: Setup AWS credentials for Nix + if: ${{ github.secret_source == 'Actions' }} + run: | + sudo -H aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID + sudo -H aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY + sudo -H aws configure set aws_session_token $AWS_SESSION_TOKEN + - name: write secret key + # use python so we don't interpolate the secret into the workflow logs, in case of bugs + run: | + sudo mkdir -p /etc/nix + sudo -E python -c "import os; file = open('/etc/nix/nix-secret-key', 'w'); file.write(os.environ['NIX_SIGN_SECRET_KEY']); file.close()" + env: + NIX_SIGN_SECRET_KEY: ${{ secrets.NIX_SIGN_SECRET_KEY }} + - name: Setup cache script + if: ${{ github.secret_source == 'Actions' }} + run: | + cat << 'EOF' | sudo tee /etc/nix/upload-to-cache.sh > /dev/null + #!/usr/bin/env bash + set -euf + export IFS=' ' + /nix/var/nix/profiles/default/bin/nix copy --to 's3://nix-postgres-artifacts?secret-key=/etc/nix/nix-secret-key' $OUT_PATHS + EOF + sudo chmod +x /etc/nix/upload-to-cache.sh + - name: Install nix + uses: cachix/install-nix-action@v27 + if: ${{ github.secret_source == 'Actions' }} + with: + install_url: https://releases.nixos.org/nix/nix-2.29.1/install + extra_nix_config: | + substituters = https://cache.nixos.org 
https://nix-postgres-artifacts.s3.amazonaws.com + trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= + post-build-hook = /etc/nix/upload-to-cache.sh + - name: Install nix + uses: cachix/install-nix-action@v27 + if: ${{ github.secret_source == 'None' }} + with: + install_url: https://releases.nixos.org/nix/nix-2.29.1/install + extra_nix_config: | + substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com + trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= + - name: Aggressive disk cleanup for DuckDB build + if: matrix.runner == 'macos-latest-xlarge' + run: | + echo "=== BEFORE CLEANUP ===" + df -h + # Remove major space consumers + sudo rm -rf /usr/share/dotnet || true + sudo rm -rf /usr/local/lib/android || true + sudo rm -rf /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform || true + sudo rm -rf /Applications/Xcode.app/Contents/Developer/Platforms/watchOS.platform || true + sudo rm -rf /Applications/Xcode.app/Contents/Developer/Platforms/tvOS.platform || true + # Clean everything possible + sudo rm -rf /opt/ghc || true + sudo rm -rf /usr/local/share/boost || true + sudo rm -rf /opt/homebrew || true + sudo xcrun simctl delete all 2>/dev/null || true + # Aggressive cache cleanup + sudo rm -rf /System/Library/Caches/* 2>/dev/null || true + sudo rm -rf /Library/Caches/* 2>/dev/null || true + sudo rm -rf ~/Library/Caches/* 2>/dev/null || true + sudo rm -rf /private/var/log/* 2>/dev/null || true + sudo rm -rf /tmp/* 2>/dev/null || true + echo "=== AFTER CLEANUP ===" + df -h + - name: Build psql bundle + run: > + nix run "github:Mic92/nix-fast-build?rev=b1dae483ab7d4139a6297e02b6de9e5d30e43d48" + -- --skip-cached --no-nom ${{ matrix.runner == 'macos-latest-xlarge' && '--max-jobs 1' || '' }} + --flake 
".#checks.$(nix eval --raw --impure --expr 'builtins.currentSystem')" + env: + AWS_ACCESS_KEY_ID: ${{ env.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ env.AWS_SECRET_ACCESS_KEY }} + AWS_SESSION_TOKEN: ${{ env.AWS_SESSION_TOKEN }} + + run-testinfra: + needs: build-run-image + if: ${{ success() }} + uses: ./.github/workflows/testinfra-ami-build.yml + + run-tests: + needs: build-run-image + if: ${{ success() }} + uses: ./.github/workflows/test.yml diff --git a/aogithub/workflows/publish-migrations-prod.yml b/aogithub/workflows/publish-migrations-prod.yml new file mode 100644 index 0000000..cc13a24 --- /dev/null +++ b/aogithub/workflows/publish-migrations-prod.yml @@ -0,0 +1,41 @@ +name: Release Migrations - Prod + +on: + workflow_dispatch: + +jobs: + build: + runs-on: large-linux-arm + timeout-minutes: 15 + permissions: + id-token: write + contents: read + + steps: + - name: Guard + run: | + if [ $GITHUB_REF != 'refs/heads/develop' ]; then + echo "This action can only be run on the develop branch" + exit 1 + fi + env: + GITHUB_REF: ${{ github.ref }} + + - name: Checkout repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - name: Merging migration files + run: cat $(ls -1) > ../migration-output.sql + working-directory: ${{ github.workspace }}/migrations/db/migrations + + - name: configure aws credentials - prod + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ secrets.PROD_AWS_ROLE }} + aws-region: "ap-southeast-1" + + - name: Deploy to S3 prod + shell: bash + run: aws s3 sync migrations/db s3://$AWS_S3_BUCKET/migrations/db --delete + env: + AWS_S3_BUCKET: ${{ secrets.PG_INIT_SCRIPT_S3_BUCKET_PROD }} diff --git a/.github/workflows/publish-migrations.yml b/aogithub/workflows/publish-migrations-staging.yml similarity index 60% rename from .github/workflows/publish-migrations.yml rename to aogithub/workflows/publish-migrations-staging.yml index 1abc9f2..587bbce 100644 --- a/.github/workflows/publish-migrations.yml +++ 
b/aogithub/workflows/publish-migrations-staging.yml @@ -1,13 +1,14 @@ -name: Release Migrations +name: Release Migrations - Staging on: push: branches: - develop + workflow_dispatch: jobs: build: - runs-on: [self-hosted, linux] + runs-on: large-linux-arm timeout-minutes: 15 permissions: id-token: write @@ -15,8 +16,7 @@ jobs: steps: - name: Checkout Repo - uses: actions/checkout@v2 - + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD - name: Merging migration files run: cat $(ls -1) > ../migration-output.sql working-directory: ${{ github.workspace }}/migrations/db/migrations @@ -32,15 +32,3 @@ jobs: run: aws s3 sync migrations/db s3://$AWS_S3_BUCKET/migrations/db --delete env: AWS_S3_BUCKET: ${{ secrets.PG_INIT_SCRIPT_S3_BUCKET_STAGING }} - - - name: configure aws credentials - prod - uses: aws-actions/configure-aws-credentials@v1 - with: - role-to-assume: ${{ secrets.PROD_AWS_ROLE }} - aws-region: "ap-southeast-1" - - - name: Deploy to S3 prod - shell: bash - run: aws s3 sync migrations/db s3://$AWS_S3_BUCKET/migrations/db --delete - env: - AWS_S3_BUCKET: ${{ secrets.PG_INIT_SCRIPT_S3_BUCKET_PROD }} diff --git a/aogithub/workflows/publish-nix-pgupgrade-bin-flake-version.yml b/aogithub/workflows/publish-nix-pgupgrade-bin-flake-version.yml new file mode 100644 index 0000000..74e9679 --- /dev/null +++ b/aogithub/workflows/publish-nix-pgupgrade-bin-flake-version.yml @@ -0,0 +1,130 @@ +name: Publish nix pg_upgrade_bin flake version + +on: + workflow_dispatch: + inputs: + postgresVersion: + description: 'Optional. Postgres version to publish against, i.e. 
15.1.1.78' + required: false + +permissions: + id-token: write + +jobs: + prepare: + runs-on: large-linux-x86 + outputs: + postgres_versions: ${{ steps.set-versions.outputs.postgres_versions }} + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - uses: DeterminateSystems/nix-installer-action@main + + - name: Set PostgreSQL versions + id: set-versions + run: | + VERSIONS=$(nix run nixpkgs#yq -- '.postgres_major[]' ansible/vars.yml | nix run nixpkgs#jq -- -R -s -c 'split("\n")[:-1]') + echo "postgres_versions=$VERSIONS" >> $GITHUB_OUTPUT + + publish-staging: + needs: prepare + runs-on: large-linux-x86 + strategy: + matrix: + postgres_version: ${{ fromJson(needs.prepare.outputs.postgres_versions) }} + + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - uses: DeterminateSystems/nix-installer-action@main + + - name: Grab release version + id: process_release_version + run: | + VERSION=$(nix run nixpkgs#yq -- '.postgres_release["postgres'${{ matrix.postgres_version }}'"]' ansible/vars.yml) + VERSION=$(echo "$VERSION" | tr -d '"') # Remove any surrounding quotes + if [[ "${{ inputs.postgresVersion }}" != "" ]]; then + VERSION="${{ inputs.postgresVersion }}" + fi + echo "version=$VERSION" >> "$GITHUB_OUTPUT" + echo "major_version=$(echo $VERSION | cut -d'.' 
-f1)" >> "$GITHUB_OUTPUT" + + - name: Create a tarball containing the latest nix flake version + working-directory: /tmp/ + run: | + mkdir -p "${{ steps.process_release_version.outputs.major_version }}" + echo "$GITHUB_SHA" > "${{ steps.process_release_version.outputs.major_version }}/nix_flake_version" + tar -czvf pg_upgrade_bin.tar.gz "${{ steps.process_release_version.outputs.major_version }}" + + - name: configure aws credentials - staging + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload pg_upgrade scripts to s3 staging + run: | + aws s3 cp /tmp/pg_upgrade_bin.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz + aws s3 cp /tmp/pg_upgrade_bin.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/24.04.tar.gz + aws s3 cp /tmp/pg_upgrade_bin.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/upgrade_bundle.tar.gz + + - name: Slack Notification on Failure + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: 'gha-failures-notifier' + SLACK_COLOR: 'danger' + SLACK_MESSAGE: 'Publishing pg_upgrade binaries flake version failed' + SLACK_FOOTER: '' + + publish-prod: + runs-on: large-linux-x86 + if: github.ref_name == 'develop' || contains( github.ref, 'release' ) + needs: prepare + strategy: + matrix: + postgres_version: ${{ fromJson(needs.prepare.outputs.postgres_versions) }} + + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - name: Grab release version + id: process_release_version + run: | + VERSION=$(nix run nixpkgs#yq -- '.postgres_release["postgres'${{ 
matrix.postgres_version }}'"]' ansible/vars.yml) + VERSION=$(echo $VERSION | tr -d '"') # Remove any surrounding quotes + echo "version=$VERSION" >> "$GITHUB_OUTPUT" + echo "major_version=$(echo $VERSION | cut -d'.' -f1)" >> "$GITHUB_OUTPUT" + + - name: Create a tarball containing the latest nix flake version + working-directory: /tmp/ + run: | + mkdir -p "${{ steps.process_release_version.outputs.major_version }}" + echo "$GITHUB_SHA" > "${{ steps.process_release_version.outputs.major_version }}/nix_flake_version" + tar -czvf pg_upgrade_bin.tar.gz "${{ steps.process_release_version.outputs.major_version }}" + + - name: configure aws credentials - prod + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ secrets.PROD_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload pg_upgrade scripts to s3 prod + run: | + aws s3 cp /tmp/pg_upgrade_bin.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz + aws s3 cp /tmp/pg_upgrade_bin.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/24.04.tar.gz + aws s3 cp /tmp/pg_upgrade_bin.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/upgrade_bundle.tar.gz + + - name: Slack Notification on Failure + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: 'gha-failures-notifier' + SLACK_COLOR: 'danger' + SLACK_MESSAGE: 'Publishing pg_upgrade binaries flake version failed' + SLACK_FOOTER: '' diff --git a/.github/workflows/publish-nix-pgupgrade-scripts.yml b/aogithub/workflows/publish-nix-pgupgrade-scripts.yml similarity index 57% rename from .github/workflows/publish-nix-pgupgrade-scripts.yml rename to 
aogithub/workflows/publish-nix-pgupgrade-scripts.yml index eb5f7a7..e9792c7 100644 --- a/.github/workflows/publish-nix-pgupgrade-scripts.yml +++ b/aogithub/workflows/publish-nix-pgupgrade-scripts.yml @@ -7,7 +7,7 @@ on: - release/* paths: - '.github/workflows/publish-nix-pgupgrade-scripts.yml' - - 'common-nix.vars.pkr.hcl' + - 'ansible/vars.yml' workflow_dispatch: inputs: postgresVersion: @@ -18,19 +18,42 @@ permissions: id-token: write jobs: + prepare: + runs-on: large-linux-x86 + outputs: + postgres_versions: ${{ steps.set-versions.outputs.postgres_versions }} + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - uses: DeterminateSystems/nix-installer-action@main + + - name: Set PostgreSQL versions + id: set-versions + run: | + VERSIONS=$(nix run nixpkgs#yq -- '.postgres_major[]' ansible/vars.yml | nix run nixpkgs#jq -- -R -s -c 'split("\n")[:-1]') + echo "postgres_versions=$VERSIONS" >> $GITHUB_OUTPUT + publish-staging: - runs-on: ubuntu-latest + needs: prepare + runs-on: large-linux-x86 + strategy: + matrix: + postgres_version: ${{ fromJson(needs.prepare.outputs.postgres_versions) }} steps: - name: Checkout Repo - uses: actions/checkout@v3 + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - uses: DeterminateSystems/nix-installer-action@main - name: Grab release version id: process_release_version run: | - VERSION=$(grep 'postgres-version' common-nix.vars.pkr.hcl | sed -e 's/postgres-version = "\(.*\)"/\1/g') + VERSION=$(nix run nixpkgs#yq -- '.postgres_release["postgres'${{ matrix.postgres_version }}'"]' ansible/vars.yml) + VERSION=$(echo "$VERSION" | tr -d '"') # Remove any surrounding quotes if [[ "${{ inputs.postgresVersion }}" != "" ]]; then - VERSION=${{ inputs.postgresVersion }} + VERSION="${{ inputs.postgresVersion }}" fi echo "version=$VERSION" >> "$GITHUB_OUTPUT" @@ -48,7 +71,7 @@ jobs: - name: Upload pg_upgrade scripts to s3 staging run: | - aws s3 cp /tmp/pg_upgrade_scripts.tar.gz 
s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/pg_upgrade_scripts.tar.gz + aws s3 cp /tmp/pg_upgrade_scripts.tar.gz "s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/pg_upgrade_scripts.tar.gz" - name: Slack Notification on Failure if: ${{ failure() }} @@ -59,21 +82,29 @@ jobs: SLACK_COLOR: 'danger' SLACK_MESSAGE: 'Publishing pg_upgrade scripts failed' SLACK_FOOTER: '' - publish-prod: - runs-on: ubuntu-latest + needs: prepare + runs-on: large-linux-x86 if: github.ref_name == 'develop' || contains( github.ref, 'release' ) + strategy: + matrix: + postgres_version: ${{ fromJson(needs.prepare.outputs.postgres_versions) }} + + steps: - name: Checkout Repo - uses: actions/checkout@v3 + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - uses: DeterminateSystems/nix-installer-action@main + - name: Grab release version id: process_release_version run: | - VERSION=$(grep 'postgres-version' common-nix.vars.pkr.hcl | sed -e 's/postgres-version = "\(.*\)"/\1/g') + VERSION=$(nix run nixpkgs#yq -- '.postgres_release["postgres'${{ matrix.postgres_version }}'"]' ansible/vars.yml) + VERSION=$(echo "$VERSION" | tr -d '"') # Remove any surrounding quotes if [[ "${{ inputs.postgresVersion }}" != "" ]]; then - VERSION=${{ inputs.postgresVersion }} + VERSION="${{ inputs.postgresVersion }}" fi echo "version=$VERSION" >> "$GITHUB_OUTPUT" @@ -91,7 +122,7 @@ jobs: - name: Upload pg_upgrade scripts to s3 prod run: | - aws s3 cp /tmp/pg_upgrade_scripts.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/pg_upgrade_scripts.tar.gz + aws s3 cp /tmp/pg_upgrade_scripts.tar.gz "s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/tealbase-postgres-${{ steps.process_release_version.outputs.version }}/pg_upgrade_scripts.tar.gz" - name: Slack 
Notification on Failure if: ${{ failure() }} diff --git a/aogithub/workflows/qemu-image-build.yml b/aogithub/workflows/qemu-image-build.yml new file mode 100644 index 0000000..931543f --- /dev/null +++ b/aogithub/workflows/qemu-image-build.yml @@ -0,0 +1,155 @@ +name: Build QEMU image + +on: + push: + branches: + - develop + - release/* + paths: + - '.github/workflows/qemu-image-build.yml' + - 'qemu-arm64-nix.pkr.hcl' + - 'common-nix.vars.pkr.hcl' + - 'ansible/vars.yml' + - 'scripts/*' + workflow_dispatch: + +permissions: + contents: read + id-token: write + +jobs: + prepare: + runs-on: ubuntu-latest + outputs: + postgres_versions: ${{ steps.set-versions.outputs.postgres_versions }} + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - uses: DeterminateSystems/nix-installer-action@main + + - name: Set PostgreSQL versions - only builds pg17 atm + id: set-versions + run: | + VERSIONS=$(nix run nixpkgs#yq -- '.postgres_major[1]' ansible/vars.yml | nix run nixpkgs#jq -- -R -s -c 'split("\n")[:-1]') + echo "postgres_versions=$VERSIONS" >> $GITHUB_OUTPUT + + build: + needs: prepare + strategy: + matrix: + postgres_version: ${{ fromJson(needs.prepare.outputs.postgres_versions) }} + runs-on: arm-native-runner + timeout-minutes: 150 + permissions: + contents: write + packages: write + id-token: write + + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - uses: DeterminateSystems/nix-installer-action@main + + - name: Run checks if triggered manually + if: ${{ github.event_name == 'workflow_dispatch' }} + run: | + SUFFIX=$(sudo nix run nixpkgs#yq -- ".postgres_release[\"postgres${{ matrix.postgres_version }}\"]" ansible/vars.yml | sed -E 's/[0-9\.]+(.*)$/\1/') + if [[ -z $SUFFIX ]] ; then + echo "Version must include non-numeric characters if built manually." 
+ exit 1 + fi + + - name: enable KVM support + run: | + sudo chown runner /dev/kvm + sudo chmod 666 /dev/kvm + + - name: Set PostgreSQL version environment variable + run: echo "POSTGRES_MAJOR_VERSION=${{ matrix.postgres_version }}" >> $GITHUB_ENV + + - name: Generate common-nix.vars.pkr.hcl + run: | + curl -L https://github.com/mikefarah/yq/releases/download/v4.45.1/yq_linux_arm64 -o yq && chmod +x yq + PG_VERSION=$(./yq '.postgres_release["postgres'${{ matrix.postgres_version }}'"]' ansible/vars.yml) + PG_VERSION=$(echo "$PG_VERSION" | tr -d '"') # Remove any surrounding quotes + echo 'postgres-version = "'$PG_VERSION'"' > common-nix.vars.pkr.hcl + echo 'postgres-major-version = "'$POSTGRES_MAJOR_VERSION'"' >> common-nix.vars.pkr.hcl + # Ensure there's a newline at the end of the file + echo "" >> common-nix.vars.pkr.hcl + + # TODO (darora): not quite sure why I'm having to uninstall and re-install these deps, but the build fails w/o this + - name: Install dependencies + run: | + sudo apt-get update + sudo apt-get remove -y qemu-efi-aarch64 cloud-image-utils qemu-system-arm qemu-utils + sudo apt-get install -y qemu-efi-aarch64 cloud-image-utils qemu-system-arm qemu-utils + + - name: Build QEMU artifact + run: | + make init + GIT_SHA=${{github.sha}} + export PACKER_LOG=1 + packer build -var "git_sha=${GIT_SHA}" -var-file="common-nix.vars.pkr.hcl" qemu-arm64-nix.pkr.hcl + + - name: Grab release version + id: process_release_version + run: | + VERSION=$(cat common-nix.vars.pkr.hcl | sed -e 's/postgres-version = "\(.*\)"/\1/g') + echo "version=$VERSION" >> $GITHUB_OUTPUT + + - name: configure aws credentials - staging + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.CONTROL_PLANE_DEV_ROLE }} + aws-region: "us-east-1" + + - name: Login to Amazon ECR + id: login-ecr-private-dev + uses: aws-actions/amazon-ecr-login@v2 + + - name: Build image + env: + IMAGE_TAG: ${{ steps.process_release_version.outputs.version }} + run: | + docker 
build -f Dockerfile-kubernetes -t "postgres:$IMAGE_TAG" . + + - name: Push docker image to Amazon ECR + env: + REGISTRY: 812073016711.dkr.ecr.us-east-1.amazonaws.com + REPOSITORY: postgres-vm-image + IMAGE_TAG: ${{ steps.process_release_version.outputs.version }} + run: | + docker tag "postgres:$IMAGE_TAG" "$REGISTRY/$REPOSITORY:$IMAGE_TAG" + docker push "$REGISTRY/$REPOSITORY:$IMAGE_TAG" + + # TODO (darora): temporarily also push to prod account from here - add a guard to only publish proper tagged releases to prod? + - name: configure aws credentials - prod + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.CONTROL_PLANE_PROD_ROLE }} + aws-region: "us-east-1" + + - name: Login to Amazon ECR + id: login-ecr-private-prod + uses: aws-actions/amazon-ecr-login@v2 + + - name: Push docker image to Amazon ECR + env: + REGISTRY: 156470330064.dkr.ecr.us-east-1.amazonaws.com + REPOSITORY: postgres-vm-image + IMAGE_TAG: ${{ steps.process_release_version.outputs.version }} + run: | + docker tag "postgres:$IMAGE_TAG" "$REGISTRY/$REPOSITORY:$IMAGE_TAG" + docker push "$REGISTRY/$REPOSITORY:$IMAGE_TAG" + + - name: Cleanup resources after build + if: ${{ always() }} + run: | + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids + + - name: Cleanup resources on build cancellation + if: ${{ cancelled() }} + run: | + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids diff --git a/aogithub/workflows/test.yml b/aogithub/workflows/test.yml new file mode 100644 index 0000000..293c366 --- /dev/null +++ b/aogithub/workflows/test.yml @@ -0,0 +1,79 @@ +name: Test Database +on: + workflow_dispatch: + workflow_call: + +permissions: + contents: read + 
id-token: write + +jobs: + prepare: + runs-on: large-linux-x86 + outputs: + postgres_versions: ${{ steps.set-versions.outputs.postgres_versions }} + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - uses: DeterminateSystems/nix-installer-action@main + with: + extra-conf: | + substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com + trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= + - name: Set PostgreSQL versions + id: set-versions + run: | + VERSIONS=$(nix run nixpkgs#yq -- '.postgres_major[]' ansible/vars.yml | nix run nixpkgs#jq -- -R -s -c "split(\"\n\")[:-1]") + echo "postgres_versions=$VERSIONS" >> $GITHUB_OUTPUT + build: + needs: prepare + strategy: + matrix: + postgres_version: ${{ fromJson(needs.prepare.outputs.postgres_versions) }} + include: + - runner: large-linux-x86 + arch: amd64 + - runner: large-linux-arm + arch: arm64 + runs-on: ${{ matrix.runner }} + timeout-minutes: 180 + env: + POSTGRES_PORT: 5478 + POSTGRES_PASSWORD: password + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + - uses: DeterminateSystems/nix-installer-action@main + with: + extra-conf: | + substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com + trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= + - name: Set PostgreSQL version environment variable + run: echo "POSTGRES_MAJOR_VERSION=${{ matrix.postgres_version }}" >> $GITHUB_ENV + - name: Strip quotes from pg major and set env var + run: | + stripped_version=$(echo "${{ matrix.postgres_version }}" | sed 's/^"\(.*\)"$/\1/') + echo "PGMAJOR=$stripped_version" >> $GITHUB_ENV + - name: Generate common-nix.vars.pkr.hcl + run: | + PG_VERSION=$(nix run nixpkgs#yq -- 
'.postgres_release["postgres'${{ matrix.postgres_version }}'"]' ansible/vars.yml) + PG_VERSION=$(echo $PG_VERSION | tr -d '"') # Remove any surrounding quotes + echo 'postgres-version = "'$PG_VERSION'"' > common-nix.vars.pkr.hcl + echo "" >> common-nix.vars.pkr.hcl + - id: settings + run: sed -r 's/(\s|\")+//g' common-nix.vars.pkr.hcl >> $GITHUB_OUTPUT + - name: Generate args + id: args + run: | + ARGS=$(nix run nixpkgs#yq -- 'to_entries | map(select(.value|type == "!!str")) | map(.key + "=" + .value) | join("\n")' ansible/vars.yml) + echo "result<<EOF" >> $GITHUB_OUTPUT + echo "$ARGS" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + - name: verify schema.sql is committed + run: | + nix run github:tealbase/postgres/${{ github.sha }}#dbmate-tool -- --version ${{ env.PGMAJOR }} --flake-url github:tealbase/postgres/${{ github.sha }} + if ! git diff --exit-code --quiet migrations/schema-${{ env.PGMAJOR }}.sql; then + echo "Detected changes in schema.sql:" + git diff migrations/schema-${{ env.PGMAJOR }}.sql + exit 1 + fi diff --git a/.github/workflows/testinfra-nix.yml b/.github/workflows/testinfra-ami-build.yml similarity index 55% rename from .github/workflows/testinfra-nix.yml rename to .github/workflows/testinfra-ami-build.yml index 3835a9a..8d12223 100644 --- a/.github/workflows/testinfra-nix.yml +++ b/.github/workflows/testinfra-ami-build.yml @@ -1,19 +1,41 @@ name: Testinfra Integration Tests Nix on: - pull_request: workflow_dispatch: + workflow_call: + +permissions: + contents: read + id-token: write jobs: + prepare: + runs-on: large-linux-x86 + outputs: + postgres_versions: ${{ steps.set-versions.outputs.postgres_versions }} + steps: + - name: Checkout Repo + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD + + - uses: DeterminateSystems/nix-installer-action@main + + - name: Set PostgreSQL versions + id: set-versions + run: | + VERSIONS=$(nix run nixpkgs#yq -- '.postgres_major[]' ansible/vars.yml | nix run nixpkgs#jq -- -R -s -c 'split("\n")[:-1]') + 
echo "postgres_versions=$VERSIONS" >> $GITHUB_OUTPUT + test-ami-nix: + needs: prepare strategy: fail-fast: false matrix: + postgres_version: ${{ fromJson(needs.prepare.outputs.postgres_versions) }} include: - runner: arm-runner arch: arm64 - ubuntu_release: focal - ubuntu_version: 20.04 + ubuntu_release: noble + ubuntu_version: 24.04 mcpu: neoverse-n1 runs-on: ${{ matrix.runner }} timeout-minutes: 150 @@ -24,7 +46,7 @@ jobs: steps: - name: Checkout Repo - uses: actions/checkout@v4 + uses: tealbase/postgres/.github/actions/shared-checkout@HEAD - id: args uses: mikefarah/yq@master @@ -40,45 +62,56 @@ jobs: - name: Generate random string id: random run: echo "random_string=$(openssl rand -hex 8)" >> $GITHUB_OUTPUT - + + - name: Set PostgreSQL version environment variable + run: echo "POSTGRES_MAJOR_VERSION=${{ matrix.postgres_version }}" >> $GITHUB_ENV + + - name: Generate common-nix.vars.pkr.hcl + run: | + PG_VERSION=$(sudo nix run nixpkgs#yq -- '.postgres_release["postgres'${{ matrix.postgres_version }}'"]' ansible/vars.yml) + PG_VERSION=$(echo "$PG_VERSION" | tr -d '"') # Remove any surrounding quotes + echo 'postgres-version = "'$PG_VERSION'"' > common-nix.vars.pkr.hcl + # Ensure there's a newline at the end of the file + echo "" >> common-nix.vars.pkr.hcl + - name: Build AMI stage 1 run: | packer init amazon-arm64-nix.pkr.hcl GIT_SHA=${{github.sha}} - packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "ansible_arguments=" -var "postgres-version=${{ steps.random.outputs.random_string }}" -var "region=ap-southeast-1" -var 'ami_regions=["ap-southeast-1"]' -var "force-deregister=true" amazon-arm64-nix.pkr.hcl - + packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "ansible_arguments=" -var "postgres-version=${{ 
steps.random.outputs.random_string }}" -var "region=ap-southeast-1" -var 'ami_regions=["ap-southeast-1"]' -var "force-deregister=true" -var "ansible_arguments=-e postgresql_major=${POSTGRES_MAJOR_VERSION}" amazon-arm64-nix.pkr.hcl + - name: Build AMI stage 2 run: | packer init stage2-nix-psql.pkr.hcl GIT_SHA=${{github.sha}} - packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "postgres-version=${{ steps.random.outputs.random_string }}" -var "region=ap-southeast-1" -var 'ami_regions=["ap-southeast-1"]' -var "force-deregister=true" -var "git_sha=${GITHUB_SHA}" stage2-nix-psql.pkr.hcl + packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var "postgres_major_version=${POSTGRES_MAJOR_VERSION}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "postgres-version=${{ steps.random.outputs.random_string }}" -var "region=ap-southeast-1" -var 'ami_regions=["ap-southeast-1"]' -var "force-deregister=true" -var "git_sha=${GITHUB_SHA}" stage2-nix-psql.pkr.hcl - name: Run tests timeout-minutes: 10 env: - AMI_NAME: "supabase-postgres-${{ steps.random.outputs.random_string }}" + AMI_NAME: "tealbase-postgres-${{ steps.random.outputs.random_string }}" run: | # TODO: use poetry for pkg mgmt pip3 install boto3 boto3-stubs[essential] docker ec2instanceconnectcli pytest pytest-testinfra[paramiko,docker] requests - pytest -vv -s testinfra/test_ami_nix.py - + pytest -vv -s testinfra/test_ami_nix.py + - name: Cleanup resources on build cancellation if: ${{ cancelled() }} run: | - aws ec2 --region ap-southeast-1 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -n 1 -I {} aws ec2 terminate-instances --region ap-southeast-1 --instance-ids {} - - - name: Cleanup resources on build cancellation + aws ec2 
--region ap-southeast-1 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --region ap-southeast-1 --instance-ids + + - name: Cleanup resources after build if: ${{ always() }} run: | - aws ec2 --region ap-southeast-1 describe-instances --filters "Name=tag:testinfra-run-id,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -n 1 -I {} aws ec2 terminate-instances --region ap-southeast-1 --instance-ids {} || true + aws ec2 --region ap-southeast-1 describe-instances --filters "Name=tag:testinfra-run-id,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --region ap-southeast-1 --instance-ids || true - name: Cleanup AMIs if: always() run: | # Define AMI name patterns - STAGE1_AMI_NAME="supabase-postgres-ci-ami-test-stage-1" + STAGE1_AMI_NAME="tealbase-postgres-ci-ami-test-stage-1" STAGE2_AMI_NAME="${{ steps.random.outputs.random_string }}" - + # Function to deregister AMIs by name pattern deregister_ami_by_name() { local ami_name_pattern=$1 @@ -88,7 +121,7 @@ jobs: aws ec2 deregister-image --region ap-southeast-1 --image-id $ami_id done } - + # Deregister AMIs deregister_ami_by_name "$STAGE1_AMI_NAME" - deregister_ami_by_name "$STAGE2_AMI_NAME" \ No newline at end of file + deregister_ami_by_name "$STAGE2_AMI_NAME" diff --git a/common-nix.vars.pkr.hcl b/common-nix.vars.pkr.hcl deleted file mode 100644 index 587e717..0000000 --- a/common-nix.vars.pkr.hcl +++ /dev/null @@ -1 +0,0 @@ -postgres-version = "15.6.1.146" diff --git a/common.vars.pkr.hcl b/common.vars.pkr.hcl deleted file mode 100644 index 871647b..0000000 --- a/common.vars.pkr.hcl +++ /dev/null @@ -1 +0,0 @@ -postgres-version = "15.1.1.95" diff --git a/digitalOcean.json b/digitalOcean.json deleted file mode 100644 index 378973d..0000000 --- a/digitalOcean.json +++ 
/dev/null @@ -1,45 +0,0 @@ -{ - "variables": { - "do_token": "", - "image_name": "ubuntu-20-04-x64", - "region": "sgp1", - "snapshot_regions": "sgp1", - "snapshot_name": "tealbase-postgres-13.3.0", - "ansible_arguments": "--skip-tags,update-only,--skip-tags,aws-only,-e,tealbase_internal='false'" - }, - "builders": [ - { - "type": "digitalocean", - "api_token": "{{user `do_token`}}", - "image": "{{user `image_name`}}", - "region": "{{user `region`}}", - "snapshot_regions": "{{user `snapshot_regions`}}", - "size": "s-1vcpu-1gb", - "ssh_username": "root", - "snapshot_name": "{{user `snapshot_name`}}" - } - ], - "provisioners": [ - { - "type": "shell", - "inline": [ - "while [ ! -f /var/lib/cloud/instance/boot-finished ]; do echo 'Waiting for cloud-init...'; sleep 1; done" - ] - }, - { - "type": "ansible", - "user": "root", - "playbook_file": "ansible/playbook.yml", - "extra_arguments": "{{user `ansible_arguments`}}" - }, - { - "type": "shell", - "scripts": [ - "scripts/01-postgres_check.sh", - "scripts/90-cleanup.sh", - "scripts/91-log_cleanup.sh", - "scripts/99-img_check.sh" - ] - } - ] -} diff --git a/docker/Dockerfile b/docker/Dockerfile index 116377b..53a9602 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,8 +1,8 @@ -ARG ubuntu_release=focal +ARG ubuntu_release=noble FROM ubuntu:${ubuntu_release} as base ARG ubuntu_release=flocal -ARG ubuntu_release_no=20.04 +ARG ubuntu_release_no=24.04 ARG postgresql_major=15 ARG postgresql_release=${postgresql_major}.1 diff --git a/docker/all-in-one/Dockerfile b/docker/all-in-one/Dockerfile deleted file mode 100644 index d93e46f..0000000 --- a/docker/all-in-one/Dockerfile +++ /dev/null @@ -1,311 +0,0 @@ -ARG postgres_version=15.1.1.49 - -ARG pgbouncer_release=1.18.0 -ARG postgrest_release=10.1.2 -ARG gotrue_release=2.130.0 -ARG adminapi_release=0.64.1 -ARG adminmgr_release=0.22.1 -ARG vector_release=0.22.3 -ARG postgres_exporter_release=0.15.0 -ARG envoy_release=1.28.0 - -# Update `gateway-28` in the URL below if 
upgrading above v2.8.x. -ARG kong_release=2.8.1 - -FROM tealbase/postgres:${postgres_version} as base -ARG TARGETARCH -ARG postgresql_major - -FROM base as builder -# Install build dependencies -RUN apt-get update && apt-get install -y \ - postgresql-server-dev-${postgresql_major} \ - build-essential \ - checkinstall \ - pkg-config \ - cmake \ - && rm -rf /var/lib/apt/lists/* - -#################### -# Install pgbouncer -#################### -FROM builder as pgbouncer-source -# Download and extract -ARG pgbouncer_release -ADD "https://www.pgbouncer.org/downloads/files/${pgbouncer_release}/pgbouncer-${pgbouncer_release}.tar.gz" /tmp/pgbouncer.tar.gz -RUN tar -xvf /tmp/pgbouncer.tar.gz -C /tmp && \ - rm -rf /tmp/pgbouncer.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y \ - libevent-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/pgbouncer-${pgbouncer_release} -RUN ./configure --prefix=/usr/local -RUN make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --requires=libevent-2.1-7 --nodoc - -FROM base as pgbouncer -# Download pre-built packages -RUN apt-get update && apt-get install -y --no-install-recommends --download-only \ - pgbouncer \ - && rm -rf /var/lib/apt/lists/* -RUN mv /var/cache/apt/archives/*.deb /tmp/ - -#################### -# Install PostgREST -#################### -FROM postgrest/postgrest:v${postgrest_release} as pgrst - -#################### -# Install GoTrue -#################### -FROM tealbase/gotrue:v${gotrue_release} as gotrue - -#################### -# Install Envoy -#################### -FROM envoyproxy/envoy:v${envoy_release} as envoy - -#################### -# Install Kong -#################### -FROM base as kong -ARG kong_release -ADD "https://packages.konghq.com/public/gateway-28/deb/ubuntu/pool/focal/main/k/ko/kong_${kong_release}/kong_${kong_release}_${TARGETARCH}.deb" \ - /tmp/kong.deb - -#################### -# Install 
admin api -#################### -FROM base as adminapi -ARG adminapi_release -ADD "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/tealbase-admin-api/v${adminapi_release}/tealbase-admin-api_${adminapi_release}_linux_${TARGETARCH}.tar.gz" /tmp/tealbase-admin-api.tar.gz -RUN tar -xvf /tmp/tealbase-admin-api.tar.gz -C /tmp && \ - rm -rf /tmp/tealbase-admin-api.tar.gz - -#################### -# Install admin mgr -#################### -FROM base as adminmgr -ARG adminmgr_release -ADD "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/admin-mgr/v${adminmgr_release}/admin-mgr_${adminmgr_release}_linux_${TARGETARCH}.tar.gz" /tmp/admin-mgr.tar.gz -RUN tar -xvf /tmp/admin-mgr.tar.gz -C /tmp && \ - rm -rf /tmp/admin-mgr.tar.gz - -#################### -# Install Prometheus Exporter -#################### -FROM base as exporter -ARG postgres_exporter_release -ADD "https://github.com/prometheus-community/postgres_exporter/releases/download/v${postgres_exporter_release}/postgres_exporter-${postgres_exporter_release}.linux-${TARGETARCH}.tar.gz" /tmp/postgres_exporter.tar.gz -RUN tar -xvf /tmp/postgres_exporter.tar.gz -C /tmp --strip-components 1 && \ - rm -rf /tmp/postgres_exporter.tar.gz - -#################### -# Install vector -#################### -FROM base as vector -ARG vector_release -ADD "https://packages.timber.io/vector/${vector_release}/vector_${vector_release}-1_${TARGETARCH}.deb" /tmp/vector.deb - -#################### -# Install supervisord -#################### -FROM base as supervisor -# Download pre-built packages -RUN apt-get update -y && apt-get install -y --no-install-recommends --download-only \ - supervisor \ - && rm -rf /var/lib/apt/lists/* -RUN mv /var/cache/apt/archives/*.deb /tmp/ - -#################### -# Create the final image for production -#################### -FROM base as production - -# Copy dependencies from previous build stages -COPY --from=pgbouncer /tmp/*.deb /tmp/ -COPY --from=vector /tmp/*.deb /tmp/ -COPY --from=kong 
/tmp/*.deb /tmp/ -COPY --from=supervisor /tmp/*.deb /tmp/ - -# Install runtime dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - /tmp/*.deb \ - # For health check - curl \ - # For parsing init payload - jq \ - # Security tools - fail2ban \ - # sudo - sudo \ - vim-tiny \ - less \ - libnuma1 \ - logrotate \ - dumb-init \ - # pg_egress_collect deps - tcpdump libio-async-perl \ - && rm -rf /var/lib/apt/lists/* /tmp/* \ - && mkdir -p /dist \ - && mkdir -p /data/opt && chmod go+rwx /data/opt - -#################### -# Install salt -#################### -ENV DEBIAN_FRONTEND noninteractive -ENV SALT_VERSION 3006 - -# Install one-dir salt -RUN mkdir /etc/apt/keyrings \ - && curl -fsSL -o /etc/apt/keyrings/salt-archive-keyring-2023-arm.gpg https://repo.saltproject.io/salt/py3/ubuntu/20.04/arm64/SALT-PROJECT-GPG-PUBKEY-2023.gpg \ - && echo "deb [signed-by=/etc/apt/keyrings/salt-archive-keyring-2023-arm.gpg arch=arm64] https://repo.saltproject.io/salt/py3/ubuntu/20.04/arm64/$SALT_VERSION focal main" | tee /etc/apt/sources.list.d/salt.list \ - && curl -fsSL -o /etc/apt/keyrings/salt-archive-keyring-2023-amd.gpg https://repo.saltproject.io/salt/py3/ubuntu/20.04/amd64/SALT-PROJECT-GPG-PUBKEY-2023.gpg \ - && echo "deb [signed-by=/etc/apt/keyrings/salt-archive-keyring-2023-amd.gpg arch=amd64] https://repo.saltproject.io/salt/py3/ubuntu/20.04/amd64/$SALT_VERSION focal main" | tee -a /etc/apt/sources.list.d/salt.list \ - && apt-get clean && apt-get update \ - && apt-get install -y salt-minion - -ADD docker/all-in-one/etc/salt/minion /etc/salt/minion - - -# Copy single binary dependencies -COPY --from=pgrst /bin/postgrest /dist/ -COPY --from=gotrue /usr/local/bin/auth /dist/gotrue -COPY --from=gotrue /usr/local/etc/auth /opt/gotrue/ -COPY --from=envoy /usr/local/bin/envoy /dist/ -COPY --from=adminapi /tmp/tealbase-admin-api /dist/ -COPY --chown=root:root --from=adminmgr /tmp/admin-mgr /dist/ -COPY --from=exporter /tmp/postgres_exporter 
/opt/postgres_exporter/ -COPY docker/all-in-one/opt/postgres_exporter /opt/postgres_exporter/ - -# Configuring dangling symlinks for binaries -RUN ln -s /data/opt/tealbase-admin-api /opt/tealbase-admin-api \ - && ln -s /data/opt/postgrest /opt/postgrest \ - && ln -s /data/opt/gotrue /opt/gotrue/gotrue \ - && ln -s /data/opt/admin-mgr /usr/bin/admin-mgr - -# Scripts for adminapi -COPY ansible/files/admin_api_scripts /root -COPY --chown=adminapi:adminapi docker/all-in-one/etc/adminapi /etc/adminapi -COPY --chmod=644 docker/all-in-one/etc/sudoers.d /etc/sudoers.d/ - -# Script for pg_egress_collect -COPY --chown=adminapi:adminapi docker/all-in-one/opt/pg_egress_collect /opt/pg_egress_collect - -# Customizations for pgbouncer -COPY docker/all-in-one/etc/pgbouncer /etc/pgbouncer -COPY docker/all-in-one/etc/pgbouncer-custom /etc/pgbouncer-custom -COPY docker/all-in-one/etc/tmpfiles.d /etc/tmpfiles.d - -# Customizations for postgres -COPY --chown=postgres:postgres docker/all-in-one/etc/postgresql/pg_hba.conf /etc/postgresql/ -COPY --chown=postgres:postgres docker/all-in-one/etc/postgresql/logging.conf /etc/postgresql/ -COPY --chown=postgres:postgres docker/all-in-one/etc/postgresql-custom /etc/postgresql-custom -COPY --chown=postgres:postgres docker/all-in-one/etc/postgresql.schema.sql /etc/postgresql.schema.sql - -# Customizations for postgres_exporter -COPY --chown=postgres:postgres docker/all-in-one/opt/postgres_exporter/queries.yml /opt/postgres_exporter/queries.yml - -# Customizations for fail2ban -COPY docker/all-in-one/etc/fail2ban/filter.d /etc/fail2ban/filter.d/ -COPY docker/all-in-one/etc/fail2ban/jail.d /etc/fail2ban/jail.d/ - -# Customizations for postgrest -COPY --chown=postgrest:postgrest docker/all-in-one/etc/postgrest/bootstrap.sh /etc/postgrest/bootstrap.sh -COPY --chown=postgrest:postgrest docker/all-in-one/etc/postgrest/base.conf /etc/postgrest/base.conf -COPY --chown=postgrest:postgrest docker/all-in-one/etc/postgrest/generated.conf 
/etc/postgrest/generated.conf - -# Customizations for logrotate -COPY docker/all-in-one/etc/logrotate.d/walg.conf /etc/logrotate.d/walg.conf -COPY docker/all-in-one/etc/logrotate.d/postgresql.conf /etc/logrotate.d/postgresql.conf - -# Customizations for gotrue -COPY docker/all-in-one/etc/gotrue.env /etc/gotrue.env - -# Customizations for envoy -ARG envoy_release -ADD --chmod=755 --chown=envoy:envoy "https://raw.githubusercontent.com/envoyproxy/envoy/v${envoy_release}/restarter/hot-restarter.py" /opt/envoy-hot-restarter.py -COPY --chmod=775 --chown=envoy:envoy ansible/files/envoy_config/ /etc/envoy/ -COPY --chmod=755 --chown=envoy:envoy ansible/files/start-envoy.sh /opt/ - -# Customizations for kong -COPY docker/all-in-one/etc/kong/kong.conf /etc/kong/kong.conf -COPY docker/all-in-one/etc/kong/kong.yml /etc/kong/kong.yml - -# Customizations for vector -COPY --chown=vector:vector docker/all-in-one/etc/vector/vector.yaml /etc/vector/vector.yaml - -# Customizations for supervisor -COPY docker/all-in-one/etc/supervisor /etc/supervisor - -# Customizations for supa-shutdown -COPY --chown=adminapi:adminapi docker/all-in-one/etc/supa-shutdown /etc/supa-shutdown -COPY docker/all-in-one/configure-shim.sh /usr/local/bin/configure-shim.sh - -# Configure service ports -ENV PGRST_SERVER_PORT=3000 -ENV PGRST_ADMIN_SERVER_PORT=3001 -EXPOSE ${PGRST_SERVER_PORT} - -ENV GOTRUE_SITE_URL=http://localhost:${PGRST_SERVER_PORT} -ENV GOTRUE_API_PORT=9999 -EXPOSE ${GOTRUE_API_PORT} - -ENV ENVOY_HTTP_PORT=8000 -ENV ENVOY_HTTPS_PORT=8443 - -ENV KONG_HTTP_PORT=8000 -ENV KONG_HTTPS_PORT=8443 - -ENV HTTP_PORT=${ENVOY_HTTP_PORT:-KONG_HTTP_PORT} -ENV HTTP_PORT=${ENVOY_HTTPS_PORT:-KONG_HTTPS_PORT} -EXPOSE ${HTTP_PORT} ${HTTPS_PORT} - -ENV ADMIN_API_CERT_DIR=/etc/ssl/adminapi -ENV ADMIN_API_PORT=8085 -EXPOSE ${ADMIN_API_PORT} - -ENV PGBOUNCER_PORT=6543 -EXPOSE ${PGBOUNCER_PORT} - -ENV PGEXPORTER_PORT=9187 -EXPOSE ${PGEXPORTER_PORT} - -ENV VECTOR_API_PORT=9001 - -# Create system users -RUN useradd 
--create-home --shell /bin/bash postgrest && \ - useradd --create-home --shell /bin/bash gotrue && \ - useradd --create-home --shell /bin/bash envoy && \ - useradd --create-home --shell /bin/bash pgbouncer -G postgres,ssl-cert && \ - useradd --create-home --shell /bin/bash adminapi -G root,envoy,kong,pgbouncer,postgres,postgrest,wal-g && \ - usermod --append --shell /bin/bash -G postgres vector -RUN mkdir -p /etc/wal-g && \ - chown -R adminapi:adminapi /etc/wal-g && \ - chmod g+w /etc/wal-g -RUN mkdir -p /var/log/wal-g \ - && chown -R postgres:postgres /var/log/wal-g \ - && chmod +x /dist/admin-mgr \ - && chmod ug+s /dist/admin-mgr \ - && touch /etc/wal-g/config.json \ - && chown adminapi:adminapi /etc/wal-g/config.json \ - && echo '{"WALG_S3_PREFIX": "s3://foo/bar/"}' > /etc/wal-g/config.json -RUN chown -R adminapi:adminapi /etc/adminapi -RUN sed -i "s;#include = '/etc/postgresql-custom/generated-optimizations.conf';include = '/etc/postgresql-custom/generated-optimizations.conf';" /etc/postgresql/postgresql.conf - -# Add healthcheck and entrypoint scripts -COPY docker/all-in-one/healthcheck.sh /usr/local/bin/ -HEALTHCHECK --interval=3s --timeout=2s --start-period=4s --retries=10 CMD [ "healthcheck.sh" ] - -COPY docker/all-in-one/init /init -COPY docker/all-in-one/entrypoint.sh /usr/local/bin/ -COPY docker/all-in-one/postgres-entrypoint.sh /usr/local/bin/ -COPY docker/all-in-one/shutdown.sh /usr/local/bin/supa-shutdown.sh -COPY docker/all-in-one/run-logrotate.sh /usr/local/bin/run-logrotate.sh - -ENTRYPOINT [ "/usr/bin/dumb-init" ] - -CMD [ "entrypoint.sh"] diff --git a/docker/all-in-one/README.md b/docker/all-in-one/README.md deleted file mode 100644 index 47fa762..0000000 --- a/docker/all-in-one/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# tealbase All-in-One - -All tealbase backend services bundled in a single Docker image for quick local testing and edge deployment. 
- -## Build - -```bash -# cwd: repo root -docker build -f docker/all-in-one/Dockerfile -t tealbase/all-in-one . -``` - -## Run - -```bash -docker run --rm -it \ - -e POSTGRES_PASSWORD=postgres \ - -e JWT_SECRET=super-secret-jwt-token-with-at-least-32-characters-long \ - -e ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE \ - -e SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q \ - -e ADMIN_API_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoic3VwYWJhc2VfYWRtaW4iLCJpc3MiOiJzdXBhYmFzZS1kZW1vIiwiaWF0IjoxNjQxNzY5MjAwLCJleHAiOjE3OTk1MzU2MDB9.Y9mSNVuTw2TdfryoaqM5wySvwQemGGWfSe9ixcklVfM \ - -e DATA_VOLUME_MOUNTPOINT=/data \ - -e MACHINE_TYPE=shared_cpu_1x_512m \ - -p 5432:5432 \ - -p 8000:8000 \ - tealbase/all-in-one -``` - -Use bind mount to start from an existing physical backup: `-v $(pwd)/data:/var/lib/postgresql/data` - -Alternatively, the container may be initialised using a payload tarball. 
- -```bash -docker run --rm \ - -e POSTGRES_PASSWORD=postgres \ - -e INIT_PAYLOAD_PRESIGNED_URL= \ - -p 5432:5432 \ - -p 8000:8000 \ - -it tealbase/all-in-one -``` - -## Test - -```bash -curl -H "apikey: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE" \ - localhost:8000/rest/v1/ | jq -``` - -## TODO - -- [x] optimise admin config -- [x] propagate shutdown signals -- [x] add http health checks -- [x] generate dynamic JWT -- [ ] ufw / nftables -- [x] log rotation -- [x] egress metrics -- [x] vector -- [ ] apparmor -- [x] wal-g diff --git a/docker/all-in-one/configure-shim.sh b/docker/all-in-one/configure-shim.sh deleted file mode 100755 index f42f155..0000000 --- a/docker/all-in-one/configure-shim.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -INITIAL_BINARY_PATH=$1 -SYMLINK_PATH=$2 - -SYMLINK_TARGET=$(readlink -m "$SYMLINK_PATH") - -if [ ! 
-f "$SYMLINK_TARGET" ]; then - cp "$INITIAL_BINARY_PATH" "$SYMLINK_TARGET" - - PERMS=$(stat -c "%a" "$INITIAL_BINARY_PATH") - chmod "$PERMS" "$SYMLINK_TARGET" - - OWNER_GROUP=$(stat -c "%u:%g" "$INITIAL_BINARY_PATH") - chown "$OWNER_GROUP" "$SYMLINK_TARGET" -fi diff --git a/docker/all-in-one/entrypoint.sh b/docker/all-in-one/entrypoint.sh deleted file mode 100755 index 586ead7..0000000 --- a/docker/all-in-one/entrypoint.sh +++ /dev/null @@ -1,366 +0,0 @@ -#!/bin/bash -set -eou pipefail - -START_TIME=$(date +%s%N) - -PG_CONF=/etc/postgresql/postgresql.conf -SUPERVISOR_CONF=/etc/supervisor/supervisord.conf - -export DATA_VOLUME_MOUNTPOINT=${DATA_VOLUME_MOUNTPOINT:-/data} -export CONFIGURED_FLAG_PATH=${CONFIGURED_FLAG_PATH:-$DATA_VOLUME_MOUNTPOINT/machine.configured} - -export MAX_IDLE_TIME_MINUTES=${MAX_IDLE_TIME_MINUTES:-5} - -function calculate_duration { - local start_time=$1 - local end_time=$2 - - local duration=$((end_time - start_time)) - local milliseconds=$((duration / 1000000)) - - echo "$milliseconds" -} - -# Ref: https://gist.github.com/sj26/88e1c6584397bb7c13bd11108a579746 -function retry { - # Pass 0 for unlimited retries - local retries=$1 - shift - - local start=$EPOCHSECONDS - local count=0 - until "$@"; do - exit=$? - # Reset count if service has been running for more than 2 minutes - local elapsed=$((EPOCHSECONDS - start)) - if [ $elapsed -gt 120 ]; then - count=0 - fi - # Exponential backoff up to n tries - local wait=$((2 ** count)) - count=$((count + 1)) - if [ $count -ge "$retries" ] && [ "$retries" -gt 0 ]; then - echo "Retry $count/$retries exited $exit, no more retries left." - return $exit - fi - echo "Retry $count/$retries exited $exit, retrying in $wait seconds..." 
- sleep $wait - start=$EPOCHSECONDS - done - return 0 -} - -function configure_services { - # Start services after migrations are run - for file in /init/configure-*.sh; do - retry 0 "$file" - done -} - -function enable_swap { - fallocate -l 1G /mnt/swapfile - chmod 600 /mnt/swapfile - mkswap /mnt/swapfile - swapon /mnt/swapfile -} - -function push_lsn_checkpoint_file { - if [ "${PLATFORM_DEPLOYMENT:-}" != "true" ]; then - echo "Skipping push of LSN checkpoint file" - return - fi - - /usr/bin/admin-mgr lsn-checkpoint-push --immediately || echo "Failed to push LSN checkpoint" -} - -function graceful_shutdown { - echo "$(date): Received SIGINT. Shutting down." - - # Postgres ships the latest WAL file using archive_command during shutdown, in a blocking operation - # This is to ensure that the WAL file is shipped, just in case - sleep 0.2 - push_lsn_checkpoint_file -} - -function enable_autoshutdown { - sed -i "s/autostart=.*/autostart=true/" /etc/supervisor/base-services/supa-shutdown.conf -} - -function enable_lsn_checkpoint_push { - sed -i "s/autostart=.*/autostart=true/" /etc/supervisor/base-services/lsn-checkpoint-push.conf - sed -i "s/autorestart=.*/autorestart=true/" /etc/supervisor/base-services/lsn-checkpoint-push.conf -} - -function disable_fail2ban { - sed -i "s/autostart=.*/autostart=false/" /etc/supervisor/services/fail2ban.conf - sed -i "s/autorestart=.*/autorestart=false/" /etc/supervisor/services/fail2ban.conf -} - -function setup_postgres { - tar -xzvf "$INIT_PAYLOAD_PATH" -C / ./etc/postgresql.schema.sql - mv /etc/postgresql.schema.sql /docker-entrypoint-initdb.d/migrations/99-schema.sql - - tar -xzvf "$INIT_PAYLOAD_PATH" -C / ./etc/postgresql-custom/pgsodium_root.key - echo "include = '/etc/postgresql-custom/postgresql-platform-defaults.conf'" >>$PG_CONF - - # TODO (darora): walg enablement is temporarily performed here until changes from https://github.com/tealbase/postgres/pull/639 get picked up - # other things will still be needed in the future 
(auth_delay config) - sed -i \ - -e "s|#include = '/etc/postgresql-custom/custom-overrides.conf'|include = '/etc/postgresql-custom/custom-overrides.conf'|g" \ - -e "s|#include = '/etc/postgresql-custom/wal-g.conf'|include = '/etc/postgresql-custom/wal-g.conf'|g" \ - -e "s|shared_preload_libraries = '\(.*\)'|shared_preload_libraries = '\1, auth_delay'|" \ - -e "/# Automatically generated optimizations/i auth_delay.milliseconds = '3000'" \ - "${PG_CONF}" - - # Setup ssl certs - mkdir -p /etc/ssl/certs/postgres - tar -xzvf "$INIT_PAYLOAD_PATH" -C /etc/ssl/certs/postgres/ --strip-components 2 ./ssl/server.crt - tar -xzvf "$INIT_PAYLOAD_PATH" -C /etc/ssl/certs/postgres/ --strip-components 2 ./ssl/ca.crt - tar -xzvf "$INIT_PAYLOAD_PATH" -C /etc/ssl/private/ --strip-components 2 ./ssl/server.key - # tar -xzvf "$INIT_PAYLOAD_PATH" -C /etc/ssl/certs/postgres/ ./ssl/server-intermediate.srl - - PGSSLROOTCERT=/etc/ssl/certs/postgres/ca.crt - PGSSLCERT=/etc/ssl/certs/postgres/server.crt - PGSSLKEY=/etc/ssl/private/server.key - chown root:postgres $PGSSLROOTCERT $PGSSLKEY $PGSSLCERT - chmod 640 $PGSSLROOTCERT $PGSSLKEY $PGSSLCERT - - # Change ssl back to on in postgres.conf - sed -i -e "s|ssl = off|ssl = on|g" \ - -e "s|ssl_ca_file = ''|ssl_ca_file = '$PGSSLROOTCERT'|g" \ - -e "s|ssl_cert_file = ''|ssl_cert_file = '$PGSSLCERT'|g" \ - -e "s|ssl_key_file = ''|ssl_key_file = '$PGSSLKEY'|g" \ - $PG_CONF - - if [ "${DATA_VOLUME_MOUNTPOINT}" ]; then - mkdir -p "${DATA_VOLUME_MOUNTPOINT}/opt" - /usr/local/bin/configure-shim.sh /dist/tealbase-admin-api /opt/tealbase-admin-api - /opt/tealbase-admin-api optimize db --destination-config-file-path /etc/postgresql-custom/generated-optimizations.conf - - # Preserve postgresql configs across restarts - POSTGRESQL_CUSTOM_DIR="${DATA_VOLUME_MOUNTPOINT}/etc/postgresql-custom" - - mkdir -p "${POSTGRESQL_CUSTOM_DIR}" - - if [ ! 
-f "${CONFIGURED_FLAG_PATH}" ]; then - echo "Copying existing custom postgresql config from /etc/postgresql-custom to ${POSTGRESQL_CUSTOM_DIR}" - cp -R "/etc/postgresql-custom/." "${POSTGRESQL_CUSTOM_DIR}/" - fi - - rm -rf "/etc/postgresql-custom" - ln -s "${POSTGRESQL_CUSTOM_DIR}" "/etc/postgresql-custom" - chown -R postgres:postgres "/etc/postgresql-custom" - chown -R postgres:postgres "${POSTGRESQL_CUSTOM_DIR}" - chmod g+rx "${POSTGRESQL_CUSTOM_DIR}" - - # Preserve wal-g configs across restarts - WALG_CONF_DIR="${DATA_VOLUME_MOUNTPOINT}/etc/wal-g" - mkdir -p "${WALG_CONF_DIR}" - - if [ ! -f "${CONFIGURED_FLAG_PATH}" ]; then - echo "Copying existing custom wal-g config from /etc/wal-g to ${WALG_CONF_DIR}" - cp -R "/etc/wal-g/." "${WALG_CONF_DIR}/" - fi - - rm -rf "/etc/wal-g" - ln -s "${WALG_CONF_DIR}" "/etc/wal-g" - chown -R adminapi:adminapi "/etc/wal-g" - chown -R adminapi:adminapi "${WALG_CONF_DIR}" - chmod g+rx "/etc/wal-g" - chmod g+rx "${WALG_CONF_DIR}" - fi - DURATION=$(calculate_duration "$START_TIME" "$(date +%s%N)") - echo "E: Execution time to setting up postgresql: $DURATION milliseconds" -} - -function setup_credentials { - # Load credentials from init json - tar -xzvf "$INIT_PAYLOAD_PATH" -C / ./tmp/init.json - export ANON_KEY=${ANON_KEY:-$(jq -r '.["anon_key"]' /tmp/init.json)} - export SERVICE_ROLE_KEY=${SERVICE_ROLE_KEY:-$(jq -r '.["service_key"]' /tmp/init.json)} - export ADMIN_API_KEY=${ADMIN_API_KEY:-$(jq -r '.["tealbase_admin_key"]' /tmp/init.json)} - export JWT_SECRET=${JWT_SECRET:-$(jq -r '.["jwt_secret"]' /tmp/init.json)} - DURATION=$(calculate_duration "$START_TIME" "$(date +%s%N)") - echo "E: Execution time to setting up credentials: $DURATION milliseconds" -} - -function report_health { - if [ -z "${REPORTING_TOKEN:-}" ]; then - echo "Skipped health reporting: missing REPORTING_TOKEN" - exit 0 - fi - if [ -d "$ADMIN_API_CERT_DIR" ]; then - retry 10 curl -sSkf "https://localhost:$ADMIN_API_PORT/health-reporter/send" -X POST -H "apikey: 
$ADMIN_API_KEY" - else - retry 10 curl -sSf "http://localhost:$ADMIN_API_PORT/health-reporter/send" -X POST -H "apikey: $ADMIN_API_KEY" - fi -} - -function run_prelaunch_hooks { - if [ -f "/etc/postgresql-custom/supautils.conf" ]; then - sed -i -e 's/dblink, //' "/etc/postgresql-custom/supautils.conf" - fi -} - -function start_supervisor { - # Start health reporting - report_health & - - # Start supervisord - /usr/bin/supervisord -c $SUPERVISOR_CONF -} - -DELEGATED_ARCHIVE_PATH=/data/delegated-init.tar.gz -DELEGATED_ENTRY_PATH=/data/delegated-entry.sh - -function fetch_and_execute_delegated_payload { - curl -s --time-cond $DELEGATED_ARCHIVE_PATH -o $DELEGATED_ARCHIVE_PATH "$DELEGATED_INIT_LOCATION" - - if [ ! -f $DELEGATED_ARCHIVE_PATH ]; then - echo "No delegated payload found, bailing" - return - fi - - # only extract a valid archive - if tar -tzf "$DELEGATED_ARCHIVE_PATH" &>/dev/null; then - TAR_MTIME_EPOCH=$(tar -tvzf "$DELEGATED_ARCHIVE_PATH" delegated-entry.sh | awk '{print $4, $5}' | xargs -I {} date -d {} +%s) - - if [ -f $DELEGATED_ENTRY_PATH ]; then - FILE_MTIME_EPOCH=$(stat -c %Y "$DELEGATED_ENTRY_PATH") - - if [ "$TAR_MTIME_EPOCH" -gt "$FILE_MTIME_EPOCH" ]; then - tar -xvzf "$DELEGATED_ARCHIVE_PATH" -C /data - else - echo "TAR archive is not newer, skipping extraction" - fi - else - tar -xvzf "$DELEGATED_ARCHIVE_PATH" -C /data - fi - else - echo "Invalid TAR archive" - return - fi - - # Run our delegated entry script here - if [ -f "$DELEGATED_ENTRY_PATH" ]; then - chmod +x $DELEGATED_ENTRY_PATH - bash -c "$DELEGATED_ENTRY_PATH $START_TIME" - fi -} - -# Increase max number of open connections -ulimit -n 65536 - -# Update pgsodium root key -if [ "${PGSODIUM_ROOT_KEY:-}" ]; then - echo "${PGSODIUM_ROOT_KEY}" >/etc/postgresql-custom/pgsodium_root.key -fi - -# Update pgdata directory -if [ "${PGDATA_REAL:-}" ]; then - mkdir -p "${PGDATA_REAL}" - chown -R postgres:postgres "${PGDATA_REAL}" - chmod -R g+rx "${PGDATA_REAL}" -fi - -if [ "${PGDATA:-}" ]; then - 
if [ "${PGDATA_REAL:-}" ]; then - mkdir -p "$(dirname "${PGDATA}")" - rm -rf "${PGDATA}" - ln -s "${PGDATA_REAL}" "${PGDATA}" - chmod -R g+rx "${PGDATA}" - else - mkdir -p "$PGDATA" - chown postgres:postgres "$PGDATA" - fi - sed -i "s|data_directory = '.*'|data_directory = '$PGDATA'|g" $PG_CONF -fi - -# Download and extract init payload from s3 -export INIT_PAYLOAD_PATH=${INIT_PAYLOAD_PATH:-/tmp/payload.tar.gz} - -if [ "${INIT_PAYLOAD_PRESIGNED_URL:-}" ]; then - curl -fsSL "$INIT_PAYLOAD_PRESIGNED_URL" -o "/tmp/payload.tar.gz" || true - if [ -f "/tmp/payload.tar.gz" ] && [ "/tmp/payload.tar.gz" != "$INIT_PAYLOAD_PATH" ]; then - mv "/tmp/payload.tar.gz" "$INIT_PAYLOAD_PATH" - fi -fi - -if [ "${DATA_VOLUME_MOUNTPOINT}" ]; then - BASE_LOGS_FOLDER="${DATA_VOLUME_MOUNTPOINT}/logs" - - for folder in "postgresql" "services" "wal-g"; do - mkdir -p "${BASE_LOGS_FOLDER}/${folder}" - rm -rf "/var/log/${folder}" - ln -s "${BASE_LOGS_FOLDER}/${folder}" "/var/log/${folder}" - done - - chown -R postgres:postgres "${BASE_LOGS_FOLDER}" - - mkdir -p "${DATA_VOLUME_MOUNTPOINT}/etc/logrotate" -fi - -# Process init payload -if [ -f "$INIT_PAYLOAD_PATH" ]; then - setup_credentials - setup_postgres -else - echo "Skipped extracting init payload: $INIT_PAYLOAD_PATH does not exist" -fi - -mkdir -p /var/log/services - -SUPERVISOR_CONF=/etc/supervisor/supervisord.conf -find /etc/supervisor/ -type d -exec chmod 0770 {} + -find /etc/supervisor/ -type f -exec chmod 0660 {} + - -# Start services in the background -if [ "${POSTGRES_ONLY:-}" == "true" ]; then - sed -i "s| - postgrest| # - postgrest|g" /etc/adminapi/adminapi.yaml - sed -i "s|files = services/\*.conf base-services/\*.conf|files = base-services/\*.conf|g" $SUPERVISOR_CONF - /init/configure-adminapi.sh -else - sed -i "s| # - postgrest| - postgrest|g" /etc/adminapi/adminapi.yaml - sed -i "s|files = base-services/\*.conf|files = services/\*.conf base-services/\*.conf|g" $SUPERVISOR_CONF - configure_services -fi - -if [ 
"${AUTOSHUTDOWN_ENABLED:-}" == "true" ]; then - enable_autoshutdown -fi - -if [ "${ENVOY_ENABLED:-}" == "true" ]; then - sed -i "s/autostart=.*/autostart=true/" /etc/supervisor/services/envoy.conf - sed -i "s/autostart=.*/autostart=false/" /etc/supervisor/services/kong.conf - sed -i "s/kong/envoy/" /etc/supervisor/services/group.conf -fi - -if [ "${FAIL2BAN_DISABLED:-}" == "true" ]; then - disable_fail2ban -fi - -if [ "${GOTRUE_DISABLED:-}" == "true" ]; then - sed -i "s/autostart=.*/autostart=false/" /etc/supervisor/services/gotrue.conf - sed -i "s/autorestart=.*/autorestart=false/" /etc/supervisor/services/gotrue.conf -fi - -if [ "${PLATFORM_DEPLOYMENT:-}" == "true" ]; then - if [ "${SWAP_DISABLED:-}" != "true" ]; then - enable_swap - fi - enable_lsn_checkpoint_push - - trap graceful_shutdown SIGINT -fi - -touch "$CONFIGURED_FLAG_PATH" -run_prelaunch_hooks - -if [ -n "${DELEGATED_INIT_LOCATION:-}" ]; then - fetch_and_execute_delegated_payload -else - DURATION=$(calculate_duration "$START_TIME" "$(date +%s%N)") - echo "E: Execution time to starting supervisor: $DURATION milliseconds" - start_supervisor - push_lsn_checkpoint_file -fi diff --git a/docker/all-in-one/etc/adminapi/adminapi.yaml b/docker/all-in-one/etc/adminapi/adminapi.yaml deleted file mode 100644 index 682f4ad..0000000 --- a/docker/all-in-one/etc/adminapi/adminapi.yaml +++ /dev/null @@ -1,76 +0,0 @@ -port: 8085 -host: 0.0.0.0 -ref: {{ .ProjectRef }} -jwt_secret: {{ .JwtSecret }} -metric_collectors: - - filesystem - - meminfo - - netdev - - loadavg - - cpu - - diskstats - - vmstat -node_exporter_additional_args: - - "--collector.filesystem.ignored-mount-points=^/(boot|sys|dev|run).*" - - "--collector.netdev.device-exclude=lo" -# cert_path: /etc/ssl/adminapi/server.crt -# key_path: /etc/ssl/adminapi/server.key -upstream_metrics_refresh_duration: 60s -pgbouncer_endpoints: - - "postgres://pgbouncer:{{ .PgbouncerPassword }}@localhost:6543/pgbouncer" -fail2ban_socket: /var/run/fail2ban/fail2ban.sock 
-upstream_metrics_sources: - - name: system - url: "https://localhost:8085/metrics" - labels_to_attach: - - name: tealbase_project_ref - value: {{ .ProjectRef }} - - name: service_type - value: db - skip_tls_verify: true - - name: postgresql - url: "http://localhost:9187/metrics" - labels_to_attach: - - name: tealbase_project_ref - value: {{ .ProjectRef }} - - name: service_type - value: postgresql - - name: gotrue - url: "http://localhost:9122/metrics" - labels_to_attach: - - name: tealbase_project_ref - value: {{ .ProjectRef }} - - name: service_type - value: gotrue -monitoring: - disk_usage: - enabled: true -upgrades_config: - region: us-east-1 - s3_bucket_name: tealbase-internal-artifacts-prod-bucket - common_prefix: upgrades - destination_dir: /tmp -firewall: - enabled: true - internal_ports: - - 9187 - - 8085 - - 9122 - privileged_ports: - - 22 - privileged_ports_allowlist: - - 0.0.0.0/0 - filtered_ports: - - 5432 - - 6543 - unfiltered_ports: - - 80 - - 443 - managed_rules_file: /etc/nftables/tealbase_managed.conf -pg_egress_collect_path: /tmp/pg_egress_collect.txt -health_reporting: - api_url: {{ .tealbaseUrl }} - project_token: {{ .ReportingToken }} - check_services: - # - postgres - # - postgrest diff --git a/docker/all-in-one/etc/fail2ban/filter.d/pgbouncer.conf b/docker/all-in-one/etc/fail2ban/filter.d/pgbouncer.conf deleted file mode 100644 index b2d59c1..0000000 --- a/docker/all-in-one/etc/fail2ban/filter.d/pgbouncer.conf +++ /dev/null @@ -1,2 +0,0 @@ -[Definition] -failregex = ^.+@:.+error: password authentication failed$ diff --git a/docker/all-in-one/etc/fail2ban/filter.d/postgresql.conf b/docker/all-in-one/etc/fail2ban/filter.d/postgresql.conf deleted file mode 100644 index c17b51e..0000000 --- a/docker/all-in-one/etc/fail2ban/filter.d/postgresql.conf +++ /dev/null @@ -1,8 +0,0 @@ -[Definition] -failregex = ^.*,.*,.*,.*,":.*password authentication failed for user.*$ -ignoreregex = ^.*,.*,.*,.*,"127\.0\.0\.1.*password authentication failed for 
user.*$ - ^.*,.*,.*,.*,":.*password authentication failed for user ""tealbase_admin".*$ - ^.*,.*,.*,.*,":.*password authentication failed for user ""tealbase_auth_admin".*$ - ^.*,.*,.*,.*,":.*password authentication failed for user ""tealbase_storage_admin".*$ - ^.*,.*,.*,.*,":.*password authentication failed for user ""authenticator".*$ - ^.*,.*,.*,.*,":.*password authentication failed for user ""pgbouncer".*$ diff --git a/docker/all-in-one/etc/fail2ban/jail.d/jail.local b/docker/all-in-one/etc/fail2ban/jail.d/jail.local deleted file mode 100644 index 44e8210..0000000 --- a/docker/all-in-one/etc/fail2ban/jail.d/jail.local +++ /dev/null @@ -1,4 +0,0 @@ -[DEFAULT] - -banaction = nftables-multiport -banaction_allports = nftables-allports diff --git a/docker/all-in-one/etc/fail2ban/jail.d/pgbouncer.conf b/docker/all-in-one/etc/fail2ban/jail.d/pgbouncer.conf deleted file mode 100644 index c8b3c49..0000000 --- a/docker/all-in-one/etc/fail2ban/jail.d/pgbouncer.conf +++ /dev/null @@ -1,7 +0,0 @@ -[pgbouncer] -enabled = true -port = 6543 -protocol = tcp -filter = pgbouncer -logpath = /var/log/services/pgbouncer.log -maxretry = 3 diff --git a/docker/all-in-one/etc/fail2ban/jail.d/postgresql.conf b/docker/all-in-one/etc/fail2ban/jail.d/postgresql.conf deleted file mode 100644 index 0ec1819..0000000 --- a/docker/all-in-one/etc/fail2ban/jail.d/postgresql.conf +++ /dev/null @@ -1,8 +0,0 @@ -[postgresql] -enabled = true -port = 5432 -protocol = tcp -filter = postgresql -logpath = /var/log/postgresql/auth-failures.csv -maxretry = 3 -ignoreip = 192.168.0.0/16 172.17.1.0/20 diff --git a/docker/all-in-one/etc/fail2ban/jail.d/sshd.local b/docker/all-in-one/etc/fail2ban/jail.d/sshd.local deleted file mode 100644 index 7033738..0000000 --- a/docker/all-in-one/etc/fail2ban/jail.d/sshd.local +++ /dev/null @@ -1,3 +0,0 @@ -[sshd] - -enabled = false diff --git a/docker/all-in-one/etc/gotrue.env b/docker/all-in-one/etc/gotrue.env deleted file mode 100644 index 3c53d17..0000000 --- 
a/docker/all-in-one/etc/gotrue.env +++ /dev/null @@ -1,9 +0,0 @@ -API_EXTERNAL_URL=api_external_url -GOTRUE_API_HOST=gotrue_api_host -GOTRUE_SITE_URL=gotrue_site_url -GOTRUE_DB_DRIVER=postgres -GOTRUE_DB_DATABASE_URL=postgres://tealbase_auth_admin@localhost/postgres?sslmode=disable -GOTRUE_DB_MIGRATIONS_PATH=/opt/gotrue/migrations -GOTRUE_JWT_ADMIN_ROLES=tealbase_admin,service_role -GOTRUE_JWT_AUD=authenticated -GOTRUE_JWT_SECRET=gotrue_jwt_secret diff --git a/docker/all-in-one/etc/kong/kong.conf b/docker/all-in-one/etc/kong/kong.conf deleted file mode 100644 index 4778902..0000000 --- a/docker/all-in-one/etc/kong/kong.conf +++ /dev/null @@ -1,37 +0,0 @@ -database = off -declarative_config = /etc/kong/kong.yml - -# plugins defined in the dockerfile -plugins = request-transformer,cors,key-auth,basic-auth,http-log,ip-restriction,rate-limiting - -admin_listen = off -proxy_listen = 0.0.0.0:80 reuseport backlog=16384, 0.0.0.0:443 http2 ssl reuseport backlog=16834, [::]:80 reuseport backlog=16384, [::]:443 http2 ssl reuseport backlog=16348 - -nginx_http_log_format = custom_log '$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_time $request_length' -nginx_http_client_body_buffer_size = 512k -proxy_access_log = off -dns_stale_ttl = 60 -nginx_proxy_proxy_max_temp_file_size = 0 -nginx_proxy_proxy_buffer_size = 128k -nginx_proxy_proxy_buffers = 4 256k -nginx_proxy_proxy_busy_buffers_size = 256k -nginx_proxy_proxy_read_timeout = 120s -nginx_proxy_proxy_ssl_verify = off -nginx_http_gzip=on -nginx_http_gzip_comp_level=6 -nginx_http_gzip_min_length=256 -nginx_http_gzip_proxied=any -nginx_http_gzip_vary=on -nginx_http_gzip_types=text/plain application/xml application/openapi+json application/json - -# the upstream requests will be timed out after 60s idle anyway -# this ensures that we're not unnecessarily cycling them -upstream_keepalive_max_requests = 0 -# the pool size can be (and ought to be) scaled up on 
larger instances -upstream_keepalive_pool_size = 500 - -nginx_events_use = epoll -# can be tuned to be higher on larger boxes (4096 is totally fine) -nginx_events_worker_connections = 1024 -anonymous_reports = off -headers = latency_tokens diff --git a/docker/all-in-one/etc/kong/kong.yml b/docker/all-in-one/etc/kong/kong.yml deleted file mode 100644 index f87f12b..0000000 --- a/docker/all-in-one/etc/kong/kong.yml +++ /dev/null @@ -1,88 +0,0 @@ -# ############################################################################################## -# Updating this file also requires a corresponding update in worker/src/lib/config-utils/kong.ts -# ############################################################################################## -_format_version: '1.1' -services: - - { - name: auth-v1-open, - url: 'http://localhost:9999/verify', - routes: [{ name: auth-v1-open, strip_path: true, paths: [/auth/v1/verify] }], - plugins: [{ name: cors }], - } - - { - name: auth-v1-open-callback, - url: 'http://localhost:9999/callback', - routes: [{ name: auth-v1-open-callback, strip_path: true, paths: [/auth/v1/callback] }], - plugins: [{ name: cors }], - } - - { - name: auth-v1-open-authorize, - url: 'http://localhost:9999/authorize', - routes: [{ name: auth-v1-open-authorize, strip_path: true, paths: [/auth/v1/authorize] }], - plugins: [{ name: cors }], - } - - { - name: auth-v1-open-saml, - url: 'http://localhost:9999/sso/saml/', - routes: [{ name: auth-v1-open-saml, strip_path: true, paths: [/auth/v1/sso/saml/] }], - plugins: [{ name: cors }], - } - - { - name: auth-v1, - url: 'http://localhost:9999/', - routes: [{ name: auth-v1, strip_path: true, paths: [/auth/v1/] }], - plugins: [{ name: cors }, { name: key-auth, config: { hide_credentials: false } }], - } - - { - name: rest-v1-admin, - url: 'http://localhost:3001/', - routes: [{ name: rest-admin-v1, strip_path: true, paths: [/rest-admin/v1/] }], - plugins: [{ name: cors }, { name: key-auth, config: { hide_credentials: true } 
}], - } - - { - name: rest-v1, - url: 'http://localhost:3000/', - routes: [{ name: rest-v1, strip_path: true, paths: [/rest/v1/] }], - plugins: [{ name: cors }, { name: key-auth, config: { hide_credentials: true } }], - } - - { - name: graphql-v1, - url: 'http://localhost:3000/rpc/graphql', - routes: [{ name: graphql-v1, strip_path: true, paths: [/graphql/v1] }], - plugins: - [ - { name: cors }, - { name: key-auth, config: { hide_credentials: true } }, - { - name: request-transformer, - config: { add: { headers: [Content-Profile:graphql_public] } }, - }, - ], - } - - { - name: admin-v1, - url: 'https://localhost:8085/', - routes: [{ name: admin-v1, strip_path: true, paths: [/admin/v1/] }], - plugins: [{ name: cors }, { name: key-auth, config: { hide_credentials: false } }], - } - - { - name: admin-v1-user-routes, - url: 'https://localhost:8085/privileged', - routes: [{ name: admin-v1-user-routes, strip_path: true, paths: [/customer/v1/privileged] }], - plugins: [{ name: cors }, { name: basic-auth, config: { hide_credentials: false } }], - } - - { - name: admin-v1-metrics, - url: 'https://localhost:8085/metrics/aggregated', - routes: [{ name: admin-v1-metrics, strip_path: true, paths: [/tealbase-internal/metrics] }], - plugins: [{ name: cors }, { name: ip-restriction, config: { allow: [10.0.0.0/8] } }], - } -consumers: - - { username: anon-key, keyauth_credentials: [{ key: anon_key }] } - - { username: service_role-key, keyauth_credentials: [{ key: service_key }] } - - { username: tealbase-admin-key, keyauth_credentials: [{ key: tealbase_admin_key }] } -basicauth_credentials: - - consumer: service_role-key - username: 'service_role' - password: service_key -plugins: [] diff --git a/docker/all-in-one/etc/logrotate.d/postgresql.conf b/docker/all-in-one/etc/logrotate.d/postgresql.conf deleted file mode 100644 index 6e2b882..0000000 --- a/docker/all-in-one/etc/logrotate.d/postgresql.conf +++ /dev/null @@ -1,11 +0,0 @@ -/var/log/postgresql/postgresql.csv { - size 50M - 
rotate 4 - compress - delaycompress - notifempty - missingok - postrotate - sudo -u postgres /usr/lib/postgresql/15/bin/pg_ctl -D /var/lib/postgresql/data logrotate - endscript -} diff --git a/docker/all-in-one/etc/logrotate.d/walg.conf b/docker/all-in-one/etc/logrotate.d/walg.conf deleted file mode 100644 index 49eeb59..0000000 --- a/docker/all-in-one/etc/logrotate.d/walg.conf +++ /dev/null @@ -1,9 +0,0 @@ -/var/log/wal-g/*.log { - size 50M - rotate 3 - copytruncate - delaycompress - compress - notifempty - missingok -} diff --git a/docker/all-in-one/etc/pgbouncer-custom/generated-optimizations.ini b/docker/all-in-one/etc/pgbouncer-custom/generated-optimizations.ini deleted file mode 100644 index e69de29..0000000 diff --git a/docker/all-in-one/etc/pgbouncer-custom/ssl-config.ini b/docker/all-in-one/etc/pgbouncer-custom/ssl-config.ini deleted file mode 100644 index 69a8025..0000000 --- a/docker/all-in-one/etc/pgbouncer-custom/ssl-config.ini +++ /dev/null @@ -1,4 +0,0 @@ -client_tls_sslmode = allow -client_tls_ca_file = /etc/ssl/certs/postgres/ca.crt -client_tls_key_file = /etc/ssl/private/server.key -client_tls_cert_file = /etc/ssl/certs/postgres/server.crt diff --git a/docker/all-in-one/etc/pgbouncer/pgbouncer.ini b/docker/all-in-one/etc/pgbouncer/pgbouncer.ini deleted file mode 100644 index 5a36ac1..0000000 --- a/docker/all-in-one/etc/pgbouncer/pgbouncer.ini +++ /dev/null @@ -1,363 +0,0 @@ -;;; -;;; PgBouncer configuration file -;;; - -;; database name = connect string -;; -;; connect string params: -;; dbname= host= port= user= password= auth_user= -;; client_encoding= datestyle= timezone= -;; pool_size= reserve_pool= max_db_connections= -;; pool_mode= connect_query= application_name= -[databases] -* = host=localhost auth_user=pgbouncer - -;; foodb over Unix socket -;foodb = - -;; redirect bardb to bazdb on localhost -;bardb = host=localhost dbname=bazdb - -;; access to dest database will go with single user -;forcedb = host=localhost port=300 user=baz 
password=foo client_encoding=UNICODE datestyle=ISO connect_query='SELECT 1' - -;; use custom pool sizes -;nondefaultdb = pool_size=50 reserve_pool=10 - -;; use auth_user with auth_query if user not present in auth_file -;; auth_user must exist in auth_file -; foodb = auth_user=bar - -;; fallback connect string -;* = host=testserver - -;; User-specific configuration -[users] - -;user1 = pool_mode=transaction max_user_connections=10 - -;; Configuration section -[pgbouncer] - -;;; -;;; Administrative settings -;;; - -pidfile = /var/run/pgbouncer/pgbouncer.pid - -;;; -;;; Where to wait for clients -;;; - -;; IP address or * which means all IPs -listen_addr = * -listen_port = 6543 - -;; Unix socket is also used for -R. -;; On Debian it should be /var/run/postgresql -unix_socket_dir = /tmp -;unix_socket_mode = 0777 -;unix_socket_group = - -;;; -;;; TLS settings for accepting clients -;;; - -;; disable, allow, require, verify-ca, verify-full -;client_tls_sslmode = disable - -;; Path to file that contains trusted CA certs -;client_tls_ca_file = - -;; Private key and cert to present to clients. -;; Required for accepting TLS connections from clients. -;client_tls_key_file = -;client_tls_cert_file = - -;; fast, normal, secure, legacy, -;client_tls_ciphers = fast - -;; all, secure, tlsv1.0, tlsv1.1, tlsv1.2, tlsv1.3 -;client_tls_protocols = secure - -;; none, auto, legacy -;client_tls_dheparams = auto - -;; none, auto, -;client_tls_ecdhcurve = auto - -;;; -;;; TLS settings for connecting to backend databases -;;; - -;; disable, allow, require, verify-ca, verify-full -;server_tls_sslmode = disable - -;; Path to that contains trusted CA certs -;server_tls_ca_file = - -;; Private key and cert to present to backend. -;; Needed only if backend server require client cert. 
-;server_tls_key_file = -;server_tls_cert_file = - -;; all, secure, tlsv1.0, tlsv1.1, tlsv1.2, tlsv1.3 -;server_tls_protocols = secure - -;; fast, normal, secure, legacy, -;server_tls_ciphers = fast - -;;; -;;; Authentication settings -;;; - -;; any, trust, plain, md5, cert, hba, pam -auth_type = scram-sha-256 -auth_file = /etc/pgbouncer/userlist.txt - -;; Path to HBA-style auth config -;auth_hba_file = - -;; Query to use to fetch password from database. Result -;; must have 2 columns - username and password hash. -auth_query = SELECT * FROM pgbouncer.get_auth($1) - -;;; -;;; Users allowed into database 'pgbouncer' -;;; - -;; comma-separated list of users who are allowed to change settings -admin_users = pgbouncer - -;; comma-separated list of users who are just allowed to use SHOW command -stats_users = pgbouncer - -;;; -;;; Pooler personality questions -;;; - -;; When server connection is released back to pool: -;; session - after client disconnects (default) -;; transaction - after transaction finishes -;; statement - after statement finishes -pool_mode = transaction - -;; Query for cleaning connection immediately after releasing from -;; client. No need to put ROLLBACK here, pgbouncer does not reuse -;; connections where transaction is left open. -;server_reset_query = DISCARD ALL - -;; Whether server_reset_query should run in all pooling modes. If it -;; is off, server_reset_query is used only for session-pooling. -;server_reset_query_always = 0 - -;; Comma-separated list of parameters to ignore when given in startup -;; packet. Newer JDBC versions require the extra_float_digits here. -ignore_startup_parameters = extra_float_digits - -;; When taking idle server into use, this query is run first. -;server_check_query = select 1 - -;; If server was used more recently that this many seconds ago, -; skip the check query. Value 0 may or may not run in immediately. 
-;server_check_delay = 30 - -;; Close servers in session pooling mode after a RECONNECT, RELOAD, -;; etc. when they are idle instead of at the end of the session. -;server_fast_close = 0 - -;; Use as application_name on server. -;application_name_add_host = 0 - -;; Period for updating aggregated stats. -;stats_period = 60 - -;;; -;;; Connection limits -;;; - -;; Total number of clients that can connect -;max_client_conn = 100 - -;; Default pool size. 20 is good number when transaction pooling -;; is in use, in session pooling it needs to be the number of -;; max clients you want to handle at any moment -default_pool_size = 15 - -;; Minimum number of server connections to keep in pool. -;min_pool_size = 0 - -; how many additional connection to allow in case of trouble -;reserve_pool_size = 0 - -;; If a clients needs to wait more than this many seconds, use reserve -;; pool. -;reserve_pool_timeout = 5 - -;; Maximum number of server connections for a database -;max_db_connections = 0 - -;; Maximum number of server connections for a user -;max_user_connections = 0 - -;; If off, then server connections are reused in LIFO manner -;server_round_robin = 0 - -;;; -;;; Logging -;;; - -;; Syslog settings -;syslog = 0 -;syslog_facility = daemon -;syslog_ident = pgbouncer - -;; log if client connects or server connection is made -;log_connections = 1 - -;; log if and why connection was closed -;log_disconnections = 1 - -;; log error messages pooler sends to clients -;log_pooler_errors = 1 - -;; write aggregated stats into log -;log_stats = 1 - -;; Logging verbosity. Same as -v switch on command line. -;verbose = 0 - -;;; -;;; Timeouts -;;; - -;; Close server connection if its been connected longer. -;server_lifetime = 3600 - -;; Close server connection if its not been used in this time. Allows -;; to clean unnecessary connections from pool after peak. -;server_idle_timeout = 600 - -;; Cancel connection attempt if server does not answer takes longer. 
-;server_connect_timeout = 15 - -;; If server login failed (server_connect_timeout or auth failure) -;; then wait this many second. -;server_login_retry = 15 - -;; Dangerous. Server connection is closed if query does not return in -;; this time. Should be used to survive network problems, _not_ as -;; statement_timeout. (default: 0) -;query_timeout = 0 - -;; Dangerous. Client connection is closed if the query is not -;; assigned to a server in this time. Should be used to limit the -;; number of queued queries in case of a database or network -;; failure. (default: 120) -;query_wait_timeout = 120 - -;; Dangerous. Client connection is closed if no activity in this -;; time. Should be used to survive network problems. (default: 0) -;client_idle_timeout = 0 - -;; Disconnect clients who have not managed to log in after connecting -;; in this many seconds. -;client_login_timeout = 60 - -;; Clean automatically created database entries (via "*") if they stay -;; unused in this many seconds. -; autodb_idle_timeout = 3600 - -;; Close connections which are in "IDLE in transaction" state longer -;; than this many seconds. -;idle_transaction_timeout = 0 - -;; How long SUSPEND/-R waits for buffer flush before closing -;; connection. -;suspend_timeout = 10 - -;;; -;;; Low-level tuning options -;;; - -;; buffer for streaming packets -;pkt_buf = 4096 - -;; man 2 listen -;listen_backlog = 128 - -;; Max number pkt_buf to process in one event loop. -;sbuf_loopcnt = 5 - -;; Maximum PostgreSQL protocol packet size. -;max_packet_size = 2147483647 - -;; Set SO_REUSEPORT socket option -;so_reuseport = 0 - -;; networking options, for info: man 7 tcp - -;; Linux: Notify program about new connection only if there is also -;; data received. (Seconds to wait.) On Linux the default is 45, on -;; other OS'es 0. 
-;tcp_defer_accept = 0 - -;; In-kernel buffer size (Linux default: 4096) -;tcp_socket_buffer = 0 - -;; whether tcp keepalive should be turned on (0/1) -;tcp_keepalive = 1 - -;; The following options are Linux-specific. They also require -;; tcp_keepalive=1. - -;; Count of keepalive packets -;tcp_keepcnt = 0 - -;; How long the connection can be idle before sending keepalive -;; packets -;tcp_keepidle = 0 - -;; The time between individual keepalive probes -;tcp_keepintvl = 0 - -;; How long may transmitted data remain unacknowledged before TCP -;; connection is closed (in milliseconds) -;tcp_user_timeout = 0 - -;; DNS lookup caching time -;dns_max_ttl = 15 - -;; DNS zone SOA lookup period -;dns_zone_check_period = 0 - -;; DNS negative result caching time -;dns_nxdomain_ttl = 15 - -;; Custom resolv.conf file, to set custom DNS servers or other options -;; (default: empty = use OS settings) -;resolv_conf = /etc/pgbouncer/resolv.conf - -;;; -;;; Random stuff -;;; - -;; Hackish security feature. Helps against SQL injection: when PQexec -;; is disabled, multi-statement cannot be made. -;disable_pqexec = 0 - -;; Config file to use for next RELOAD/SIGHUP -;; By default contains config file from command line. -;conffile - -;; Windows service name to register as. job_name is alias for -;; service_name, used by some Skytools scripts. 
-;service_name = pgbouncer -;job_name = pgbouncer - -;; Read additional config from other file -;%include /etc/pgbouncer/pgbouncer-other.ini - -%include /etc/pgbouncer-custom/generated-optimizations.ini -%include /etc/pgbouncer-custom/custom-overrides.ini -# %include /etc/pgbouncer-custom/ssl-config.ini diff --git a/docker/all-in-one/etc/pgbouncer/userlist.txt b/docker/all-in-one/etc/pgbouncer/userlist.txt deleted file mode 100644 index e69de29..0000000 diff --git a/docker/all-in-one/etc/postgresql-custom/custom-overrides.conf b/docker/all-in-one/etc/postgresql-custom/custom-overrides.conf deleted file mode 100644 index e69de29..0000000 diff --git a/docker/all-in-one/etc/postgresql-custom/generated-optimizations.conf b/docker/all-in-one/etc/postgresql-custom/generated-optimizations.conf deleted file mode 100644 index e69de29..0000000 diff --git a/docker/all-in-one/etc/postgresql-custom/postgresql-platform-defaults.conf b/docker/all-in-one/etc/postgresql-custom/postgresql-platform-defaults.conf deleted file mode 100644 index e62a1de..0000000 --- a/docker/all-in-one/etc/postgresql-custom/postgresql-platform-defaults.conf +++ /dev/null @@ -1,9 +0,0 @@ -# these get imported _after_ the user specified overrides -row_security = on -wal_level = logical -max_wal_senders = 10 -max_replication_slots = 5 -log_connections = on -statement_timeout = 120000 -jit = off -pgaudit.log = 'ddl' diff --git a/docker/all-in-one/etc/postgresql.schema.sql b/docker/all-in-one/etc/postgresql.schema.sql deleted file mode 100644 index ecffdf7..0000000 --- a/docker/all-in-one/etc/postgresql.schema.sql +++ /dev/null @@ -1,16 +0,0 @@ -\set admin_pass `echo "${tealbase_ADMIN_PASSWORD:-$POSTGRES_PASSWORD}"` -\set pgrst_pass `echo "${AUTHENTICATOR_PASSWORD:-$POSTGRES_PASSWORD}"` -\set pgbouncer_pass `echo "${PGBOUNCER_PASSWORD:-$POSTGRES_PASSWORD}"` -\set auth_pass `echo "${tealbase_AUTH_ADMIN_PASSWORD:-$POSTGRES_PASSWORD}"` -\set storage_pass `echo 
"${tealbase_STORAGE_ADMIN_PASSWORD:-$POSTGRES_PASSWORD}"` -\set replication_pass `echo "${tealbase_REPLICATION_ADMIN_PASSWORD:-$POSTGRES_PASSWORD}"` -\set read_only_pass `echo "${tealbase_READ_ONLY_USER_PASSWORD:-$POSTGRES_PASSWORD}"` - -ALTER USER tealbase_admin WITH PASSWORD :'admin_pass'; -ALTER USER authenticator WITH PASSWORD :'pgrst_pass'; -ALTER USER pgbouncer WITH PASSWORD :'pgbouncer_pass'; -ALTER USER tealbase_auth_admin WITH PASSWORD :'auth_pass'; -ALTER USER tealbase_storage_admin WITH PASSWORD :'storage_pass'; -ALTER USER tealbase_replication_admin WITH PASSWORD :'replication_pass'; -ALTER ROLE tealbase_read_only_user WITH PASSWORD :'read_only_pass'; -ALTER ROLE tealbase_admin SET search_path TO "$user",public,auth,extensions; diff --git a/docker/all-in-one/etc/postgresql/logging.conf b/docker/all-in-one/etc/postgresql/logging.conf deleted file mode 100644 index b8d64da..0000000 --- a/docker/all-in-one/etc/postgresql/logging.conf +++ /dev/null @@ -1,33 +0,0 @@ -# - Where to Log - - -log_destination = 'csvlog' # Valid values are combinations of - # stderr, csvlog, syslog, and eventlog, - # depending on platform. csvlog - # requires logging_collector to be on. - -# This is used when logging to stderr: -logging_collector = on # Enable capturing of stderr and csvlog - # into log files. Required to be on for - # csvlogs. - # (change requires restart) - -# These are only used if logging_collector is on: -log_directory = '/var/log/postgresql' # directory where log files are written, - # can be absolute or relative to PGDATA -log_filename = 'postgresql.log' # log file name pattern, - # can include strftime() escapes -log_file_mode = 0640 # creation mode for log files, - # begin with 0 to use octal notation -log_rotation_age = 0 # Automatic rotation of logfiles will - # happen after that time. 0 disables. -log_rotation_size = 0 # Automatic rotation of logfiles will - # happen after that much log output. - # 0 disables. 
-#log_truncate_on_rotation = off # If on, an existing log file with the - # same name as the new log file will be - # truncated rather than appended to. - # But such truncation only occurs on - # time-driven rotation, not on restarts - # or size-driven rotation. Default is - # off, meaning append to existing files - # in all cases. diff --git a/docker/all-in-one/etc/postgresql/pg_hba.conf b/docker/all-in-one/etc/postgresql/pg_hba.conf deleted file mode 100755 index 76bd2f0..0000000 --- a/docker/all-in-one/etc/postgresql/pg_hba.conf +++ /dev/null @@ -1,94 +0,0 @@ -# PostgreSQL Client Authentication Configuration File -# =================================================== -# -# Refer to the "Client Authentication" section in the PostgreSQL -# documentation for a complete description of this file. A short -# synopsis follows. -# -# This file controls: which hosts are allowed to connect, how clients -# are authenticated, which PostgreSQL user names they can use, which -# databases they can access. Records take one of these forms: -# -# local DATABASE USER METHOD [OPTIONS] -# host DATABASE USER ADDRESS METHOD [OPTIONS] -# hostssl DATABASE USER ADDRESS METHOD [OPTIONS] -# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS] -# hostgssenc DATABASE USER ADDRESS METHOD [OPTIONS] -# hostnogssenc DATABASE USER ADDRESS METHOD [OPTIONS] -# -# (The uppercase items must be replaced by actual values.) -# -# The first field is the connection type: "local" is a Unix-domain -# socket, "host" is either a plain or SSL-encrypted TCP/IP socket, -# "hostssl" is an SSL-encrypted TCP/IP socket, and "hostnossl" is a -# non-SSL TCP/IP socket. Similarly, "hostgssenc" uses a -# GSSAPI-encrypted TCP/IP socket, while "hostnogssenc" uses a -# non-GSSAPI socket. -# -# DATABASE can be "all", "sameuser", "samerole", "replication", a -# database name, or a comma-separated list thereof. The "all" -# keyword does not match "replication". 
Access to replication -# must be enabled in a separate record (see example below). -# -# USER can be "all", a user name, a group name prefixed with "+", or a -# comma-separated list thereof. In both the DATABASE and USER fields -# you can also write a file name prefixed with "@" to include names -# from a separate file. -# -# ADDRESS specifies the set of hosts the record matches. It can be a -# host name, or it is made up of an IP address and a CIDR mask that is -# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that -# specifies the number of significant bits in the mask. A host name -# that starts with a dot (.) matches a suffix of the actual host name. -# Alternatively, you can write an IP address and netmask in separate -# columns to specify the set of hosts. Instead of a CIDR-address, you -# can write "samehost" to match any of the server's own IP addresses, -# or "samenet" to match any address in any subnet that the server is -# directly connected to. -# -# METHOD can be "trust", "reject", "md5", "password", "scram-sha-256", -# "gss", "sspi", "ident", "peer", "pam", "ldap", "radius" or "cert". -# Note that "password" sends passwords in clear text; "md5" or -# "scram-sha-256" are preferred since they send encrypted passwords. -# -# OPTIONS are a set of options for the authentication in the format -# NAME=VALUE. The available options depend on the different -# authentication methods -- refer to the "Client Authentication" -# section in the documentation for a list of which options are -# available for which authentication methods. -# -# Database and user names containing spaces, commas, quotes and other -# special characters must be quoted. Quoting one of the keywords -# "all", "sameuser", "samerole" or "replication" makes the name lose -# its special character, and just match a database or username with -# that name. -# -# This file is read on server startup and when the server receives a -# SIGHUP signal. 
If you edit the file on a running system, you have to -# SIGHUP the server for the changes to take effect, run "pg_ctl reload", -# or execute "SELECT pg_reload_conf()". -# -# Put your actual configuration here -# ---------------------------------- -# -# If you want to allow non-local connections, you need to add more -# "host" records. In that case you will also need to make PostgreSQL -# listen on a non-local interface via the listen_addresses -# configuration parameter, or via the -i or -h command line switches. - -# TYPE DATABASE USER ADDRESS METHOD - -# trust local connections -local all tealbase_admin scram-sha-256 -local all all peer map=tealbase_map -host all all 127.0.0.1/32 trust -host all all ::1/128 trust - -# IPv4 external connections -host all all 10.0.0.0/8 scram-sha-256 -host all all 172.16.0.0/12 scram-sha-256 -host all all 192.168.0.0/16 scram-sha-256 -host all all 0.0.0.0/0 scram-sha-256 - -# IPv6 external connections -host all all ::0/0 scram-sha-256 diff --git a/docker/all-in-one/etc/postgrest/base.conf b/docker/all-in-one/etc/postgrest/base.conf deleted file mode 100644 index e5120ed..0000000 --- a/docker/all-in-one/etc/postgrest/base.conf +++ /dev/null @@ -1,7 +0,0 @@ -server-port="pgrst_server_port" -admin-server-port="pgrst_admin_server_port" -db-schema="pgrst_db_schemas" -db-extra-search-path="pgrst_db_extra_search_path" -db-anon-role="pgrst_db_anon_role" -jwt-secret="pgrst_jwt_secret" -db-uri="postgres://authenticator@localhost:5432/postgres?application_name=postgrest" diff --git a/docker/all-in-one/etc/postgrest/bootstrap.sh b/docker/all-in-one/etc/postgrest/bootstrap.sh deleted file mode 100755 index 9ac21d2..0000000 --- a/docker/all-in-one/etc/postgrest/bootstrap.sh +++ /dev/null @@ -1,8 +0,0 @@ -#! 
/usr/bin/env bash -set -euo pipefail -set -x - -cd "$(dirname "$0")" -cat $@ > merged.conf - -/opt/postgrest merged.conf diff --git a/docker/all-in-one/etc/postgrest/generated.conf b/docker/all-in-one/etc/postgrest/generated.conf deleted file mode 100644 index e69de29..0000000 diff --git a/docker/all-in-one/etc/salt/minion b/docker/all-in-one/etc/salt/minion deleted file mode 100644 index 29d8406..0000000 --- a/docker/all-in-one/etc/salt/minion +++ /dev/null @@ -1,71 +0,0 @@ -# Minions can connect to multiple masters simultaneously (all masters -# are "hot"), or can be configured to failover if a master becomes -# unavailable. Multiple hot masters are configured by setting this -# value to "str". Failover masters can be requested by setting -# to "failover". MAKE SURE TO SET master_alive_interval if you are -# using failover. -# Setting master_type to 'disable' lets you have a running minion (with engines and -# beacons) without a master connection -master_type: disable - -# The minion can locally cache the return data from jobs sent to it, this -# can be a good way to keep track of jobs the minion has executed -# (on the minion side). By default this feature is disabled, to enable, set -# cache_jobs to True. -cache_jobs: True - -# The minion can take a while to start up when lspci and/or dmidecode is used -# to populate the grains for the minion. Set this to False if you do not need -# GPU hardware grains for your minion. -enable_gpu_grains: False - -# Backup files that are replaced by file.managed and file.recurse under -# 'cachedir'/file_backup relative to their original location and appended -# with a timestamp. The only valid setting is "minion". Disabled by default. 
-# -# Alternatively this can be specified for each file in state files: -# /etc/ssh/sshd_config: -# file.managed: -# - source: salt://ssh/sshd_config -# - backup: minion -# -backup_mode: minion - -##### File Directory Settings ##### -########################################## -# The Salt Minion can redirect all file server operations to a local directory, -# this allows for the same state tree that is on the master to be used if -# copied completely onto the minion. This is a literal copy of the settings on -# the master but used to reference a local directory on the minion. - -# Set the file client. The client defaults to looking on the master server for -# files, but can be directed to look at the local file directory setting -# defined below by setting it to "local". Setting a local file_client runs the -# minion in masterless mode. -file_client: local - -# The file directory works on environments passed to the minion, each environment -# can have multiple root directories, the subdirectories in the multiple file -# roots cannot match, otherwise the downloaded files will not be able to be -# reliably ensured. A base environment is required to house the top file. -# Example: -# file_roots: -# base: -# - /srv/salt/ -# dev: -# - /srv/salt/dev/services -# - /srv/salt/dev/states -# prod: -# - /srv/salt/prod/services -# - /srv/salt/prod/states -# -file_roots: - base: - - /data/salt/state - -# The Salt pillar is searched for locally if file_client is set to local. 
If -# this is the case, and pillar data is defined, then the pillar_roots need to -# also be configured on the minion: -pillar_roots: - base: - - /data/salt/pillar diff --git a/docker/all-in-one/etc/sudoers.d/adminapi b/docker/all-in-one/etc/sudoers.d/adminapi deleted file mode 100644 index e386014..0000000 --- a/docker/all-in-one/etc/sudoers.d/adminapi +++ /dev/null @@ -1,27 +0,0 @@ -Cmnd_Alias ENVOY = /usr/bin/supervisorctl start services\:envoy, /usr/bin/supervisorctl stop services\:envoy, /usr/bin/supervisorctl restart services\:envoy, /usr/bin/supervisorctl status services\:envoy -Cmnd_Alias KONG = /usr/bin/supervisorctl start services\:kong, /usr/bin/supervisorctl stop services\:kong, /usr/bin/supervisorctl restart services\:kong, /usr/bin/supervisorctl status services\:kong -Cmnd_Alias POSTGREST = /usr/bin/supervisorctl start services\:postgrest, /usr/bin/supervisorctl stop services\:postgrest, /usr/bin/supervisorctl restart services\:postgrest, /usr/bin/supervisorctl status services\:postgrest -Cmnd_Alias GOTRUE = /usr/bin/supervisorctl start services\:gotrue, /usr/bin/supervisorctl stop services\:gotrue, /usr/bin/supervisorctl restart services\:gotrue, /usr/bin/supervisorctl status services\:gotrue -Cmnd_Alias PGBOUNCER = /usr/bin/supervisorctl start pgbouncer, /usr/bin/supervisorctl stop pgbouncer, /usr/bin/supervisorctl restart pgbouncer, /usr/bin/supervisorctl status pgbouncer - -%adminapi ALL= NOPASSWD: /root/grow_fs.sh -%adminapi ALL= NOPASSWD: /root/manage_readonly_mode.sh -%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/prepare.sh -%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/initiate.sh -%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/complete.sh -%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/check.sh -%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/common.sh -%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/pgsodium_getkey.sh -%adminapi ALL= NOPASSWD: /usr/bin/supervisorctl reread 
-%adminapi ALL= NOPASSWD: /usr/bin/supervisorctl update -%adminapi ALL= NOPASSWD: /usr/bin/supervisorctl restart postgresql -%adminapi ALL= NOPASSWD: /usr/bin/supervisorctl status postgresql -%adminapi ALL= NOPASSWD: /usr/bin/supervisorctl restart adminapi -%adminapi ALL= NOPASSWD: /usr/bin/supervisorctl restart services\:* -%adminapi ALL= NOPASSWD: /usr/sbin/nft -f /etc/nftables/tealbase_managed.conf -%adminapi ALL= NOPASSWD: /usr/bin/admin-mgr -%adminapi ALL= NOPASSWD: ENVOY -%adminapi ALL= NOPASSWD: KONG -%adminapi ALL= NOPASSWD: POSTGREST -%adminapi ALL= NOPASSWD: GOTRUE -%adminapi ALL= NOPASSWD: PGBOUNCER diff --git a/docker/all-in-one/etc/supa-shutdown/shutdown.conf b/docker/all-in-one/etc/supa-shutdown/shutdown.conf deleted file mode 100644 index 384b935..0000000 --- a/docker/all-in-one/etc/supa-shutdown/shutdown.conf +++ /dev/null @@ -1 +0,0 @@ -SHUTDOWN_IDLE_TIME_MINUTES= diff --git a/docker/all-in-one/etc/supervisor/base-services/adminapi.conf b/docker/all-in-one/etc/supervisor/base-services/adminapi.conf deleted file mode 100644 index 2f5d0cd..0000000 --- a/docker/all-in-one/etc/supervisor/base-services/adminapi.conf +++ /dev/null @@ -1,10 +0,0 @@ -[program:adminapi] -command=/opt/tealbase-admin-api -user=adminapi -autorestart=true -autostart=true -startretries=1000 -stdout_logfile=/var/log/services/adminapi.log -redirect_stderr=true -stdout_logfile_maxbytes=10MB -priority=50 diff --git a/docker/all-in-one/etc/supervisor/base-services/logrotate.conf b/docker/all-in-one/etc/supervisor/base-services/logrotate.conf deleted file mode 100644 index a1ccea6..0000000 --- a/docker/all-in-one/etc/supervisor/base-services/logrotate.conf +++ /dev/null @@ -1,11 +0,0 @@ -[program:logrotate] -command=/usr/local/bin/run-logrotate.sh -autostart=true -autorestart=true -user=root -startretries=1000 -stdout_logfile=/var/log/services/logrotate.log -redirect_stderr=true -stdout_logfile_maxbytes=10MB -priority=50 
-environment=DATA_VOLUME_MOUNTPOINT="%(ENV_DATA_VOLUME_MOUNTPOINT)s" diff --git a/docker/all-in-one/etc/supervisor/base-services/lsn-checkpoint-push.conf b/docker/all-in-one/etc/supervisor/base-services/lsn-checkpoint-push.conf deleted file mode 100644 index 36abcf8..0000000 --- a/docker/all-in-one/etc/supervisor/base-services/lsn-checkpoint-push.conf +++ /dev/null @@ -1,10 +0,0 @@ -[program:lsn-checkpoint-push] -command=/usr/bin/admin-mgr lsn-checkpoint-push --watch -user=root -autorestart=false -autostart=false -startretries=1000 -stdout_logfile=/var/log/services/lsn-push.log -redirect_stderr=true -stdout_logfile_maxbytes=10MB -priority=50 diff --git a/docker/all-in-one/etc/supervisor/base-services/pg_egress_collect.conf b/docker/all-in-one/etc/supervisor/base-services/pg_egress_collect.conf deleted file mode 100644 index d340a9c..0000000 --- a/docker/all-in-one/etc/supervisor/base-services/pg_egress_collect.conf +++ /dev/null @@ -1,10 +0,0 @@ -[program:pg_egress_collect] -command=/bin/bash -c "tcpdump -s 128 -Q out -nn -tt -vv -p -l 'tcp and (port 5432 or port 6543)' | perl /opt/pg_egress_collect/pg_egress_collect.pl" -user=root -autorestart=true -autostart=true -startretries=1000 -stdout_logfile=/var/log/services/pg_egress_collect.log -redirect_stderr=true -stdout_logfile_maxbytes=10MB -priority=50 diff --git a/docker/all-in-one/etc/supervisor/base-services/postgresql.conf b/docker/all-in-one/etc/supervisor/base-services/postgresql.conf deleted file mode 100644 index a8b5c5d..0000000 --- a/docker/all-in-one/etc/supervisor/base-services/postgresql.conf +++ /dev/null @@ -1,13 +0,0 @@ -[program:postgresql] -command=/usr/local/bin/postgres-entrypoint.sh postgres -D /etc/postgresql -user=postgres -stopsignal=INT -autorestart=true -autostart=true -startretries=1000 -priority=1 -# Inherit env vars from https://github.com/tealbase/postgres/blob/develop/Dockerfile#L800 
-environment=POSTGRES_PASSWORD="%(ENV_POSTGRES_PASSWORD)s",POSTGRES_HOST="%(ENV_POSTGRES_HOST)s",HOME="/var/lib/postgresql" -stdout_logfile=/var/log/postgresql/init.log -redirect_stderr=true -stdout_logfile_maxbytes=10MB diff --git a/docker/all-in-one/etc/supervisor/base-services/supa-shutdown.conf b/docker/all-in-one/etc/supervisor/base-services/supa-shutdown.conf deleted file mode 100644 index 06b24a7..0000000 --- a/docker/all-in-one/etc/supervisor/base-services/supa-shutdown.conf +++ /dev/null @@ -1,11 +0,0 @@ -[program:supa-shutdown] -command=/usr/local/bin/supa-shutdown.sh -user=root -autorestart=true -autostart=false -startretries=1000 -stdout_logfile=/var/log/services/supa-shutdown.log -redirect_stderr=true -stdout_logfile_maxbytes=10MB -priority=50 -environment=MAX_IDLE_TIME_MINUTES="%(ENV_MAX_IDLE_TIME_MINUTES)s" diff --git a/docker/all-in-one/etc/supervisor/services/envoy.conf b/docker/all-in-one/etc/supervisor/services/envoy.conf deleted file mode 100644 index 2b33807..0000000 --- a/docker/all-in-one/etc/supervisor/services/envoy.conf +++ /dev/null @@ -1,10 +0,0 @@ -[program:envoy] -command=/opt/envoy-hot-restarter.py /opt/start-envoy.sh -user=envoy -autorestart=true -autostart=false -stopasgroup=true -startretries=1000 -stdout_logfile=/var/log/services/envoy.log -redirect_stderr=true -stdout_logfile_maxbytes=10MB diff --git a/docker/all-in-one/etc/supervisor/services/exporter.conf b/docker/all-in-one/etc/supervisor/services/exporter.conf deleted file mode 100644 index fbe53d7..0000000 --- a/docker/all-in-one/etc/supervisor/services/exporter.conf +++ /dev/null @@ -1,11 +0,0 @@ -[program:exporter] -command=/opt/postgres_exporter/postgres_exporter --disable-settings-metrics --extend.query-path=/opt/postgres_exporter/queries.yml --disable-default-metrics --no-collector.locks --no-collector.replication --no-collector.replication_slot --no-collector.stat_bgwriter --no-collector.stat_database --no-collector.stat_user_tables --no-collector.statio_user_tables 
--no-collector.wal -user=root -autorestart=true -autostart=true -startretries=1000 -environment=DATA_SOURCE_NAME="host=localhost dbname=postgres sslmode=disable user=tealbase_admin pg_stat_statements.track=none application_name=postgres_exporter" -stdout_logfile=/var/log/services/exporter.log -redirect_stderr=true -stdout_logfile_maxbytes=10MB -priority=150 diff --git a/docker/all-in-one/etc/supervisor/services/fail2ban.conf b/docker/all-in-one/etc/supervisor/services/fail2ban.conf deleted file mode 100644 index 8000386..0000000 --- a/docker/all-in-one/etc/supervisor/services/fail2ban.conf +++ /dev/null @@ -1,9 +0,0 @@ -[program:fail2ban] -command=/usr/bin/fail2ban-client -f start -user=root -autorestart=true -autostart=true -stdout_logfile=/var/log/services/fail2ban.log -redirect_stderr=true -stdout_logfile_maxbytes=10MB -priority=200 diff --git a/docker/all-in-one/etc/supervisor/services/gotrue.conf b/docker/all-in-one/etc/supervisor/services/gotrue.conf deleted file mode 100644 index 679057e..0000000 --- a/docker/all-in-one/etc/supervisor/services/gotrue.conf +++ /dev/null @@ -1,10 +0,0 @@ -[program:gotrue] -directory=/opt/gotrue -command=/opt/gotrue/gotrue --config /etc/gotrue.env -user=gotrue -startretries=1000 -autorestart=true -autostart=true -stdout_logfile=/var/log/services/gotrue.log -redirect_stderr=true -stdout_logfile_maxbytes=10MB diff --git a/docker/all-in-one/etc/supervisor/services/group.conf b/docker/all-in-one/etc/supervisor/services/group.conf deleted file mode 100644 index ef6673d..0000000 --- a/docker/all-in-one/etc/supervisor/services/group.conf +++ /dev/null @@ -1,3 +0,0 @@ -[group:services] -programs=gotrue,kong,postgrest -priority=100 diff --git a/docker/all-in-one/etc/supervisor/services/kong.conf b/docker/all-in-one/etc/supervisor/services/kong.conf deleted file mode 100644 index 04f5219..0000000 --- a/docker/all-in-one/etc/supervisor/services/kong.conf +++ /dev/null @@ -1,11 +0,0 @@ -[program:kong] -command=/init/start-kong.sh 
-user=kong -autorestart=true -autostart=true -stopasgroup=true -startretries=1000 -environment=KONG_NGINX_DAEMON="off" -stdout_logfile=/var/log/services/kong.log -redirect_stderr=true -stdout_logfile_maxbytes=10MB diff --git a/docker/all-in-one/etc/supervisor/services/pgbouncer.conf b/docker/all-in-one/etc/supervisor/services/pgbouncer.conf deleted file mode 100644 index 6926c34..0000000 --- a/docker/all-in-one/etc/supervisor/services/pgbouncer.conf +++ /dev/null @@ -1,10 +0,0 @@ -[program:pgbouncer] -command=/usr/sbin/pgbouncer /etc/pgbouncer/pgbouncer.ini -user=pgbouncer -stopsignal=INT -autorestart=false -autostart=false -stdout_logfile=/var/log/services/pgbouncer.log -redirect_stderr=true -stdout_logfile_maxbytes=10MB -priority=150 diff --git a/docker/all-in-one/etc/supervisor/services/postgrest.conf b/docker/all-in-one/etc/supervisor/services/postgrest.conf deleted file mode 100644 index ad43b52..0000000 --- a/docker/all-in-one/etc/supervisor/services/postgrest.conf +++ /dev/null @@ -1,10 +0,0 @@ -[program:postgrest] -command=/etc/postgrest/bootstrap.sh /etc/postgrest/generated.conf /etc/postgrest/base.conf -user=postgrest -autorestart=true -autostart=true -stopasgroup=true -startretries=1000 -stdout_logfile=/var/log/services/postgrest.log -redirect_stderr=true -stdout_logfile_maxbytes=10MB diff --git a/docker/all-in-one/etc/supervisor/supervisord.conf b/docker/all-in-one/etc/supervisor/supervisord.conf deleted file mode 100644 index d64f40f..0000000 --- a/docker/all-in-one/etc/supervisor/supervisord.conf +++ /dev/null @@ -1,170 +0,0 @@ -; Sample supervisor config file. -; -; For more information on the config file, please see: -; http://supervisord.org/configuration.html -; -; Notes: -; - Shell expansion ("~" or "$HOME") is not supported. Environment -; variables can be expanded using this syntax: "%(ENV_HOME)s". -; - Quotes around values are not supported, except in the case of -; the environment= options as shown below. 
-; - Comments must have a leading space: "a=b ;comment" not "a=b;comment". -; - Command will be truncated if it looks like a config file comment, e.g. -; "command=bash -c 'foo ; bar'" will truncate to "command=bash -c 'foo ". -; -; Warning: -; Paths throughout this example file use /tmp because it is available on most -; systems. You will likely need to change these to locations more appropriate -; for your system. Some systems periodically delete older files in /tmp. -; Notably, if the socket file defined in the [unix_http_server] section below -; is deleted, supervisorctl will be unable to connect to supervisord. - -[unix_http_server] -file=/tmp/supervisor.sock ; the path to the socket file -chmod=0760 ; socket file mode (default 0700) -chown=root:root ; socket file uid:gid owner -;username=user ; default is no username (open server) -;password=123 ; default is no password (open server) - -; Security Warning: -; The inet HTTP server is not enabled by default. The inet HTTP server is -; enabled by uncommenting the [inet_http_server] section below. The inet -; HTTP server is intended for use within a trusted environment only. It -; should only be bound to localhost or only accessible from within an -; isolated, trusted network. The inet HTTP server does not support any -; form of encryption. The inet HTTP server does not use authentication -; by default (see the username= and password= options to add authentication). -; Never expose the inet HTTP server to the public internet. 
- -;[inet_http_server] ; inet (TCP) server disabled by default -;port=127.0.0.1:9001 ; ip_address:port specifier, *:port for all iface -;username=user ; default is no username (open server) -;password=123 ; default is no password (open server) - -[supervisord] -logfile=/tmp/supervisord.log ; main log file; default $CWD/supervisord.log -logfile_maxbytes=50MB ; max main logfile bytes b4 rotation; default 50MB -logfile_backups=10 ; # of main logfile backups; 0 means none, default 10 -loglevel=info ; log level; default info; others: debug,warn,trace -pidfile=/tmp/supervisord.pid ; supervisord pidfile; default supervisord.pid -nodaemon=true ; start in foreground if true; default false -silent=false ; no logs to stdout if true; default false -minfds=1024 ; min. avail startup file descriptors; default 1024 -minprocs=200 ; min. avail process descriptors;default 200 -user=root ; setuid to this UNIX account at startup; recommended if root -;umask=022 ; process file creation umask; default 022 -;identifier=supervisor ; supervisord identifier, default is 'supervisor' -;directory=/tmp ; default is not to cd during start -;nocleanup=true ; don't clean up tempfiles at start; default false -;childlogdir=/tmp ; 'AUTO' child log dir, default $TEMP -;environment=KEY="value" ; key value pairs to add to environment -;strip_ansi=false ; strip ansi escape codes in logs; def. false - -; The rpcinterface:supervisor section must remain in the config file for -; RPC (supervisorctl/web interface) to work. Additional interfaces may be -; added by defining them in separate [rpcinterface:x] sections. - -[rpcinterface:supervisor] -supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface - -; The supervisorctl section configures how supervisorctl will connect to -; supervisord. configure it match the settings in either the unix_http_server -; or inet_http_server section. 
- -[supervisorctl] -serverurl=unix:///tmp/supervisor.sock ; use a unix:// URL for a unix socket -;serverurl=http://127.0.0.1:9001 ; use an http:// url to specify an inet socket -;username=chris ; should be same as in [*_http_server] if set -;password=123 ; should be same as in [*_http_server] if set -;prompt=mysupervisor ; cmd line prompt (default "supervisor") -;history_file=~/.sc_history ; use readline history if available - -; The sample program section below shows all possible program subsection values. -; Create one or more 'real' program: sections to be able to control them under -; supervisor. - -;[program:theprogramname] -;command=/bin/cat ; the program (relative uses PATH, can take args) -;process_name=%(program_name)s ; process_name expr (default %(program_name)s) -;numprocs=1 ; number of processes copies to start (def 1) -;directory=/tmp ; directory to cwd to before exec (def no cwd) -;umask=022 ; umask for process (default None) -;priority=999 ; the relative start priority (default 999) -;autostart=true ; start at supervisord start (default: true) -;startsecs=1 ; # of secs prog must stay up to be running (def. 
1) -;startretries=3 ; max # of serial start failures when starting (default 3) -;autorestart=unexpected ; when to restart if exited after running (def: unexpected) -;exitcodes=0 ; 'expected' exit codes used with autorestart (default 0) -;stopsignal=QUIT ; signal used to kill process (default TERM) -;stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10) -;stopasgroup=false ; send stop signal to the UNIX process group (default false) -;killasgroup=false ; SIGKILL the UNIX process group (def false) -;user=chrism ; setuid to this UNIX account to run the program -;redirect_stderr=true ; redirect proc stderr to stdout (default false) -;stdout_logfile=/a/path ; stdout log path, NONE for none; default AUTO -;stdout_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB) -;stdout_logfile_backups=10 ; # of stdout logfile backups (0 means none, default 10) -;stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0) -;stdout_events_enabled=false ; emit events on stdout writes (default false) -;stdout_syslog=false ; send stdout to syslog with process name (default false) -;stderr_logfile=/a/path ; stderr log path, NONE for none; default AUTO -;stderr_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB) -;stderr_logfile_backups=10 ; # of stderr logfile backups (0 means none, default 10) -;stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0) -;stderr_events_enabled=false ; emit events on stderr writes (default false) -;stderr_syslog=false ; send stderr to syslog with process name (default false) -;environment=A="1",B="2" ; process environment additions (def no adds) -;serverurl=AUTO ; override serverurl computation (childutils) - -; The sample eventlistener section below shows all possible eventlistener -; subsection values. Create one or more 'real' eventlistener: sections to be -; able to handle event notifications sent by supervisord. 
- -;[eventlistener:theeventlistenername] -;command=/bin/eventlistener ; the program (relative uses PATH, can take args) -;process_name=%(program_name)s ; process_name expr (default %(program_name)s) -;numprocs=1 ; number of processes copies to start (def 1) -;events=EVENT ; event notif. types to subscribe to (req'd) -;buffer_size=10 ; event buffer queue size (default 10) -;directory=/tmp ; directory to cwd to before exec (def no cwd) -;umask=022 ; umask for process (default None) -;priority=-1 ; the relative start priority (default -1) -;autostart=true ; start at supervisord start (default: true) -;startsecs=1 ; # of secs prog must stay up to be running (def. 1) -;startretries=3 ; max # of serial start failures when starting (default 3) -;autorestart=unexpected ; autorestart if exited after running (def: unexpected) -;exitcodes=0 ; 'expected' exit codes used with autorestart (default 0) -;stopsignal=QUIT ; signal used to kill process (default TERM) -;stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10) -;stopasgroup=false ; send stop signal to the UNIX process group (default false) -;killasgroup=false ; SIGKILL the UNIX process group (def false) -;user=chrism ; setuid to this UNIX account to run the program -;redirect_stderr=false ; redirect_stderr=true is not allowed for eventlisteners -;stdout_logfile=/a/path ; stdout log path, NONE for none; default AUTO -;stdout_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB) -;stdout_logfile_backups=10 ; # of stdout logfile backups (0 means none, default 10) -;stdout_events_enabled=false ; emit events on stdout writes (default false) -;stdout_syslog=false ; send stdout to syslog with process name (default false) -;stderr_logfile=/a/path ; stderr log path, NONE for none; default AUTO -;stderr_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB) -;stderr_logfile_backups=10 ; # of stderr logfile backups (0 means none, default 10) -;stderr_events_enabled=false ; emit events on 
stderr writes (default false) -;stderr_syslog=false ; send stderr to syslog with process name (default false) -;environment=A="1",B="2" ; process environment additions -;serverurl=AUTO ; override serverurl computation (childutils) - -; The sample group section below shows all possible group values. Create one -; or more 'real' group: sections to create "heterogeneous" process groups. - -;[group:thegroupname] -;programs=progname1,progname2 ; each refers to 'x' in [program:x] definitions -;priority=999 ; the relative start priority (default 999) - -; The [include] section can just contain the "files" setting. This -; setting can list multiple files (separated by whitespace or -; newlines). It can also contain wildcards. The filenames are -; interpreted as relative to this file. Included files *cannot* -; include files themselves. - -[include] -files = base-services/*.conf diff --git a/docker/all-in-one/etc/tmpfiles.d/pgbouncer.conf b/docker/all-in-one/etc/tmpfiles.d/pgbouncer.conf deleted file mode 100644 index d5d2cd4..0000000 --- a/docker/all-in-one/etc/tmpfiles.d/pgbouncer.conf +++ /dev/null @@ -1,2 +0,0 @@ -# Directory for PostgreSQL sockets, lockfiles and stats tempfiles -d /run/pgbouncer 2775 pgbouncer postgres - - \ No newline at end of file diff --git a/docker/all-in-one/etc/vector/vector.yaml b/docker/all-in-one/etc/vector/vector.yaml deleted file mode 100644 index 8bcf867..0000000 --- a/docker/all-in-one/etc/vector/vector.yaml +++ /dev/null @@ -1,306 +0,0 @@ -data_dir: /var/lib/vector -sources: - gotrue_log: - type: file - include: - - /var/log/services/gotrue.log - - postgrest_log: - type: file - include: - - /var/log/services/postgrest.log - - pgbouncer_log: - type: file - include: - - /var/log/services/pgbouncer.log - - pitr_log: - type: file - include: - - /var/log/wal-g/pitr.log - read_from: end - - postgres_log: - type: file - include: - - /var/log/postgresql/postgres*.csv - read_from: end - multiline: - start_pattern: 
'^20[0-9][0-9]-[0-1][0-9]-[0-3][0-9] [0-2][0-9]:[0-5][0-9]:[0-5][0-9].[0-9]{3} UTC,"' - mode: halt_before - condition_pattern: '^20[0-9][0-9]-[0-1][0-9]-[0-3][0-9] [0-2][0-9]:[0-5][0-9]:[0-5][0-9].[0-9]{3} UTC,"' - timeout_ms: 500 - -transforms: - csv_parse: - type: remap - inputs: - - postgres_log - source: |- - csv_data = parse_csv!(.message) - .metadata.parsed.timestamp = csv_data[0] - .metadata.parsed.user_name = csv_data[1] - .metadata.parsed.database_name = csv_data[2] - .metadata.parsed.process_id = to_int(csv_data[3]) ?? null - .metadata.parsed.connection_from = csv_data[4] - .metadata.parsed.session_id = csv_data[5] - .metadata.parsed.session_line_num = to_int(csv_data[6]) ?? null - .metadata.parsed.command_tag = csv_data[7] - .metadata.parsed.session_start_time = csv_data[8] - .metadata.parsed.virtual_transaction_id = csv_data[9] - .metadata.parsed.transaction_id = to_int(csv_data[10]) ?? null - .metadata.parsed.error_severity = csv_data[11] - .metadata.parsed.sql_state_code = csv_data[12] - .metadata.parsed.message = csv_data[13] - .metadata.parsed.detail = csv_data[14] - .metadata.parsed.hint = csv_data[15] - .metadata.parsed.internal_query = csv_data[16] - .metadata.parsed.internal_query_pos = to_int(csv_data[17]) ?? null - .metadata.parsed.context = csv_data[18] - .metadata.parsed.query = csv_data[19] - .metadata.parsed.query_pos = to_int(csv_data[20]) ?? null - .metadata.parsed.location = csv_data[21] - .metadata.parsed.application_name = csv_data[22] - .metadata.parsed.backend_type = csv_data[23] - .metadata.parsed.leader_pid = to_int(csv_data[24]) ?? null - .metadata.parsed.query_id = to_int(csv_data[25]) ?? null - - z_ts = replace!(.metadata.parsed.timestamp, " UTC", "Z") - iso8601_ts = replace(z_ts, " ", "T") - - .timestamp = iso8601_ts - - # Sends original csv log line duplicating data. Used for QA. 
- # .metadata.parsed_from = .message - - .message = del(.metadata.parsed.message) - .metadata.host = del(.host) - del(.file) - del(.source_type) - - drop_metrics: - type: filter - inputs: - - csv_parse - condition: > - .metadata.parsed.application_name != "postgres_exporter" && .metadata.parsed.application_name != "realtime_rls" && !contains!(.message, "disconnection: session time") - - add_project_ref: - type: add_fields - inputs: - - drop_metrics - fields: - project: {{ .ProjectRef }} - - auth_failures: - type: filter - inputs: - - postgres_log - condition: >- - contains!(.message, "password authentication failed for user") - - filter_pgbouncer_stats: - type: filter - inputs: - - pgbouncer_log - condition: >- - !starts_with!(.message, "stats:") && !starts_with!(.message, "kernel file descriptor limit") && !contains!(.message, "FIXME") - - filter_postgrest_stats: - type: filter - inputs: - - postgrest_log - condition: >- - !starts_with!(.message, "+") && !starts_with!(.message, "INFO:") && !contains!(.message, "Admin server listening") - - gotrue_to_object: - inputs: - - gotrue_log - type: remap - source: |2- - .project = "{{ .ProjectRef }}" - - .parsed, err = parse_json(.message) - if err == null { - .metadata = .parsed - .metadata.msg = .parsed.msg - .timestamp = del(.metadata.time) - } - del(.parsed) - .metadata.host = del(.host) - - del(.source_type) - del(.PRIORITY) - del(.SYSLOG_FACILITY) - del(.SYSLOG_IDENTIFIER) - del(._BOOT_ID) - del(._CAP_EFFECTIVE) - del(._CMDLINE) - del(._COMM) - del(._EXE) - del(._GID) - del(._MACHINE_ID) - del(._PID) - del(._SELINUX_CONTEXT) - del(._STREAM_ID) - del(._SYSTEMD_CGROUP) - del(._SYSTEMD_INVOCATION_ID) - del(._SYSTEMD_SLICE) - del(._SYSTEMD_UNIT) - del(._TRANSPORT) - del(._UID) - del(.__MONOTONIC_TIMESTAMP) - del(.__REALTIME_TIMESTAMP) - - postgrest_to_object: - inputs: - - filter_postgrest_stats - type: remap - source: |2- - .project = "{{ .ProjectRef }}" - - # removes timestamp embedded in log since Vector already sends 
it - .message = replace!(.message, r'^\d+/\w+/\d+:\d+:\d+:\d+\s\+\d+:\s', "") - .metadata.host = del(.host) - del(.source_type) - del(.PRIORITY) - del(.SYSLOG_FACILITY) - del(.SYSLOG_IDENTIFIER) - del(._BOOT_ID) - del(._CAP_EFFECTIVE) - del(._CMDLINE) - del(._COMM) - del(._EXE) - del(._GID) - del(._MACHINE_ID) - del(._PID) - del(._SELINUX_CONTEXT) - del(._STREAM_ID) - del(._SYSTEMD_CGROUP) - del(._SYSTEMD_INVOCATION_ID) - del(._SYSTEMD_SLICE) - del(._SYSTEMD_UNIT) - del(._TRANSPORT) - del(._UID) - del(.__MONOTONIC_TIMESTAMP) - del(.__REALTIME_TIMESTAMP) - - pgbouncer_to_object: - inputs: - - filter_pgbouncer_stats - type: remap - source: |2- - .project = "{{ .ProjectRef }}" - .metadata.host = del(.host) - del(.source_type) - del(.PRIORITY) - del(.SYSLOG_IDENTIFIER) - del(._BOOT_ID) - del(._CAP_EFFECTIVE) - del(._CMDLINE) - del(._COMM) - del(._EXE) - del(._GID) - del(._MACHINE_ID) - del(._PID) - del(._SELINUX_CONTEXT) - del(._SOURCE_REALTIME_TIMESTAMP) - del(._SYSTEMD_CGROUP) - del(._SYSTEMD_INVOCATION_ID) - del(._SYSTEMD_SLICE) - del(._SYSTEMD_UNIT) - del(._TRANSPORT) - del(._UID) - del(.__MONOTONIC_TIMESTAMP) - del(.__REALTIME_TIMESTAMP) - - pitr_to_object: - inputs: - - pitr_log - type: remap - source: |2- - .project = "{{ .ProjectRef }}" - - .parsed, err = parse_key_value(.message) - if err == null { - .metadata = .parsed - .metadata.host = del(.host) - .message = del(.metadata.msg) - .timestamp = del(.metadata.time) - } - - del(.parsed) - del(.source_type) - del(.file) - - filter_pitr_error: - inputs: - - pitr_to_object - type: filter - condition: > - .metadata.level != "info" - -sinks: - http_gotrue: - type: "http" - inputs: - - gotrue_to_object - encoding: - codec: "json" - method: "post" - compression: none - request: - retry_max_duration_secs: 10 - uri: "https://{{ .LogflareHost }}/logs?api_key={{ .ApiKey }}&source={{ .GotrueSource }}" - - http_postgrest: - type: http - inputs: - - postgrest_to_object - encoding: - codec: "json" - method: "post" - 
compression: none - request: - retry_max_duration_secs: 10 - uri: "https://{{ .LogflareHost }}/logs?api_key={{ .ApiKey }}&source={{ .PostgrestSource }}" - - http_pgbouncer: - type: http - inputs: - - pgbouncer_to_object - encoding: - codec: json - compression: none - uri: "https://{{ .LogflareHost }}/logs?api_key={{ .ApiKey }}&source={{ .PgbouncerSource }}" - - http_pitr_error: - type: http - inputs: - - filter_pitr_error - encoding: - codec: json - compression: none - uri: "https://{{ .LogflareHost }}/logs?api_key={{ .ApiKey }}&source={{ .PitrErrorsSource }}" - - http_postgres: - type: http - inputs: - - add_project_ref - encoding: - codec: "json" - method: "post" - compression: none - request: - retry_max_duration_secs: 10 - uri: "https://{{ .LogflareHost }}/logs?api_key={{ .ApiKey }}&source={{ .DbSource }}" - - file_postgres: - type: file - inputs: - - auth_failures - encoding: - codec: text - path: >- - /var/log/postgresql/auth-failures.csv diff --git a/docker/all-in-one/healthcheck.sh b/docker/all-in-one/healthcheck.sh deleted file mode 100755 index 030c6d5..0000000 --- a/docker/all-in-one/healthcheck.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash -set -eou pipefail - -# database up -pg_isready -U postgres -h localhost -p 5432 - -if [ -f "/tmp/init.json" ]; then - ADMIN_API_KEY=${ADMIN_API_KEY:-$(jq -r '.["tealbase_admin_key"]' /tmp/init.json)} -fi - -# adminapi up -if [ -d "$ADMIN_API_CERT_DIR" ]; then - curl -sSkf "https://localhost:$ADMIN_API_PORT/health" -H "apikey: $ADMIN_API_KEY" -else - curl -sSf "http://localhost:$ADMIN_API_PORT/health" -H "apikey: $ADMIN_API_KEY" -fi - -if [ "${POSTGRES_ONLY:-}" ]; then - exit 0 -fi - -# postgrest up -curl -sSfI "http://localhost:$PGRST_ADMIN_SERVER_PORT/ready" - -# gotrue up -curl -sSf "http://localhost:$GOTRUE_API_PORT/health" - -if [ "${ENVOY_ENABLED:-}" == "true" ]; then - # envoy up - curl -sSfI "http://localhost:$ENVOY_HTTP_PORT/health" -else - # kong up - kong health -fi - -# fail2ban up -fail2ban-client status 
- -# prometheus exporter up -curl -sSfI "http://localhost:$PGEXPORTER_PORT/metrics" - -# vector is up (if starting logflare) -# TODO: make this non-conditional once we set up local logflare for testinfra -if [ -n "${LOGFLARE_API_KEY:-}" ]; then - curl -sSfI "http://localhost:$VECTOR_API_PORT/health" -fi diff --git a/docker/all-in-one/init/configure-admin-mgr.sh b/docker/all-in-one/init/configure-admin-mgr.sh deleted file mode 100755 index 98ebf6c..0000000 --- a/docker/all-in-one/init/configure-admin-mgr.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -set -eou pipefail - -touch "/var/log/wal-g/pitr.log" -chown postgres:postgres "/var/log/wal-g/pitr.log" -chmod 0666 "/var/log/wal-g/pitr.log" - -/usr/local/bin/configure-shim.sh /dist/admin-mgr /usr/bin/admin-mgr diff --git a/docker/all-in-one/init/configure-adminapi.sh b/docker/all-in-one/init/configure-adminapi.sh deleted file mode 100755 index 3c82898..0000000 --- a/docker/all-in-one/init/configure-adminapi.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash -set -eou pipefail - -ADMIN_API_CONF=/etc/adminapi/adminapi.yaml -touch /var/log/services/adminapi.log - -ADMINAPI_CUSTOM_DIR="${DATA_VOLUME_MOUNTPOINT}/etc/adminapi" - -/usr/local/bin/configure-shim.sh /dist/tealbase-admin-api /opt/tealbase-admin-api - -if [ -f "${INIT_PAYLOAD_PATH:-}" ]; then - echo "init adminapi payload" - tar -xzvf "$INIT_PAYLOAD_PATH" -C / ./etc/adminapi/adminapi.yaml - chown adminapi:adminapi ./etc/adminapi/adminapi.yaml - - mkdir -p $ADMIN_API_CERT_DIR - tar -xzvf "$INIT_PAYLOAD_PATH" -C $ADMIN_API_CERT_DIR --strip-components 2 ./ssl/server.crt - tar -xzvf "$INIT_PAYLOAD_PATH" -C $ADMIN_API_CERT_DIR --strip-components 2 ./ssl/server.key - chown -R adminapi:root $ADMIN_API_CERT_DIR - chmod 700 -R $ADMIN_API_CERT_DIR -else - PROJECT_REF=${PROJECT_REF:-default} - PGBOUNCER_PASSWORD=${PGBOUNCER_PASSWORD:-$POSTGRES_PASSWORD} - tealbase_URL=${tealbase_URL:-https://api.tealbase.io/system} - REPORTING_TOKEN=${REPORTING_TOKEN:-token} - - sed -i "s|{{ 
.JwtSecret }}|$JWT_SECRET|g" $ADMIN_API_CONF - sed -i "s|{{ .PgbouncerPassword }}|$PGBOUNCER_PASSWORD|g" $ADMIN_API_CONF - sed -i "s|{{ .ProjectRef }}|$PROJECT_REF|g" $ADMIN_API_CONF - sed -i "s|{{ .tealbaseUrl }}|$tealbase_URL|g" $ADMIN_API_CONF - sed -i "s|{{ .ReportingToken }}|$REPORTING_TOKEN|g" $ADMIN_API_CONF -fi - -# Allow adminapi to write to /etc and manage Postgres configs -chmod g+w /etc -chmod -R 0775 /etc/postgresql -chmod -R 0775 /etc/postgresql-custom -chmod -R 0775 /etc/pgbouncer-custom - -# Update api port -sed -i "s|^port: .*$|port: ${ADMIN_API_PORT:-8085}|g" $ADMIN_API_CONF - -if [ "${DATA_VOLUME_MOUNTPOINT}" ]; then - mkdir -p "${ADMINAPI_CUSTOM_DIR}" - if [ ! -f "${CONFIGURED_FLAG_PATH}" ]; then - echo "Copying existing custom adminapi config from /etc/adminapi to ${ADMINAPI_CUSTOM_DIR}" - cp -R "/etc/adminapi/." "${ADMINAPI_CUSTOM_DIR}/" - fi - - rm -rf "/etc/adminapi" - ln -s "${ADMINAPI_CUSTOM_DIR}" "/etc/adminapi" - chown -R adminapi:adminapi "/etc/adminapi" - - chown -R adminapi:adminapi "${ADMINAPI_CUSTOM_DIR}" - chmod g+wrx "${ADMINAPI_CUSTOM_DIR}" -fi diff --git a/docker/all-in-one/init/configure-autoshutdown.sh b/docker/all-in-one/init/configure-autoshutdown.sh deleted file mode 100755 index 66343e5..0000000 --- a/docker/all-in-one/init/configure-autoshutdown.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -set -eou pipefail - -mkdir -p /etc/supa-shutdown - -AUTOSHUTDOWN_CUSTOM_DIR="${DATA_VOLUME_MOUNTPOINT}/etc/supa-shutdown" -if [ "${DATA_VOLUME_MOUNTPOINT}" ]; then - mkdir -p "${AUTOSHUTDOWN_CUSTOM_DIR}" - - AUTOSHUTDOWN_CUSTOM_CONFIG_FILE_PATH="${AUTOSHUTDOWN_CUSTOM_DIR}/shutdown.conf" - if [ ! 
-f "${AUTOSHUTDOWN_CUSTOM_CONFIG_FILE_PATH}" ]; then - echo "Copying existing custom shutdown config from /etc/supa-shutdown to ${AUTOSHUTDOWN_CUSTOM_CONFIG_FILE_PATH}" - cp "/etc/supa-shutdown/shutdown.conf" "${AUTOSHUTDOWN_CUSTOM_CONFIG_FILE_PATH}" - fi - - rm -f "/etc/supa-shutdown/shutdown.conf" - ln -s "${AUTOSHUTDOWN_CUSTOM_CONFIG_FILE_PATH}" "/etc/supa-shutdown/shutdown.conf" - chmod g+wrx "${AUTOSHUTDOWN_CUSTOM_DIR}" - chown -R adminapi:adminapi "/etc/supa-shutdown/shutdown.conf" - chown -R adminapi:adminapi "${AUTOSHUTDOWN_CUSTOM_CONFIG_FILE_PATH}" -fi diff --git a/docker/all-in-one/init/configure-envoy.sh b/docker/all-in-one/init/configure-envoy.sh deleted file mode 100755 index 06dbafc..0000000 --- a/docker/all-in-one/init/configure-envoy.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash -set -eou pipefail - -if [[ "${ENVOY_ENABLED:-}" != "true" ]]; then - exit -fi - -ENVOY_CDS_CONF=/etc/envoy/cds.yaml -ENVOY_LDS_CONF=/etc/envoy/lds.yaml -touch /var/log/services/envoy.log - -/usr/local/bin/configure-shim.sh /dist/envoy /opt/envoy - -if [[ -n "${DATA_VOLUME_MOUNTPOINT}" ]]; then - ENVOY_CUSTOM_DIR="${DATA_VOLUME_MOUNTPOINT}/etc/envoy" - mkdir -p "${ENVOY_CUSTOM_DIR}" - if [[ ! -f "${CONFIGURED_FLAG_PATH}" ]]; then - echo "Copying existing custom envoy config from /etc/envoy/ to ${ENVOY_CUSTOM_DIR}" - cp -R "/etc/envoy/." "${ENVOY_CUSTOM_DIR}/" - fi - - rm -rf "/etc/envoy" - ln -s "${ENVOY_CUSTOM_DIR}" "/etc/envoy" - chown -R envoy:envoy "/etc/envoy" - chmod -R g+w "/etc/envoy" - - chown -R envoy:envoy "${ENVOY_CUSTOM_DIR}" - chmod -R g+w "${ENVOY_CUSTOM_DIR}" - chmod g+rx "${ENVOY_CUSTOM_DIR}" -fi - -if [[ -f "${INIT_PAYLOAD_PATH:-}" ]]; then - echo "init envoy payload" - tar -xzvhf "${INIT_PAYLOAD_PATH}" -C / ./etc/envoy/ - chown -HR envoy:envoy /etc/envoy - chmod -HR g+w /etc/envoy -fi - -# Inject project specific configuration -# "c2VydmljZV9yb2xlOnNlcnZpY2Vfa2V5" is base64-encoded "service_role:service_key". 
-sed -i -e "s|anon_key|${ANON_KEY}|g" \ - -e "s|service_key|${SERVICE_ROLE_KEY}|g" \ - -e "s|tealbase_admin_key|${ADMIN_API_KEY}|g" \ - -e "s|c2VydmljZV9yb2xlOnNlcnZpY2Vfa2V5|$(echo -n "service_role:${SERVICE_ROLE_KEY}" | base64 --wrap 0)|g" \ - "${ENVOY_LDS_CONF}" - -# Update Envoy ports -sed -i "s|port_value: 80$|port_value: ${ENVOY_HTTP_PORT}|g" "${ENVOY_LDS_CONF}" -sed -i "s|port_value: 443$|port_value: ${ENVOY_HTTPS_PORT}|g" "${ENVOY_LDS_CONF}" -sed -i "s|port_value: 3000$|port_value: ${PGRST_SERVER_PORT}|g" "${ENVOY_CDS_CONF}" -sed -i "s|port_value: 3001$|port_value: ${PGRST_ADMIN_SERVER_PORT}|g" "${ENVOY_CDS_CONF}" -sed -i "s|port_value: 8085$|port_value: ${ADMIN_API_PORT}|g" "${ENVOY_CDS_CONF}" -sed -i "s|port_value: 9999$|port_value: ${GOTRUE_API_PORT}|g" "${ENVOY_CDS_CONF}" diff --git a/docker/all-in-one/init/configure-exporter.sh b/docker/all-in-one/init/configure-exporter.sh deleted file mode 100755 index 93498c4..0000000 --- a/docker/all-in-one/init/configure-exporter.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -set -eou pipefail - -touch /var/log/services/exporter.log - diff --git a/docker/all-in-one/init/configure-fail2ban.sh b/docker/all-in-one/init/configure-fail2ban.sh deleted file mode 100755 index 39b0a27..0000000 --- a/docker/all-in-one/init/configure-fail2ban.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -set -eou pipefail - -mkdir -p /var/run/fail2ban -touch /var/log/services/fail2ban.log -touch /var/log/postgresql/auth-failures.csv diff --git a/docker/all-in-one/init/configure-gotrue.sh b/docker/all-in-one/init/configure-gotrue.sh deleted file mode 100755 index 5fe4ad2..0000000 --- a/docker/all-in-one/init/configure-gotrue.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -set -eou pipefail - -touch /var/log/services/gotrue.log - -GOTRUE_CUSTOM_DIR="${DATA_VOLUME_MOUNTPOINT}/etc/gotrue" -GOTRUE_CUSTOM_CONFIG_FILE_PATH="${DATA_VOLUME_MOUNTPOINT}/etc/gotrue/gotrue.env" - -/usr/local/bin/configure-shim.sh /dist/gotrue /opt/gotrue/gotrue - -if [ 
"${DATA_VOLUME_MOUNTPOINT}" ]; then - mkdir -p "${GOTRUE_CUSTOM_DIR}" - chmod g+wrx "${GOTRUE_CUSTOM_DIR}" - chown adminapi:adminapi "${GOTRUE_CUSTOM_DIR}" - - if [ ! -f "${CONFIGURED_FLAG_PATH}" ]; then - echo "Copying existing GoTrue config from /etc/gotrue.env to ${GOTRUE_CUSTOM_CONFIG_FILE_PATH}" - cp "/etc/gotrue.env" "${GOTRUE_CUSTOM_CONFIG_FILE_PATH}" - fi - - rm -f "/etc/gotrue.env" - ln -s "${GOTRUE_CUSTOM_CONFIG_FILE_PATH}" "/etc/gotrue.env" - chown -R adminapi:adminapi "/etc/gotrue.env" - - chown -R adminapi:adminapi "${GOTRUE_CUSTOM_CONFIG_FILE_PATH}" - chmod g+rx "${GOTRUE_CUSTOM_CONFIG_FILE_PATH}" -fi - -if [ -f "${INIT_PAYLOAD_PATH:-}" ]; then - if [ ! -f "${CONFIGURED_FLAG_PATH}" ]; then - echo "init gotrue payload" - tar -h --overwrite -xzvf "$INIT_PAYLOAD_PATH" -C / ./etc/gotrue.env - chown -R adminapi:adminapi /etc/gotrue.env - fi -else - sed -i "s|api_external_url|${API_EXTERNAL_URL:-http://localhost}|g" /etc/gotrue.env - sed -i "s|gotrue_api_host|${GOTRUE_API_HOST:-0.0.0.0}|g" /etc/gotrue.env - sed -i "s|gotrue_site_url|$GOTRUE_SITE_URL|g" /etc/gotrue.env - sed -i "s|gotrue_jwt_secret|$JWT_SECRET|g" /etc/gotrue.env -fi diff --git a/docker/all-in-one/init/configure-kong.sh b/docker/all-in-one/init/configure-kong.sh deleted file mode 100755 index 9ca16be..0000000 --- a/docker/all-in-one/init/configure-kong.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash -set -eou pipefail - -KONG_CONF=/etc/kong/kong.yml -KONG_CUSTOM_DIR="${DATA_VOLUME_MOUNTPOINT}/etc/kong" - -touch /var/log/services/kong.log - -if [ -f "${INIT_PAYLOAD_PATH:-}" ]; then - echo "init kong payload" - # Setup ssl termination - tar -xzvf "$INIT_PAYLOAD_PATH" -C / ./etc/kong/ - chown -R adminapi:adminapi ./etc/kong/kong.yml - chown -R adminapi:adminapi ./etc/kong/*pem - echo "ssl_cipher_suite = intermediate" >> /etc/kong/kong.conf - echo "ssl_cert = /etc/kong/fullChain.pem" >> /etc/kong/kong.conf - echo "ssl_cert_key = /etc/kong/privKey.pem" >> /etc/kong/kong.conf -else - # Default 
gateway config - export KONG_DNS_ORDER=LAST,A,CNAME - export KONG_PROXY_ERROR_LOG=syslog:server=unix:/dev/log - export KONG_ADMIN_ERROR_LOG=syslog:server=unix:/dev/log -fi - -# Inject project specific configuration -sed -i -e "s|anon_key|$ANON_KEY|g" \ - -e "s|service_key|$SERVICE_ROLE_KEY|g" \ - -e "s|tealbase_admin_key|$ADMIN_API_KEY|g" \ - $KONG_CONF - -# Update kong ports -sed -i "s|:80 |:$KONG_HTTP_PORT |g" /etc/kong/kong.conf -sed -i "s|:443 |:$KONG_HTTPS_PORT |g" /etc/kong/kong.conf - -if [ "${DATA_VOLUME_MOUNTPOINT}" ]; then - mkdir -p "${KONG_CUSTOM_DIR}" - if [ ! -f "${CONFIGURED_FLAG_PATH}" ]; then - echo "Copying existing custom kong config from /etc/kong/kong.yml to ${KONG_CUSTOM_DIR}" - cp /etc/kong/kong.yml "${KONG_CUSTOM_DIR}/kong.yml" - fi - - rm -rf "/etc/kong/kong.yml" - ln -s "${KONG_CUSTOM_DIR}/kong.yml" "/etc/kong/kong.yml" - chown -R adminapi:adminapi "/etc/kong/kong.yml" - - chown -R adminapi:adminapi "${KONG_CUSTOM_DIR}" - chmod g+wrx "${KONG_CUSTOM_DIR}" -fi \ No newline at end of file diff --git a/docker/all-in-one/init/configure-pg_egress_collect.sh b/docker/all-in-one/init/configure-pg_egress_collect.sh deleted file mode 100755 index 17051ab..0000000 --- a/docker/all-in-one/init/configure-pg_egress_collect.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -set -eou pipefail - -PG_EGRESS_COLLECT_FILE=/tmp/pg_egress_collect.txt - -if [ "${DATA_VOLUME_MOUNTPOINT:-}" != "" ]; then - if [ ! 
-L $PG_EGRESS_COLLECT_FILE ]; then - if [ -f $PG_EGRESS_COLLECT_FILE ]; then - rm -f $PG_EGRESS_COLLECT_FILE - fi - touch "${DATA_VOLUME_MOUNTPOINT}/pg_egress_collect.txt" - ln -s "${DATA_VOLUME_MOUNTPOINT}/pg_egress_collect.txt" $PG_EGRESS_COLLECT_FILE - fi -fi diff --git a/docker/all-in-one/init/configure-pgbouncer.sh b/docker/all-in-one/init/configure-pgbouncer.sh deleted file mode 100755 index 2d66203..0000000 --- a/docker/all-in-one/init/configure-pgbouncer.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash -set -eou pipefail - -touch /var/log/services/pgbouncer.log - -mkdir -p /var/run/pgbouncer -chown pgbouncer:postgres /var/run/pgbouncer - -PGBOUNCER_CONF=/etc/pgbouncer/pgbouncer.ini - -if [ -f "${INIT_PAYLOAD_PATH:-}" ]; then - echo "init pgbouncer payload" - sed -i -E "s|^# (%include /etc/pgbouncer-custom/ssl-config.ini)$|\1|g" $PGBOUNCER_CONF - - tar -xzvf "$INIT_PAYLOAD_PATH" -C /etc/pgbouncer/ --strip-components 3 ./etc/pgbouncer/userlist.txt - chown -R pgbouncer:pgbouncer /etc/pgbouncer/userlist.txt -fi - -if [ "${DATA_VOLUME_MOUNTPOINT}" ]; then - /opt/tealbase-admin-api optimize pgbouncer --destination-config-file-path /etc/pgbouncer-custom/generated-optimizations.ini - - # Preserve pgbouncer configs across restarts - PGBOUNCER_DIR="${DATA_VOLUME_MOUNTPOINT}/etc/pgbouncer" - PGBOUNCER_CUSTOM_DIR="${DATA_VOLUME_MOUNTPOINT}/etc/pgbouncer-custom" - - mkdir -p "${PGBOUNCER_DIR}" - mkdir -p "${PGBOUNCER_CUSTOM_DIR}" - - if [ ! -f "${CONFIGURED_FLAG_PATH}" ]; then - echo "Copying existing custom pgbouncer config from /etc/pgbouncer-custom to ${PGBOUNCER_CUSTOM_DIR}" - cp -R "/etc/pgbouncer-custom/." "${PGBOUNCER_CUSTOM_DIR}/" - cp -R "/etc/pgbouncer/." 
"${PGBOUNCER_DIR}/" - fi - - rm -rf "/etc/pgbouncer-custom" - ln -s "${PGBOUNCER_CUSTOM_DIR}" "/etc/pgbouncer-custom" - chown -R pgbouncer:pgbouncer "/etc/pgbouncer-custom" - chown -R pgbouncer:pgbouncer "${PGBOUNCER_CUSTOM_DIR}" - chmod -R g+wrx "${PGBOUNCER_CUSTOM_DIR}" - - rm -rf "/etc/pgbouncer" - ln -s "${PGBOUNCER_DIR}" "/etc/pgbouncer" - chown -R pgbouncer:pgbouncer "/etc/pgbouncer" - chown -R pgbouncer:pgbouncer "${PGBOUNCER_DIR}" - chmod -R g+wrx "${PGBOUNCER_DIR}" -fi diff --git a/docker/all-in-one/init/configure-postgrest.sh b/docker/all-in-one/init/configure-postgrest.sh deleted file mode 100755 index 178e64f..0000000 --- a/docker/all-in-one/init/configure-postgrest.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -set -eou pipefail - -touch /var/log/services/postgrest.log - -# Default in-database config -sed -i "s|pgrst_server_port|${PGRST_SERVER_PORT:-3000}|g" /etc/postgrest/base.conf -sed -i "s|pgrst_admin_server_port|${PGRST_ADMIN_SERVER_PORT:-3001}|g" /etc/postgrest/base.conf -sed -i "s|pgrst_db_schemas|${PGRST_DB_SCHEMAS:-public,storage,graphql_public}|g" /etc/postgrest/base.conf -sed -i "s|pgrst_db_extra_search_path|${PGRST_DB_SCHEMAS:-public,extensions}|g" /etc/postgrest/base.conf -sed -i "s|pgrst_db_anon_role|${PGRST_DB_ANON_ROLE:-anon}|g" /etc/postgrest/base.conf -sed -i "s|pgrst_jwt_secret|$JWT_SECRET|g" /etc/postgrest/base.conf - -/usr/local/bin/configure-shim.sh /dist/postgrest /opt/postgrest - -if [ -f "${INIT_PAYLOAD_PATH:-}" ]; then - echo "init postgrest payload" - tar -xzvf "$INIT_PAYLOAD_PATH" -C / ./etc/postgrest/base.conf - chown -R postgrest:postgrest /etc/postgrest -fi - -if [ "${DATA_VOLUME_MOUNTPOINT}" ]; then - POSTGREST_CUSTOM_DIR="${DATA_VOLUME_MOUNTPOINT}/etc/postgrest" - mkdir -p "${POSTGREST_CUSTOM_DIR}" - if [ ! -f "${CONFIGURED_FLAG_PATH}" ]; then - echo "Copying existing custom PostgREST config from /etc/postgrest/ to ${POSTGREST_CUSTOM_DIR}" - cp -R "/etc/postgrest/." 
"${POSTGREST_CUSTOM_DIR}/" - fi - - rm -rf "/etc/postgrest" - ln -s "${POSTGREST_CUSTOM_DIR}" "/etc/postgrest" - chown -R postgrest:postgrest "/etc/postgrest" - - chown -R postgrest:postgrest "${POSTGREST_CUSTOM_DIR}" - chmod g+wrx "${POSTGREST_CUSTOM_DIR}" -fi - -PGRST_CONF=/etc/postgrest/generated.conf - -/opt/tealbase-admin-api optimize postgrest --destination-config-file-path $PGRST_CONF -cat /etc/postgrest/base.conf >> $PGRST_CONF diff --git a/docker/all-in-one/init/configure-vector.sh b/docker/all-in-one/init/configure-vector.sh deleted file mode 100755 index 9177a0f..0000000 --- a/docker/all-in-one/init/configure-vector.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash -set -eou pipefail - -VECTOR_CONF=/etc/vector/vector.yaml -touch /var/log/services/vector.log - -if [ -f "${INIT_PAYLOAD_PATH:-}" ]; then - echo "init vector payload" - tar -xzvf "$INIT_PAYLOAD_PATH" -C /etc/vector/ --strip-components 2 ./tmp/init.json - PROJECT_REF=$(jq -r '.["project_ref"]' /etc/vector/init.json) - LOGFLARE_DB_SOURCE=$(jq -r '.["logflare_db_source"]' /etc/vector/init.json) - LOGFLARE_GOTRUE_SOURCE=$(jq -r '.["logflare_gotrue_source"]' /etc/vector/init.json) - LOGFLARE_POSTGREST_SOURCE=$(jq -r '.["logflare_postgrest_source"]' /etc/vector/init.json) - LOGFLARE_PGBOUNCER_SOURCE=$(jq -r '.["logflare_pgbouncer_source"]' /etc/vector/init.json) - LOGFLARE_PITR_ERRORS_SOURCE=$(jq -r '.["logflare_pitr_errors_source"]' /etc/vector/init.json) - LOGFLARE_API_KEY=$(jq -r '.["logflare_api_key"]' /etc/vector/init.json) -fi - -# Exit early if not starting logflare -if [ -z "${LOGFLARE_API_KEY:-}" ]; then - echo "Skipped starting vector: missing LOGFLARE_API_KEY" - exit 0 -fi - -# Add vector to support both base-services and services config -cat < /etc/supervisor/services/vector.conf - -[program:vector] -command=/usr/bin/vector --config-yaml /etc/vector/vector.yaml -user=root -autorestart=true -stdout_logfile=/var/log/services/vector.log -redirect_stderr=true -stdout_logfile_maxbytes=10MB 
-priority=250 - -EOF - -VECTOR_API_PORT=${VECTOR_API_PORT:-9001} -PROJECT_REF=${PROJECT_REF:-default} -LOGFLARE_HOST=${LOGFLARE_HOST:-api.logflare.app} -LOGFLARE_DB_SOURCE=${LOGFLARE_DB_SOURCE:-postgres.logs} -LOGFLARE_GOTRUE_SOURCE=${LOGFLARE_GOTRUE_SOURCE:-gotrue.logs.prod} -LOGFLARE_POSTGREST_SOURCE=${LOGFLARE_POSTGREST_SOURCE:-postgREST.logs.prod} -LOGFLARE_PGBOUNCER_SOURCE=${LOGFLARE_PGBOUNCER_SOURCE:-pgbouncer.logs.prod} -LOGFLARE_PITR_ERRORS_SOURCE=${LOGFLARE_PITR_ERRORS_SOURCE:-pitr_errors.logs.prod} - -sed -i "s|{{ .ApiPort }}|$VECTOR_API_PORT|g" $VECTOR_CONF -sed -i "s|{{ .ProjectRef }}|$PROJECT_REF|g" $VECTOR_CONF -sed -i "s|{{ .LogflareHost }}|$LOGFLARE_HOST|g" $VECTOR_CONF -sed -i "s|{{ .ApiKey }}|$LOGFLARE_API_KEY|g" $VECTOR_CONF -sed -i "s|{{ .DbSource }}|$LOGFLARE_DB_SOURCE|g" $VECTOR_CONF -sed -i "s|{{ .GotrueSource }}|$LOGFLARE_GOTRUE_SOURCE|g" $VECTOR_CONF -sed -i "s|{{ .PostgrestSource }}|$LOGFLARE_POSTGREST_SOURCE|g" $VECTOR_CONF -sed -i "s|{{ .PgbouncerSource }}|$LOGFLARE_PGBOUNCER_SOURCE|g" $VECTOR_CONF -sed -i "s|{{ .PitrErrorsSource }}|$LOGFLARE_PITR_ERRORS_SOURCE|g" $VECTOR_CONF diff --git a/docker/all-in-one/init/start-kong.sh b/docker/all-in-one/init/start-kong.sh deleted file mode 100755 index 7418d26..0000000 --- a/docker/all-in-one/init/start-kong.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -eou pipefail - -# In the event of a restart, properly stop any running kong instances first -# Confirmed by running /usr/local/bin/kong health -trap '/usr/local/bin/kong quit' EXIT -/usr/local/bin/kong start diff --git a/docker/all-in-one/opt/pg_egress_collect/pg_egress_collect.pl b/docker/all-in-one/opt/pg_egress_collect/pg_egress_collect.pl deleted file mode 100644 index 2acc98a..0000000 --- a/docker/all-in-one/opt/pg_egress_collect/pg_egress_collect.pl +++ /dev/null @@ -1,126 +0,0 @@ -#!/usr/bin/env perl - -# This script receive tcpdump output through STDIN and does: -# -# 1. 
extract outgoing TCP packet length on the 1st non-loopback device port 5432 and 6543 -# 2. sum the length up to one minute -# 3. save the total length to file (default is /tmp/pg_egress_collect.txt) per minute -# -# Usage: -# -# tcpdump -s 128 -Q out -nn -tt -vv -p -l 'tcp and (port 5432 or port 6543)' | perl pg_egress_collect.pl -o /tmp/output.txt -# - -use POSIX; -use List::Util qw(sum); -use Getopt::Long 'HelpMessage'; -use IO::Async::Loop; -use IO::Async::Stream; -use IO::Async::Timer::Periodic; - -use strict; -use warnings; - -# total captured packets lenth in a time frame -my $captured_len = 0; - -# extract tcp packet length captured by tcpdump -# -# Sample input lines: -# -# 1674013833.940253 IP (tos 0x0, ttl 64, id 0, offset 0, flags [DF], proto TCP (6), length 60) -# 10.112.101.122.5432 > 220.235.16.223.62599: Flags [S.], cksum 0x5de3 (incorrect -> 0x63da), seq 2314200657, ack 2071735457, win 62643, options [mss 8961,sackOK,TS val 3358598837 ecr 1277499190,nop,wscale 7], length 0 -# 1674013833.989257 IP (tos 0x0, ttl 64, id 24975, offset 0, flags [DF], proto TCP (6), length 52) -# 10.112.101.122.5432 > 220.235.16.223.62599: Flags [.], cksum 0x5ddb (incorrect -> 0xa25b), seq 1, ack 9, win 490, options [nop,nop,TS val 3358598885 ecr 1277499232], length 0 -sub extract_packet_length { - my ($line) = @_; - - #print("debug: >> " . 
$line); - - if ($line =~ /^\s+\d+\.\d+\.\d+\.\d+\..*, length (\d+)$/) { - # extract tcp packet length and add it up - my $len = $1; - $captured_len += $len; - } -} - -# write total length to file -sub write_file { - my ($output) = @_; - - my $now = strftime "%F %T", localtime time; - print "[$now] write captured len $captured_len to $output\n"; - - open(my $fh, "+>", $output) or die "Could not open file '$output' $!"; - print $fh "$captured_len"; - close($fh) or die "Could not write file '$output' $!"; -} - -# main -sub main { - # get arguments - GetOptions( - "interval:i" => \(my $interval = 60), - "output:s" => \(my $output = "/tmp/pg_egress_collect.txt"), - "help" => sub { HelpMessage(0) }, - ) or HelpMessage(1); - - my $loop = IO::Async::Loop->new; - - # tcpdump extractor - my $extractor = IO::Async::Stream->new_for_stdin( - on_read => sub { - my ($self, $buffref, $eof) = @_; - - while($$buffref =~ s/^(.*\n)//) { - my $line = $1; - extract_packet_length($line); - } - - return 0; - }, - ); - - # schedule file writer per minute - my $writer = IO::Async::Timer::Periodic->new( - interval => $interval, - on_tick => sub { - write_file($output); - - # reset total captured length - $captured_len = 0; - }, - ); - $writer->start; - - print "pg_egress_collect started, egress data will be saved to $output at interval $interval seconds.\n"; - - $loop->add($extractor); - $loop->add($writer); - $loop->run; -} - -main(); - -__END__ - -=head1 NAME - -pg_egress_collect.pl - collect egress from tcpdump output, extract TCP packet length, aggregate in specified interval and write to output file. 
- -=head1 SYNOPSIS - -pg_egress_collect.pl [-i interval] [-o output] - -Options: - - -i, --interval interval - output file write interval, in seconds, default is 60 seconds - - -o, --output output - output file path, default is /tmp/pg_egress_collect.txt - - -h, --help - print this help message - -=cut diff --git a/docker/all-in-one/opt/postgres_exporter/queries.yml b/docker/all-in-one/opt/postgres_exporter/queries.yml deleted file mode 100644 index c9652e3..0000000 --- a/docker/all-in-one/opt/postgres_exporter/queries.yml +++ /dev/null @@ -1,345 +0,0 @@ -set_timeout: - master: true - cache_seconds: 5 - query: "set statement_timeout to '20s'" - -pg_database: - master: true - cache_seconds: 60 - query: "SELECT SUM(pg_database_size(pg_database.datname)) / (1024 * 1024) as size_mb FROM pg_database" - metrics: - - size_mb: - usage: "GAUGE" - description: "Disk space used by the database" - -pg_stat_bgwriter: - master: true - cache_seconds: 60 - query: | - select checkpoints_timed as checkpoints_timed_total, - checkpoints_req as checkpoints_req_total, - checkpoint_write_time as checkpoint_write_time_total, - checkpoint_sync_time as checkpoint_sync_time_total, - buffers_checkpoint as buffers_checkpoint_total, - buffers_clean as buffers_clean_total, - maxwritten_clean as maxwritten_clean_total, - buffers_backend as buffers_backend_total, - buffers_backend_fsync as buffers_backend_fsync_total, - buffers_alloc as buffers_alloc_total, - stats_reset - from pg_stat_bgwriter - metrics: - - checkpoints_timed_total: - usage: "COUNTER" - description: "Scheduled checkpoints performed" - - checkpoints_req_total: - usage: "COUNTER" - description: "Requested checkpoints performed" - - checkpoint_write_time_total: - usage: "COUNTER" - description: "Time spent writing checkpoint files to disk" - - checkpoint_sync_time_total: - usage: "COUNTER" - description: "Time spent synchronizing checkpoint files to disk" - - buffers_checkpoint_total: - usage: "COUNTER" - description: "Buffers 
written during checkpoints" - - buffers_clean_total: - usage: "COUNTER" - description: "Buffers written by bg writter" - - maxwritten_clean_total: - usage: "COUNTER" - description: "Number of times bg writer stopped a cleaning scan because it had written too many buffers" - - buffers_backend_total: - usage: "COUNTER" - description: "Buffers written directly by a backend" - - buffers_backend_fsync_total: - usage: "COUNTER" - description: "fsync calls executed by a backend directly" - - buffers_alloc_total: - usage: "COUNTER" - description: "Buffers allocated" - - stats_reset: - usage: "COUNTER" - description: "Most recent stat reset time" - -pg_stat_database: - master: true - cache_seconds: 60 - query: | - SELECT sum(numbackends) as num_backends, - sum(xact_commit) as xact_commit_total, - sum(xact_rollback) as xact_rollback_total, - sum(blks_read) as blks_read_total, - sum(blks_hit) as blks_hit_total, - sum(tup_returned) as tup_returned_total, - sum(tup_fetched) as tup_fetched_total, - sum(tup_inserted) as tup_inserted_total, - sum(tup_updated) as tup_updated_total, - sum(tup_deleted) as tup_deleted_total, - sum(conflicts) as conflicts_total, - sum(temp_files) as temp_files_total, - sum(temp_bytes) as temp_bytes_total, - sum(deadlocks) as deadlocks_total, - max(stats_reset) as most_recent_reset - FROM pg_stat_database - metrics: - - num_backends: - usage: "GAUGE" - description: "The number of active backends" - - xact_commit_total: - usage: "COUNTER" - description: "Transactions committed" - - xact_rollback_total: - usage: "COUNTER" - description: "Transactions rolled back" - - blks_read_total: - usage: "COUNTER" - description: "Number of disk blocks read" - - blks_hit_total: - usage: "COUNTER" - description: "Disk blocks found in buffer cache" - - tup_returned_total: - usage: "COUNTER" - description: "Rows returned by queries" - - tup_fetched_total: - usage: "COUNTER" - description: "Rows fetched by queries" - - tup_inserted_total: - usage: "COUNTER" - description: 
"Rows inserted" - - tup_updated_total: - usage: "COUNTER" - description: "Rows updated" - - tup_deleted_total: - usage: "COUNTER" - description: "Rows deleted" - - conflicts_total: - usage: "COUNTER" - description: "Queries canceled due to conflicts with recovery" - - temp_files_total: - usage: "COUNTER" - description: "Temp files created by queries" - - temp_bytes_total: - usage: "COUNTER" - description: "Temp data written by queries" - - deadlocks_total: - usage: "COUNTER" - description: "Deadlocks detected" - - most_recent_reset: - usage: "COUNTER" - description: "The most recent time one of the databases had its statistics reset" - -pg_stat_database_conflicts: - master: true - cache_seconds: 60 - query: | - SELECT sum(confl_tablespace) as confl_tablespace_total, - sum(confl_lock) as confl_lock_total, - sum(confl_snapshot) as confl_snapshot_total, - sum(confl_bufferpin) as confl_bufferpin_total, - sum(confl_deadlock) as confl_deadlock_total - from pg_stat_database_conflicts - metrics: - - confl_tablespace_total: - usage: "COUNTER" - description: "Queries cancelled due to dropped tablespaces" - - confl_lock_total: - usage: "COUNTER" - description: "Queries cancelled due to lock timeouts" - - confl_snapshot_total: - usage: "COUNTER" - description: "Queries cancelled due to old snapshots" - - confl_bufferpin_total: - usage: "COUNTER" - description: "Queries cancelled due to pinned buffers" - - confl_deadlock_total: - usage: "COUNTER" - description: "Queries cancelled due to deadlocks" - -pg_stat_statements: - master: true - cache_seconds: 60 - query: "SELECT sum(calls) as total_queries, sum(total_exec_time / 1000) as total_time_seconds FROM extensions.pg_stat_statements t1 JOIN pg_database t3 ON (t1.dbid=t3.oid)" - metrics: - - total_queries: - usage: "COUNTER" - description: "Number of times executed" - - total_time_seconds: - usage: "COUNTER" - description: "Total time spent, in seconds" - -pg_ls_archive_statusdir: - master: true - cache_seconds: 60 - query: 
"select count(*) as wal_pending_count from pg_ls_archive_statusdir() where name like '%.ready'" - metrics: - - wal_pending_count: - usage: "COUNTER" - description: "Number of not yet archived WAL files" - -auth_users: - master: true - cache_seconds: 21600 # 6 hours - query: "select count(id) as user_count from auth.users" - metrics: - - user_count: - usage: "GAUGE" - description: "Number of users in the project db" - -realtime: - master: true - cache_seconds: 60 - query: "select count(1) as postgres_changes_total_subscriptions, count(distinct subscription_id) as postgres_changes_client_subscriptions from realtime.subscription" - metrics: - - postgres_changes_total_subscriptions: - usage: "GAUGE" - description: "Total subscription records listening for Postgres changes" - - postgres_changes_client_subscriptions: - usage: "GAUGE" - description: "Client subscriptions listening for Postgres changes" - -replication: - master: true - cache_seconds: 60 - query: "SELECT slot_name, pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn) AS realtime_lag_bytes, active AS realtime_slot_status FROM pg_replication_slots WHERE slot_name LIKE ANY (ARRAY['realtime', 'realtime_rls', 'tealbase_realtime_replication_slot%'])" - metrics: - - realtime_slot_name: - usage: "LABEL" - description: "Replication Slot Name for Realtime" - - realtime_lag_bytes: - usage: "GAUGE" - description: "Replication Lag for Realtime" - - realtime_slot_status: - usage: "GAUGE" - description: "Replication Slot Active Status" - -replication_slots: - master: true - cache_seconds: 60 - query: "SELECT max(pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn)) AS max_lag_bytes FROM pg_replication_slots" - metrics: - - max_lag_bytes: - usage: "GAUGE" - description: "Max Replication Lag" - -storage: - master: true - cache_seconds: 60 - query: "select sum(size) / (1024 * 1024) as storage_size_mb from storage.get_size_by_bucket()" - metrics: - - storage_size_mb: - usage: "GAUGE" - description: "The total size used for all 
storage buckets, in mb" - -tealbase_usage_metrics: - # pg_stat_statements collects metrics from all databases on the cluster, so querying just the master db should be sufficient - master: true - cache_seconds: 60 - query: | - select sum(calls) as user_queries_total - from extensions.pg_stat_statements - where query <> 'SELECT version()' - and query <> 'BEGIN ISOLATION LEVEL READ COMMITTED READ ONLY' - and query <> 'COMMIT' - and query <> 'SET client_encoding = ''UTF8''' - and query <> 'SET client_min_messages TO WARNING' - and query <> 'LISTEN "ddl_command_end"' - and query <> 'LISTEN "pgrst"' - and query <> 'SELECT * FROM migrations ORDER BY id' - and query <> 'SELECT COUNT(*) = $1 FROM pg_publication WHERE pubname = $2' - and query <> 'SELECT COUNT(*) >= $1 FROM pg_replication_slots WHERE slot_name = $2' - and query <> 'SELECT EXISTS (SELECT schema_migrations.* FROM schema_migrations AS schema_migrations WHERE version = $1)' - and query <> 'SELECT current_setting($1)::integer, current_setting($2)' - and query <> 'SELECT pg_advisory_unlock($1)' - and query <> 'SELECT pg_try_advisory_lock($1)' - and query <> 'SELECT slot_name, pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn) FROM pg_replication_slots' - and query <> 'SELECT typname::text, oid::int4, typarray::int4 FROM pg_type WHERE typname IN ($1,$2) ORDER BY typname' - and query <> 'select * from schema_migrations' - and query <> 'set local schema ''''' - and query <> 'SELECT SUM(pg_database_size(pg_database.datname)) / ($1 * $2) as size_mb FROM pg_database' - and query not like 'select set_config(%' - and query not like '%LATERAL (SELECT * FROM pg_namespace WHERE pg_namespace.oid = other.relnamespace) AS ns2%' - and query not like '%LEFT JOIN (pg_collation co JOIN pg_namespace nco ON co.collnamespace = nco.oid)%' - and query not like '%LEFT JOIN pg_description as d ON d.objoid = p.oid%' - and query not like '%LEFT JOIN pg_description as d on d.objoid = c.oid%' - and query not like '%-- CTE to replace 
information_schema.key_column_usage to remove owner limit%' - and query not like '%join pg_namespace sch on sch.oid = tbl.relnamespace%' - and query not like '%select setdatabase, unnest(setconfig) as setting from pg_catalog.pg_db_role_setting%' - and lower(trim(regexp_replace(regexp_replace(query, E'\n', ' ', 'g'), E'\\s+', ' ', 'g'))) not in - ('with rows as ( select id from net.http_request_queue order by id limit $1 ) delete from net.http_request_queue q using rows where q.id = rows.id returning q.id, q.method, q.url, timeout_milliseconds, array(select key || $2 || value from jsonb_each_text(q.headers)), q.body', - 'with rows as ( select ctid from net._http_response where created < now() - $1 order by created limit $2 ) delete from net._http_response r using rows where r.ctid = rows.ctid', - -- version of query above before https://github.com/tealbase/pg_net/commit/eaa721e11761da07d01fc04b5114c90cd7973b83 - 'with rows as ( select ctid from net._http_response where created < $1 - $2 order by created limit $3 ) delete from net._http_response r using rows where r.ctid = rows.ctid', - 'select exists ( select $2 from pg_catalog.pg_class c where c.relname = $1 and c.relkind = $3 )', - 'select description from pg_namespace n left join pg_description d on d.objoid = n.oid where n.nspname = $1', - 'select concat(schemaname, $1, tablename, $2, policyname) as policy from pg_policies order by 1 desc', - 'select concat(table_schema, $1, table_name) as table from information_schema.tables where table_schema not like $2 and table_schema <> $3 order by 1 desc', - 'select concat(conrelid::regclass, $1, conname) as fk from pg_constraint where contype = $2 order by 1 desc', - 'select datname from pg_database where datallowconn = $1 order by oid asc', - 'select count(*) > $1 as pgsodium_enabled from pg_extension where extname = $2', - 'select count(*) > $1 as keys_created from pgsodium.key') - and query <> 'insert into schema_migrations (version) values ($1)' - -- temporarily 
included for older versions of pg_net - and query not like 'SELECT%FROM net.http_request_queue%' - and query not like 'DELETE FROM net.http_request_queue%' - and query not like '%source: project usage%' - and query not like 'select name, setting from pg_settings where name in ($1, $2)%' - and userid not in (select oid from pg_roles where rolname in ('authenticator', 'pgbouncer', 'tealbase_admin', 'tealbase_storage_admin')) - metrics: - - user_queries_total: - usage: "COUNTER" - description: "The total number of user queries executed" - -pg_settings: - master: true - cache-seconds: 30 - query: "SELECT COUNT(*) as default_transaction_read_only FROM pg_settings WHERE name = 'default_transaction_read_only' AND setting = 'on';" - metrics: - - default_transaction_read_only: - usage: "GAUGE" - description: "Default transaction mode set to read only" - -pg_status: - master: true - cache-seconds: 60 - query: "SELECT CASE WHEN pg_is_in_recovery() = false THEN 0 ELSE 1 END as in_recovery" - metrics: - - in_recovery: - usage: "GAUGE" - description: "Database in recovery" - -# specific to read replicas -# for primary databases, all columns will always return a value of 0 -# --- -# for checking replication lag (physical_replication_lag_seconds) -# we firstly check if the replica is connected to its primary -# and if last WAL received is equivalent to last WAL replayed -# if so return 0 -# otherwise calculate replication lag as per usual -physical_replication_lag: - master: true - cache-seconds: 60 - query: | - select - case - when (select count(*) from pg_stat_wal_receiver) = 1 and pg_last_wal_receive_lsn() = pg_last_wal_replay_lsn() - then 0 - else coalesce(extract(epoch from now() - pg_last_xact_replay_timestamp()),0) - end as physical_replication_lag_seconds, - case - when pg_is_in_recovery() - then case when pg_is_wal_replay_paused() = false then 0 else 1 end - else 0 - end as is_wal_replay_paused, - (select count(*) from pg_stat_wal_receiver) as is_connected_to_primary - 
metrics: - - physical_replication_lag_seconds: - usage: "GAUGE" - description: "Physical replication lag in seconds" - - is_wal_replay_paused: - usage: "GAUGE" - description: "Check if WAL replay has been paused" - - is_connected_to_primary: - usage: "GAUGE" - description: "Monitor connection to the primary database" diff --git a/docker/all-in-one/postgres-entrypoint.sh b/docker/all-in-one/postgres-entrypoint.sh deleted file mode 100755 index 2e4d657..0000000 --- a/docker/all-in-one/postgres-entrypoint.sh +++ /dev/null @@ -1,358 +0,0 @@ -#!/usr/bin/env bash - -# Downloaded from https://github.com/docker-library/postgres/raw/master/15/bullseye/docker-entrypoint.sh -# Changes needed to make adminapi able to read the recovery.signal file: -# -44: chmod 00700 "$PGDATA" || : -# +44: chmod 00750 "$PGDATA" || : -# -# We're already including the original file in the base postgres Docker image. - -set -Eeo pipefail - -# usage: file_env VAR [DEFAULT] -# ie: file_env 'XYZ_DB_PASSWORD' 'example' -# (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of -# "$XYZ_DB_PASSWORD" from a file, especially for Docker's secrets feature) -file_env() { - local var="$1" - local fileVar="${var}_FILE" - local def="${2:-}" - if [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then - printf >&2 'error: both %s and %s are set (but are exclusive)\n' "$var" "$fileVar" - exit 1 - fi - local val="$def" - if [ "${!var:-}" ]; then - val="${!var}" - elif [ "${!fileVar:-}" ]; then - val="$(< "${!fileVar}")" - fi - export "$var"="$val" - unset "$fileVar" -} - -# check to see if this file is being run or sourced from another script -_is_sourced() { - # https://unix.stackexchange.com/a/215279 - [ "${#FUNCNAME[@]}" -ge 2 ] \ - && [ "${FUNCNAME[0]}" = '_is_sourced' ] \ - && [ "${FUNCNAME[1]}" = 'source' ] -} - -# used to create initial postgres directories and if run as root, ensure ownership to the "postgres" user -docker_create_db_directories() { - local user; user="$(id -u)" - - mkdir -p "$PGDATA" - # 
ignore failure since there are cases where we can't chmod (and PostgreSQL might fail later anyhow - it's picky about permissions of this directory) - chmod 00750 "$PGDATA" || : - - # ignore failure since it will be fine when using the image provided directory; see also https://github.com/docker-library/postgres/pull/289 - mkdir -p /var/run/postgresql || : - chmod 03775 /var/run/postgresql || : - - # Create the transaction log directory before initdb is run so the directory is owned by the correct user - if [ -n "${POSTGRES_INITDB_WALDIR:-}" ]; then - mkdir -p "$POSTGRES_INITDB_WALDIR" - if [ "$user" = '0' ]; then - find "$POSTGRES_INITDB_WALDIR" \! -user postgres -exec chown postgres '{}' + - fi - chmod 700 "$POSTGRES_INITDB_WALDIR" - fi - - # allow the container to be started with `--user` - if [ "$user" = '0' ]; then - find "$PGDATA" \! -user postgres -exec chown postgres '{}' + - find /var/run/postgresql \! -user postgres -exec chown postgres '{}' + - fi -} - -# initialize empty PGDATA directory with new database via 'initdb' -# arguments to `initdb` can be passed via POSTGRES_INITDB_ARGS or as arguments to this function -# `initdb` automatically creates the "postgres", "template0", and "template1" dbnames -# this is also where the database user is created, specified by `POSTGRES_USER` env -docker_init_database_dir() { - # "initdb" is particular about the current user existing in "/etc/passwd", so we use "nss_wrapper" to fake that if necessary - # see https://github.com/docker-library/postgres/pull/253, https://github.com/docker-library/postgres/issues/359, https://cwrap.org/nss_wrapper.html - local uid; uid="$(id -u)" - if ! 
getent passwd "$uid" &> /dev/null; then - # see if we can find a suitable "libnss_wrapper.so" (https://salsa.debian.org/sssd-team/nss-wrapper/-/commit/b9925a653a54e24d09d9b498a2d913729f7abb15) - local wrapper - for wrapper in {/usr,}/lib{/*,}/libnss_wrapper.so; do - if [ -s "$wrapper" ]; then - NSS_WRAPPER_PASSWD="$(mktemp)" - NSS_WRAPPER_GROUP="$(mktemp)" - export LD_PRELOAD="$wrapper" NSS_WRAPPER_PASSWD NSS_WRAPPER_GROUP - local gid; gid="$(id -g)" - printf 'postgres:x:%s:%s:PostgreSQL:%s:/bin/false\n' "$uid" "$gid" "$PGDATA" > "$NSS_WRAPPER_PASSWD" - printf 'postgres:x:%s:\n' "$gid" > "$NSS_WRAPPER_GROUP" - break - fi - done - fi - - if [ -n "${POSTGRES_INITDB_WALDIR:-}" ]; then - set -- --waldir "$POSTGRES_INITDB_WALDIR" "$@" - fi - - # --pwfile refuses to handle a properly-empty file (hence the "\n"): https://github.com/docker-library/postgres/issues/1025 - eval 'initdb --username="$POSTGRES_USER" --pwfile=<(printf "%s\n" "$POSTGRES_PASSWORD") '"$POSTGRES_INITDB_ARGS"' "$@"' - - # unset/cleanup "nss_wrapper" bits - if [[ "${LD_PRELOAD:-}" == */libnss_wrapper.so ]]; then - rm -f "$NSS_WRAPPER_PASSWD" "$NSS_WRAPPER_GROUP" - unset LD_PRELOAD NSS_WRAPPER_PASSWD NSS_WRAPPER_GROUP - fi -} - -# print large warning if POSTGRES_PASSWORD is long -# error if both POSTGRES_PASSWORD is empty and POSTGRES_HOST_AUTH_METHOD is not 'trust' -# print large warning if POSTGRES_HOST_AUTH_METHOD is set to 'trust' -# assumes database is not set up, ie: [ -z "$DATABASE_ALREADY_EXISTS" ] -docker_verify_minimum_env() { - # check password first so we can output the warning before postgres - # messes it up - if [ "${#POSTGRES_PASSWORD}" -ge 100 ]; then - cat >&2 <<-'EOWARN' - - WARNING: The supplied POSTGRES_PASSWORD is 100+ characters. - - This will not work if used via PGPASSWORD with "psql". 
- - https://www.postgresql.org/message-id/flat/E1Rqxp2-0004Qt-PL%40wrigleys.postgresql.org (BUG #6412) - https://github.com/docker-library/postgres/issues/507 - - EOWARN - fi - if [ -z "$POSTGRES_PASSWORD" ] && [ 'trust' != "$POSTGRES_HOST_AUTH_METHOD" ]; then - # The - option suppresses leading tabs but *not* spaces. :) - cat >&2 <<-'EOE' - Error: Database is uninitialized and superuser password is not specified. - You must specify POSTGRES_PASSWORD to a non-empty value for the - superuser. For example, "-e POSTGRES_PASSWORD=password" on "docker run". - - You may also use "POSTGRES_HOST_AUTH_METHOD=trust" to allow all - connections without a password. This is *not* recommended. - - See PostgreSQL documentation about "trust": - https://www.postgresql.org/docs/current/auth-trust.html - EOE - exit 1 - fi - if [ 'trust' = "$POSTGRES_HOST_AUTH_METHOD" ]; then - cat >&2 <<-'EOWARN' - ******************************************************************************** - WARNING: POSTGRES_HOST_AUTH_METHOD has been set to "trust". This will allow - anyone with access to the Postgres port to access your database without - a password, even if POSTGRES_PASSWORD is set. See PostgreSQL - documentation about "trust": - https://www.postgresql.org/docs/current/auth-trust.html - In Docker's default configuration, this is effectively any other - container on the same system. - - It is not recommended to use POSTGRES_HOST_AUTH_METHOD=trust. Replace - it with "-e POSTGRES_PASSWORD=password" instead to set a password in - "docker run". 
- ******************************************************************************** - EOWARN - fi -} - -# usage: docker_process_init_files [file [file [...]]] -# ie: docker_process_init_files /always-initdb.d/* -# process initializer files, based on file extensions and permissions -docker_process_init_files() { - # psql here for backwards compatibility "${psql[@]}" - psql=( docker_process_sql ) - - printf '\n' - local f - for f; do - case "$f" in - *.sh) - # https://github.com/docker-library/postgres/issues/450#issuecomment-393167936 - # https://github.com/docker-library/postgres/pull/452 - if [ -x "$f" ]; then - printf '%s: running %s\n' "$0" "$f" - "$f" - else - printf '%s: sourcing %s\n' "$0" "$f" - . "$f" - fi - ;; - *.sql) printf '%s: running %s\n' "$0" "$f"; docker_process_sql -f "$f"; printf '\n' ;; - *.sql.gz) printf '%s: running %s\n' "$0" "$f"; gunzip -c "$f" | docker_process_sql; printf '\n' ;; - *.sql.xz) printf '%s: running %s\n' "$0" "$f"; xzcat "$f" | docker_process_sql; printf '\n' ;; - *.sql.zst) printf '%s: running %s\n' "$0" "$f"; zstd -dc "$f" | docker_process_sql; printf '\n' ;; - *) printf '%s: ignoring %s\n' "$0" "$f" ;; - esac - printf '\n' - done -} - -# Execute sql script, passed via stdin (or -f flag of pqsl) -# usage: docker_process_sql [psql-cli-args] -# ie: docker_process_sql --dbname=mydb <<<'INSERT ...' 
-# ie: docker_process_sql -f my-file.sql -# ie: docker_process_sql > "$PGDATA/pg_hba.conf" -} - -# start socket-only postgresql server for setting up or running scripts -# all arguments will be passed along as arguments to `postgres` (via pg_ctl) -docker_temp_server_start() { - if [ "$1" = 'postgres' ]; then - shift - fi - - # internal start of server in order to allow setup using psql client - # does not listen on external TCP/IP and waits until start finishes - set -- "$@" -c listen_addresses='' -p "${PGPORT:-5432}" - - PGUSER="${PGUSER:-$POSTGRES_USER}" \ - pg_ctl -D "$PGDATA" \ - -o "$(printf '%q ' "$@")" \ - -w start -} - -# stop postgresql server after done setting up user and running scripts -docker_temp_server_stop() { - PGUSER="${PGUSER:-postgres}" \ - pg_ctl -D "$PGDATA" -m fast -w stop -} - -# check arguments for an option that would cause postgres to stop -# return true if there is one -_pg_want_help() { - local arg - for arg; do - case "$arg" in - # postgres --help | grep 'then exit' - # leaving out -C on purpose since it always fails and is unhelpful: - # postgres: could not access the server configuration file "/var/lib/postgresql/data/postgresql.conf": No such file or directory - -'?'|--help|--describe-config|-V|--version) - return 0 - ;; - esac - done - return 1 -} - -_main() { - # if first arg looks like a flag, assume we want to run postgres server - if [ "${1:0:1}" = '-' ]; then - set -- postgres "$@" - fi - - if [ "$1" = 'postgres' ] && ! 
_pg_want_help "$@"; then - docker_setup_env - # setup data directories and permissions (when run as root) - docker_create_db_directories - if [ "$(id -u)" = '0' ]; then - # then restart script as postgres user - exec gosu postgres "$BASH_SOURCE" "$@" - fi - - # only run initialization on an empty data directory - if [ -z "$DATABASE_ALREADY_EXISTS" ]; then - docker_verify_minimum_env - - # check dir permissions to reduce likelihood of half-initialized database - ls /docker-entrypoint-initdb.d/ > /dev/null - - docker_init_database_dir - pg_setup_hba_conf "$@" - - # PGPASSWORD is required for psql when authentication is required for 'local' connections via pg_hba.conf and is otherwise harmless - # e.g. when '--auth=md5' or '--auth-local=md5' is used in POSTGRES_INITDB_ARGS - export PGPASSWORD="${PGPASSWORD:-$POSTGRES_PASSWORD}" - docker_temp_server_start "$@" - - docker_setup_db - docker_process_init_files /docker-entrypoint-initdb.d/* - - docker_temp_server_stop - unset PGPASSWORD - - cat <<-'EOM' - - PostgreSQL init process complete; ready for start up. - - EOM - else - cat <<-'EOM' - - PostgreSQL Database directory appears to contain a database; Skipping initialization - - EOM - fi - fi - - exec "$@" -} - -if ! 
_is_sourced; then - _main "$@" -fi diff --git a/docker/all-in-one/run-logrotate.sh b/docker/all-in-one/run-logrotate.sh deleted file mode 100755 index 40805f8..0000000 --- a/docker/all-in-one/run-logrotate.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -set -eou pipefail - -while true; do - sleep 1800 - /usr/sbin/logrotate /etc/logrotate.conf --state "${DATA_VOLUME_MOUNTPOINT}/etc/logrotate/logrotate.state" --verbose -done diff --git a/docker/all-in-one/shutdown.sh b/docker/all-in-one/shutdown.sh deleted file mode 100755 index c974b98..0000000 --- a/docker/all-in-one/shutdown.sh +++ /dev/null @@ -1,96 +0,0 @@ -#!/bin/bash - -# This script provides a method of shutting down the machine/container when the database has been idle -# for a certain amount of time (configurable via the MAX_IDLE_TIME_MINUTES env var) -# -# It checks for any active (non-idle) connections and for any connections which have been idle for more than MAX_IDLE_TIME_MINUTES. -# If there are no active connections and no idle connections, it then checks if the last disconnection event happened more than MAX_IDLE_TIME_MINUTES ago. -# -# If all of these conditions are met, then Postgres is shut down, allowing it to wrap up any pending transactions (such as WAL shippipng) and gracefully exit. -# To terminate the machine/container, a SIGTERM signal is sent to the top-level process (supervisord) which will then shut down all other processes and exit. 
- -DEFAULT_MAX_IDLE_TIME_MINUTES=${MAX_IDLE_TIME_MINUTES:-5} -CONFIG_FILE_PATH=${CONFIG_FILE_PATH:-/etc/supa-shutdown/shutdown.conf} - -run_sql() { - psql -h localhost -U tealbase_admin -d postgres "$@" -} - -check_activity() { - pg_isready -h localhost > /dev/null 2>&1 || (echo "Postgres is not ready yet" && exit 1) - - QUERY=$(cat </dev/null || echo 0) - NOW=$(date +%s) - TIME_SINCE_LAST_DISCONNECT="$((NOW - LAST_DISCONNECT_TIME))" - - if [ $TIME_SINCE_LAST_DISCONNECT -gt "$((MAX_IDLE_TIME_MINUTES * 60))" ]; then - echo "$(date): No active connections for $MAX_IDLE_TIME_MINUTES minutes. Shutting down." - - supervisorctl stop postgresql - - # Postgres ships the latest WAL file using archive_command during shutdown, in a blocking operation - # This is to ensure that the WAL file is shipped, just in case - sleep 1 - - /usr/bin/admin-mgr lsn-checkpoint-push --immediately || echo "Failed to push LSN checkpoint" - - kill -s TERM "$(supervisorctl pid)" - fi -} - -# Wait for Postgres to be up -until pg_isready -h localhost > /dev/null 2>&1; - do sleep 3 -done - -# Enable logging of disconnections so the script can check when the last disconnection happened -run_sql -c "ALTER SYSTEM SET log_disconnections = 'on';" -run_sql -c "SELECT pg_reload_conf();" - -sleep $((DEFAULT_MAX_IDLE_TIME_MINUTES * 60)) -while true; do - if [ -f "$CONFIG_FILE_PATH" ]; then - source "$CONFIG_FILE_PATH" - - if [ -z "$SHUTDOWN_IDLE_TIME_MINUTES" ]; then - MAX_IDLE_TIME_MINUTES="$DEFAULT_MAX_IDLE_TIME_MINUTES" - else - MAX_IDLE_TIME_MINUTES="$SHUTDOWN_IDLE_TIME_MINUTES" - fi - else - MAX_IDLE_TIME_MINUTES="$DEFAULT_MAX_IDLE_TIME_MINUTES" - fi - - if [ "$MAX_IDLE_TIME_MINUTES" -gt 0 ] && [ "$MAX_IDLE_TIME_MINUTES" -lt 50000000 ]; then - check_activity - fi - - sleep 30 -done diff --git a/docker/nix/build_nix.sh b/docker/nix/build_nix.sh old mode 100644 new mode 100755 index 5035be0..118fdf6 --- a/docker/nix/build_nix.sh +++ b/docker/nix/build_nix.sh @@ -5,13 +5,5 @@ nix --version if [ -d 
"/workspace" ]; then cd /workspace fi -SYSTEM=$(nix-instantiate --eval -E builtins.currentSystem | tr -d '"') -nix build .#psql_15/bin -o psql_15 -nix flake check -L -nix copy --to s3://nix-postgres-artifacts?secret-key=nix-secret-key ./psql_15 -if [ "$SYSTEM" = "aarch64-linux" ]; then - nix build .#postgresql_15_debug -o ./postgresql_15_debug - nix build .#postgresql_15_src -o ./postgresql_15_src - nix copy --to s3://nix-postgres-artifacts?secret-key=nix-secret-key ./postgresql_15_debug-debug - nix copy --to s3://nix-postgres-artifacts?secret-key=nix-secret-key ./postgresql_15_src -fi + +nix run "github:Mic92/nix-fast-build?rev=b1dae483ab7d4139a6297e02b6de9e5d30e43d48" -- --skip-cached --no-nom --flake ".#checks" diff --git a/docker/orioledb/Dockerfile b/docker/orioledb/Dockerfile deleted file mode 100644 index 5581b4e..0000000 --- a/docker/orioledb/Dockerfile +++ /dev/null @@ -1,1059 +0,0 @@ -# syntax=docker/dockerfile:1.6 -ARG postgresql_major=15 -ARG postgresql_release=${postgresql_major}.1 - -# Bump default build arg to build a package from source -# Bump vars.yml to specify runtime package version -ARG sfcgal_release=1.3.10 -ARG postgis_release=3.3.2 -ARG pgrouting_release=3.4.1 -ARG pgtap_release=1.2.0 -ARG pg_cron_release=1.6.2 -ARG pgaudit_release=1.7.0 -ARG pgjwt_release=9742dab1b2f297ad3811120db7b21451bca2d3c9 -ARG pgsql_http_release=1.5.0 -ARG plpgsql_check_release=2.2.5 -ARG pg_safeupdate_release=1.4 -ARG timescaledb_release=2.9.1 -ARG wal2json_release=2_5 -ARG pljava_release=1.6.4 -ARG plv8_release=3.1.5 -ARG pg_plan_filter_release=5081a7b5cb890876e67d8e7486b6a64c38c9a492 -ARG pg_net_release=0.9.2 -ARG rum_release=1.3.13 -ARG pg_hashids_release=cd0e1b31d52b394a0df64079406a14a4f7387cd6 -ARG libsodium_release=1.0.18 -ARG pgsodium_release=3.1.6 -ARG pg_graphql_release=1.5.1 -ARG pg_stat_monitor_release=1.1.1 -ARG pg_jsonschema_release=0.2.0 -ARG pg_repack_release=1.4.8 -ARG vault_release=0.2.8 -ARG groonga_release=12.0.8 -ARG pgroonga_release=2.4.0 -ARG 
wrappers_release=0.2.0 -ARG hypopg_release=1.3.1 -ARG pgvector_release=0.4.0 -ARG pg_tle_release=1.3.2 -ARG index_advisor_release=0.2.0 -ARG supautils_release=2.5.0 -ARG wal_g_release=2.0.1 - -#################### -# Install postgres -#################### -FROM orioledb/orioledb:latest-pg${postgresql_major}-ubuntu as base -# Redeclare args for use in subsequent stages -ARG TARGETARCH -ARG postgresql_major - -ENV PATH=$PATH:/usr/lib/postgresql/${postgresql_major}/bin -ENV PGDATA=/var/lib/postgresql/data - -# Make the "en_US.UTF-8" locale so postgres will be utf-8 enabled by default -# RUN localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 -ENV LANG=en_US.UTF-8 -ENV LC_CTYPE=C.UTF-8 -ENV LC_COLLATE=C - -FROM base as builder -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - build-essential \ - checkinstall \ - cmake \ - && rm -rf /var/lib/apt/lists/* /tmp/* - -FROM builder as ccache -# Cache large build artifacts -RUN apt-get update && apt-get install -y --no-install-recommends \ - clang \ - ccache \ - && rm -rf /var/lib/apt/lists/* -ENV CCACHE_DIR=/ccache -ENV PATH=/usr/lib/ccache:$PATH -# Used to update ccache -ARG CACHE_EPOCH - -FROM builder as rust-toolchain -ENV PATH=/root/.cargo/bin:$PATH -RUN apt-get update && apt-get install -y --no-install-recommends curl pkg-config && \ - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --profile minimal --default-toolchain stable && \ - rustup --version && \ - rustc --version && \ - cargo --version - -RUN cargo install cargo-pgrx --version 0.10.2 --locked -RUN cargo pgrx init --pg${postgresql_major} $(which pg_config) - -#################### -# 01-postgis.yml -#################### -FROM ccache as sfcgal -# Download and extract -ARG sfcgal_release -ARG sfcgal_release_checksum -ADD --checksum=${sfcgal_release_checksum} \ - 
"https://tealbase-public-artifacts-bucket.s3.amazonaws.com/sfcgal/SFCGAL-v${sfcgal_release}.tar.gz" \ - /tmp/sfcgal.tar.gz -RUN tar -xvf /tmp/sfcgal.tar.gz -C /tmp --one-top-level --strip-components 1 && \ - rm -rf /tmp/sfcgal.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - libcgal-dev \ - libboost-serialization1.71-dev \ - libmpfr-dev \ - libgmp-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/sfcgal/build -RUN cmake .. -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=yes --fstrans=no --backup=no --pakdir=/tmp --pkgname=sfcgal --pkgversion=${sfcgal_release} --requires=libgmpxx4ldbl,libboost-serialization1.71.0,libmpfr6 --nodoc - -FROM sfcgal as postgis-source -# Download and extract -ARG postgis_release -ARG postgis_release_checksum -ADD --checksum=${postgis_release_checksum} \ - "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/postgis-${postgis_release}.tar.gz" \ - /tmp/postgis.tar.gz -RUN tar -xvf /tmp/postgis.tar.gz -C /tmp && \ - rm -rf /tmp/postgis.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - protobuf-c-compiler \ - libgeos-dev \ - libproj-dev \ - libgdal-dev \ - libjson-c-dev \ - libxml2-dev \ - libprotobuf-c-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/postgis-${postgis_release} -RUN ./configure --with-sfcgal -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --requires=libgeos-c1v5,libproj15,libjson-c4,libprotobuf-c1,libgdal26 --nodoc - -#################### -# 02-pgrouting.yml -#################### -FROM ccache as pgrouting-source -# Download and extract -ARG pgrouting_release -ARG pgrouting_release_checksum -ADD 
--checksum=${pgrouting_release_checksum} \ - "https://github.com/pgRouting/pgrouting/releases/download/v${pgrouting_release}/pgrouting-${pgrouting_release}.tar.gz" \ - /tmp/pgrouting.tar.gz -RUN tar -xvf /tmp/pgrouting.tar.gz -C /tmp && \ - rm -rf /tmp/pgrouting.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - libboost-all-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/pgrouting-${pgrouting_release}/build -RUN cmake -DBUILD_HTML=OFF -DBUILD_DOXY=OFF .. -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgname=pgrouting --pkgversion=${pgrouting_release} --nodoc - -#################### -# 03-pgtap.yml -#################### -FROM builder as pgtap-source -# Download and extract -ARG pgtap_release -ARG pgtap_release_checksum -ADD --checksum=${pgtap_release_checksum} \ - "https://github.com/theory/pgtap/archive/v${pgtap_release}.tar.gz" \ - /tmp/pgtap.tar.gz -RUN tar -xvf /tmp/pgtap.tar.gz -C /tmp && \ - rm -rf /tmp/pgtap.tar.gz -# Build from source -WORKDIR /tmp/pgtap-${pgtap_release} -RUN make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -#################### -# 04-pg_cron.yml -#################### -FROM ccache as pg_cron-source -# Download and extract -ARG pg_cron_release -ARG pg_cron_release_checksum -ADD --checksum=${pg_cron_release_checksum} \ - "https://github.com/citusdata/pg_cron/archive/refs/tags/v${pg_cron_release}.tar.gz" \ - /tmp/pg_cron.tar.gz -RUN tar -xvf /tmp/pg_cron.tar.gz -C /tmp && \ - rm -rf /tmp/pg_cron.tar.gz -# Build from source -WORKDIR /tmp/pg_cron-${pg_cron_release} -# error: redefinition of typedef 'snapshot_hook_type' is a C11 feature [-Werror,-Wtypedef-redefinition] -RUN sed -i -e "s|-std=c99|-std=c11|g" Makefile -RUN 
--mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -#################### -# 05-pgaudit.yml -#################### -FROM ccache as pgaudit-source -# Download and extract -ARG pgaudit_release -ARG pgaudit_release_checksum -ADD --checksum=${pgaudit_release_checksum} \ - "https://github.com/pgaudit/pgaudit/archive/refs/tags/${pgaudit_release}.tar.gz" \ - /tmp/pgaudit.tar.gz -RUN tar -xvf /tmp/pgaudit.tar.gz -C /tmp && \ - rm -rf /tmp/pgaudit.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - libssl-dev \ - libkrb5-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/pgaudit-${pgaudit_release} -ENV USE_PGXS=1 -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -#################### -# 06-pgjwt.yml -#################### -FROM builder as pgjwt-source -# Download and extract -ARG pgjwt_release -ADD "https://github.com/michelp/pgjwt.git#${pgjwt_release}" \ - /tmp/pgjwt-${pgjwt_release} -# Build from source -WORKDIR /tmp/pgjwt-${pgjwt_release} -RUN make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgversion=1 --nodoc - -#################### -# 07-pgsql-http.yml -#################### -FROM ccache as pgsql-http-source -# Download and extract -ARG pgsql_http_release -ARG pgsql_http_release_checksum -ADD --checksum=${pgsql_http_release_checksum} \ - "https://github.com/pramsey/pgsql-http/archive/refs/tags/v${pgsql_http_release}.tar.gz" \ - /tmp/pgsql-http.tar.gz -RUN tar -xvf /tmp/pgsql-http.tar.gz -C /tmp && \ - rm -rf /tmp/pgsql-http.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y 
--no-install-recommends \ - libcurl4-gnutls-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/pgsql-http-${pgsql_http_release} -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --requires=libcurl3-gnutls --nodoc - -#################### -# 08-plpgsql_check.yml -#################### -FROM ccache as plpgsql_check-source -# Download and extract -ARG plpgsql_check_release -ARG plpgsql_check_release_checksum -ADD --checksum=${plpgsql_check_release_checksum} \ - "https://github.com/okbob/plpgsql_check/archive/refs/tags/v${plpgsql_check_release}.tar.gz" \ - /tmp/plpgsql_check.tar.gz -RUN tar -xvf /tmp/plpgsql_check.tar.gz -C /tmp && \ - rm -rf /tmp/plpgsql_check.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - libicu-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/plpgsql_check-${plpgsql_check_release} -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -#################### -# 09-pg-safeupdate.yml -#################### -FROM ccache as pg-safeupdate-source -# Download and extract -ARG pg_safeupdate_release -ARG pg_safeupdate_release_checksum -ADD --checksum=${pg_safeupdate_release_checksum} \ - "https://github.com/eradman/pg-safeupdate/archive/refs/tags/${pg_safeupdate_release}.tar.gz" \ - /tmp/pg-safeupdate.tar.gz -RUN tar -xvf /tmp/pg-safeupdate.tar.gz -C /tmp && \ - rm -rf /tmp/pg-safeupdate.tar.gz -# Build from source -WORKDIR /tmp/pg-safeupdate-${pg_safeupdate_release} -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no 
--pakdir=/tmp --nodoc - -#################### -# 10-timescaledb.yml -#################### -FROM ccache as timescaledb-source -# Download and extract -ARG timescaledb_release -ARG timescaledb_release_checksum -ADD --checksum=${timescaledb_release_checksum} \ - "https://github.com/timescale/timescaledb/archive/refs/tags/${timescaledb_release}.tar.gz" \ - /tmp/timescaledb.tar.gz -RUN tar -xvf /tmp/timescaledb.tar.gz -C /tmp && \ - rm -rf /tmp/timescaledb.tar.gz -# Build from source -WORKDIR /tmp/timescaledb-${timescaledb_release}/build -RUN cmake -DAPACHE_ONLY=1 .. -# error: too few arguments to function ‘table_tuple_update’ -# error: too few arguments to function ‘table_tuple_delete’ -RUN sed -i \ - -e "1981s|);|, NULL);|g" \ - -e "2567s|);|, NULL);|g" \ - ../src/nodes/hypertable_modify.c -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgname=timescaledb --pkgversion=${timescaledb_release} --nodoc - -#################### -# 11-wal2json.yml -#################### -FROM ccache as wal2json-source -# Download and extract -ARG wal2json_release -ARG wal2json_release_checksum -ADD --checksum=${wal2json_release_checksum} \ - "https://github.com/eulerto/wal2json/archive/refs/tags/wal2json_${wal2json_release}.tar.gz" \ - /tmp/wal2json.tar.gz -RUN tar -xvf /tmp/wal2json.tar.gz -C /tmp --one-top-level --strip-components 1 && \ - rm -rf /tmp/wal2json.tar.gz -# Build from source -WORKDIR /tmp/wal2json -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -ENV version=${wal2json_release} -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgversion="\${version/_/.}" --nodoc - -#################### -# 12-pljava.yml -#################### -FROM builder as pljava-source -# Download and extract -# TODO: revert to using main repo after 
PG15 support is merged: https://github.com/tada/pljava/pull/413 -ARG pljava_release=master -ARG pljava_release_checksum=sha256:e99b1c52f7b57f64c8986fe6ea4a6cc09d78e779c1643db060d0ac66c93be8b6 -ADD --checksum=${pljava_release_checksum} \ - "https://github.com/tealbase/pljava/archive/refs/heads/${pljava_release}.tar.gz" \ - /tmp/pljava.tar.gz -RUN tar -xvf /tmp/pljava.tar.gz -C /tmp && \ - rm -rf /tmp/pljava.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - maven \ - default-jdk \ - libssl-dev \ - libkrb5-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/pljava-${pljava_release} -RUN mvn -T 1C clean install -Dmaven.test.skip -DskipTests -Dmaven.javadoc.skip=true -# Create debian package -RUN cp pljava-packaging/target/pljava-pg${postgresql_major}.jar /tmp/ - -#################### -# 13-plv8.yml -#################### -FROM ccache as plv8-source -# Download and extract -ARG plv8_release -ARG plv8_release_checksum -ADD --checksum=${plv8_release_checksum} \ - "https://github.com/plv8/plv8/archive/refs/tags/v${plv8_release}.tar.gz" \ - /tmp/plv8.tar.gz -RUN tar -xvf /tmp/plv8.tar.gz -C /tmp && \ - rm -rf /tmp/plv8.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - ca-certificates \ - pkg-config \ - ninja-build \ - git \ - libtinfo5 \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/plv8-${plv8_release} -ENV DOCKER=1 -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -FROM scratch as plv8-deb -COPY --from=plv8-source /tmp/*.deb /tmp/ - -FROM ghcr.io/tealbase/plv8:${plv8_release}-pg${postgresql_major} as plv8 - -#################### -# 14-pg_plan_filter.yml -#################### -FROM ccache as pg_plan_filter-source -# Download and extract -ARG pg_plan_filter_release -ADD 
"https://github.com/pgexperts/pg_plan_filter.git#${pg_plan_filter_release}" \ - /tmp/pg_plan_filter-${pg_plan_filter_release} -# Build from source -WORKDIR /tmp/pg_plan_filter-${pg_plan_filter_release} -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgversion=1 --nodoc - -#################### -# 15-pg_net.yml -#################### -FROM ccache as pg_net-source -# Download and extract -ARG pg_net_release -ARG pg_net_release_checksum -ADD --checksum=${pg_net_release_checksum} \ - "https://github.com/tealbase/pg_net/archive/refs/tags/v${pg_net_release}.tar.gz" \ - /tmp/pg_net.tar.gz -RUN tar -xvf /tmp/pg_net.tar.gz -C /tmp && \ - rm -rf /tmp/pg_net.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - libcurl4-gnutls-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/pg_net-${pg_net_release} -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --requires=libcurl3-gnutls --nodoc - -#################### -# 16-rum.yml -#################### -FROM ccache as rum-source -# Download and extract -ARG rum_release -ARG rum_release_checksum -ADD --checksum=${rum_release_checksum} \ - "https://github.com/postgrespro/rum/archive/refs/tags/${rum_release}.tar.gz" \ - /tmp/rum.tar.gz -RUN tar -xvf /tmp/rum.tar.gz -C /tmp && \ - rm -rf /tmp/rum.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - systemtap-sdt-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/rum-${rum_release} -RUN sed -i \ - # error: typedef redefinition with different types ('struct SortTuple' vs 'struct SortTuple') - -e "183s|^|// |g" \ - -e "184s|^|// |g" \ - -e 
"185s|^|// |g" \ - -e "186s|^|// |g" \ - -e "187s|^|// |g" \ - -e "188s|^|// |g" \ - -e "189s|^|// |g" \ - # error: static declaration of 'tuplesort_begin_common' follows non-static declaration - -e "621s|static ||g" \ - # error: static declaration of 'tuplesort_begin_common' follows non-static declaration - -e "846s|static ||g" \ - # error: static declaration of 'tuplesort_gettuple_common' follows non-static declaration - -e "2308s|static ||g" \ - src/tuplesort15.c -ENV USE_PGXS=1 -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -#################### -# 17-pg_hashids.yml -#################### -FROM ccache as pg_hashids-source -# Download and extract -ARG pg_hashids_release -ADD "https://github.com/iCyberon/pg_hashids.git#${pg_hashids_release}" \ - /tmp/pg_hashids-${pg_hashids_release} -# Build from source -WORKDIR /tmp/pg_hashids-${pg_hashids_release} -RUN make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgversion=1 --nodoc - -#################### -# 18-pgsodium.yml -#################### -FROM ccache as libsodium -# Download and extract -ARG libsodium_release -ARG libsodium_release_checksum -ADD --checksum=${libsodium_release_checksum} \ - "https://tealbase-public-artifacts-bucket.s3.amazonaws.com/libsodium/libsodium-${libsodium_release}.tar.gz" \ - /tmp/libsodium.tar.gz -RUN tar -xvf /tmp/libsodium.tar.gz -C /tmp && \ - rm -rf /tmp/libsodium.tar.gz -# Build from source -WORKDIR /tmp/libsodium-${libsodium_release} -RUN ./configure -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -RUN make install - -FROM libsodium as pgsodium-source -# Download and extract -ARG pgsodium_release -ARG pgsodium_release_checksum -ADD --checksum=${pgsodium_release_checksum} \ - 
"https://github.com/michelp/pgsodium/archive/refs/tags/v${pgsodium_release}.tar.gz" \ - /tmp/pgsodium.tar.gz -RUN tar -xvf /tmp/pgsodium.tar.gz -C /tmp && \ - rm -rf /tmp/pgsodium.tar.gz -# Build from source -WORKDIR /tmp/pgsodium-${pgsodium_release} -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --requires=libsodium23 --nodoc - -#################### -# 19-pg_graphql.yml -#################### -FROM rust-toolchain as pg_graphql-source -# Download and extract -ARG pg_graphql_release -ARG pg_graphql_release_checksum -ADD --checksum=${pg_graphql_release_checksum} \ - "https://github.com/tealbase/pg_graphql/archive/refs/tags/v${pg_graphql_release}.tar.gz" \ - /tmp/pg_graphql.tar.gz -RUN tar -xvf /tmp/pg_graphql.tar.gz -C /tmp && \ - rm -rf /tmp/pg_graphql.tar.gz -WORKDIR /tmp/pg_graphql-${pg_graphql_release} -RUN cargo pgrx package --no-default-features --features pg${postgresql_major} - -# Create installable package -RUN mkdir archive -RUN cp target/release/pg_graphql-pg${postgresql_major}/usr/local/share/postgresql/extension/pg_graphql* archive -RUN cp target/release/pg_graphql-pg${postgresql_major}/usr/local/lib/postgresql/pg_graphql.so archive - -# name of the package directory before packaging -ENV package_dir=pg_graphql-v${pg_graphql_release}-pg${postgresql_major}-${TARGETARCH}-linux-gnu - -## Copy files into directory structure -RUN mkdir -p ${package_dir}/usr/lib/postgresql/lib -RUN mkdir -p ${package_dir}/var/lib/postgresql/extension -RUN cp archive/*.so ${package_dir}/usr/lib/postgresql/lib -RUN cp archive/*.control ${package_dir}/var/lib/postgresql/extension -RUN cp archive/*.sql ${package_dir}/var/lib/postgresql/extension - -# symlinks to Copy files into directory structure -RUN mkdir -p ${package_dir}/usr/local/lib/postgresql -WORKDIR ${package_dir}/usr/local/lib/postgresql -RUN cp -s 
../../../lib/postgresql/lib/*.so . -WORKDIR ../../../../.. - -RUN mkdir -p ${package_dir}/usr/local/share/postgresql/extension -WORKDIR ${package_dir}/usr/local/share/postgresql/extension -RUN cp -s ../../../../../var/lib/postgresql/extension/pg_graphql.control . -RUN cp -s ../../../../../var/lib/postgresql/extension/pg_graphql*.sql . -WORKDIR ../../../../../.. - -RUN mkdir -p ${package_dir}/DEBIAN -RUN touch ${package_dir}/DEBIAN/control -RUN echo 'Package: pg-graphql' >> ${package_dir}/DEBIAN/control -RUN echo 'Version:' ${pg_graphql_release} >> ${package_dir}/DEBIAN/control -RUN echo "Architecture: ${TARGETARCH}" >> ${package_dir}/DEBIAN/control -RUN echo 'Maintainer: tealbase' >> ${package_dir}/DEBIAN/control -RUN echo 'Description: A PostgreSQL extension' >> ${package_dir}/DEBIAN/control - -# Create deb package -RUN chown -R root:root ${package_dir} -RUN chmod -R 00755 ${package_dir} -RUN dpkg-deb --build --root-owner-group ${package_dir} -RUN cp ./*.deb /tmp/pg_graphql.deb - -#################### -# 20-pg_stat_monitor.yml -#################### -FROM ccache as pg_stat_monitor-source -# Download and extract -ARG pg_stat_monitor_release -ARG pg_stat_monitor_release_checksum -ADD --checksum=${pg_stat_monitor_release_checksum} \ - "https://github.com/percona/pg_stat_monitor/archive/refs/tags/${pg_stat_monitor_release}.tar.gz" \ - /tmp/pg_stat_monitor.tar.gz -RUN tar -xvf /tmp/pg_stat_monitor.tar.gz -C /tmp && \ - rm -rf /tmp/pg_stat_monitor.tar.gz -# Build from source -WORKDIR /tmp/pg_stat_monitor-${pg_stat_monitor_release} -ENV USE_PGXS=1 -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -#################### -# 22-pg_jsonschema.yml -#################### -FROM rust-toolchain as pg_jsonschema-source -# Download and extract -ARG pg_jsonschema_release -ARG pg_jsonschema_release_checksum -ADD 
--checksum=${pg_jsonschema_release_checksum} \ - "https://github.com/tealbase/pg_jsonschema/archive/refs/tags/v${pg_jsonschema_release}.tar.gz" \ - /tmp/pg_jsonschema.tar.gz -RUN tar -xvf /tmp/pg_jsonschema.tar.gz -C /tmp && \ - rm -rf /tmp/pg_jsonschema.tar.gz -WORKDIR /tmp/pg_jsonschema-${pg_jsonschema_release} -RUN cargo pgrx package --no-default-features --features pg${postgresql_major} - -# Create installable package -RUN mkdir archive -RUN cp target/release/pg_jsonschema-pg${postgresql_major}/usr/local/share/postgresql/extension/pg_jsonschema* archive -RUN cp target/release/pg_jsonschema-pg${postgresql_major}/usr/local/lib/postgresql/pg_jsonschema.so archive - -# name of the package directory before packaging -ENV package_dir=pg_jsonschema-v${pg_jsonschema_release}-pg${postgresql_major}-${TARGETARCH}-linux-gnu - -## Copy files into directory structure -RUN mkdir -p ${package_dir}/usr/lib/postgresql/lib -RUN mkdir -p ${package_dir}/var/lib/postgresql/extension -RUN cp archive/*.so ${package_dir}/usr/lib/postgresql/lib -RUN cp archive/*.control ${package_dir}/var/lib/postgresql/extension -RUN cp archive/*.sql ${package_dir}/var/lib/postgresql/extension - -# symlinks to Copy files into directory structure -RUN mkdir -p ${package_dir}/usr/local/lib/postgresql -WORKDIR ${package_dir}/usr/local/lib/postgresql -RUN cp -s ../../../lib/postgresql/lib/*.so . -WORKDIR ../../../../.. - -RUN mkdir -p ${package_dir}/usr/local/share/postgresql/extension -WORKDIR ${package_dir}/usr/local/share/postgresql/extension - -RUN cp -s ../../../../../var/lib/postgresql/extension/pg_jsonschema.control . -RUN cp -s ../../../../../var/lib/postgresql/extension/pg_jsonschema*.sql . -WORKDIR ../../../../../.. 
- -RUN mkdir -p ${package_dir}/DEBIAN -RUN touch ${package_dir}/DEBIAN/control -RUN echo 'Package: pg-jsonschema' >> ${package_dir}/DEBIAN/control -RUN echo 'Version:' ${pg_jsonschema_release} >> ${package_dir}/DEBIAN/control -RUN echo "Architecture: ${TARGETARCH}" >> ${package_dir}/DEBIAN/control -RUN echo 'Maintainer: tealbase' >> ${package_dir}/DEBIAN/control -RUN echo 'Description: A PostgreSQL extension' >> ${package_dir}/DEBIAN/control - -# Create deb package -RUN chown -R root:root ${package_dir} -RUN chmod -R 00755 ${package_dir} -RUN dpkg-deb --build --root-owner-group ${package_dir} -RUN cp ./*.deb /tmp/pg_jsonschema.deb - -#################### -# 23-vault.yml -#################### -FROM builder as vault-source -# Download and extract -ARG vault_release -ARG vault_release_checksum -ADD --checksum=${vault_release_checksum} \ - "https://github.com/tealbase/vault/archive/refs/tags/v${vault_release}.tar.gz" \ - /tmp/vault.tar.gz -RUN tar -xvf /tmp/vault.tar.gz -C /tmp && \ - rm -rf /tmp/vault.tar.gz -# Build from source -WORKDIR /tmp/vault-${vault_release} -RUN make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -#################### -# 24-pgroonga.yml -#################### -FROM ccache as groonga -# Download and extract -ARG groonga_release -ARG groonga_release_checksum -ADD --checksum=${groonga_release_checksum} \ - "https://packages.groonga.org/source/groonga/groonga-${groonga_release}.tar.gz" \ - /tmp/groonga.tar.gz -RUN tar -xvf /tmp/groonga.tar.gz -C /tmp && \ - rm -rf /tmp/groonga.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - zlib1g-dev \ - liblz4-dev \ - libzstd-dev \ - libmsgpack-dev \ - libzmq3-dev \ - libevent-dev \ - libmecab-dev \ - rapidjson-dev \ - pkg-config \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/groonga-${groonga_release} -RUN ./configure -RUN 
--mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=yes --fstrans=no --backup=no --pakdir=/tmp --requires=zlib1g,liblz4-1,libzstd1,libmsgpackc2,libzmq5,libevent-2.1-7,libmecab2 --nodoc - -FROM groonga as pgroonga-source -# Download and extract -ARG pgroonga_release -ARG pgroonga_release_checksum -ADD --checksum=${pgroonga_release_checksum} \ - "https://packages.groonga.org/source/pgroonga/pgroonga-${pgroonga_release}.tar.gz" \ - /tmp/pgroonga.tar.gz -RUN tar -xvf /tmp/pgroonga.tar.gz -C /tmp && \ - rm -rf /tmp/pgroonga.tar.gz -# Build from source -WORKDIR /tmp/pgroonga-${pgroonga_release} -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --requires=mecab-naist-jdic --nodoc - -FROM scratch as pgroonga-deb -COPY --from=pgroonga-source /tmp/*.deb /tmp/ - -#################### -# 25-wrappers.yml -#################### -FROM rust-toolchain as wrappers-source -# Required by wrappers 0.2.0 -RUN cargo install cargo-pgrx --version 0.11.0 --locked -RUN cargo pgrx init --pg${postgresql_major} $(which pg_config) -# Download and extract -ARG wrappers_release -ARG wrappers_release_checksum -ADD --checksum=${wrappers_release_checksum} \ - "https://github.com/tealbase/wrappers/archive/refs/tags/v${wrappers_release}.tar.gz" \ - /tmp/wrappers.tar.gz -RUN tar -xvf /tmp/wrappers.tar.gz -C /tmp && \ - rm -rf /tmp/wrappers.tar.gz -WORKDIR /tmp/wrappers-${wrappers_release}/wrappers -RUN cargo pgrx package --no-default-features --features pg${postgresql_major},all_fdws - -ENV extension_dir=target/release/wrappers-pg${postgresql_major}/usr/local/share/postgresql/extension - -# copy schema file to version update sql files -# Note: some version numbers may be skipped -RUN cp ${extension_dir}/wrappers--${wrappers_release}.sql 
${extension_dir}/wrappers--0.1.6--${wrappers_release}.sql -RUN cp ${extension_dir}/wrappers--${wrappers_release}.sql ${extension_dir}/wrappers--0.1.7--${wrappers_release}.sql -RUN cp ${extension_dir}/wrappers--${wrappers_release}.sql ${extension_dir}/wrappers--0.1.8--${wrappers_release}.sql -RUN cp ${extension_dir}/wrappers--${wrappers_release}.sql ${extension_dir}/wrappers--0.1.9--${wrappers_release}.sql -RUN cp ${extension_dir}/wrappers--${wrappers_release}.sql ${extension_dir}/wrappers--0.1.10--${wrappers_release}.sql -RUN cp ${extension_dir}/wrappers--${wrappers_release}.sql ${extension_dir}/wrappers--0.1.11--${wrappers_release}.sql -RUN cp ${extension_dir}/wrappers--${wrappers_release}.sql ${extension_dir}/wrappers--0.1.14--${wrappers_release}.sql -RUN cp ${extension_dir}/wrappers--${wrappers_release}.sql ${extension_dir}/wrappers--0.1.15--${wrappers_release}.sql -RUN cp ${extension_dir}/wrappers--${wrappers_release}.sql ${extension_dir}/wrappers--0.1.16--${wrappers_release}.sql -RUN cp ${extension_dir}/wrappers--${wrappers_release}.sql ${extension_dir}/wrappers--0.1.17--${wrappers_release}.sql -RUN cp ${extension_dir}/wrappers--${wrappers_release}.sql ${extension_dir}/wrappers--0.1.18--${wrappers_release}.sql - -# Create installable package -RUN mkdir archive -RUN cp target/release/wrappers-pg${postgresql_major}/usr/local/share/postgresql/extension/wrappers* archive -RUN cp target/release/wrappers-pg${postgresql_major}/usr/local/lib/postgresql/wrappers-${wrappers_release}.so archive - -# name of the package directory before packaging -ENV package_dir=wrappers-v${wrappers_release}-pg${postgresql_major}-${TARGETARCH}-linux-gnu - -## Copy files into directory structure -RUN mkdir -p ${package_dir}/usr/lib/postgresql/lib -RUN mkdir -p ${package_dir}/var/lib/postgresql/extension -RUN cp archive/*.so ${package_dir}/usr/lib/postgresql/lib -RUN cp archive/*.control ${package_dir}/var/lib/postgresql/extension -RUN cp archive/*.sql 
${package_dir}/var/lib/postgresql/extension - -# symlinks to Copy files into directory structure -RUN mkdir -p ${package_dir}/usr/local/lib/postgresql -WORKDIR ${package_dir}/usr/local/lib/postgresql -RUN cp -s ../../../lib/postgresql/lib/*.so . -WORKDIR ../../../../.. - -RUN mkdir -p ${package_dir}/usr/local/share/postgresql/extension -WORKDIR ${package_dir}/usr/local/share/postgresql/extension - -RUN cp -s ../../../../../var/lib/postgresql/extension/wrappers.control . -RUN cp -s ../../../../../var/lib/postgresql/extension/wrappers*.sql . -WORKDIR ../../../../../.. - -RUN mkdir -p ${package_dir}/DEBIAN -RUN touch ${package_dir}/DEBIAN/control -RUN echo 'Package: wrappers' >> ${package_dir}/DEBIAN/control -RUN echo 'Version:' ${wrappers_release} >> ${package_dir}/DEBIAN/control -RUN echo "Architecture: ${TARGETARCH}" >> ${package_dir}/DEBIAN/control -RUN echo 'Maintainer: tealbase' >> ${package_dir}/DEBIAN/control -RUN echo 'Description: A PostgreSQL extension' >> ${package_dir}/DEBIAN/control - -# Create deb package -RUN chown -R root:root ${package_dir} -RUN chmod -R 00755 ${package_dir} -RUN dpkg-deb --build --root-owner-group ${package_dir} -RUN cp ./*.deb /tmp/wrappers.deb - -#################### -# 26-hypopg.yml -#################### -FROM ccache as hypopg-source -# Download and extract -ARG hypopg_release -ARG hypopg_release_checksum -ADD --checksum=${hypopg_release_checksum} \ - "https://github.com/HypoPG/hypopg/archive/refs/tags/${hypopg_release}.tar.gz" \ - /tmp/hypopg.tar.gz -RUN tar -xvf /tmp/hypopg.tar.gz -C /tmp && \ - rm -rf /tmp/hypopg.tar.gz -# Build from source -WORKDIR /tmp/hypopg-${hypopg_release} -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -#################### - # 27-pg_repack.yml - #################### - FROM ccache as pg_repack-source - ARG pg_repack_release - ARG 
pg_repack_release_checksum - ADD --checksum=${pg_repack_release_checksum} \ - "https://github.com/reorg/pg_repack/archive/refs/tags/ver_${pg_repack_release}.tar.gz" \ - /tmp/pg_repack.tar.gz - RUN tar -xvf /tmp/pg_repack.tar.gz -C /tmp && \ - rm -rf /tmp/pg_repack.tar.gz - # Install build dependencies - RUN apt-get update && apt-get install -y --no-install-recommends \ - liblz4-dev \ - libz-dev \ - libzstd-dev \ - libreadline-dev \ - && rm -rf /var/lib/apt/lists/* - # Build from source - WORKDIR /tmp/pg_repack-ver_${pg_repack_release} - ENV USE_PGXS=1 - RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) - # Create debian package - RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --pkgversion=${pg_repack_release} --nodoc - -#################### -# 28-pgvector.yml -#################### -FROM ccache as pgvector-source -ARG pgvector_release -ARG pgvector_release_checksum -ADD --checksum=${pgvector_release_checksum} \ - "https://github.com/pgvector/pgvector/archive/refs/tags/v${pgvector_release}.tar.gz" \ - /tmp/pgvector.tar.gz -RUN tar -xvf /tmp/pgvector.tar.gz -C /tmp && \ - rm -rf /tmp/pgvector.tar.gz -# Build from source -WORKDIR /tmp/pgvector-${pgvector_release} -# error: the clang compiler does not support '-march=native' -RUN sed -i -e "s|-march=native||g" Makefile -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -#################### -# 29-pg_tle.yml -#################### -FROM ccache as pg_tle-source -ARG pg_tle_release -ARG pg_tle_release_checksum -ADD --checksum=${pg_tle_release_checksum} \ - "https://github.com/aws/pg_tle/archive/refs/tags/v${pg_tle_release}.tar.gz" \ - /tmp/pg_tle.tar.gz -RUN tar -xvf /tmp/pg_tle.tar.gz -C /tmp && \ - rm -rf /tmp/pg_tle.tar.gz -RUN apt-get update && apt-get install -y 
--no-install-recommends \ - flex \ - libkrb5-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/pg_tle-${pg_tle_release} -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -###################### -# 30-index_advisor.yml -###################### -FROM ccache as index_advisor -ARG index_advisor_release -ARG index_advisor_release_checksum -ADD --checksum=${index_advisor_release_checksum} \ - "https://github.com/olirice/index_advisor/archive/refs/tags/v${index_advisor_release}.tar.gz" \ - /tmp/index_advisor.tar.gz -RUN tar -xvf /tmp/index_advisor.tar.gz -C /tmp && \ - rm -rf /tmp/index_advisor.tar.gz -# Build from source -WORKDIR /tmp/index_advisor-${index_advisor_release} -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -#################### -# internal/supautils.yml -#################### -FROM ccache as supautils-source -ARG supautils_release -ARG supautils_release_tar_checksum -ADD --checksum=${supautils_release_tar_checksum} \ - "https://github.com/tealbase/supautils/archive/refs/tags/v${supautils_release}.tar.gz" \ - /tmp/supautils.tar.gz -RUN tar -xvf /tmp/supautils.tar.gz -C /tmp && \ - rm -rf /tmp/supautils.tar.gz -# Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - libicu-dev \ - && rm -rf /var/lib/apt/lists/* -# Build from source -WORKDIR /tmp/supautils-${supautils_release} -RUN --mount=type=cache,target=/ccache,from=public.ecr.aws/tealbase/postgres:ccache \ - make -j$(nproc) -# Create debian package -RUN checkinstall -D --install=no --fstrans=no --backup=no --pakdir=/tmp --nodoc - -#################### -# setup-wal-g.yml -#################### -FROM base as walg -ARG 
wal_g_release -# ADD "https://github.com/wal-g/wal-g/releases/download/v${wal_g_release}/wal-g-pg-ubuntu-20.04-${TARGETARCH}.tar.gz" /tmp/wal-g.tar.gz -RUN arch=$([ "$TARGETARCH" = "arm64" ] && echo "aarch64" || echo "$TARGETARCH") && \ - apt-get update && apt-get install -y --no-install-recommends curl && \ - curl -kL "https://github.com/wal-g/wal-g/releases/download/v${wal_g_release}/wal-g-pg-ubuntu-20.04-${arch}.tar.gz" -o /tmp/wal-g.tar.gz && \ - tar -xvf /tmp/wal-g.tar.gz -C /tmp && \ - rm -rf /tmp/wal-g.tar.gz && \ - mv /tmp/wal-g-pg-ubuntu*20.04-$arch /tmp/wal-g - -#################### -# Collect extension packages -#################### -FROM scratch as extensions -COPY --from=postgis-source /tmp/*.deb /tmp/ -COPY --from=pgrouting-source /tmp/*.deb /tmp/ -COPY --from=pgtap-source /tmp/*.deb /tmp/ -COPY --from=pg_cron-source /tmp/*.deb /tmp/ -COPY --from=pgaudit-source /tmp/*.deb /tmp/ -COPY --from=pgjwt-source /tmp/*.deb /tmp/ -COPY --from=pgsql-http-source /tmp/*.deb /tmp/ -COPY --from=plpgsql_check-source /tmp/*.deb /tmp/ -COPY --from=pg-safeupdate-source /tmp/*.deb /tmp/ -COPY --from=timescaledb-source /tmp/*.deb /tmp/ -COPY --from=wal2json-source /tmp/*.deb /tmp/ -# COPY --from=pljava /tmp/*.deb /tmp/ -COPY --from=plv8 /tmp/*.deb /tmp/ -COPY --from=pg_plan_filter-source /tmp/*.deb /tmp/ -COPY --from=pg_net-source /tmp/*.deb /tmp/ -COPY --from=rum-source /tmp/*.deb /tmp/ -COPY --from=pgsodium-source /tmp/*.deb /tmp/ -COPY --from=pg_hashids-source /tmp/*.deb /tmp/ -COPY --from=pg_graphql-source /tmp/*.deb /tmp/ -COPY --from=pg_stat_monitor-source /tmp/*.deb /tmp/ -COPY --from=pg_jsonschema-source /tmp/*.deb /tmp/ -COPY --from=vault-source /tmp/*.deb /tmp/ -COPY --from=pgroonga-source /tmp/*.deb /tmp/ -COPY --from=wrappers-source /tmp/*.deb /tmp/ -COPY --from=hypopg-source /tmp/*.deb /tmp/ -COPY --from=pg_repack-source /tmp/*.deb /tmp/ -COPY --from=pgvector-source /tmp/*.deb /tmp/ -COPY --from=pg_tle-source /tmp/*.deb /tmp/ -COPY --from=index_advisor 
/tmp/*.deb /tmp/ -COPY --from=supautils-source /tmp/*.deb /tmp/ - -#################### -# Build final image -#################### -FROM base as production - -# Setup extensions -COPY --from=extensions /tmp /tmp -COPY --from=walg /tmp/wal-g /usr/local/bin/ - -ENV DEBIAN_FRONTEND=noninteractive -RUN apt-get update && apt-get install -y --no-install-recommends \ - /tmp/*.deb \ - # Needed for anything using libcurl - # https://github.com/tealbase/postgres/issues/573 - ca-certificates \ - && rm -rf /var/lib/apt/lists/* /tmp/* - -# Initialise configs -COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql.conf.j2 /etc/postgresql/postgresql.conf -COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_hba.conf.j2 /etc/postgresql/pg_hba.conf -COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_ident.conf.j2 /etc/postgresql/pg_ident.conf -COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql-stdout-log.conf /etc/postgresql/logging.conf -COPY --chown=postgres:postgres ansible/files/postgresql_config/supautils.conf.j2 /etc/postgresql-custom/supautils.conf -COPY --chown=postgres:postgres ansible/files/postgresql_extension_custom_scripts /etc/postgresql-custom/extension-custom-scripts -COPY --chown=postgres:postgres ansible/files/pgsodium_getkey_urandom.sh.j2 /usr/lib/postgresql/${postgresql_major}/bin/pgsodium_getkey.sh -COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_read_replica.conf.j2 /etc/postgresql-custom/read-replica.conf -COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_walg.conf.j2 /etc/postgresql-custom/wal-g.conf -COPY --chown=postgres:postgres ansible/files/walg_helper_scripts/wal_fetch.sh /home/postgres/wal_fetch.sh -COPY ansible/files/walg_helper_scripts/wal_change_ownership.sh /root/wal_change_ownership.sh - -RUN sed -i \ - -e "s|#unix_socket_directories = '/tmp'|unix_socket_directories = '/var/run/postgresql'|g" \ - -e "s|#session_preload_libraries = 
''|session_preload_libraries = 'supautils'|g" \ - -e "s|shared_preload_libraries = '\(.*\)'|shared_preload_libraries = '\1, orioledb'|g" \ - -e "s|#max_wal_size = 1GB|max_wal_size = 8GB|g" \ - -e "s|#include = '/etc/postgresql-custom/supautils.conf'|include = '/etc/postgresql-custom/supautils.conf'|g" \ - -e "s|#include = '/etc/postgresql-custom/wal-g.conf'|include = '/etc/postgresql-custom/wal-g.conf'|g" /etc/postgresql/postgresql.conf && \ - echo "pljava.libjvm_location = '/usr/lib/jvm/java-11-openjdk-${TARGETARCH}/lib/server/libjvm.so'" >> /etc/postgresql/postgresql.conf && \ - echo "pgsodium.getkey_script= '/usr/lib/postgresql/${postgresql_major}/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ - echo "orioledb.main_buffers = 1GB" >> /etc/postgresql/postgresql.conf && \ - echo "orioledb.undo_buffers = 256MB" >> /etc/postgresql/postgresql.conf && \ - useradd --create-home --shell /bin/bash wal-g -G postgres && \ - mkdir -p /etc/postgresql-custom && \ - chown postgres:postgres /etc/postgresql-custom - -# Include schema migrations -COPY migrations/db /docker-entrypoint-initdb.d/ -COPY ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql /docker-entrypoint-initdb.d/init-scripts/00-schema.sql -COPY ansible/files/stat_extension.sql /docker-entrypoint-initdb.d/migrations/00-extension.sql - -# Patch upstream entrypoint script -RUN sed -i \ - -e "s|su-exec|gosu|g" \ - -e "s|PGHOST= PGHOSTADDR=|PGHOST=\$POSTGRES_HOST|g" \ - /usr/local/bin/docker-entrypoint.sh && \ - mv /usr/local/bin/docker-entrypoint.sh /usr/local/bin/orioledb-entrypoint.sh - -COPY docker/orioledb/entrypoint.sh /usr/local/bin/docker-entrypoint.sh - -HEALTHCHECK --interval=2s --timeout=2s --retries=10 CMD pg_isready -U postgres -h localhost -STOPSIGNAL SIGINT -EXPOSE 5432 - -ENV POSTGRES_HOST=/var/run/postgresql -CMD ["postgres", "-D", "/etc/postgresql"] diff --git a/docker/orioledb/entrypoint.sh b/docker/orioledb/entrypoint.sh deleted file mode 100755 index b9a460b..0000000 --- 
a/docker/orioledb/entrypoint.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash -set -eou pipefail - -PG_CONF=/etc/postgresql/postgresql.conf - -if [ "${S3_ENABLED:-}" == "true" ]; then - echo "Enabling OrioleDB S3 Backend..." - - echo " -archive_mode = on -archive_library = 'orioledb' -max_worker_processes = 50 # should fit orioledb.s3_num_workers as long as other workers -orioledb.s3_num_workers = 20 # should be enough for comfortable work -orioledb.s3_mode = true -orioledb.s3_host = '$S3_HOST' # replace with your bucket URL, accelerated buckets are recommended -orioledb.s3_region = '$S3_REGION' # replace with your S3 region -orioledb.s3_accesskey = '$S3_ACCESS_KEY' # replace with your S3 key -orioledb.s3_secretkey = '$S3_SECRET_KEY' # replace with your S3 secret key -" >> "$PG_CONF" -else - echo "Disabling OrioleDB S3 Backend..." - - sed -i \ - -e "/^archive_mode = on/d" \ - -e "/^archive_library = 'orioledb'/d" \ - -e "/^max_worker_processes = 50/d" \ - -e "/^orioledb.s3_num_workers = /d" \ - -e "/^orioledb.s3_mode = /d" \ - -e "/^orioledb.s3_host = /d" \ - -e "/^orioledb.s3_region = /d" \ - -e "/^orioledb.s3_accesskey = /d" \ - -e "/^orioledb.s3_secretkey = /d" \ - "$PG_CONF" -fi - -orioledb-entrypoint.sh "$@" diff --git a/ebssurrogate/files/sources-arm64.cfg b/ebssurrogate/files/sources-arm64.cfg index a236377..eed6c0f 100644 --- a/ebssurrogate/files/sources-arm64.cfg +++ b/ebssurrogate/files/sources-arm64.cfg @@ -1,10 +1,10 @@ -deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ focal main restricted -deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ focal-updates main restricted -deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ focal universe -deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ focal-updates universe -deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ focal multiverse -deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ focal-updates multiverse -deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ 
focal-backports main restricted universe multiverse -deb http://ports.ubuntu.com/ubuntu-ports focal-security main restricted -deb http://ports.ubuntu.com/ubuntu-ports focal-security universe -deb http://ports.ubuntu.com/ubuntu-ports focal-security multiverse +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ noble main restricted +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ noble-updates main restricted +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ noble universe +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ noble-updates universe +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ noble multiverse +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ noble-updates multiverse +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ noble-backports main restricted universe multiverse +deb http://ports.ubuntu.com/ubuntu-ports noble-security main restricted +deb http://ports.ubuntu.com/ubuntu-ports noble-security universe +deb http://ports.ubuntu.com/ubuntu-ports noble-security multiverse diff --git a/ebssurrogate/files/sources.cfg b/ebssurrogate/files/sources.cfg index ec30118..a27be05 100644 --- a/ebssurrogate/files/sources.cfg +++ b/ebssurrogate/files/sources.cfg @@ -1,10 +1,10 @@ -deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ focal main restricted -deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ focal-updates main restricted -deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ focal universe -deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ focal-updates universe -deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ focal multiverse -deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ focal-updates multiverse -deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ focal-backports main restricted universe multiverse -deb http://security.ubuntu.com/ubuntu focal-security main restricted -deb http://security.ubuntu.com/ubuntu focal-security universe -deb http://security.ubuntu.com/ubuntu focal-security multiverse +deb 
http://REGION.ec2.archive.ubuntu.com/ubuntu/ noble main restricted +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ noble-updates main restricted +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ noble universe +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ noble-updates universe +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ noble multiverse +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ noble-updates multiverse +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ noble-backports main restricted universe multiverse +deb http://security.ubuntu.com/ubuntu noble-security main restricted +deb http://security.ubuntu.com/ubuntu noble-security universe +deb http://security.ubuntu.com/ubuntu noble-security multiverse diff --git a/ebssurrogate/files/unit-tests/unit-test-01.sql b/ebssurrogate/files/unit-tests/unit-test-01.sql deleted file mode 100644 index 3f6ecdd..0000000 --- a/ebssurrogate/files/unit-tests/unit-test-01.sql +++ /dev/null @@ -1,33 +0,0 @@ -BEGIN; -CREATE EXTENSION IF NOT EXISTS pgtap; -SELECT plan(8); - --- Check installed extensions -SELECT extensions_are( - ARRAY[ - 'plpgsql', - 'pg_stat_statements', - 'pgsodium', - 'pgtap', - 'pg_graphql', - 'pgcrypto', - 'pgjwt', - 'uuid-ossp', - 'tealbase_vault' - ] -); - - --- Check schemas exists -SELECT has_schema('pg_toast'); -SELECT has_schema('pg_catalog'); -SELECT has_schema('information_schema'); -SELECT has_schema('public'); - --- Check that service_role can execute certain pgsodium functions -SELECT function_privs_are('pgsodium', 'crypto_aead_det_decrypt', array['bytea', 'bytea', 'uuid', 'bytea'], 'service_role', array['EXECUTE']); -SELECT function_privs_are('pgsodium', 'crypto_aead_det_encrypt', array['bytea', 'bytea', 'uuid', 'bytea'], 'service_role', array['EXECUTE']); -SELECT function_privs_are('pgsodium', 'crypto_aead_det_keygen', array[]::text[], 'service_role', array['EXECUTE']); - -SELECT * from finish(); -ROLLBACK; diff --git a/ebssurrogate/scripts/chroot-bootstrap-nix.sh 
b/ebssurrogate/scripts/chroot-bootstrap-nix.sh index e349556..90fd917 100755 --- a/ebssurrogate/scripts/chroot-bootstrap-nix.sh +++ b/ebssurrogate/scripts/chroot-bootstrap-nix.sh @@ -58,7 +58,7 @@ function update_install_packages { apt-get upgrade -y # Install OpenSSH and other packages - sudo add-apt-repository universe + sudo add-apt-repository --yes universe apt-get update apt-get install -y --no-install-recommends \ openssh-server \ @@ -157,10 +157,21 @@ function disable_fsck { # Don't request hostname during boot but set hostname function setup_hostname { - sed -i 's/gethostname()/ubuntu /g' /etc/dhcp/dhclient.conf - sed -i 's/host-name,//g' /etc/dhcp/dhclient.conf + # Set the static hostname echo "ubuntu" > /etc/hostname chmod 644 /etc/hostname + # Update netplan configuration to not send hostname + cat << EOF > /etc/netplan/01-hostname.yaml +network: + version: 2 + ethernets: + eth0: + dhcp4: true + dhcp4-overrides: + send-hostname: false +EOF + # Set proper permissions for netplan security + chmod 600 /etc/netplan/01-hostname.yaml } # Set options for the default interface @@ -172,6 +183,8 @@ network: eth0: dhcp4: true EOF + # Set proper permissions for netplan security + chmod 600 /etc/netplan/eth0.yaml } function disable_sshd_passwd_auth { diff --git a/ebssurrogate/scripts/chroot-bootstrap.sh b/ebssurrogate/scripts/chroot-bootstrap.sh deleted file mode 100755 index 8404bbc..0000000 --- a/ebssurrogate/scripts/chroot-bootstrap.sh +++ /dev/null @@ -1,204 +0,0 @@ -#!/usr/bin/env bash -# -# This script runs inside chrooted environment. It installs grub and its -# Configuration file. 
-# - -set -o errexit -set -o pipefail -set -o xtrace - -export DEBIAN_FRONTEND=noninteractive - -export APT_OPTIONS="-oAPT::Install-Recommends=false \ - -oAPT::Install-Suggests=false \ - -oAcquire::Languages=none" - -if [ $(dpkg --print-architecture) = "amd64" ]; -then - ARCH="amd64"; -else - ARCH="arm64"; -fi - - - -function update_install_packages { - source /etc/os-release - - # Update APT with new sources - cat /etc/apt/sources.list - apt-get $APT_OPTIONS update && apt-get $APT_OPTIONS --yes dist-upgrade - - # Do not configure grub during package install - if [ "${ARCH}" = "amd64" ]; then - echo 'grub-pc grub-pc/install_devices_empty select true' | debconf-set-selections - echo 'grub-pc grub-pc/install_devices select' | debconf-set-selections - # Install various packages needed for a booting system - apt-get install -y \ - linux-aws \ - grub-pc \ - e2fsprogs - else - apt-get install -y e2fsprogs - fi - # Install standard packages - apt-get install -y \ - sudo \ - wget \ - cloud-init \ - acpid \ - ec2-hibinit-agent \ - ec2-instance-connect \ - hibagent \ - ncurses-term \ - ssh-import-id \ - - # apt upgrade - apt-get upgrade -y - - # Install OpenSSH and other packages - sudo add-apt-repository universe - apt-get update - apt-get install -y --no-install-recommends \ - openssh-server \ - git \ - ufw \ - cron \ - logrotate \ - fail2ban \ - locales \ - at \ - less \ - python3-systemd - - if [ "${ARCH}" = "arm64" ]; then - apt-get $APT_OPTIONS --yes install linux-aws initramfs-tools dosfstools - fi -} - -function setup_locale { -cat << EOF >> /etc/locale.gen -en_US.UTF-8 UTF-8 -EOF - -cat << EOF > /etc/default/locale -LANG="C.UTF-8" -LC_CTYPE="C.UTF-8" -EOF - localedef -i en_US -f UTF-8 en_US.UTF-8 -} - -function install_packages_for_build { - apt-get install -y --no-install-recommends linux-libc-dev \ - acl \ - magic-wormhole sysstat \ - build-essential libreadline-dev zlib1g-dev flex bison libxml2-dev libxslt-dev libssl-dev libsystemd-dev libpq-dev libxml2-utils 
uuid-dev xsltproc ssl-cert \ - gcc-10 g++-10 \ - libgeos-dev libproj-dev libgdal-dev libjson-c-dev libboost-all-dev libcgal-dev libmpfr-dev libgmp-dev cmake \ - libkrb5-dev \ - maven default-jre default-jdk \ - curl gpp apt-transport-https cmake libc++-dev libc++abi-dev libc++1 libglib2.0-dev libtinfo5 libc++abi1 ninja-build python \ - liblzo2-dev - - source /etc/os-release - - apt-get install -y --no-install-recommends llvm-11-dev clang-11 - # Mark llvm as manual to prevent auto removal - apt-mark manual libllvm11:arm64 -} - -function setup_apparmor { - apt-get install -y apparmor apparmor-utils auditd - - # Copy apparmor profiles - cp -rv /tmp/apparmor_profiles/* /etc/apparmor.d/ -} - -function setup_grub_conf_arm64 { -cat << EOF > /etc/default/grub -GRUB_DEFAULT=0 -GRUB_TIMEOUT=0 -GRUB_TIMEOUT_STYLE="hidden" -GRUB_DISTRIBUTOR="tealbase postgresql" -GRUB_CMDLINE_LINUX_DEFAULT="nomodeset console=tty1 console=ttyS0 ipv6.disable=0" -EOF -} - -# Install GRUB -function install_configure_grub { - if [ "${ARCH}" = "arm64" ]; then - apt-get $APT_OPTIONS --yes install cloud-guest-utils fdisk grub-efi-arm64 efibootmgr - setup_grub_conf_arm64 - rm -rf /etc/grub.d/30_os-prober - sleep 1 - fi - grub-install /dev/xvdf && update-grub -} - -# skip fsck for first boot -function disable_fsck { - touch /fastboot -} - -# Don't request hostname during boot but set hostname -function setup_hostname { - sed -i 's/gethostname()/ubuntu /g' /etc/dhcp/dhclient.conf - sed -i 's/host-name,//g' /etc/dhcp/dhclient.conf - echo "ubuntu" > /etc/hostname - chmod 644 /etc/hostname -} - -# Set options for the default interface -function setup_eth0_interface { -cat << EOF > /etc/netplan/eth0.yaml -network: - version: 2 - ethernets: - eth0: - dhcp4: true -EOF -} - -function disable_sshd_passwd_auth { - sed -i -E -e 's/^#?\s*PasswordAuthentication\s+(yes|no)\s*$/PasswordAuthentication no/g' \ - -e 's/^#?\s*ChallengeResponseAuthentication\s+(yes|no)\s*$/ChallengeResponseAuthentication no/g' \ - 
/etc/ssh/sshd_config -} - -function create_admin_account { - groupadd admin -} - -#Set default target as multi-user -function set_default_target { - rm -f /etc/systemd/system/default.target - ln -s /lib/systemd/system/multi-user.target /etc/systemd/system/default.target -} - -# Setup ccache -function setup_ccache { - apt-get install ccache -y - mkdir -p /tmp/ccache - export PATH=/usr/lib/ccache:$PATH - echo "PATH=$PATH" >> /etc/environment -} - -# Clear apt caches -function cleanup_cache { - apt-get clean -} - -update_install_packages -setup_locale -#install_packages_for_build -install_configure_grub -setup_apparmor -setup_hostname -create_admin_account -set_default_target -setup_eth0_interface -disable_sshd_passwd_auth -disable_fsck -#setup_ccache -cleanup_cache diff --git a/ebssurrogate/scripts/qemu-bootstrap-nix.sh b/ebssurrogate/scripts/qemu-bootstrap-nix.sh new file mode 100755 index 0000000..b52b857 --- /dev/null +++ b/ebssurrogate/scripts/qemu-bootstrap-nix.sh @@ -0,0 +1,160 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail +set -o xtrace + +if [ $(dpkg --print-architecture) = "amd64" ]; then + ARCH="amd64" +else + ARCH="arm64" +fi + +function waitfor_boot_finished { + export DEBIAN_FRONTEND=noninteractive + + echo "args: ${ARGS}" + # Wait for cloudinit on the surrogate to complete before making progress + while [[ ! -f /var/lib/cloud/instance/boot-finished ]]; do + echo 'Waiting for cloud-init...' 
+ sleep 1 + done +} + +function install_packages { + apt-get update && sudo apt-get install software-properties-common e2fsprogs nfs-common -y + add-apt-repository --yes --update ppa:ansible/ansible && sudo apt-get install ansible -y + ansible-galaxy collection install community.general +} + +function execute_playbook { + + tee /etc/ansible/ansible.cfg </dev/null +LOCALE_ARCHIVE=/usr/lib/locale/locale-archive +LANG="en_US.UTF-8" +LANGUAGE="en_US.UTF-8" +LC_ALL="en_US.UTF-8" +LC_CTYPE="en_US.UTF-8" +EOF +} + +function setup_locale { + cat <>/etc/locale.gen +en_US.UTF-8 UTF-8 +EOF + + cat </etc/default/locale +LANG="C.UTF-8" +LC_CTYPE="C.UTF-8" +EOF + locale-gen en_US.UTF-8 +} + +sed -i 's/- hosts: all/- hosts: localhost/' ansible/playbook.yml + +waitfor_boot_finished +install_packages +setup_postgesql_env +setup_locale +execute_playbook + +#################### +# stage 2 things +#################### + +function install_nix() { + sudo su -c "curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install --no-confirm \ + --extra-conf \"substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com\" \ + --extra-conf \"trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=\" " -s /bin/bash root + . /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh + +} + +function execute_stage2_playbook { + sudo tee /etc/ansible/ansible.cfg < sda - - # Create /dev/xvd* device symlink - if [[ ! -z "$mapping" ]] && [[ -b "${blkdev}" ]] && [[ ! 
-L "${mapping}" ]]; then - ln -s "$blkdev" "$mapping" - - blkdev_mappings["$blkdev"]="$mapping" - fi - done - - create_partition_table - - # NVMe EBS launch device partition mappings (symlinks): /dev/nvme*n*p* to /dev/xvd*[0-9]+ - declare -A partdev_mappings - for blkdev in "${!blkdev_mappings[@]}"; do # /dev/nvme*n* - mapping="${blkdev_mappings[$blkdev]}" - - # Create /dev/xvd*[0-9]+ partition device symlink - for partdev in "${blkdev}"p*; do - partnum=${partdev##*p} - if [[ ! -L "${mapping}${partnum}" ]]; then - ln -s "${blkdev}p${partnum}" "${mapping}${partnum}" - - partdev_mappings["${blkdev}p${partnum}"]="${mapping}${partnum}" - fi - done - done -} - - -#Download and install latest e2fsprogs for fast_commit feature,if required. -function format_and_mount_rootfs { - mkfs.ext4 -m0.1 /dev/xvdf2 - - mount -o noatime,nodiratime /dev/xvdf2 /mnt - if [ "${ARCH}" = "arm64" ]; then - mkfs.fat -F32 /dev/xvdf1 - mkdir -p /mnt/boot/efi - sleep 2 - mount /dev/xvdf1 /mnt/boot/efi - fi - - mkfs.ext4 /dev/xvdh - mkdir -p /mnt/data - mount -o defaults,discard /dev/xvdh /mnt/data -} - -function create_swapfile { - fallocate -l 1G /mnt/swapfile - chmod 600 /mnt/swapfile - mkswap /mnt/swapfile -} - -function format_build_partition { - mkfs.ext4 -O ^has_journal /dev/xvdc -} -function pull_docker { - apt-get install -y docker.io - docker run -itd --name ccachedata "${DOCKER_IMAGE}:${DOCKER_IMAGE_TAG}" sh - docker exec -itd ccachedata mkdir -p /build/ccache -} - -# Create fstab -function create_fstab { - FMT="%-42s %-11s %-5s %-17s %-5s %s" -cat > "/mnt/etc/fstab" << EOF -$(printf "${FMT}" "# DEVICE UUID" "MOUNTPOINT" "TYPE" "OPTIONS" "DUMP" "FSCK") -$(findmnt -no SOURCE /mnt | xargs blkid -o export | awk -v FMT="${FMT}" '/^UUID=/ { printf(FMT, $0, "/", "ext4", "defaults,discard", "0", "1" ) }') -$(findmnt -no SOURCE /mnt/boot/efi | xargs blkid -o export | awk -v FMT="${FMT}" '/^UUID=/ { printf(FMT, $0, "/boot/efi", "vfat", "umask=0077", "0", "1" ) }') -$(findmnt -no SOURCE 
/mnt/data | xargs blkid -o export | awk -v FMT="${FMT}" '/^UUID=/ { printf(FMT, $0, "/data", "ext4", "defaults,discard", "0", "2" ) }') -$(printf "$FMT" "/swapfile" "none" "swap" "sw" "0" "0") -EOF - unset FMT -} - -function setup_chroot_environment { - UBUNTU_VERSION=$(lsb_release -cs) # 'focal' for Ubuntu 20.04 - - # Bootstrap Ubuntu into /mnt - debootstrap --arch ${ARCH} --variant=minbase "$UBUNTU_VERSION" /mnt - - # Update ec2-region - REGION=$(curl --silent --fail http://169.254.169.254/latest/meta-data/placement/availability-zone | sed -E 's|[a-z]+$||g') - sed -i "s/REGION/${REGION}/g" /tmp/sources.list - cp /tmp/sources.list /mnt/etc/apt/sources.list - - if [ "${ARCH}" = "arm64" ]; then - create_fstab - fi - - # Create mount points and mount the filesystem - mkdir -p /mnt/{dev,proc,sys} - mount --rbind /dev /mnt/dev - mount --rbind /proc /mnt/proc - mount --rbind /sys /mnt/sys - - # Create build mount point and mount - mkdir -p /mnt/tmp - mount /dev/xvdc /mnt/tmp - chmod 777 /mnt/tmp - - # Copy apparmor profiles - chmod 644 /tmp/apparmor_profiles/* - cp -r /tmp/apparmor_profiles /mnt/tmp/ - - # Copy migrations - cp -r /tmp/migrations /mnt/tmp/ - - # Copy unit tests - cp -r /tmp/unit-tests /mnt/tmp/ - - # Copy the bootstrap script into place and execute inside chroot - cp /tmp/chroot-bootstrap.sh /mnt/tmp/chroot-bootstrap.sh - chroot /mnt /tmp/chroot-bootstrap.sh - rm -f /mnt/tmp/chroot-bootstrap.sh - echo "${POSTGRES_tealbase_VERSION}" > /mnt/root/tealbase-release - - # Copy the nvme identification script into /sbin inside the chroot - mkdir -p /mnt/sbin - cp /tmp/ebsnvme-id /mnt/sbin/ebsnvme-id - chmod +x /mnt/sbin/ebsnvme-id - - # Copy the udev rules for identifying nvme devices into the chroot - mkdir -p /mnt/etc/udev/rules.d - cp /tmp/70-ec2-nvme-devices.rules \ - /mnt/etc/udev/rules.d/70-ec2-nvme-devices.rules - - #Copy custom cloud-init - rm -f /mnt/etc/cloud/cloud.cfg - cp /tmp/cloud.cfg /mnt/etc/cloud/cloud.cfg - - sleep 2 -} - -function 
download_ccache { - docker cp ccachedata:/build/ccache/. /mnt/tmp/ccache -} - -function execute_playbook { - -tee /etc/ansible/ansible.cfg <&2 - exit 1 - fi - - # Ensure the temporary directory is removed on exit - trap 'rm -rf "$TMPDIR"' EXIT - - export PGDATA="$TMPDIR/pgdata" - export PGSODIUM_DIR="$TMPDIR/pgsodium" - - mkdir -p $PGDATA - mkdir -p $TMPDIR/logfile - # Generate a random key and store it in an environment variable - export PGSODIUM_KEY=$(head -c 32 /dev/urandom | od -A n -t x1 | tr -d ' \n') - export GRN_PLUGINS_DIR=${tealbase-groonga}/lib/groonga/plugins - # Create a simple script to echo the key - echo '#!/bin/sh' > $TMPDIR/getkey.sh - echo 'echo $PGSODIUM_KEY' >> $TMPDIR/getkey.sh - chmod +x $TMPDIR/getkey.sh - initdb --locale=C --username=tealbase_admin - substitute ${./nix/tests/postgresql.conf.in} $PGDATA/postgresql.conf \ - --subst-var-by PGSODIUM_GETKEY_SCRIPT "$TMPDIR/getkey.sh" - echo "listen_addresses = '*'" >> $PGDATA/postgresql.conf - echo "port = 5432" >> $PGDATA/postgresql.conf - echo "host all all 127.0.0.1/32 trust" >> $PGDATA/pg_hba.conf - #postgres -D "$PGDATA" -k "$TMPDIR" -h localhost -p 5432 >$TMPDIR/logfile/postgresql.log 2>&1 & - pg_ctl -D "$PGDATA" -l $TMPDIR/logfile/postgresql.log -o "-k $TMPDIR -p 5432" start - for i in {1..60}; do - if pg_isready -h localhost -p 5432; then - echo "PostgreSQL is ready" - break - fi - sleep 1 - if [ $i -eq 60 ]; then - echo "PostgreSQL is not ready after 60 seconds" - echo "PostgreSQL status:" - pg_ctl -D "$PGDATA" status - echo "PostgreSQL log content:" - cat $TMPDIR/logfile/postgresql.log - exit 1 - fi - done - createdb -p 5432 -h localhost --username=tealbase_admin testing - if ! psql -p 5432 -h localhost --username=tealbase_admin -d testing -v ON_ERROR_STOP=1 -Xaf ${./nix/tests/prime.sql}; then - echo "Error executing SQL file. 
PostgreSQL log content:" - cat $TMPDIR/logfile/postgresql.log - pg_ctl -D "$PGDATA" stop - exit 1 - fi - pg_prove -p 5432 -h localhost --username=tealbase_admin -d testing ${sqlTests}/*.sql - - mkdir -p $out/regression_output - pg_regress \ - --use-existing \ - --dbname=testing \ - --inputdir=${./nix/tests} \ - --outputdir=$out/regression_output \ - --host=localhost \ - --port=5432 \ - --user=tealbase_admin \ - $(ls ${./nix/tests/sql} | sed -e 's/\..*$//' | sort ) - - pg_ctl -D "$PGDATA" stop - mv $TMPDIR/logfile/postgresql.log $out - echo ${pgpkg} - ''; - in - rec { - # The list of all packages that can be built with 'nix build'. The list - # of names that can be used can be shown with 'nix flake show' - packages = flake-utils.lib.flattenTree basePackages // { - # Any extra packages we might want to include in our package - # set can go here. - inherit (pkgs) - # NOTE: comes from our cargo-pgrx-0-11-3.nix overlay - cargo-pgrx_0_11_3; - - }; - - # The list of exported 'checks' that are run with every run of 'nix - # flake check'. This is run in the CI system, as well. - checks = { - psql_15 = makeCheckHarness basePackages.psql_15.bin; - #psql_16 = makeCheckHarness basePackages.psql_16.bin; - #psql_orioledb_16 = makeCheckHarness basePackages.psql_orioledb_16.bin; - }; - - # Apps is a list of names of things that can be executed with 'nix run'; - # these are distinct from the things that can be built with 'nix build', - # so they need to be listed here too. 
- apps = - let - mkApp = attrName: binName: { - type = "app"; - program = "${basePackages."${attrName}"}/bin/${binName}"; - }; - in - { - start-server = mkApp "start-server" "start-postgres-server"; - start-client = mkApp "start-client" "start-postgres-client"; - start-replica = mkApp "start-replica" "start-postgres-replica"; - migration-test = mkApp "migrate-tool" "migrate-postgres"; - sync-exts-versions = mkApp "sync-exts-versions" "sync-exts-versions"; - }; - - # 'devShells.default' lists the set of packages that are included in the - # ambient $PATH environment when you run 'nix develop'. This is useful - # for development and puts many convenient devtools instantly within - # reach. - devShells.default = pkgs.mkShell { - packages = with pkgs; [ - coreutils - just - nix-update - #pg_prove - shellcheck - ansible - ansible-lint - (packer.overrideAttrs (oldAttrs: { - version = "1.7.8"; - })) - - basePackages.start-server - basePackages.start-client - basePackages.start-replica - basePackages.migrate-tool - basePackages.sync-exts-versions - ]; - shellHook = '' - export HISTFILE=.history - ''; - }; - } - ); + imports = [ + nix/apps.nix + nix/checks.nix + nix/config.nix + nix/devShells.nix + nix/ext + nix/fmt.nix + nix/hooks.nix + nix/nixpkgs.nix + nix/packages + nix/overlays + ]; + }); } diff --git a/docker/all-in-one/etc/pgbouncer-custom/custom-overrides.ini b/http/.gitkeep similarity index 100% rename from docker/all-in-one/etc/pgbouncer-custom/custom-overrides.ini rename to http/.gitkeep diff --git a/meta-data b/meta-data new file mode 100644 index 0000000..0551428 --- /dev/null +++ b/meta-data @@ -0,0 +1,2 @@ +instance-id: iid-local01 +local-hostname: packer-ubuntu diff --git a/migrations/Dockerfile.dbmate b/migrations/Dockerfile.dbmate new file mode 100644 index 0000000..29c80e6 --- /dev/null +++ b/migrations/Dockerfile.dbmate @@ -0,0 +1,23 @@ +FROM debian:bullseye-slim + +RUN apt-get update && apt-get install -y curl wget gnupg2 lsb-release + +RUN ARCH=$(dpkg 
--print-architecture); \ + case ${ARCH} in \ + amd64) DBMATE_ARCH="linux-amd64" ;; \ + arm64) DBMATE_ARCH="linux-arm64" ;; \ + *) echo "Unsupported architecture: ${ARCH}"; exit 1 ;; \ + esac && \ + curl -fsSL -o /usr/local/bin/dbmate \ + https://github.com/amacneil/dbmate/releases/latest/download/dbmate-${DBMATE_ARCH} && \ + chmod +x /usr/local/bin/dbmate + +RUN wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - +RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main" | tee /etc/apt/sources.list.d/pgdg.list +RUN apt-get update && apt-get install -y postgresql-client-%VERSION% + +ENV PATH="/usr/lib/postgresql/%VERSION%/bin:${PATH}" + +RUN dbmate --version + +ENTRYPOINT ["dbmate"] diff --git a/migrations/README.md b/migrations/README.md index df08efa..f1c0ddc 100644 --- a/migrations/README.md +++ b/migrations/README.md @@ -1,3 +1,36 @@ +# Usage + +from the root of the `tealbase/postgres` project, you can run the following commands: + + +```shell +Usage: nix run .#dbmate-tool -- [options] + +Options: + -v, --version [15|16|orioledb-17|all] Specify the PostgreSQL version to use (required defaults to --version all) + -p, --port PORT Specify the port number to use (default: 5435) + -h, --help Show this help message + +Description: + Runs 'dbmate up' against a locally running the version of database you specify. Or 'all' to run against all versions. + NOTE: To create a migration, you must run 'nix develop' and then 'dbmate new ' to create a new migration file. 
+ +Examples: + nix run .#dbmate-tool + nix run .#dbmate-tool -- --version 15 + nix run .#dbmate-tool -- --version 16 --port 5433 + +``` + +This can also be run from a github "flake url" for example: + +```shell +nix run github:tealbase/postgres#dbmate-tool -- --version 15 + +or + +nix run github:tealbase/postgres/mybranch#dbmate-tool -- --version 15 +``` # tealbase/migrations `tealbase/migrations` is a consolidation of SQL migrations from: @@ -9,6 +42,8 @@ aiming to provide a single source of truth for migrations on the platform that can be depended upon by those components. For more information on goals see [the RFC](https://www.notion.so/tealbase/Centralize-SQL-Migrations-cd3847ae027d4f2bba9defb2cc82f69a) + + ## How it was Created Migrations were pulled (in order) from: @@ -20,10 +55,12 @@ For compatibility with hosted projects, we include [migrate.sh](migrate.sh) that 1. Run all `db/init-scripts` with `postgres` superuser role. 2. Run all `db/migrations` with `tealbase_admin` superuser role. -3. Finalize role passwords with `/etc/postgres.schema.sql` if present. +3. Finalize role passwords with `/etc/postgresql.schema.sql` if present. Additionally, [tealbase/postgres](https://github.com/tealbase/postgres/blob/develop/ansible/playbook-docker.yml#L9) image contains several migration scripts to configure default extensions. These are run first by docker entrypoint and included in ami by ansible. + + ## Guidelines - Migrations are append only. Never edit existing migrations once they are on master. @@ -41,20 +78,35 @@ Additionally, [tealbase/postgres](https://github.com/tealbase/postgres/blob/deve ### Add a Migration +First, start a local postgres server in another terminal window: + ```shell -# Start the database server -docker-compose up +# Start the database server in another window +nix run .#start-server 15 +``` -# create a new migration -dbmate new '' +Then, in your main terminal window, run: + +```shell + +nix develop ``` +in the root of `tealbase/postgres`. 
-Then, populate the migration at `./db/migrations/xxxxxxxxx_` and make sure it execute sucessfully with +Next run: +``` shell +# Create a new migration (make sure to specify the migrations directory) +dbmate --migrations-dir="migrations/db/migrations" new '' +``` + +Then, execute the migration at `./migrations/db/xxxxxxxxx_` and make sure it runs successfully with: ```shell -dbmate up +dbmate --no-dump-schema --migrations-dir"migrations/db/migrations" up ``` +Note: Migrations are applied using the `tealbase_admin` superuser role, as specified in the "How it was Created" section above. + ### Adding a migration with docker-compose dbmate can optionally be run locally using docker: @@ -72,7 +124,32 @@ Then, populate the migration at `./db/migrations/xxxxxxxxx_` and m ```shell docker-compose run --rm dbmate up ``` +### Updating schema.sql for each major version + +After making changes to migrations, you should update the schema.sql files for each major version of PostgreSQL: + +```shell +# First, stop any running PostgreSQL servers +# Then from the root of tealbase/postgres run: +nix run .#dbmate-tool -- --version all +``` + +This will create automatically schema.sql file for each major version of PostgreSQL and OrioleDB (the files are named like `schema-`, `schema-oriole-`). Commit these changes to your repository and push to your branch. The workflow in `.github/workflows/test.yml` will re-run this command in CI, and perform a git diff to verify the idempotency of the migrations, and that the latest changes have been committed. ## Testing -Migrations are tested in CI to ensure they do not raise an exception against previously released `tealbase/postgres` docker images. The full version matrix is at [test.yml](./.github/workflows/test.yml) in the `tealbase-version` variable. +In addition to ci test mentioned above, you can test migrations locally by running the following test for each major version of postgres one at a time. 
+ +Examples: + +``` +nix build .#checks.aarch64-darwin.psql_15 -L +nix build .#checks.aarch64-darwin.psql_17 -L +nix build .#checks.aarch64-darwin.psql_orioledb-17 -L +``` + +(Note that the evaluation and nix build of the postgres packages "bundle" of each major version must succeed here, even though we run one version at a time. If you made changes to postgres or extensions, or wrappers those may rebuild here when you run this. Otherwise they will usually download the prebuilt version from the tealbase nix binary cache) + +At the end of these commands, you will see the output of both `pg_regress` tests, and migration tests + +see [Adding Tests](https://github.com/tealbase/postgres/blob/develop/nix/docs/adding-tests.md) for more information. diff --git a/migrations/db/init-scripts/00000000000000-initial-schema.sql b/migrations/db/init-scripts/00000000000000-initial-schema.sql index 6abe2c3..becb941 100644 --- a/migrations/db/init-scripts/00000000000000-initial-schema.sql +++ b/migrations/db/init-scripts/00000000000000-initial-schema.sql @@ -18,7 +18,7 @@ grant pg_read_all_data to tealbase_read_only_user; create schema if not exists extensions; create extension if not exists "uuid-ossp" with schema extensions; create extension if not exists pgcrypto with schema extensions; -create extension if not exists pgjwt with schema extensions; + -- Set up auth roles for the developer create role anon nologin noinherit; diff --git a/migrations/db/init-scripts/README.md b/migrations/db/init-scripts/README.md new file mode 100644 index 0000000..c12fe3b --- /dev/null +++ b/migrations/db/init-scripts/README.md @@ -0,0 +1,7 @@ + +The effects of these migrations are tested on: + +- [nix/tests/sql/auth.out](../../../nix/tests/expected/auth.out) +- [nix/tests/sql/storage.out](../../../nix/tests/expected/storage.out) +- [nix/tests/sql/roles.out](../../../nix/tests/expected/roles.out) +- [nix/tests/sql/evtrigs.out](../../../nix/tests/expected/evtrigs.out) diff --git 
a/migrations/db/migrations/20221207154255_create_pgsodium_and_vault.sql b/migrations/db/migrations/20221207154255_create_pgsodium_and_vault.sql index 37c2344..31ada68 100644 --- a/migrations/db/migrations/20221207154255_create_pgsodium_and_vault.sql +++ b/migrations/db/migrations/20221207154255_create_pgsodium_and_vault.sql @@ -5,33 +5,47 @@ DECLARE pgsodium_exists boolean; vault_exists boolean; BEGIN - pgsodium_exists = ( - select count(*) = 1 - from pg_available_extensions - where name = 'pgsodium' - ); - - vault_exists = ( + IF EXISTS (SELECT FROM pg_available_extensions WHERE name = 'tealbase_vault' AND default_version != '0.2.8') THEN + CREATE EXTENSION IF NOT EXISTS tealbase_vault; + + -- for some reason extension custom scripts aren't run during AMI build, so + -- we manually run it here + grant usage on schema vault to postgres with grant option; + grant select, delete, truncate, references on vault.secrets, vault.decrypted_secrets to postgres with grant option; + grant execute on function vault.create_secret, vault.update_secret, vault._crypto_aead_det_decrypt to postgres with grant option; + grant usage on schema vault to service_role; + grant select, delete on vault.secrets, vault.decrypted_secrets to service_role; + grant execute on function vault.create_secret, vault.update_secret, vault._crypto_aead_det_decrypt to service_role; + ELSE + pgsodium_exists = ( select count(*) = 1 from pg_available_extensions - where name = 'tealbase_vault' - ); - - IF pgsodium_exists - THEN - create extension if not exists pgsodium; - - grant pgsodium_keyiduser to postgres with admin option; - grant pgsodium_keyholder to postgres with admin option; - grant pgsodium_keymaker to postgres with admin option; - - grant execute on function pgsodium.crypto_aead_det_decrypt(bytea, bytea, uuid, bytea) to service_role; - grant execute on function pgsodium.crypto_aead_det_encrypt(bytea, bytea, uuid, bytea) to service_role; - grant execute on function pgsodium.crypto_aead_det_keygen 
to service_role; - - IF vault_exists + where name = 'pgsodium' + and default_version in ('3.1.6', '3.1.7', '3.1.8', '3.1.9') + ); + + vault_exists = ( + select count(*) = 1 + from pg_available_extensions + where name = 'tealbase_vault' + ); + + IF pgsodium_exists THEN - create extension if not exists tealbase_vault; + create extension if not exists pgsodium; + + grant pgsodium_keyiduser to postgres with admin option; + grant pgsodium_keyholder to postgres with admin option; + grant pgsodium_keymaker to postgres with admin option; + + grant execute on function pgsodium.crypto_aead_det_decrypt(bytea, bytea, uuid, bytea) to service_role; + grant execute on function pgsodium.crypto_aead_det_encrypt(bytea, bytea, uuid, bytea) to service_role; + grant execute on function pgsodium.crypto_aead_det_keygen to service_role; + + IF vault_exists + THEN + create extension if not exists tealbase_vault; + END IF; END IF; END IF; END $$; diff --git a/migrations/db/migrations/20230529180330_alter_api_roles_for_inherit.sql b/migrations/db/migrations/20230529180330_alter_api_roles_for_inherit.sql index 4df82e3..d8291bb 100644 --- a/migrations/db/migrations/20230529180330_alter_api_roles_for_inherit.sql +++ b/migrations/db/migrations/20230529180330_alter_api_roles_for_inherit.sql @@ -4,7 +4,12 @@ ALTER ROLE authenticated inherit; ALTER ROLE anon inherit; ALTER ROLE service_role inherit; -GRANT pgsodium_keyholder to service_role; +DO $$ +BEGIN + IF EXISTS (SELECT FROM pg_roles WHERE rolname = 'pgsodium_keyholder') THEN + GRANT pgsodium_keyholder to service_role; + END IF; +END $$; -- migrate:down diff --git a/migrations/db/migrations/20231020085357_revoke_writes_on_cron_job_from_postgres.sql b/migrations/db/migrations/20231020085357_revoke_writes_on_cron_job_from_postgres.sql index 7db9a99..482ea29 100644 --- a/migrations/db/migrations/20231020085357_revoke_writes_on_cron_job_from_postgres.sql +++ b/migrations/db/migrations/20231020085357_revoke_writes_on_cron_job_from_postgres.sql @@ 
-40,6 +40,7 @@ END; $$; drop event trigger if exists issue_pg_cron_access; +alter function extensions.grant_pg_cron_access owner to tealbase_admin; CREATE EVENT TRIGGER issue_pg_cron_access ON ddl_command_end WHEN TAG IN ('CREATE EXTENSION') EXECUTE FUNCTION extensions.grant_pg_cron_access(); diff --git a/migrations/db/migrations/20241031003909_create_orioledb.sql b/migrations/db/migrations/20241031003909_create_orioledb.sql new file mode 100644 index 0000000..694fbb9 --- /dev/null +++ b/migrations/db/migrations/20241031003909_create_orioledb.sql @@ -0,0 +1,11 @@ +-- migrate:up +do $$ +begin + if exists (select 1 from pg_available_extensions where name = 'orioledb') then + if not exists (select 1 from pg_extension where extname = 'orioledb') then + create extension if not exists orioledb; + end if; + end if; +end $$; + +-- migrate:down diff --git a/migrations/db/migrations/20241215003910_backfill_pgmq_metadata.sql b/migrations/db/migrations/20241215003910_backfill_pgmq_metadata.sql new file mode 100644 index 0000000..5785272 --- /dev/null +++ b/migrations/db/migrations/20241215003910_backfill_pgmq_metadata.sql @@ -0,0 +1,79 @@ +-- migrate:up +do $$ +begin + -- Check if the pgmq.meta table exists + if exists ( + select + 1 + from + pg_catalog.pg_class c + join pg_catalog.pg_namespace n + on c.relnamespace = n.oid + where + n.nspname = 'pgmq' + and c.relname = 'meta' + and c.relkind = 'r' -- regular table + -- Make sure only expected columns exist and are correctly named + and ( + select array_agg(attname::text order by attname) + from pg_catalog.pg_attribute a + where + a.attnum > 0 + and a.attrelid = c.oid + ) = array['created_at', 'is_partitioned', 'is_unlogged', 'queue_name']::text[] + ) then + -- Insert data into pgmq.meta for all tables matching the naming pattern 'pgmq.q_' + insert into pgmq.meta (queue_name, is_partitioned, is_unlogged, created_at) + select + substring(c.relname from 3) as queue_name, + false as is_partitioned, + case when c.relpersistence = 
'u' then true else false end as is_unlogged, + now() as created_at + from + pg_catalog.pg_class c + join pg_catalog.pg_namespace n + on c.relnamespace = n.oid + where + n.nspname = 'pgmq' + and c.relname like 'q_%' + and c.relkind in ('r', 'p', 'u') + on conflict (queue_name) do nothing; + end if; +end $$; + +-- For logical backups we detach the queue and archive tables from the pgmq extension +-- prior to pausing. Once detached, pgmq.drop_queue breaks. This re-attaches them +-- when a project is unpaused and allows pgmq.drop_queue to work normally. +do $$ +declare + ext_exists boolean; + tbl record; +begin + -- check if pgmq extension is installed + select exists(select 1 from pg_extension where extname = 'pgmq') into ext_exists; + + if ext_exists then + for tbl in + select c.relname as table_name + from pg_class c + join pg_namespace n on c.relnamespace = n.oid + where n.nspname = 'pgmq' + and c.relkind in ('r', 'u') -- include ordinary and unlogged tables + and (c.relname like 'q\_%' or c.relname like 'a\_%') + and c.oid not in ( + select d.objid + from pg_depend d + join pg_extension e on d.refobjid = e.oid + where e.extname = 'pgmq' + and d.classid = 'pg_class'::regclass + and d.deptype = 'e' + ) + loop + execute format('alter extension pgmq add table pgmq.%I', tbl.table_name); + end loop; + end if; +end; +$$; + + +-- migrate:down diff --git a/migrations/db/migrations/20250205060043_disable_log_statement_on_internal_roles.sql b/migrations/db/migrations/20250205060043_disable_log_statement_on_internal_roles.sql new file mode 100644 index 0000000..2d7fdf6 --- /dev/null +++ b/migrations/db/migrations/20250205060043_disable_log_statement_on_internal_roles.sql @@ -0,0 +1,6 @@ +-- migrate:up +alter role tealbase_admin set log_statement = none; +alter role tealbase_auth_admin set log_statement = none; +alter role tealbase_storage_admin set log_statement = none; + +-- migrate:down diff --git 
a/migrations/db/migrations/20250205144616_move_orioledb_to_extensions_schema.sql b/migrations/db/migrations/20250205144616_move_orioledb_to_extensions_schema.sql new file mode 100644 index 0000000..259a6b0 --- /dev/null +++ b/migrations/db/migrations/20250205144616_move_orioledb_to_extensions_schema.sql @@ -0,0 +1,26 @@ +-- migrate:up +do $$ +declare + ext_schema text; + extensions_schema_exists boolean; +begin + -- check if the "extensions" schema exists + select exists ( + select 1 from pg_namespace where nspname = 'extensions' + ) into extensions_schema_exists; + + if extensions_schema_exists then + -- check if the "orioledb" extension is in the "public" schema + select nspname into ext_schema + from pg_extension e + join pg_namespace n on e.extnamespace = n.oid + where extname = 'orioledb'; + + if ext_schema = 'public' then + execute 'alter extension orioledb set schema extensions'; + end if; + end if; +end $$; + +-- migrate:down + diff --git a/migrations/db/migrations/20250218031949_pgsodium_mask_role.sql b/migrations/db/migrations/20250218031949_pgsodium_mask_role.sql new file mode 100644 index 0000000..f44fa98 --- /dev/null +++ b/migrations/db/migrations/20250218031949_pgsodium_mask_role.sql @@ -0,0 +1,31 @@ +-- migrate:up + +DO $$ +BEGIN + IF EXISTS (SELECT FROM pg_extension WHERE extname = 'pgsodium') THEN + CREATE OR REPLACE FUNCTION pgsodium.mask_role(masked_role regrole, source_name text, view_name text) + RETURNS void + LANGUAGE plpgsql + SECURITY DEFINER + SET search_path TO '' + AS $function$ + BEGIN + EXECUTE format( + 'GRANT SELECT ON pgsodium.key TO %s', + masked_role); + + EXECUTE format( + 'GRANT pgsodium_keyiduser, pgsodium_keyholder TO %s', + masked_role); + + EXECUTE format( + 'GRANT ALL ON %I TO %s', + view_name, + masked_role); + RETURN; + END + $function$; + END IF; +END $$; + +-- migrate:down diff --git a/migrations/db/migrations/20250220051611_pg_net_perms_fix.sql b/migrations/db/migrations/20250220051611_pg_net_perms_fix.sql new file 
mode 100644 index 0000000..f914765 --- /dev/null +++ b/migrations/db/migrations/20250220051611_pg_net_perms_fix.sql @@ -0,0 +1,64 @@ +-- migrate:up +CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access() +RETURNS event_trigger +LANGUAGE plpgsql +AS $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_net' + ) + THEN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'tealbase_functions_admin' + ) + THEN + CREATE USER tealbase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + + GRANT USAGE ON SCHEMA net TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + + IF EXISTS ( + SELECT FROM pg_extension + WHERE extname = 'pg_net' + -- all versions in use on existing projects as of 2025-02-20 + -- version 0.12.0 onwards don't need these applied + AND extversion IN ('0.2', '0.6', '0.7', '0.7.1', '0.8', '0.10.0', '0.11.0') + ) THEN + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body 
jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + END IF; + END IF; +END; +$$; + +DO $$ +BEGIN + IF EXISTS (SELECT FROM pg_extension WHERE extname = 'pg_net') + THEN + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY INVOKER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY INVOKER; + + REVOKE EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM tealbase_functions_admin, postgres, anon, authenticated, service_role; + REVOKE EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM tealbase_functions_admin, postgres, anon, authenticated, service_role; + + GRANT ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO PUBLIC; + GRANT ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO PUBLIC; + END IF; +END $$; + +-- migrate:down diff --git a/migrations/db/migrations/20250312095419_pgbouncer_ownership.sql b/migrations/db/migrations/20250312095419_pgbouncer_ownership.sql new file mode 100644 index 0000000..b4c5b8a --- /dev/null +++ b/migrations/db/migrations/20250312095419_pgbouncer_ownership.sql @@ -0,0 +1,5 @@ +-- migrate:up +alter function pgbouncer.get_auth owner to tealbase_admin; +grant execute on function pgbouncer.get_auth(p_usename text) to postgres; + +-- migrate:down diff --git a/migrations/db/migrations/20250402065937_alter_internal_event_triggers_owner_to_tealbase_admin.sql b/migrations/db/migrations/20250402065937_alter_internal_event_triggers_owner_to_tealbase_admin.sql new file mode 100644 index 0000000..3ca18e3 --- /dev/null +++ 
b/migrations/db/migrations/20250402065937_alter_internal_event_triggers_owner_to_tealbase_admin.sql @@ -0,0 +1,10 @@ +-- migrate:up +drop event trigger if exists issue_pg_net_access; + +alter function extensions.grant_pg_net_access owner to tealbase_admin; + +CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_net_access(); + +-- migrate:down diff --git a/migrations/db/migrations/20250402093753_grant_subscription_to_postgres_16_and_above.sql b/migrations/db/migrations/20250402093753_grant_subscription_to_postgres_16_and_above.sql new file mode 100644 index 0000000..9e9d881 --- /dev/null +++ b/migrations/db/migrations/20250402093753_grant_subscription_to_postgres_16_and_above.sql @@ -0,0 +1,13 @@ +-- migrate:up +DO $$ +DECLARE + major_version INT; +BEGIN + SELECT current_setting('server_version_num')::INT / 10000 INTO major_version; + + IF major_version >= 16 THEN + GRANT pg_create_subscription TO postgres; + END IF; +END $$; + +-- migrate:down diff --git a/migrations/db/migrations/20250417190610_update_pgbouncer_get_auth.sql b/migrations/db/migrations/20250417190610_update_pgbouncer_get_auth.sql new file mode 100644 index 0000000..5e6e6a5 --- /dev/null +++ b/migrations/db/migrations/20250417190610_update_pgbouncer_get_auth.sql @@ -0,0 +1,24 @@ +-- migrate:up + +create or replace function pgbouncer.get_auth(p_usename text) returns table (username text, password text) + language plpgsql security definer + as $$ +begin + raise debug 'PgBouncer auth request: %', p_usename; + + return query + select + rolname::text, + case when rolvaliduntil < now() + then null + else rolpassword::text + end + from pg_authid + where rolname=$1 and rolcanlogin; +end; +$$; + +-- from migrations/db/migrations/20250312095419_pgbouncer_ownership.sql +grant execute on function pgbouncer.get_auth(p_usename text) to postgres; + +-- migrate:down diff --git 
a/migrations/db/migrations/20250421084701_revoke_admin_roles_from_postgres.sql b/migrations/db/migrations/20250421084701_revoke_admin_roles_from_postgres.sql new file mode 100644 index 0000000..dbd2216 --- /dev/null +++ b/migrations/db/migrations/20250421084701_revoke_admin_roles_from_postgres.sql @@ -0,0 +1,10 @@ +-- migrate:up +revoke tealbase_storage_admin from postgres; +revoke create on schema storage from postgres; +revoke all on storage.migrations from anon, authenticated, service_role, postgres; + +revoke tealbase_auth_admin from postgres; +revoke create on schema auth from postgres; +revoke all on auth.schema_migrations from dashboard_user, postgres; + +-- migrate:down diff --git a/migrations/db/migrations/20250605172253_grant_with_admin_to_postgres_16_and_above.sql b/migrations/db/migrations/20250605172253_grant_with_admin_to_postgres_16_and_above.sql new file mode 100644 index 0000000..5f2cd57 --- /dev/null +++ b/migrations/db/migrations/20250605172253_grant_with_admin_to_postgres_16_and_above.sql @@ -0,0 +1,13 @@ +-- migrate:up +DO $$ +DECLARE + major_version INT; +BEGIN + SELECT current_setting('server_version_num')::INT / 10000 INTO major_version; + + IF major_version >= 16 THEN + GRANT anon, authenticated, service_role, authenticator, pg_monitor, pg_read_all_data, pg_signal_backend TO postgres WITH ADMIN OPTION; + END IF; +END $$; + +-- migrate:down diff --git a/migrations/db/migrations/20250623125453_tmp_grant_storage_tables_to_postgres_with_grant_option.sql b/migrations/db/migrations/20250623125453_tmp_grant_storage_tables_to_postgres_with_grant_option.sql new file mode 100644 index 0000000..465aee2 --- /dev/null +++ b/migrations/db/migrations/20250623125453_tmp_grant_storage_tables_to_postgres_with_grant_option.sql @@ -0,0 +1,6 @@ +-- migrate:up +-- TODO: remove this migration once STORAGE-211 is completed +-- DRI: bobbie +grant all on storage.buckets, storage.objects to postgres with grant option; + +-- migrate:down diff --git 
a/migrations/db/migrations/20250709135250_grant_storage_schema_to_postgres_with_grant_option.sql b/migrations/db/migrations/20250709135250_grant_storage_schema_to_postgres_with_grant_option.sql new file mode 100644 index 0000000..604ff99 --- /dev/null +++ b/migrations/db/migrations/20250709135250_grant_storage_schema_to_postgres_with_grant_option.sql @@ -0,0 +1,4 @@ +-- migrate:up +grant usage on schema storage to postgres with grant option; + +-- migrate:down diff --git a/migrations/db/migrations/20250710151649_tealbase_read_only_user_default_transaction_read_only.sql b/migrations/db/migrations/20250710151649_tealbase_read_only_user_default_transaction_read_only.sql new file mode 100644 index 0000000..18a387e --- /dev/null +++ b/migrations/db/migrations/20250710151649_tealbase_read_only_user_default_transaction_read_only.sql @@ -0,0 +1,4 @@ +-- migrate:up +alter role tealbase_read_only_user set default_transaction_read_only = on; + +-- migrate:down diff --git a/migrations/docker-compose.yaml b/migrations/docker-compose.yaml index 2005fd6..fc80b8c 100644 --- a/migrations/docker-compose.yaml +++ b/migrations/docker-compose.yaml @@ -33,7 +33,9 @@ services: command: pg_prove /tests/test.sql dbmate: - image: amacneil/dbmate:1.16.2 + build: + context: . 
+ dockerfile: Dockerfile.dbmate depends_on: db: condition: service_healthy diff --git a/migrations/schema-15.sql b/migrations/schema-15.sql new file mode 100644 index 0000000..1aeadc2 --- /dev/null +++ b/migrations/schema-15.sql @@ -0,0 +1,999 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 15.8 +-- Dumped by pg_dump version 15.8 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET xmloption = content; +SET client_min_messages = warning; +SET row_security = off; + +-- +-- Name: auth; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA auth; + + +-- +-- Name: extensions; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA extensions; + + +-- +-- Name: graphql; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA graphql; + + +-- +-- Name: graphql_public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA graphql_public; + + +-- +-- Name: pgbouncer; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA pgbouncer; + + +-- +-- Name: realtime; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA realtime; + + +-- +-- Name: storage; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA storage; + + +-- +-- Name: vault; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA vault; + + +-- +-- Name: pg_graphql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pg_graphql WITH SCHEMA graphql; + + +-- +-- Name: EXTENSION pg_graphql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pg_graphql IS 'pg_graphql: GraphQL support'; + + +-- +-- Name: pg_stat_statements; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pg_stat_statements WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pg_stat_statements; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON 
EXTENSION pg_stat_statements IS 'track planning and execution statistics of all SQL statements executed'; + + +-- +-- Name: pgcrypto; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pgcrypto WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pgcrypto; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pgcrypto IS 'cryptographic functions'; + + +-- +-- Name: tealbase_vault; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS tealbase_vault WITH SCHEMA vault; + + +-- +-- Name: EXTENSION tealbase_vault; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION tealbase_vault IS 'tealbase Vault Extension'; + + +-- +-- Name: uuid-ossp; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION "uuid-ossp"; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION "uuid-ossp" IS 'generate universally unique identifiers (UUIDs)'; + + +-- +-- Name: email(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.email() RETURNS text + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.email', true), '')::text; +$$; + + +-- +-- Name: role(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.role() RETURNS text + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.role', true), '')::text; +$$; + + +-- +-- Name: uid(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.uid() RETURNS uuid + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.sub', true), '')::uuid; +$$; + + +-- +-- Name: grant_pg_cron_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_cron_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + IF EXISTS ( + SELECT + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname 
= 'pg_cron' + ) + THEN + grant usage on schema cron to postgres with grant option; + + alter default privileges in schema cron grant all on tables to postgres with grant option; + alter default privileges in schema cron grant all on functions to postgres with grant option; + alter default privileges in schema cron grant all on sequences to postgres with grant option; + + alter default privileges for user tealbase_admin in schema cron grant all + on sequences to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on tables to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on functions to postgres with grant option; + + grant all privileges on all tables in schema cron to postgres with grant option; + revoke all on table cron.job from postgres; + grant select on table cron.job to postgres with grant option; + END IF; +END; +$$; + + +-- +-- Name: FUNCTION grant_pg_cron_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_cron_access() IS 'Grants access to pg_cron'; + + +-- +-- Name: grant_pg_graphql_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_graphql_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $_$ +DECLARE + func_is_graphql_resolve bool; +BEGIN + func_is_graphql_resolve = ( + SELECT n.proname = 'resolve' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_proc AS n + ON ev.objid = n.oid + ); + + IF func_is_graphql_resolve + THEN + -- Update public wrapper to pass all arguments through to the pg_graphql resolve func + DROP FUNCTION IF EXISTS graphql_public.graphql; + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language sql + as $$ + select graphql.resolve( + query := query, 
+ variables := coalesce(variables, '{}'), + "operationName" := "operationName", + extensions := extensions + ); + $$; + + -- This hook executes when `graphql.resolve` is created. That is not necessarily the last + -- function in the extension so we need to grant permissions on existing entities AND + -- update default permissions to any others that are created after `graphql.resolve` + grant usage on schema graphql to postgres, anon, authenticated, service_role; + grant select on all tables in schema graphql to postgres, anon, authenticated, service_role; + grant execute on all functions in schema graphql to postgres, anon, authenticated, service_role; + grant all on all sequences in schema graphql to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role; + + -- Allow postgres role to allow granting usage on graphql and graphql_public schemas to custom roles + grant usage on schema graphql_public to postgres with grant option; + grant usage on schema graphql to postgres with grant option; + END IF; + +END; +$_$; + + +-- +-- Name: FUNCTION grant_pg_graphql_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_graphql_access() IS 'Grants access to pg_graphql'; + + +-- +-- Name: grant_pg_net_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_net_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_net' + ) + THEN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 
'tealbase_functions_admin' + ) + THEN + CREATE USER tealbase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + + GRANT USAGE ON SCHEMA net TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + + IF EXISTS ( + SELECT FROM pg_extension + WHERE extname = 'pg_net' + -- all versions in use on existing projects as of 2025-02-20 + -- version 0.12.0 onwards don't need these applied + AND extversion IN ('0.2', '0.6', '0.7', '0.7.1', '0.8', '0.10.0', '0.11.0') + ) THEN + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + END IF; + END IF; +END; +$$; + + +-- +-- Name: FUNCTION grant_pg_net_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_net_access() IS 'Grants access to pg_net'; + + +-- +-- Name: pgrst_ddl_watch(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.pgrst_ddl_watch() 
RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +DECLARE + cmd record; +BEGIN + FOR cmd IN SELECT * FROM pg_event_trigger_ddl_commands() + LOOP + IF cmd.command_tag IN ( + 'CREATE SCHEMA', 'ALTER SCHEMA' + , 'CREATE TABLE', 'CREATE TABLE AS', 'SELECT INTO', 'ALTER TABLE' + , 'CREATE FOREIGN TABLE', 'ALTER FOREIGN TABLE' + , 'CREATE VIEW', 'ALTER VIEW' + , 'CREATE MATERIALIZED VIEW', 'ALTER MATERIALIZED VIEW' + , 'CREATE FUNCTION', 'ALTER FUNCTION' + , 'CREATE TRIGGER' + , 'CREATE TYPE', 'ALTER TYPE' + , 'CREATE RULE' + , 'COMMENT' + ) + -- don't notify in case of CREATE TEMP table or other objects created on pg_temp + AND cmd.schema_name is distinct from 'pg_temp' + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$; + + +-- +-- Name: pgrst_drop_watch(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.pgrst_drop_watch() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +DECLARE + obj record; +BEGIN + FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() + LOOP + IF obj.object_type IN ( + 'schema' + , 'table' + , 'foreign table' + , 'view' + , 'materialized view' + , 'function' + , 'trigger' + , 'type' + , 'rule' + ) + AND obj.is_temporary IS false -- no pg_temp objects + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$; + + +-- +-- Name: set_graphql_placeholder(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.set_graphql_placeholder() RETURNS event_trigger + LANGUAGE plpgsql + AS $_$ + DECLARE + graphql_is_dropped bool; + BEGIN + graphql_is_dropped = ( + SELECT ev.schema_name = 'graphql_public' + FROM pg_event_trigger_dropped_objects() AS ev + WHERE ev.schema_name = 'graphql_public' + ); + + IF graphql_is_dropped + THEN + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language plpgsql + as $$ + DECLARE + 
server_version float; + BEGIN + server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float); + + IF server_version >= 14 THEN + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql extension is not enabled.' + ) + ) + ); + ELSE + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql is only available on projects running Postgres 14 onwards.' + ) + ) + ); + END IF; + END; + $$; + END IF; + + END; +$_$; + + +-- +-- Name: FUNCTION set_graphql_placeholder(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.set_graphql_placeholder() IS 'Reintroduces placeholder function for graphql_public.graphql'; + + +-- +-- Name: get_auth(text); Type: FUNCTION; Schema: pgbouncer; Owner: - +-- + +CREATE FUNCTION pgbouncer.get_auth(p_usename text) RETURNS TABLE(username text, password text) + LANGUAGE plpgsql SECURITY DEFINER + AS $_$ +begin + raise debug 'PgBouncer auth request: %', p_usename; + + return query + select + rolname::text, + case when rolvaliduntil < now() + then null + else rolpassword::text + end + from pg_authid + where rolname=$1 and rolcanlogin; +end; +$_$; + + +-- +-- Name: extension(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.extension(name text) RETURNS text + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +_filename text; +BEGIN + select string_to_array(name, '/') into _parts; + select _parts[array_length(_parts,1)] into _filename; + -- @todo return the last part instead of 2 + return split_part(_filename, '.', 2); +END +$$; + + +-- +-- Name: filename(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.filename(name text) RETURNS text + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[array_length(_parts,1)]; +END +$$; + + +-- +-- Name: foldername(text); Type: FUNCTION; Schema: 
storage; Owner: - +-- + +CREATE FUNCTION storage.foldername(name text) RETURNS text[] + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[1:array_length(_parts,1)-1]; +END +$$; + + +-- +-- Name: search(text, text, integer, integer, integer); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.search(prefix text, bucketname text, limits integer DEFAULT 100, levels integer DEFAULT 1, offsets integer DEFAULT 0) RETURNS TABLE(name text, id uuid, updated_at timestamp with time zone, created_at timestamp with time zone, last_accessed_at timestamp with time zone, metadata jsonb) + LANGUAGE plpgsql + AS $$ +DECLARE +_bucketId text; +BEGIN + -- will be replaced by migrations when server starts + -- saving space for cloud-init +END +$$; + + +SET default_tablespace = ''; + +SET default_table_access_method = heap; + +-- +-- Name: audit_log_entries; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.audit_log_entries ( + instance_id uuid, + id uuid NOT NULL, + payload json, + created_at timestamp with time zone +); + + +-- +-- Name: TABLE audit_log_entries; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.audit_log_entries IS 'Auth: Audit trail for user actions.'; + + +-- +-- Name: instances; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.instances ( + id uuid NOT NULL, + uuid uuid, + raw_base_config text, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE instances; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.instances IS 'Auth: Manages users across multiple sites.'; + + +-- +-- Name: refresh_tokens; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.refresh_tokens ( + instance_id uuid, + id bigint NOT NULL, + token character varying(255), + user_id character varying(255), + revoked boolean, + created_at timestamp with time zone, + updated_at timestamp with 
time zone +); + + +-- +-- Name: TABLE refresh_tokens; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.refresh_tokens IS 'Auth: Store of tokens used to refresh JWT tokens once they expire.'; + + +-- +-- Name: refresh_tokens_id_seq; Type: SEQUENCE; Schema: auth; Owner: - +-- + +CREATE SEQUENCE auth.refresh_tokens_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: refresh_tokens_id_seq; Type: SEQUENCE OWNED BY; Schema: auth; Owner: - +-- + +ALTER SEQUENCE auth.refresh_tokens_id_seq OWNED BY auth.refresh_tokens.id; + + +-- +-- Name: schema_migrations; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.schema_migrations ( + version character varying(255) NOT NULL +); + + +-- +-- Name: TABLE schema_migrations; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.schema_migrations IS 'Auth: Manages updates to the auth system.'; + + +-- +-- Name: users; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.users ( + instance_id uuid, + id uuid NOT NULL, + aud character varying(255), + role character varying(255), + email character varying(255), + encrypted_password character varying(255), + confirmed_at timestamp with time zone, + invited_at timestamp with time zone, + confirmation_token character varying(255), + confirmation_sent_at timestamp with time zone, + recovery_token character varying(255), + recovery_sent_at timestamp with time zone, + email_change_token character varying(255), + email_change character varying(255), + email_change_sent_at timestamp with time zone, + last_sign_in_at timestamp with time zone, + raw_app_meta_data jsonb, + raw_user_meta_data jsonb, + is_super_admin boolean, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE users; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.users IS 'Auth: Stores user login data within a secure schema.'; + + +-- +-- Name: schema_migrations; Type: 
TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.schema_migrations ( + version character varying(128) NOT NULL +); + + +-- +-- Name: buckets; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.buckets ( + id text NOT NULL, + name text NOT NULL, + owner uuid, + created_at timestamp with time zone DEFAULT now(), + updated_at timestamp with time zone DEFAULT now() +); + + +-- +-- Name: migrations; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.migrations ( + id integer NOT NULL, + name character varying(100) NOT NULL, + hash character varying(40) NOT NULL, + executed_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP +); + + +-- +-- Name: objects; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.objects ( + id uuid DEFAULT extensions.uuid_generate_v4() NOT NULL, + bucket_id text, + name text, + owner uuid, + created_at timestamp with time zone DEFAULT now(), + updated_at timestamp with time zone DEFAULT now(), + last_accessed_at timestamp with time zone DEFAULT now(), + metadata jsonb +); + + +-- +-- Name: refresh_tokens id; Type: DEFAULT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.refresh_tokens ALTER COLUMN id SET DEFAULT nextval('auth.refresh_tokens_id_seq'::regclass); + + +-- +-- Name: audit_log_entries audit_log_entries_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.audit_log_entries + ADD CONSTRAINT audit_log_entries_pkey PRIMARY KEY (id); + + +-- +-- Name: instances instances_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.instances + ADD CONSTRAINT instances_pkey PRIMARY KEY (id); + + +-- +-- Name: refresh_tokens refresh_tokens_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.refresh_tokens + ADD CONSTRAINT refresh_tokens_pkey PRIMARY KEY (id); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.schema_migrations + ADD 
CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: users users_email_key; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.users + ADD CONSTRAINT users_email_key UNIQUE (email); + + +-- +-- Name: users users_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.users + ADD CONSTRAINT users_pkey PRIMARY KEY (id); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: buckets buckets_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.buckets + ADD CONSTRAINT buckets_pkey PRIMARY KEY (id); + + +-- +-- Name: migrations migrations_name_key; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.migrations + ADD CONSTRAINT migrations_name_key UNIQUE (name); + + +-- +-- Name: migrations migrations_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.migrations + ADD CONSTRAINT migrations_pkey PRIMARY KEY (id); + + +-- +-- Name: objects objects_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT objects_pkey PRIMARY KEY (id); + + +-- +-- Name: audit_logs_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX audit_logs_instance_id_idx ON auth.audit_log_entries USING btree (instance_id); + + +-- +-- Name: refresh_tokens_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_instance_id_idx ON auth.refresh_tokens USING btree (instance_id); + + +-- +-- Name: refresh_tokens_instance_id_user_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_instance_id_user_id_idx ON auth.refresh_tokens USING btree (instance_id, user_id); + + +-- +-- Name: refresh_tokens_token_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX 
refresh_tokens_token_idx ON auth.refresh_tokens USING btree (token); + + +-- +-- Name: users_instance_id_email_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX users_instance_id_email_idx ON auth.users USING btree (instance_id, email); + + +-- +-- Name: users_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX users_instance_id_idx ON auth.users USING btree (instance_id); + + +-- +-- Name: bname; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE UNIQUE INDEX bname ON storage.buckets USING btree (name); + + +-- +-- Name: bucketid_objname; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE UNIQUE INDEX bucketid_objname ON storage.objects USING btree (bucket_id, name); + + +-- +-- Name: name_prefix_search; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE INDEX name_prefix_search ON storage.objects USING btree (name text_pattern_ops); + + +-- +-- Name: buckets buckets_owner_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.buckets + ADD CONSTRAINT buckets_owner_fkey FOREIGN KEY (owner) REFERENCES auth.users(id); + + +-- +-- Name: objects objects_bucketId_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT "objects_bucketId_fkey" FOREIGN KEY (bucket_id) REFERENCES storage.buckets(id); + + +-- +-- Name: objects objects_owner_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT objects_owner_fkey FOREIGN KEY (owner) REFERENCES auth.users(id); + + +-- +-- Name: objects; Type: ROW SECURITY; Schema: storage; Owner: - +-- + +ALTER TABLE storage.objects ENABLE ROW LEVEL SECURITY; + +-- +-- Name: tealbase_realtime; Type: PUBLICATION; Schema: -; Owner: - +-- + +CREATE PUBLICATION tealbase_realtime WITH (publish = 'insert, update, delete, truncate'); + + +-- +-- Name: issue_graphql_placeholder; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_graphql_placeholder ON 
sql_drop + WHEN TAG IN ('DROP EXTENSION') + EXECUTE FUNCTION extensions.set_graphql_placeholder(); + + +-- +-- Name: issue_pg_cron_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_cron_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_cron_access(); + + +-- +-- Name: issue_pg_graphql_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_graphql_access ON ddl_command_end + WHEN TAG IN ('CREATE FUNCTION') + EXECUTE FUNCTION extensions.grant_pg_graphql_access(); + + +-- +-- Name: issue_pg_net_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_net_access(); + + +-- +-- Name: pgrst_ddl_watch; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER pgrst_ddl_watch ON ddl_command_end + EXECUTE FUNCTION extensions.pgrst_ddl_watch(); + + +-- +-- Name: pgrst_drop_watch; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER pgrst_drop_watch ON sql_drop + EXECUTE FUNCTION extensions.pgrst_drop_watch(); + + +-- +-- PostgreSQL database dump complete +-- + diff --git a/migrations/schema-17.sql b/migrations/schema-17.sql new file mode 100644 index 0000000..7fca1d7 --- /dev/null +++ b/migrations/schema-17.sql @@ -0,0 +1,1000 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 17.4 +-- Dumped by pg_dump version 17.4 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET transaction_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET xmloption = content; +SET client_min_messages = warning; +SET row_security = off; + +-- +-- Name: auth; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA auth; + + +-- 
+-- Name: extensions; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA extensions; + + +-- +-- Name: graphql; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA graphql; + + +-- +-- Name: graphql_public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA graphql_public; + + +-- +-- Name: pgbouncer; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA pgbouncer; + + +-- +-- Name: realtime; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA realtime; + + +-- +-- Name: storage; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA storage; + + +-- +-- Name: vault; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA vault; + + +-- +-- Name: pg_graphql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pg_graphql WITH SCHEMA graphql; + + +-- +-- Name: EXTENSION pg_graphql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pg_graphql IS 'pg_graphql: GraphQL support'; + + +-- +-- Name: pg_stat_statements; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pg_stat_statements WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pg_stat_statements; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pg_stat_statements IS 'track planning and execution statistics of all SQL statements executed'; + + +-- +-- Name: pgcrypto; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pgcrypto WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pgcrypto; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pgcrypto IS 'cryptographic functions'; + + +-- +-- Name: tealbase_vault; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS tealbase_vault WITH SCHEMA vault; + + +-- +-- Name: EXTENSION tealbase_vault; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION tealbase_vault IS 'tealbase Vault Extension'; + + +-- +-- Name: uuid-ossp; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA 
extensions; + + +-- +-- Name: EXTENSION "uuid-ossp"; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION "uuid-ossp" IS 'generate universally unique identifiers (UUIDs)'; + + +-- +-- Name: email(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.email() RETURNS text + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.email', true), '')::text; +$$; + + +-- +-- Name: role(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.role() RETURNS text + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.role', true), '')::text; +$$; + + +-- +-- Name: uid(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.uid() RETURNS uuid + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.sub', true), '')::uuid; +$$; + + +-- +-- Name: grant_pg_cron_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_cron_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + IF EXISTS ( + SELECT + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_cron' + ) + THEN + grant usage on schema cron to postgres with grant option; + + alter default privileges in schema cron grant all on tables to postgres with grant option; + alter default privileges in schema cron grant all on functions to postgres with grant option; + alter default privileges in schema cron grant all on sequences to postgres with grant option; + + alter default privileges for user tealbase_admin in schema cron grant all + on sequences to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on tables to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on functions to postgres with grant option; + + grant all privileges on all tables in schema cron to postgres with 
grant option; + revoke all on table cron.job from postgres; + grant select on table cron.job to postgres with grant option; + END IF; +END; +$$; + + +-- +-- Name: FUNCTION grant_pg_cron_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_cron_access() IS 'Grants access to pg_cron'; + + +-- +-- Name: grant_pg_graphql_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_graphql_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $_$ +DECLARE + func_is_graphql_resolve bool; +BEGIN + func_is_graphql_resolve = ( + SELECT n.proname = 'resolve' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_proc AS n + ON ev.objid = n.oid + ); + + IF func_is_graphql_resolve + THEN + -- Update public wrapper to pass all arguments through to the pg_graphql resolve func + DROP FUNCTION IF EXISTS graphql_public.graphql; + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language sql + as $$ + select graphql.resolve( + query := query, + variables := coalesce(variables, '{}'), + "operationName" := "operationName", + extensions := extensions + ); + $$; + + -- This hook executes when `graphql.resolve` is created. 
That is not necessarily the last + -- function in the extension so we need to grant permissions on existing entities AND + -- update default permissions to any others that are created after `graphql.resolve` + grant usage on schema graphql to postgres, anon, authenticated, service_role; + grant select on all tables in schema graphql to postgres, anon, authenticated, service_role; + grant execute on all functions in schema graphql to postgres, anon, authenticated, service_role; + grant all on all sequences in schema graphql to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role; + + -- Allow postgres role to allow granting usage on graphql and graphql_public schemas to custom roles + grant usage on schema graphql_public to postgres with grant option; + grant usage on schema graphql to postgres with grant option; + END IF; + +END; +$_$; + + +-- +-- Name: FUNCTION grant_pg_graphql_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_graphql_access() IS 'Grants access to pg_graphql'; + + +-- +-- Name: grant_pg_net_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_net_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_net' + ) + THEN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'tealbase_functions_admin' + ) + THEN + CREATE USER tealbase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + + GRANT USAGE ON SCHEMA net TO tealbase_functions_admin, 
postgres, anon, authenticated, service_role; + + IF EXISTS ( + SELECT FROM pg_extension + WHERE extname = 'pg_net' + -- all versions in use on existing projects as of 2025-02-20 + -- version 0.12.0 onwards don't need these applied + AND extversion IN ('0.2', '0.6', '0.7', '0.7.1', '0.8', '0.10.0', '0.11.0') + ) THEN + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + END IF; + END IF; +END; +$$; + + +-- +-- Name: FUNCTION grant_pg_net_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_net_access() IS 'Grants access to pg_net'; + + +-- +-- Name: pgrst_ddl_watch(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.pgrst_ddl_watch() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +DECLARE + cmd record; +BEGIN + FOR cmd IN SELECT * FROM pg_event_trigger_ddl_commands() + LOOP + IF cmd.command_tag IN ( + 'CREATE 
SCHEMA', 'ALTER SCHEMA' + , 'CREATE TABLE', 'CREATE TABLE AS', 'SELECT INTO', 'ALTER TABLE' + , 'CREATE FOREIGN TABLE', 'ALTER FOREIGN TABLE' + , 'CREATE VIEW', 'ALTER VIEW' + , 'CREATE MATERIALIZED VIEW', 'ALTER MATERIALIZED VIEW' + , 'CREATE FUNCTION', 'ALTER FUNCTION' + , 'CREATE TRIGGER' + , 'CREATE TYPE', 'ALTER TYPE' + , 'CREATE RULE' + , 'COMMENT' + ) + -- don't notify in case of CREATE TEMP table or other objects created on pg_temp + AND cmd.schema_name is distinct from 'pg_temp' + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$; + + +-- +-- Name: pgrst_drop_watch(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.pgrst_drop_watch() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +DECLARE + obj record; +BEGIN + FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() + LOOP + IF obj.object_type IN ( + 'schema' + , 'table' + , 'foreign table' + , 'view' + , 'materialized view' + , 'function' + , 'trigger' + , 'type' + , 'rule' + ) + AND obj.is_temporary IS false -- no pg_temp objects + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$; + + +-- +-- Name: set_graphql_placeholder(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.set_graphql_placeholder() RETURNS event_trigger + LANGUAGE plpgsql + AS $_$ + DECLARE + graphql_is_dropped bool; + BEGIN + graphql_is_dropped = ( + SELECT ev.schema_name = 'graphql_public' + FROM pg_event_trigger_dropped_objects() AS ev + WHERE ev.schema_name = 'graphql_public' + ); + + IF graphql_is_dropped + THEN + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language plpgsql + as $$ + DECLARE + server_version float; + BEGIN + server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float); + + IF server_version >= 14 THEN + RETURN jsonb_build_object( + 'errors', 
jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql extension is not enabled.' + ) + ) + ); + ELSE + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql is only available on projects running Postgres 14 onwards.' + ) + ) + ); + END IF; + END; + $$; + END IF; + + END; +$_$; + + +-- +-- Name: FUNCTION set_graphql_placeholder(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.set_graphql_placeholder() IS 'Reintroduces placeholder function for graphql_public.graphql'; + + +-- +-- Name: get_auth(text); Type: FUNCTION; Schema: pgbouncer; Owner: - +-- + +CREATE FUNCTION pgbouncer.get_auth(p_usename text) RETURNS TABLE(username text, password text) + LANGUAGE plpgsql SECURITY DEFINER + AS $_$ +begin + raise debug 'PgBouncer auth request: %', p_usename; + + return query + select + rolname::text, + case when rolvaliduntil < now() + then null + else rolpassword::text + end + from pg_authid + where rolname=$1 and rolcanlogin; +end; +$_$; + + +-- +-- Name: extension(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.extension(name text) RETURNS text + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +_filename text; +BEGIN + select string_to_array(name, '/') into _parts; + select _parts[array_length(_parts,1)] into _filename; + -- @todo return the last part instead of 2 + return split_part(_filename, '.', 2); +END +$$; + + +-- +-- Name: filename(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.filename(name text) RETURNS text + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[array_length(_parts,1)]; +END +$$; + + +-- +-- Name: foldername(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.foldername(name text) RETURNS text[] + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') 
into _parts; + return _parts[1:array_length(_parts,1)-1]; +END +$$; + + +-- +-- Name: search(text, text, integer, integer, integer); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.search(prefix text, bucketname text, limits integer DEFAULT 100, levels integer DEFAULT 1, offsets integer DEFAULT 0) RETURNS TABLE(name text, id uuid, updated_at timestamp with time zone, created_at timestamp with time zone, last_accessed_at timestamp with time zone, metadata jsonb) + LANGUAGE plpgsql + AS $$ +DECLARE +_bucketId text; +BEGIN + -- will be replaced by migrations when server starts + -- saving space for cloud-init +END +$$; + + +SET default_tablespace = ''; + +SET default_table_access_method = heap; + +-- +-- Name: audit_log_entries; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.audit_log_entries ( + instance_id uuid, + id uuid NOT NULL, + payload json, + created_at timestamp with time zone +); + + +-- +-- Name: TABLE audit_log_entries; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.audit_log_entries IS 'Auth: Audit trail for user actions.'; + + +-- +-- Name: instances; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.instances ( + id uuid NOT NULL, + uuid uuid, + raw_base_config text, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE instances; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.instances IS 'Auth: Manages users across multiple sites.'; + + +-- +-- Name: refresh_tokens; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.refresh_tokens ( + instance_id uuid, + id bigint NOT NULL, + token character varying(255), + user_id character varying(255), + revoked boolean, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE refresh_tokens; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.refresh_tokens IS 'Auth: Store of tokens used to refresh JWT 
tokens once they expire.'; + + +-- +-- Name: refresh_tokens_id_seq; Type: SEQUENCE; Schema: auth; Owner: - +-- + +CREATE SEQUENCE auth.refresh_tokens_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: refresh_tokens_id_seq; Type: SEQUENCE OWNED BY; Schema: auth; Owner: - +-- + +ALTER SEQUENCE auth.refresh_tokens_id_seq OWNED BY auth.refresh_tokens.id; + + +-- +-- Name: schema_migrations; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.schema_migrations ( + version character varying(255) NOT NULL +); + + +-- +-- Name: TABLE schema_migrations; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.schema_migrations IS 'Auth: Manages updates to the auth system.'; + + +-- +-- Name: users; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.users ( + instance_id uuid, + id uuid NOT NULL, + aud character varying(255), + role character varying(255), + email character varying(255), + encrypted_password character varying(255), + confirmed_at timestamp with time zone, + invited_at timestamp with time zone, + confirmation_token character varying(255), + confirmation_sent_at timestamp with time zone, + recovery_token character varying(255), + recovery_sent_at timestamp with time zone, + email_change_token character varying(255), + email_change character varying(255), + email_change_sent_at timestamp with time zone, + last_sign_in_at timestamp with time zone, + raw_app_meta_data jsonb, + raw_user_meta_data jsonb, + is_super_admin boolean, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE users; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.users IS 'Auth: Stores user login data within a secure schema.'; + + +-- +-- Name: schema_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.schema_migrations ( + version character varying(128) NOT NULL +); + + +-- +-- Name: buckets; Type: TABLE; Schema: storage; 
Owner: - +-- + +CREATE TABLE storage.buckets ( + id text NOT NULL, + name text NOT NULL, + owner uuid, + created_at timestamp with time zone DEFAULT now(), + updated_at timestamp with time zone DEFAULT now() +); + + +-- +-- Name: migrations; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.migrations ( + id integer NOT NULL, + name character varying(100) NOT NULL, + hash character varying(40) NOT NULL, + executed_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP +); + + +-- +-- Name: objects; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.objects ( + id uuid DEFAULT extensions.uuid_generate_v4() NOT NULL, + bucket_id text, + name text, + owner uuid, + created_at timestamp with time zone DEFAULT now(), + updated_at timestamp with time zone DEFAULT now(), + last_accessed_at timestamp with time zone DEFAULT now(), + metadata jsonb +); + + +-- +-- Name: refresh_tokens id; Type: DEFAULT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.refresh_tokens ALTER COLUMN id SET DEFAULT nextval('auth.refresh_tokens_id_seq'::regclass); + + +-- +-- Name: audit_log_entries audit_log_entries_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.audit_log_entries + ADD CONSTRAINT audit_log_entries_pkey PRIMARY KEY (id); + + +-- +-- Name: instances instances_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.instances + ADD CONSTRAINT instances_pkey PRIMARY KEY (id); + + +-- +-- Name: refresh_tokens refresh_tokens_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.refresh_tokens + ADD CONSTRAINT refresh_tokens_pkey PRIMARY KEY (id); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: users users_email_key; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.users + ADD 
CONSTRAINT users_email_key UNIQUE (email); + + +-- +-- Name: users users_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.users + ADD CONSTRAINT users_pkey PRIMARY KEY (id); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: buckets buckets_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.buckets + ADD CONSTRAINT buckets_pkey PRIMARY KEY (id); + + +-- +-- Name: migrations migrations_name_key; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.migrations + ADD CONSTRAINT migrations_name_key UNIQUE (name); + + +-- +-- Name: migrations migrations_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.migrations + ADD CONSTRAINT migrations_pkey PRIMARY KEY (id); + + +-- +-- Name: objects objects_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT objects_pkey PRIMARY KEY (id); + + +-- +-- Name: audit_logs_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX audit_logs_instance_id_idx ON auth.audit_log_entries USING btree (instance_id); + + +-- +-- Name: refresh_tokens_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_instance_id_idx ON auth.refresh_tokens USING btree (instance_id); + + +-- +-- Name: refresh_tokens_instance_id_user_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_instance_id_user_id_idx ON auth.refresh_tokens USING btree (instance_id, user_id); + + +-- +-- Name: refresh_tokens_token_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_token_idx ON auth.refresh_tokens USING btree (token); + + +-- +-- Name: users_instance_id_email_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX 
users_instance_id_email_idx ON auth.users USING btree (instance_id, email); + + +-- +-- Name: users_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX users_instance_id_idx ON auth.users USING btree (instance_id); + + +-- +-- Name: bname; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE UNIQUE INDEX bname ON storage.buckets USING btree (name); + + +-- +-- Name: bucketid_objname; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE UNIQUE INDEX bucketid_objname ON storage.objects USING btree (bucket_id, name); + + +-- +-- Name: name_prefix_search; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE INDEX name_prefix_search ON storage.objects USING btree (name text_pattern_ops); + + +-- +-- Name: buckets buckets_owner_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.buckets + ADD CONSTRAINT buckets_owner_fkey FOREIGN KEY (owner) REFERENCES auth.users(id); + + +-- +-- Name: objects objects_bucketId_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT "objects_bucketId_fkey" FOREIGN KEY (bucket_id) REFERENCES storage.buckets(id); + + +-- +-- Name: objects objects_owner_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT objects_owner_fkey FOREIGN KEY (owner) REFERENCES auth.users(id); + + +-- +-- Name: objects; Type: ROW SECURITY; Schema: storage; Owner: - +-- + +ALTER TABLE storage.objects ENABLE ROW LEVEL SECURITY; + +-- +-- Name: tealbase_realtime; Type: PUBLICATION; Schema: -; Owner: - +-- + +CREATE PUBLICATION tealbase_realtime WITH (publish = 'insert, update, delete, truncate'); + + +-- +-- Name: issue_graphql_placeholder; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_graphql_placeholder ON sql_drop + WHEN TAG IN ('DROP EXTENSION') + EXECUTE FUNCTION extensions.set_graphql_placeholder(); + + +-- +-- Name: issue_pg_cron_access; Type: EVENT TRIGGER; Schema: -; 
Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_cron_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_cron_access(); + + +-- +-- Name: issue_pg_graphql_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_graphql_access ON ddl_command_end + WHEN TAG IN ('CREATE FUNCTION') + EXECUTE FUNCTION extensions.grant_pg_graphql_access(); + + +-- +-- Name: issue_pg_net_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_net_access(); + + +-- +-- Name: pgrst_ddl_watch; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER pgrst_ddl_watch ON ddl_command_end + EXECUTE FUNCTION extensions.pgrst_ddl_watch(); + + +-- +-- Name: pgrst_drop_watch; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER pgrst_drop_watch ON sql_drop + EXECUTE FUNCTION extensions.pgrst_drop_watch(); + + +-- +-- PostgreSQL database dump complete +-- + diff --git a/migrations/schema-orioledb-17.sql b/migrations/schema-orioledb-17.sql new file mode 100644 index 0000000..66e74ca --- /dev/null +++ b/migrations/schema-orioledb-17.sql @@ -0,0 +1,1014 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 17.5 +-- Dumped by pg_dump version 17.5 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET transaction_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET xmloption = content; +SET client_min_messages = warning; +SET row_security = off; + +-- +-- Name: auth; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA auth; + + +-- +-- Name: extensions; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA extensions; + + +-- +-- Name: graphql; Type: SCHEMA; Schema: -; 
Owner: - +-- + +CREATE SCHEMA graphql; + + +-- +-- Name: graphql_public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA graphql_public; + + +-- +-- Name: pgbouncer; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA pgbouncer; + + +-- +-- Name: realtime; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA realtime; + + +-- +-- Name: storage; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA storage; + + +-- +-- Name: vault; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA vault; + + +-- +-- Name: orioledb; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS orioledb WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION orioledb; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION orioledb IS 'OrioleDB -- the next generation transactional engine'; + + +-- +-- Name: pg_graphql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pg_graphql WITH SCHEMA graphql; + + +-- +-- Name: EXTENSION pg_graphql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pg_graphql IS 'pg_graphql: GraphQL support'; + + +-- +-- Name: pg_stat_statements; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pg_stat_statements WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pg_stat_statements; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pg_stat_statements IS 'track planning and execution statistics of all SQL statements executed'; + + +-- +-- Name: pgcrypto; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pgcrypto WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pgcrypto; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pgcrypto IS 'cryptographic functions'; + + +-- +-- Name: tealbase_vault; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS tealbase_vault WITH SCHEMA vault; + + +-- +-- Name: EXTENSION tealbase_vault; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION 
tealbase_vault IS 'tealbase Vault Extension'; + + +-- +-- Name: uuid-ossp; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION "uuid-ossp"; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION "uuid-ossp" IS 'generate universally unique identifiers (UUIDs)'; + + +-- +-- Name: email(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.email() RETURNS text + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.email', true), '')::text; +$$; + + +-- +-- Name: role(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.role() RETURNS text + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.role', true), '')::text; +$$; + + +-- +-- Name: uid(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.uid() RETURNS uuid + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.sub', true), '')::uuid; +$$; + + +-- +-- Name: grant_pg_cron_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_cron_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + IF EXISTS ( + SELECT + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_cron' + ) + THEN + grant usage on schema cron to postgres with grant option; + + alter default privileges in schema cron grant all on tables to postgres with grant option; + alter default privileges in schema cron grant all on functions to postgres with grant option; + alter default privileges in schema cron grant all on sequences to postgres with grant option; + + alter default privileges for user tealbase_admin in schema cron grant all + on sequences to postgres with grant option; + alter default privileges for user tealbase_admin in schema cron grant all + on tables to postgres with grant option; + alter default 
privileges for user tealbase_admin in schema cron grant all + on functions to postgres with grant option; + + grant all privileges on all tables in schema cron to postgres with grant option; + revoke all on table cron.job from postgres; + grant select on table cron.job to postgres with grant option; + END IF; +END; +$$; + + +-- +-- Name: FUNCTION grant_pg_cron_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_cron_access() IS 'Grants access to pg_cron'; + + +-- +-- Name: grant_pg_graphql_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_graphql_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $_$ +DECLARE + func_is_graphql_resolve bool; +BEGIN + func_is_graphql_resolve = ( + SELECT n.proname = 'resolve' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_proc AS n + ON ev.objid = n.oid + ); + + IF func_is_graphql_resolve + THEN + -- Update public wrapper to pass all arguments through to the pg_graphql resolve func + DROP FUNCTION IF EXISTS graphql_public.graphql; + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language sql + as $$ + select graphql.resolve( + query := query, + variables := coalesce(variables, '{}'), + "operationName" := "operationName", + extensions := extensions + ); + $$; + + -- This hook executes when `graphql.resolve` is created. 
That is not necessarily the last + -- function in the extension so we need to grant permissions on existing entities AND + -- update default permissions to any others that are created after `graphql.resolve` + grant usage on schema graphql to postgres, anon, authenticated, service_role; + grant select on all tables in schema graphql to postgres, anon, authenticated, service_role; + grant execute on all functions in schema graphql to postgres, anon, authenticated, service_role; + grant all on all sequences in schema graphql to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role; + + -- Allow postgres role to allow granting usage on graphql and graphql_public schemas to custom roles + grant usage on schema graphql_public to postgres with grant option; + grant usage on schema graphql to postgres with grant option; + END IF; + +END; +$_$; + + +-- +-- Name: FUNCTION grant_pg_graphql_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_graphql_access() IS 'Grants access to pg_graphql'; + + +-- +-- Name: grant_pg_net_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_net_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_net' + ) + THEN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'tealbase_functions_admin' + ) + THEN + CREATE USER tealbase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + + GRANT USAGE ON SCHEMA net TO tealbase_functions_admin, 
postgres, anon, authenticated, service_role; + + IF EXISTS ( + SELECT FROM pg_extension + WHERE extname = 'pg_net' + -- all versions in use on existing projects as of 2025-02-20 + -- version 0.12.0 onwards don't need these applied + AND extversion IN ('0.2', '0.6', '0.7', '0.7.1', '0.8', '0.10.0', '0.11.0') + ) THEN + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO tealbase_functions_admin, postgres, anon, authenticated, service_role; + END IF; + END IF; +END; +$$; + + +-- +-- Name: FUNCTION grant_pg_net_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_net_access() IS 'Grants access to pg_net'; + + +-- +-- Name: pgrst_ddl_watch(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.pgrst_ddl_watch() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +DECLARE + cmd record; +BEGIN + FOR cmd IN SELECT * FROM pg_event_trigger_ddl_commands() + LOOP + IF cmd.command_tag IN ( + 'CREATE 
SCHEMA', 'ALTER SCHEMA' + , 'CREATE TABLE', 'CREATE TABLE AS', 'SELECT INTO', 'ALTER TABLE' + , 'CREATE FOREIGN TABLE', 'ALTER FOREIGN TABLE' + , 'CREATE VIEW', 'ALTER VIEW' + , 'CREATE MATERIALIZED VIEW', 'ALTER MATERIALIZED VIEW' + , 'CREATE FUNCTION', 'ALTER FUNCTION' + , 'CREATE TRIGGER' + , 'CREATE TYPE', 'ALTER TYPE' + , 'CREATE RULE' + , 'COMMENT' + ) + -- don't notify in case of CREATE TEMP table or other objects created on pg_temp + AND cmd.schema_name is distinct from 'pg_temp' + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$; + + +-- +-- Name: pgrst_drop_watch(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.pgrst_drop_watch() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +DECLARE + obj record; +BEGIN + FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() + LOOP + IF obj.object_type IN ( + 'schema' + , 'table' + , 'foreign table' + , 'view' + , 'materialized view' + , 'function' + , 'trigger' + , 'type' + , 'rule' + ) + AND obj.is_temporary IS false -- no pg_temp objects + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$; + + +-- +-- Name: set_graphql_placeholder(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.set_graphql_placeholder() RETURNS event_trigger + LANGUAGE plpgsql + AS $_$ + DECLARE + graphql_is_dropped bool; + BEGIN + graphql_is_dropped = ( + SELECT ev.schema_name = 'graphql_public' + FROM pg_event_trigger_dropped_objects() AS ev + WHERE ev.schema_name = 'graphql_public' + ); + + IF graphql_is_dropped + THEN + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language plpgsql + as $$ + DECLARE + server_version float; + BEGIN + server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float); + + IF server_version >= 14 THEN + RETURN jsonb_build_object( + 'errors', 
jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql extension is not enabled.' + ) + ) + ); + ELSE + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql is only available on projects running Postgres 14 onwards.' + ) + ) + ); + END IF; + END; + $$; + END IF; + + END; +$_$; + + +-- +-- Name: FUNCTION set_graphql_placeholder(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.set_graphql_placeholder() IS 'Reintroduces placeholder function for graphql_public.graphql'; + + +-- +-- Name: get_auth(text); Type: FUNCTION; Schema: pgbouncer; Owner: - +-- + +CREATE FUNCTION pgbouncer.get_auth(p_usename text) RETURNS TABLE(username text, password text) + LANGUAGE plpgsql SECURITY DEFINER + AS $_$ +begin + raise debug 'PgBouncer auth request: %', p_usename; + + return query + select + rolname::text, + case when rolvaliduntil < now() + then null + else rolpassword::text + end + from pg_authid + where rolname=$1 and rolcanlogin; +end; +$_$; + + +-- +-- Name: extension(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.extension(name text) RETURNS text + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +_filename text; +BEGIN + select string_to_array(name, '/') into _parts; + select _parts[array_length(_parts,1)] into _filename; + -- @todo return the last part instead of 2 + return split_part(_filename, '.', 2); +END +$$; + + +-- +-- Name: filename(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.filename(name text) RETURNS text + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[array_length(_parts,1)]; +END +$$; + + +-- +-- Name: foldername(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.foldername(name text) RETURNS text[] + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') 
into _parts; + return _parts[1:array_length(_parts,1)-1]; +END +$$; + + +-- +-- Name: search(text, text, integer, integer, integer); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.search(prefix text, bucketname text, limits integer DEFAULT 100, levels integer DEFAULT 1, offsets integer DEFAULT 0) RETURNS TABLE(name text, id uuid, updated_at timestamp with time zone, created_at timestamp with time zone, last_accessed_at timestamp with time zone, metadata jsonb) + LANGUAGE plpgsql + AS $$ +DECLARE +_bucketId text; +BEGIN + -- will be replaced by migrations when server starts + -- saving space for cloud-init +END +$$; + + +SET default_tablespace = ''; + +SET default_table_access_method = orioledb; + +-- +-- Name: audit_log_entries; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.audit_log_entries ( + instance_id uuid, + id uuid NOT NULL, + payload json, + created_at timestamp with time zone +); + + +-- +-- Name: TABLE audit_log_entries; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.audit_log_entries IS 'Auth: Audit trail for user actions.'; + + +-- +-- Name: instances; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.instances ( + id uuid NOT NULL, + uuid uuid, + raw_base_config text, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE instances; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.instances IS 'Auth: Manages users across multiple sites.'; + + +-- +-- Name: refresh_tokens; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.refresh_tokens ( + instance_id uuid, + id bigint NOT NULL, + token character varying(255), + user_id character varying(255), + revoked boolean, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE refresh_tokens; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.refresh_tokens IS 'Auth: Store of tokens used to refresh 
JWT tokens once they expire.'; + + +-- +-- Name: refresh_tokens_id_seq; Type: SEQUENCE; Schema: auth; Owner: - +-- + +CREATE SEQUENCE auth.refresh_tokens_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: refresh_tokens_id_seq; Type: SEQUENCE OWNED BY; Schema: auth; Owner: - +-- + +ALTER SEQUENCE auth.refresh_tokens_id_seq OWNED BY auth.refresh_tokens.id; + + +-- +-- Name: schema_migrations; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.schema_migrations ( + version character varying(255) NOT NULL +); + + +-- +-- Name: TABLE schema_migrations; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.schema_migrations IS 'Auth: Manages updates to the auth system.'; + + +-- +-- Name: users; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.users ( + instance_id uuid, + id uuid NOT NULL, + aud character varying(255), + role character varying(255), + email character varying(255), + encrypted_password character varying(255), + confirmed_at timestamp with time zone, + invited_at timestamp with time zone, + confirmation_token character varying(255), + confirmation_sent_at timestamp with time zone, + recovery_token character varying(255), + recovery_sent_at timestamp with time zone, + email_change_token character varying(255), + email_change character varying(255), + email_change_sent_at timestamp with time zone, + last_sign_in_at timestamp with time zone, + raw_app_meta_data jsonb, + raw_user_meta_data jsonb, + is_super_admin boolean, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE users; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.users IS 'Auth: Stores user login data within a secure schema.'; + + +-- +-- Name: schema_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.schema_migrations ( + version character varying(128) NOT NULL +); + + +-- +-- Name: buckets; Type: TABLE; Schema: 
storage; Owner: - +-- + +CREATE TABLE storage.buckets ( + id text NOT NULL, + name text NOT NULL, + owner uuid, + created_at timestamp with time zone DEFAULT now(), + updated_at timestamp with time zone DEFAULT now() +); + + +-- +-- Name: migrations; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.migrations ( + id integer NOT NULL, + name character varying(100) NOT NULL, + hash character varying(40) NOT NULL, + executed_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP +); + + +-- +-- Name: objects; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.objects ( + id uuid DEFAULT extensions.uuid_generate_v4() NOT NULL, + bucket_id text, + name text, + owner uuid, + created_at timestamp with time zone DEFAULT now(), + updated_at timestamp with time zone DEFAULT now(), + last_accessed_at timestamp with time zone DEFAULT now(), + metadata jsonb +); + + +-- +-- Name: refresh_tokens id; Type: DEFAULT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.refresh_tokens ALTER COLUMN id SET DEFAULT nextval('auth.refresh_tokens_id_seq'::regclass); + + +-- +-- Name: audit_log_entries audit_log_entries_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.audit_log_entries + ADD CONSTRAINT audit_log_entries_pkey PRIMARY KEY (id); + + +-- +-- Name: instances instances_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.instances + ADD CONSTRAINT instances_pkey PRIMARY KEY (id); + + +-- +-- Name: refresh_tokens refresh_tokens_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.refresh_tokens + ADD CONSTRAINT refresh_tokens_pkey PRIMARY KEY (id); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: users users_email_key; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.users + 
ADD CONSTRAINT users_email_key UNIQUE (email); + + +-- +-- Name: users users_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.users + ADD CONSTRAINT users_pkey PRIMARY KEY (id); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: buckets buckets_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.buckets + ADD CONSTRAINT buckets_pkey PRIMARY KEY (id); + + +-- +-- Name: migrations migrations_name_key; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.migrations + ADD CONSTRAINT migrations_name_key UNIQUE (name); + + +-- +-- Name: migrations migrations_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.migrations + ADD CONSTRAINT migrations_pkey PRIMARY KEY (id); + + +-- +-- Name: objects objects_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT objects_pkey PRIMARY KEY (id); + + +-- +-- Name: audit_logs_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX audit_logs_instance_id_idx ON auth.audit_log_entries USING btree (instance_id); + + +-- +-- Name: refresh_tokens_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_instance_id_idx ON auth.refresh_tokens USING btree (instance_id); + + +-- +-- Name: refresh_tokens_instance_id_user_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_instance_id_user_id_idx ON auth.refresh_tokens USING btree (instance_id, user_id); + + +-- +-- Name: refresh_tokens_token_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_token_idx ON auth.refresh_tokens USING btree (token); + + +-- +-- Name: users_instance_id_email_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX 
users_instance_id_email_idx ON auth.users USING btree (instance_id, email); + + +-- +-- Name: users_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX users_instance_id_idx ON auth.users USING btree (instance_id); + + +-- +-- Name: bname; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE UNIQUE INDEX bname ON storage.buckets USING btree (name); + + +-- +-- Name: bucketid_objname; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE UNIQUE INDEX bucketid_objname ON storage.objects USING btree (bucket_id, name); + + +-- +-- Name: name_prefix_search; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE INDEX name_prefix_search ON storage.objects USING btree (name text_pattern_ops); + + +-- +-- Name: buckets buckets_owner_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.buckets + ADD CONSTRAINT buckets_owner_fkey FOREIGN KEY (owner) REFERENCES auth.users(id); + + +-- +-- Name: objects objects_bucketId_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT "objects_bucketId_fkey" FOREIGN KEY (bucket_id) REFERENCES storage.buckets(id); + + +-- +-- Name: objects objects_owner_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT objects_owner_fkey FOREIGN KEY (owner) REFERENCES auth.users(id); + + +-- +-- Name: objects; Type: ROW SECURITY; Schema: storage; Owner: - +-- + +ALTER TABLE storage.objects ENABLE ROW LEVEL SECURITY; + +-- +-- Name: tealbase_realtime; Type: PUBLICATION; Schema: -; Owner: - +-- + +CREATE PUBLICATION tealbase_realtime WITH (publish = 'insert, update, delete, truncate'); + + +-- +-- Name: issue_graphql_placeholder; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_graphql_placeholder ON sql_drop + WHEN TAG IN ('DROP EXTENSION') + EXECUTE FUNCTION extensions.set_graphql_placeholder(); + + +-- +-- Name: issue_pg_cron_access; Type: EVENT TRIGGER; Schema: -; 
Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_cron_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_cron_access(); + + +-- +-- Name: issue_pg_graphql_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_graphql_access ON ddl_command_end + WHEN TAG IN ('CREATE FUNCTION') + EXECUTE FUNCTION extensions.grant_pg_graphql_access(); + + +-- +-- Name: issue_pg_net_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_net_access(); + + +-- +-- Name: pgrst_ddl_watch; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER pgrst_ddl_watch ON ddl_command_end + EXECUTE FUNCTION extensions.pgrst_ddl_watch(); + + +-- +-- Name: pgrst_drop_watch; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER pgrst_drop_watch ON sql_drop + EXECUTE FUNCTION extensions.pgrst_drop_watch(); + + +-- +-- PostgreSQL database dump complete +-- + diff --git a/migrations/schema.sql b/migrations/schema.sql index 12cb42b..53682b9 100644 --- a/migrations/schema.sql +++ b/migrations/schema.sql @@ -44,27 +44,6 @@ CREATE SCHEMA graphql_public; CREATE SCHEMA pgbouncer; --- --- Name: pgsodium; Type: SCHEMA; Schema: -; Owner: - --- - -CREATE SCHEMA pgsodium; - - --- --- Name: pgsodium; Type: EXTENSION; Schema: -; Owner: - --- - -CREATE EXTENSION IF NOT EXISTS pgsodium WITH SCHEMA pgsodium; - - --- --- Name: EXTENSION pgsodium; Type: COMMENT; Schema: -; Owner: - --- - -COMMENT ON EXTENSION pgsodium IS 'Pgsodium is a modern cryptography library for Postgres.'; - - -- -- Name: realtime; Type: SCHEMA; Schema: -; Owner: - -- @@ -574,28 +553,6 @@ END $$; --- --- Name: secrets_encrypt_secret_secret(); Type: FUNCTION; Schema: vault; Owner: - --- - -CREATE FUNCTION vault.secrets_encrypt_secret_secret() RETURNS trigger - LANGUAGE plpgsql - AS $$ - BEGIN - new.secret = CASE 
WHEN new.secret IS NULL THEN NULL ELSE - CASE WHEN new.key_id IS NULL THEN NULL ELSE pg_catalog.encode( - pgsodium.crypto_aead_det_encrypt( - pg_catalog.convert_to(new.secret, 'utf8'), - pg_catalog.convert_to((new.id::text || new.description::text || new.created_at::text || new.updated_at::text)::text, 'utf8'), - new.key_id::uuid, - new.nonce - ), - 'base64') END END; - RETURN new; - END; - $$; - - SET default_tablespace = ''; SET default_table_access_method = heap; @@ -737,7 +694,7 @@ COMMENT ON TABLE auth.users IS 'Auth: Stores user login data within a secure sch -- CREATE TABLE public.schema_migrations ( - version character varying(255) NOT NULL + version character varying(128) NOT NULL ); @@ -782,30 +739,6 @@ CREATE TABLE storage.objects ( ); --- --- Name: decrypted_secrets; Type: VIEW; Schema: vault; Owner: - --- - -CREATE VIEW vault.decrypted_secrets AS - SELECT secrets.id, - secrets.name, - secrets.description, - secrets.secret, - CASE - WHEN (secrets.secret IS NULL) THEN NULL::text - ELSE - CASE - WHEN (secrets.key_id IS NULL) THEN NULL::text - ELSE convert_from(pgsodium.crypto_aead_det_decrypt(decode(secrets.secret, 'base64'::text), convert_to(((((secrets.id)::text || secrets.description) || (secrets.created_at)::text) || (secrets.updated_at)::text), 'utf8'::name), secrets.key_id, secrets.nonce), 'utf8'::name) - END - END AS decrypted_secret, - secrets.key_id, - secrets.nonce, - secrets.created_at, - secrets.updated_at - FROM vault.secrets; - - -- -- Name: refresh_tokens id; Type: DEFAULT; Schema: auth; Owner: - -- diff --git a/migrations/tests/database/exists.sql b/migrations/tests/database/exists.sql index 54b2a38..bc19cd3 100644 --- a/migrations/tests/database/exists.sql +++ b/migrations/tests/database/exists.sql @@ -1,6 +1,7 @@ SELECT has_schema('public'); SELECT has_schema('auth'); +SELECT has_schema('pgbouncer'); SELECT has_schema('extensions'); SELECT has_schema('graphql'); SELECT has_schema('graphql_public'); diff --git 
a/migrations/tests/database/privs.sql b/migrations/tests/database/privs.sql index 8806681..bed44b2 100644 --- a/migrations/tests/database/privs.sql +++ b/migrations/tests/database/privs.sql @@ -2,10 +2,6 @@ SELECT database_privs_are( 'postgres', 'postgres', ARRAY['CONNECT', 'TEMPORARY', 'CREATE'] ); -SELECT function_privs_are('pgsodium', 'crypto_aead_det_decrypt', array['bytea', 'bytea', 'uuid', 'bytea'], 'service_role', array['EXECUTE']); -SELECT function_privs_are('pgsodium', 'crypto_aead_det_encrypt', array['bytea', 'bytea', 'uuid', 'bytea'], 'service_role', array['EXECUTE']); -SELECT function_privs_are('pgsodium', 'crypto_aead_det_keygen', array[]::text[], 'service_role', array['EXECUTE']); - -- Verify public schema privileges SELECT schema_privs_are('public', 'postgres', array['CREATE', 'USAGE']); SELECT schema_privs_are('public', 'anon', array['USAGE']); diff --git a/migrations/tests/extensions/01-postgis.sql b/migrations/tests/extensions/01-postgis.sql index 2656d4f..b2f7ba8 100644 --- a/migrations/tests/extensions/01-postgis.sql +++ b/migrations/tests/extensions/01-postgis.sql @@ -1,43 +1,38 @@ -BEGIN; -create extension if not exists postgis_sfcgal with schema "extensions" cascade; -ROLLBACK; +begin; +do $_$ +begin + if not exists (select 1 from pg_extension where extname = 'orioledb') then + -- create postgis tiger as tealbase_admin + create extension if not exists postgis_tiger_geocoder cascade; -BEGIN; -create extension if not exists postgis_raster with schema "extensions" cascade; -ROLLBACK; + -- \ir ansible/files/postgresql_extension_custom_scripts/postgis_tiger_geocoder/after-create.sql + grant usage on schema tiger, tiger_data to postgres with grant option; + grant all privileges on all tables in schema tiger, tiger_data to postgres with grant option; + grant all privileges on all routines in schema tiger, tiger_data to postgres with grant option; + grant all privileges on all sequences in schema tiger, tiger_data to postgres with grant option; + 
alter default privileges in schema tiger, tiger_data grant all on tables to postgres with grant option; + alter default privileges in schema tiger, tiger_data grant all on routines to postgres with grant option; + alter default privileges in schema tiger, tiger_data grant all on sequences to postgres with grant option; + SET search_path TO extensions, public, tiger, tiger_data; + -- postgres role should have access + set local role postgres; + perform tiger.pprint_addy(tiger.pagc_normalize_address('710 E Ben White Blvd, Austin, TX 78704')); -BEGIN; --- create postgis tiger as tealbase_admin -create extension if not exists address_standardizer with schema extensions; -create extension if not exists postgis_tiger_geocoder cascade; + -- other roles can be granted access + grant usage on schema tiger, tiger_data to authenticated; + grant select on all tables in schema tiger, tiger_data to authenticated; + grant execute on all routines in schema tiger, tiger_data to authenticated; --- \ir ansible/files/postgresql_extension_custom_scripts/postgis_tiger_geocoder/after-create.sql -grant usage on schema tiger, tiger_data to postgres with grant option; -grant all privileges on all tables in schema tiger, tiger_data to postgres with grant option; -grant all privileges on all routines in schema tiger, tiger_data to postgres with grant option; -grant all privileges on all sequences in schema tiger, tiger_data to postgres with grant option; -alter default privileges in schema tiger, tiger_data grant all on tables to postgres with grant option; -alter default privileges in schema tiger, tiger_data grant all on routines to postgres with grant option; -alter default privileges in schema tiger, tiger_data grant all on sequences to postgres with grant option; + -- authenticated role should have access now + set local role authenticated; + perform tiger.pprint_addy(tiger.pagc_normalize_address('710 E Ben White Blvd, Austin, TX 78704')); + reset role; --- postgres role should have 
access -set local role postgres; -select tiger.pprint_addy(tiger.pagc_normalize_address('710 E Ben White Blvd, Austin, TX 78704')); - --- other roles can be granted access -grant usage on schema tiger, tiger_data to authenticated; -grant select on all tables in schema tiger, tiger_data to authenticated; -grant execute on all routines in schema tiger, tiger_data to authenticated; - --- authenticated role should have access now -set local role authenticated; -select tiger.pprint_addy(tiger.pagc_normalize_address('710 E Ben White Blvd, Austin, TX 78704')); -ROLLBACK; - -BEGIN; --- address standardizer creates a table in extensions schema, owned by tealbase_admin -create extension if not exists address_standardizer_data_us with schema extensions; --- postgres role should have access -set local role postgres; -select * from extensions.us_lex; -ROLLBACK; + -- postgres role should have access to address_standardizer_data_us + set local role postgres; + perform 1 from us_lex; + reset role; + end if; +end +$_$; +rollback; diff --git a/migrations/tests/extensions/02-pgrouting.sql b/migrations/tests/extensions/02-pgrouting.sql index 27dec0b..f8d4786 100644 --- a/migrations/tests/extensions/02-pgrouting.sql +++ b/migrations/tests/extensions/02-pgrouting.sql @@ -1,3 +1,9 @@ -BEGIN; -create extension if not exists pgrouting with schema "extensions" cascade; -ROLLBACK; +begin; +do $_$ +begin + if not exists (select 1 from pg_extension where extname = 'orioledb') then + create extension if not exists pgrouting with schema "extensions" cascade; + end if; +end +$_$; +rollback; diff --git a/migrations/tests/extensions/06-pgjwt.sql b/migrations/tests/extensions/06-pgjwt.sql index 36782fd..31716d3 100644 --- a/migrations/tests/extensions/06-pgjwt.sql +++ b/migrations/tests/extensions/06-pgjwt.sql @@ -1,3 +1,10 @@ BEGIN; -create extension if not exists pgjwt with schema "extensions" cascade; +do $$ +begin + if exists (select 1 from pg_available_extensions where name = 'pgjwt') then + if 
not exists (select 1 from pg_extension where extname = 'pgjwt') then + create extension if not exists pgjwt with schema "extensions" cascade; + end if; + end if; +end $$; ROLLBACK; diff --git a/migrations/tests/extensions/10-timescaledb.sql b/migrations/tests/extensions/10-timescaledb.sql index acf32b8..71fc3b1 100644 --- a/migrations/tests/extensions/10-timescaledb.sql +++ b/migrations/tests/extensions/10-timescaledb.sql @@ -1,3 +1,9 @@ -BEGIN; -create extension if not exists timescaledb with schema "extensions"; -ROLLBACK; +begin; +do $_$ +begin + if current_setting('server_version_num')::integer >= 150000 and current_setting('server_version_num')::integer < 160000 then + create extension if not exists timescaledb with schema "extensions"; + end if; +end +$_$; +rollback; diff --git a/migrations/tests/extensions/13-plv8.sql b/migrations/tests/extensions/13-plv8.sql index a407925..8ce134a 100644 --- a/migrations/tests/extensions/13-plv8.sql +++ b/migrations/tests/extensions/13-plv8.sql @@ -1,3 +1,9 @@ -BEGIN; -create extension if not exists plv8 with schema "pg_catalog"; -ROLLBACK; +begin; +do $_$ +begin + if current_setting('server_version_num')::integer >= 150000 and current_setting('server_version_num')::integer < 160000 then + create extension if not exists plv8 with schema "pg_catalog"; + end if; +end +$_$; +rollback; diff --git a/migrations/tests/test.sql b/migrations/tests/test.sql index 7afa406..9682b4a 100644 --- a/migrations/tests/test.sql +++ b/migrations/tests/test.sql @@ -1,3 +1,13 @@ +-- Check and create OrioleDB if available +DO $$ +BEGIN + IF EXISTS (SELECT 1 FROM pg_available_extensions WHERE name = 'orioledb') THEN + IF NOT EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'orioledb') THEN + CREATE EXTENSION orioledb; + END IF; + END IF; +END $$; + -- Create all extensions \ir extensions/test.sql diff --git a/nix/do-not-use-vendored-libraries.patch b/nix/do-not-use-vendored-libraries.patch deleted file mode 100644 index 6a00534..0000000 --- 
a/nix/do-not-use-vendored-libraries.patch +++ /dev/null @@ -1,15 +0,0 @@ -Do not use vendored libraries - ---- a/vendor/CMakeLists.txt -+++ b/vendor/CMakeLists.txt -@@ -14,10 +14,7 @@ - # License along with this library; if not, write to the Free Software - # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - - add_subdirectory(onigmo) --add_subdirectory(mruby) --add_subdirectory(mecab) --add_subdirectory(message_pack) - if(GRN_WITH_MRUBY) - add_subdirectory(groonga-log) - endif() \ No newline at end of file diff --git a/nix/docker/init.sh.in b/nix/docker/init.sh.in deleted file mode 100644 index eb23b84..0000000 --- a/nix/docker/init.sh.in +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -# shellcheck shell=bash -/bin/initdb --locale=C -D /data/postgresql --username=tealbase_admin -ln -s /etc/postgresql.conf /data/postgresql/postgresql.conf -/bin/postgres -p @PGSQL_DEFAULT_PORT@ -D /data/postgresql diff --git a/nix/docs/README.md b/nix/docs/README.md deleted file mode 100644 index 4006329..0000000 --- a/nix/docs/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# Documentation - -This directory contains most of the "runbooks" and documentation on how to use -this repository. - -You probably want to start with the [starting guide](./start-here.md). Then, -learn how to play with `postgres` in the [build guide](./build-postgres.md). -After that, you can probe around a bit. diff --git a/nix/docs/adding-new-package.md b/nix/docs/adding-new-package.md deleted file mode 100644 index 575a039..0000000 --- a/nix/docs/adding-new-package.md +++ /dev/null @@ -1,160 +0,0 @@ -# Adding a new extension package - - -## Pre-packaging steps -1. Make sure you have nix installed [Nix installer](https://github.com/DeterminateSystems/nix-installer) -2. Create a branch off of `develop` - - -## C/C++ postgres extensions - -If you are creating a C/C++ extension, the pattern found in https://github.com/tealbase/postgres/blob/develop/nix/ext/pgvector.nix will work well. 
- -``` -{ lib, stdenv, fetchFromGitHub, postgresql }: - -stdenv.mkDerivation rec { - pname = "pgvector"; - version = "0.7.4"; - - buildInputs = [ postgresql ]; - - src = fetchFromGitHub { - owner = "pgvector"; - repo = pname; - rev = "refs/tags/v${version}"; - hash = "sha256-qwPaguQUdDHV8q6GDneLq5MuhVroPizpbqt7f08gKJI="; - }; - - installPhase = '' - mkdir -p $out/{lib,share/postgresql/extension} - - cp *.so $out/lib - cp sql/*.sql $out/share/postgresql/extension - cp *.control $out/share/postgresql/extension - ''; - - meta = with lib; { - description = "Open-source vector similarity search for Postgres"; - homepage = "https://github.com/${src.owner}/${src.repo}"; - maintainers = with maintainers; [ olirice ]; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - }; -} -``` - -This uses `stdenv.mkDerivation` which is a general nix builder for C and C++ projects (and others). It can auto detect the Makefile, and attempt to use it. ***It's a good practice to not have steps in the Makefile of your project that try to deal with OS specific system paths, or make calls out to the internet, as Nix cannot use these steps to build your project.*** - -Your build should produce all of the sql and control files needed for the install phase. - -1. Once you have created this file, you can add it to `nix/ext/.nix` and edit `flake.nix` and add it to the `ourExtensions` list. -2. `git add .` as nix uses git to track changes -3. In your package file, temporarily empty the `hash = "sha256<...>=";` to `hash = "";` and save and `git add .` -4. Run `nix build .#psql_15/exts/` to try to trigger a build, nix will print the calculated sha256 value that you can add back the the `hash` variable, save the file again, and re-run `nix build .#psql_15/exts/`. -5. Add any needed migrations into the `tealbase/postgres` migrations directory. -6. You can then run tests locally to verify that the update of the package succeeded. -7. Now it's ready for PR review! 
- -## Extensions written in Rust that use `buildPgrxExtension` builder - -Extensions like: - -* https://github.com/tealbase/postgres/blob/develop/nix/ext/wrappers/default.nix -* https://github.com/tealbase/postgres/blob/develop/nix/ext/pg_graphql.nix -* https://github.com/tealbase/postgres/blob/develop/nix/ext/pg_jsonschema.nix - -Are written in Rust, built with `cargo`, and need to use https://github.com/pgcentralfoundation/pgrx to build the extension. - -We in turn have a special nix package `builder` which is sourced from `nixpkgs` and called `buildPgrxExtension` - -A simple example is found in `pg_jsonschema` - - -``` -{ lib, stdenv, fetchFromGitHub, postgresql, buildPgrxExtension_0_11_3, cargo }: - -buildPgrxExtension_0_11_3 rec { - pname = "pg_jsonschema"; - version = "0.3.1"; - inherit postgresql; - - src = fetchFromGitHub { - owner = "tealbase"; - repo = pname; - rev = "v${version}"; - hash = "sha256-YdKpOEiDIz60xE7C+EzpYjBcH0HabnDbtZl23CYls6g="; - }; - - nativeBuildInputs = [ cargo ]; - buildInputs = [ postgresql ]; - # update the following array when the pg_jsonschema version is updated - # required to ensure that extensions update scripts from previous versions are generated - - previousVersions = ["0.3.0" "0.2.0" "0.1.4" "0.1.4" "0.1.2" "0.1.1" "0.1.0"]; - CARGO="${cargo}/bin/cargo"; - env = lib.optionalAttrs stdenv.isDarwin { - POSTGRES_LIB = "${postgresql}/lib"; - RUSTFLAGS = "-C link-arg=-undefined -C link-arg=dynamic_lookup"; - }; - cargoHash = "sha256-VcS+efMDppofuFW2zNrhhsbC28By3lYekDFquHPta2g="; - - # FIXME (aseipp): testsuite tries to write files into /nix/store; we'll have - # to fix this a bit later. - doCheck = false; - - preBuild = '' - echo "Processing git tags..." - echo '${builtins.concatStringsSep "," previousVersions}' | sed 's/,/\n/g' > git_tags.txt - ''; - - postInstall = '' - echo "Creating SQL files for previous versions..." 
- current_version="${version}" - sql_file="$out/share/postgresql/extension/pg_jsonschema--$current_version.sql" - - if [ -f "$sql_file" ]; then - while read -r previous_version; do - if [ "$(printf '%s\n' "$previous_version" "$current_version" | sort -V | head -n1)" = "$previous_version" ] && [ "$previous_version" != "$current_version" ]; then - new_file="$out/share/postgresql/extension/pg_jsonschema--$previous_version--$current_version.sql" - echo "Creating $new_file" - cp "$sql_file" "$new_file" - fi - done < git_tags.txt - else - echo "Warning: $sql_file not found" - fi - rm git_tags.txt - ''; - - - meta = with lib; { - description = "JSON Schema Validation for PostgreSQL"; - homepage = "https://github.com/tealbase/${pname}"; - maintainers = with maintainers; [ samrose ]; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - }; -} -``` - -Here we have built support in our overlay to specify and pin the version of `buildPgrxExtension` to a specific version (in this case `buildPgrxExtension_0_11_3`). This is currently the only version we can support, but this can be extended in our overlay https://github.com/tealbase/postgres/blob/develop/nix/overlays/cargo-pgrx-0-11-3.nix to support other versions. - -A few things about `buildPgrxExtension_x`: - -* It doesn't support `buildPhase`, `installPhase` and those are implemented directly in the builder already -* It mostly just allows `cargo build` to do it's thing, but you may need to set env vars for the build process as seen above -* It caclulates a special `cargoHash` that will be generated after the first in `src` is generated, when running `nix build .#psql_15/exts/` to build the extension - - -## Post Nix derivation release steps - - -1. You can add and run tests as described in https://github.com/tealbase/postgres/blob/develop/nix/docs/adding-tests.md -2. You may need to add tests to our test.yml gh action workflow as well. -3. 
You can add the package and name and version to `ansible/vars.yml` it is not necessary to add the sha256 hash here, as the package is already built and cached in our release process before these vars are ever run. -4. to check that all your files will land in the overall build correctly, you can run `nix profile install .#psql_15/bin` on your machine, and check in `~/.nix-profile/bin, ~/.nix-profile/lib, ~/.nix-profile/share/postgresql/*` and you should see your lib, .control and sql files there. -5. You can also run `nix run .#start-server 15` and in a new terminal window run `nix run .#star-client-and-migrate 15` and try to `CREATE EXTENSION ` and work with it there -6. Check that your extension works with the `pg_upgrade` process (TODO documentation forthcoming) -7. Now you are ready to PR the extension -8. From here, the release process should typically take care of the rest. \ No newline at end of file diff --git a/nix/docs/adding-tests.md b/nix/docs/adding-tests.md deleted file mode 100644 index 126ed4a..0000000 --- a/nix/docs/adding-tests.md +++ /dev/null @@ -1,100 +0,0 @@ -There are basically two types of tests you can add: - -- pgTAP based tests, and -- pg\_regress tests -- Migration tests. - -In all cases, a number of extensions may be installed into the database for -use; you can see those in both [postgresql.conf.in](../tests/postgresql.conf.in) -and [prime.sql](../tests/prime.sql) (extensions may be enabled in either place.) - -## pg\_regress tests - -pg\_regress tests are in [tests/sql](./../tests/sql/) with output in [tests/expected](./../tests/expected/). 
-To create a new test, create a new SQL file in [tests/sql](./../tests/sql/) and then run: - -``` -nix flake check -L -``` - -Next, review the logs to identify where the test output was written - -``` -postgres> CREATE EXTENSION IF NOT EXISTS index_advisor; -postgres> CREATE EXTENSION -postgres> (using postmaster on localhost, port 5432) -postgres> ============== running regression test queries ============== -postgres> test new_test ... diff: /nix/store/5gk419ddz7mzzwhc9j6yj5i8lkw67pdl-tests/expected/new_test.out: No such file or directory -postgres> diff command failed with status 512: diff "/nix/store/5gk419ddz7mzzwhc9j6yj5i8lkw67pdl-tests/expected/new_test.out" "/nix/store/2fbrvnnr7iz6yigyf0rb0vxnyqvrgxzp-postgres-15.6-check-harness/regression_output/results/new_test.out" > "/nix/store/2fbrvnnr7iz6yigyf0rb0vxnyqvrgxzp-postgres-15.6-check-harness/regression_output/results/new_test.out.diff -``` - -and copy the `regression_output` directory to where you can review - -``` -cp -r /nix/store/2fbrvnnr7iz6yigyf0rb0vxnyqvrgxzp-postgres-15.6-check-harness/regression_output . -``` - -Then you can review the contents of `regression_output/results/new_test.out` to see if it matches what you expected. - -If it does match your expectations, copy the file to [tests/expected](./../tests/expected/) and the test will pass on the next run. - -If the output does not match your expectations, update the `.sql` file, re-run with `nix flake check -L` and try again - - -## pgTAP tests - -These are super easy: simply add `.sql` files to the -[tests/smoke](./../tests/smoke/) directory, then: - -``` -nix flake check -L -``` - -(`-L` prints logs to stderrr, for more details see `man nix`) - -These files are run using `pg_prove`; they pretty much behave exactly like how -you expect; you can read -[the pgTAP documentation](https://pgtap.org/documentation.html) for more. 
- -For a good example of a pgTAP test as a pull request, check out -[pull request #4](https://github.com/tealbase/nix-postgres/pull/4/files). - -## Re-running tests - -`nix flake check` gets its results cached, so if you do it again the tests won't rerun. If you change a file then it will run again. - - - -Limitation: currently there's no way to rerun all the tests, so you have to specify the check attribute. - -To get the correct attribute (`#checks.x86_64-linux.psql_15` above), you can do `nix flake show`. This will show a tree with all the output attributes. - -## Migration tests - -> **NOTE**: Currently, migration tests _do not happen in CI_. They can only be -> run manually. - -Migration tests are pretty simple in the sense they follow a very simple -principle: - -- You put data in the database -- Run the migration procedure -- It should probably not fail - -Step 1 and 2 are easy, and for various reasons (e.g. mistakes from upstream -extension authors), step 3 isn't guaranteed, so that's what the whole idea is -designed to test. - -To add data into the database, modify the -[data.sql](../nix/tests/migrations/data.sql) script and add whatever you want into -it. This script gets loaded into the old version of the database at startup, and -it's expected that the new version of the database can handle it. - -To run the `migration-test` tool, check out the documentation on -[migration-tests](./migration-tests.md). diff --git a/nix/docs/build-postgres.md b/nix/docs/build-postgres.md deleted file mode 100644 index 072886e..0000000 --- a/nix/docs/build-postgres.md +++ /dev/null @@ -1,124 +0,0 @@ -# 01 — Using tealbase nix - -Let's clone this repo: - -```bash -git clone https://github.com/tealbase/postgres $HOME/tealbase-postgres -cd $HOME/tealbase-postgres -``` - -## Hashes for everyone - -But how do we build stuff within it? With `nix build`, of course! 
For example, -the following command will, when completed, create a symlink named `result` that -points to a path which contains an entire PostgreSQL 15 installation — -extensions and all: - -``` -nix build .#psql_15/bin -``` - -``` -$ readlink result -/nix/store/ybf48481x033649mgdzk5dyaqv9dppzx-postgresql-and-plugins-15.3 -``` - -``` -$ ls result -bin include lib share -``` - -``` -$ ll result/bin/ -total 9928 -dr-xr-xr-x 2 root root 4096 Dec 31 1969 ./ -dr-xr-xr-x 5 root root 4096 Dec 31 1969 ../ -lrwxrwxrwx 1 root root 79 Dec 31 1969 .initdb-wrapped -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/.initdb-wrapped* --r-xr-xr-x 1 root root 9829624 Dec 31 1969 .postgres-wrapped* -lrwxrwxrwx 1 root root 73 Dec 31 1969 clusterdb -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/clusterdb* -lrwxrwxrwx 1 root root 72 Dec 31 1969 createdb -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/createdb* -lrwxrwxrwx 1 root root 74 Dec 31 1969 createuser -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/createuser* -lrwxrwxrwx 1 root root 70 Dec 31 1969 dropdb -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/dropdb* -lrwxrwxrwx 1 root root 72 Dec 31 1969 dropuser -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/dropuser* -lrwxrwxrwx 1 root root 68 Dec 31 1969 ecpg -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/ecpg* -lrwxrwxrwx 1 root root 70 Dec 31 1969 initdb -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/initdb* -lrwxrwxrwx 1 root root 72 Dec 31 1969 oid2name -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/oid2name* -lrwxrwxrwx 1 root root 74 Dec 31 1969 pg_amcheck -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_amcheck* -lrwxrwxrwx 1 root root 81 Dec 31 1969 pg_archivecleanup -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_archivecleanup* -lrwxrwxrwx 1 root root 77 Dec 
31 1969 pg_basebackup -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_basebackup* -lrwxrwxrwx 1 root root 76 Dec 31 1969 pg_checksums -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_checksums* --r-xr-xr-x 1 root root 53432 Dec 31 1969 pg_config* -lrwxrwxrwx 1 root root 78 Dec 31 1969 pg_controldata -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_controldata* --r-xr-xr-x 1 root root 82712 Dec 31 1969 pg_ctl* -lrwxrwxrwx 1 root root 71 Dec 31 1969 pg_dump -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_dump* -lrwxrwxrwx 1 root root 74 Dec 31 1969 pg_dumpall -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_dumpall* -lrwxrwxrwx 1 root root 74 Dec 31 1969 pg_isready -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_isready* -lrwxrwxrwx 1 root root 77 Dec 31 1969 pg_receivewal -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_receivewal* -lrwxrwxrwx 1 root root 78 Dec 31 1969 pg_recvlogical -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_recvlogical* -lrwxrwxrwx 1 root root 73 Dec 31 1969 pg_repack -> /nix/store/bi9i5ns4cqxk235qz3srs9p4x1qfxfna-pg_repack-1.4.8/bin/pg_repack* -lrwxrwxrwx 1 root root 75 Dec 31 1969 pg_resetwal -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_resetwal* -lrwxrwxrwx 1 root root 74 Dec 31 1969 pg_restore -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_restore* -lrwxrwxrwx 1 root root 73 Dec 31 1969 pg_rewind -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_rewind* -lrwxrwxrwx 1 root root 77 Dec 31 1969 pg_test_fsync -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_test_fsync* -lrwxrwxrwx 1 root root 78 Dec 31 1969 pg_test_timing -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_test_timing* -lrwxrwxrwx 1 root root 74 Dec 31 1969 pg_upgrade -> 
/nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_upgrade* -lrwxrwxrwx 1 root root 79 Dec 31 1969 pg_verifybackup -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_verifybackup* -lrwxrwxrwx 1 root root 74 Dec 31 1969 pg_waldump -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_waldump* -lrwxrwxrwx 1 root root 71 Dec 31 1969 pgbench -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pgbench* -lrwxrwxrwx 1 root root 71 Dec 31 1969 pgsql2shp -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/pgsql2shp* -lrwxrwxrwx 1 root root 77 Dec 31 1969 pgsql2shp-3.3.3 -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/pgsql2shp-3.3.3* -lrwxrwxrwx 1 root root 75 Dec 31 1969 pgtopo_export -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/pgtopo_export* -lrwxrwxrwx 1 root root 81 Dec 31 1969 pgtopo_export-3.3.3 -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/pgtopo_export-3.3.3* -lrwxrwxrwx 1 root root 75 Dec 31 1969 pgtopo_import -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/pgtopo_import* -lrwxrwxrwx 1 root root 81 Dec 31 1969 pgtopo_import-3.3.3 -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/pgtopo_import-3.3.3* --r-xr-xr-x 1 root root 286 Dec 31 1969 postgres* -lrwxrwxrwx 1 root root 74 Dec 31 1969 postmaster -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/postmaster* -lrwxrwxrwx 1 root root 68 Dec 31 1969 psql -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/psql* -lrwxrwxrwx 1 root root 74 Dec 31 1969 raster2pgsql -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/raster2pgsql* -lrwxrwxrwx 1 root root 80 Dec 31 1969 raster2pgsql-3.3.3 -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/raster2pgsql-3.3.3* -lrwxrwxrwx 1 root root 73 Dec 31 1969 reindexdb -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/reindexdb* -lrwxrwxrwx 1 
root root 71 Dec 31 1969 shp2pgsql -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/shp2pgsql* -lrwxrwxrwx 1 root root 77 Dec 31 1969 shp2pgsql-3.3.3 -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/shp2pgsql-3.3.3* -lrwxrwxrwx 1 root root 72 Dec 31 1969 vacuumdb -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/vacuumdb* -lrwxrwxrwx 1 root root 72 Dec 31 1969 vacuumlo -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/vacuumlo* -``` - -As we can see, these files all point to paths under `/nix/store`. We're actually -looking at a "farm" of symlinks to various paths, but collectively they form an -entire installation directory we can reuse as much as we want. - -The path -`/nix/store/ybf48481x033649mgdzk5dyaqv9dppzx-postgresql-and-plugins-15.3` -ultimately is a cryptographically hashed, unique name for our installation of -PostgreSQL with those plugins. This hash includes _everything_ used to build it, -so even a single change anywhere to any extension or version would result in a -_new_ hash. - -The ability to refer to a piece of data by its hash, by some notion of -_content_, is a very powerful primitive, as we'll see later. - -## Build a different version: v16 - -What if we wanted PostgreSQL 16 and plugins? Just replace `_15` with `_16`: - -``` -nix build .#psql_16/bin -``` - -You're done: - -``` -$ readlink result -/nix/store/p7ziflx0000s28bfb213jsghrczknkc4-postgresql-and-plugins-14.8 -``` - - -## Using `nix develop` - - -`nix develop .` will just drop you in a subshell with -tools you need _ready to go instantly_. That's all you need to do! And once that -shell goes away, nix installed tools will be removed from your `$PATH` as well. - -There's an even easier way to do this -[that is completely transparent to you, as well](./use-direnv.md). 
diff --git a/nix/docs/docker.md b/nix/docs/docker.md deleted file mode 100644 index ebd60cd..0000000 --- a/nix/docs/docker.md +++ /dev/null @@ -1,14 +0,0 @@ -Docker images are pushed to `ghcr.io` on every commit. Try the following: - -``` -docker run --rm -it ghcr.io/tealbase/nix-postgres-15:latest -``` - -Every Docker image that is built on every push is given a tag that exactly -corresponds to a Git commit in the repository — for example commit -[d3e0c39d34e1bb4d37e058175a7bc376620f6868](https://github.com/tealbase/nix-postgres/commit/d3e0c39d34e1bb4d37e058175a7bc376620f6868) -in this repository has a tag in the container registry which can be used to pull -exactly that version. - -This just starts the server. Client container images are not provided; you can -use `nix run` for that, as outlined [here](./start-client-server.md). diff --git a/nix/docs/migration-tests.md b/nix/docs/migration-tests.md deleted file mode 100644 index 879e150..0000000 --- a/nix/docs/migration-tests.md +++ /dev/null @@ -1,50 +0,0 @@ -Migration tests are run similar to running the client and server; see -[more on that here](./start-client-server.md). - -Instead, you use the following format to specify the upgrade: - -``` -nix run .#migration-test [pg_dumpall|pg_upgrade] -``` - -The arguments are: - -- The version to upgrade from -- The version to upgrade to -- The upgrade mechanism: either `pg_dumpall` or `pg_upgrade` - -## Specifying the version - -The versions for upgrading can be one of two forms: - -- A major version number, e.g. `14` or `15` -- A path to `/nix/store`, which points to _any_ version of PostgreSQL, as long - as it has the "expected" layout and is a postgresql install. - -## Always use the latest version of the migration tool - -Unlike the method for starting the client or server, you probably always want to -use the latest version of the `migration-test` tool from the repository. This is -because it can ensure forwards and backwards compatibility if necessary. 
- -## Upgrading between arbitrary `/nix/store` versions - -If you want to test migrations from arbitrary versions built by the repository, -you can combine `nix build` and `nix run` to do so. You can use the syntax from -the runbook on [running the server & client](./start-client-server.md) to refer -to arbitrary git revisions. - -For example, if you updated an extension in this repository, and you want to -test a migration from PostgreSQL 14 to PostgreSQL 14 + (updated extension), -using `pg_upgrade` — simply record the two git commits you want to -compare, and you could do something like the following: - -``` -OLD_GIT_VERSION=... -NEW_GIT_VERSION=... - -nix run github:tealbase/nix-postgres#migration-test \ - $(nix build "github:tealbase/nix-postgres/$OLD_GIT_VERSION#psql_14/bin") \ - $(nix build "github:tealbase/nix-postgres/$NEW_GIT_VERSION#psql_14/bin") \ - pg_upgrade -``` diff --git a/nix/docs/new-major-postgres.md b/nix/docs/new-major-postgres.md deleted file mode 100644 index ea07ede..0000000 --- a/nix/docs/new-major-postgres.md +++ /dev/null @@ -1,34 +0,0 @@ -PostgreSQL versions are managed in upstream nixpkgs. - -See this example PR to add a new version of PostgreSQL; this version is for 16 -beta3, but any version is roughly the same. In short, you need to: - -- Add a new version and hash -- Possibly patch the source code for minor refactorings - - In this example, an old patch had to be rewritten because a function was - split into two different functions; the patch is functionally equivalent but - textually different -- Add the changes to `all-packages.nix` -- Integrate inside the CI and get code review -- Run `nix flake update` to get a new version, once it's ready - -https://github.com/NixOS/nixpkgs/pull/249030 - -## Adding the major version to this repository - -It isn't well abstracted, unfortunately. In short: look for the strings `14` and -`15` under `flake.nix` and `nix/tools/`. 
More specifically: - -- Add `psql_XX` to `basePackages` in `flake.nix` -- Ditto with `checks` in `flake.nix` -- Modify the tools under `tools/` to understand the new major version -- Make sure the CI is integrated under the GitHub Actions. - -The third step and fourth steps are the most annoying, really. The first two are -easy and by that point you can run `nix flake check` in order to test the build, -at least. - -## Other notes - -See also issue [#6](https://github.com/tealbase/nix-postgres/issues/6), which -would make it possible to define PostgreSQL versions inside this repository. diff --git a/nix/docs/nix-overlays.md b/nix/docs/nix-overlays.md deleted file mode 100644 index b877b42..0000000 --- a/nix/docs/nix-overlays.md +++ /dev/null @@ -1,36 +0,0 @@ -Overlays are a feature of Nixpkgs that allow you to: - -- Add new packages with new names to the namespace _without_ modifying upstream - - For example, if there is a package `foobar`, you might add `foobar-1_2_3` to - add a specific version for backwards compatibility -- Globally override _existing_ package names, in terms of other packages. - - For example, if you want to globally override a package to enable a - disabled-by-default feature. - -First, you need to define a file for the overlay under -[overlays/](../overlays/), and then import it in `flake.nix`. There is an -example pull request in -[#14](https://github.com/tealbase/nix-postgres/issues/14) for this; an overlay -typically looks like this: - -``` -final: prev: { - gdal = prev.gdalMinimal; -} -``` - -This says "globally override `gdal` with a different version, named -`gdalMinimal`". In this case `gdalMinimal` is a build with less features -enabled. - -The most important part is that there is an equation of the form `lhs = rhs;` -— if the `lhs` refers to an existing name, it's overwritten. If it refers -to a new name, it's introduced. 
Overwriting an existing name acts as if you -changed the files upstream: so the above example _globally_ overrides GDAL for -anything that depends on it. - -The names `final` and `prev` are used to refer to packages in terms of other -overlays. For more information about this, see the -[NixOS Wiki Page for Overlays](https://nixos.wiki/wiki/Overlays). - -We also use an overlay to override the default build recipe for `postgresql_16`, and instead feed it the specially patched postgres for use with orioledb extension. This experimental variant can be built with `nix build .#psql_orioledb_16/bin`. This will build this patched version of postgres, along with all extensions and wrappers that currently are known to work with orioledb. diff --git a/nix/docs/receipt-files.md b/nix/docs/receipt-files.md deleted file mode 100644 index 3cbd2c2..0000000 --- a/nix/docs/receipt-files.md +++ /dev/null @@ -1,155 +0,0 @@ -Every time you run `nix build` on this repository to build PostgreSQL, the -installation directory comes with a _receipt_ file that tells you what's inside -of it. Primarily, this tells you: - -- The version of PostgreSQL, -- The installed extensions, and -- The version of nixpkgs. - -The intent of the receipt file is to provide a mechanism for tooling to -understand installation directories and provide things like upgrade paths or -upgrade mechanisms. - -## Example receipt - -For example: - -``` -nix build .#psql_15/bin -``` - -``` -austin@GANON:~/work/nix-postgres$ nix build .#psql_15/bin -austin@GANON:~/work/nix-postgres$ ls result -bin include lib receipt.json share -``` - -The receipt is in JSON format, under `receipt.json`. 
Here's an example of what -it would look like: - -```json -{ - "extensions": [ - { - "name": "pgsql-http", - "version": "1.5.0" - }, - { - "name": "pg_plan_filter", - "version": "unstable-2021-09-23" - }, - { - "name": "pg_net", - "version": "0.7.2" - }, - { - "name": "pg_hashids", - "version": "unstable-2022-09-17" - }, - { - "name": "pgsodium", - "version": "3.1.8" - }, - { - "name": "pg_graphql", - "version": "unstable-2023-08-01" - }, - { - "name": "pg_stat_monitor", - "version": "1.0.1" - }, - { - "name": "pg_jsonschema", - "version": "unstable-2023-07-23" - }, - { - "name": "vault", - "version": "0.2.9" - }, - { - "name": "hypopg", - "version": "1.3.1" - }, - { - "name": "pg_tle", - "version": "1.0.4" - }, - { - "name": "tealbase-wrappers", - "version": "unstable-2023-07-31" - }, - { - "name": "supautils", - "version": "1.7.3" - } - ], - "nixpkgs": { - "extensions": [ - { - "name": "postgis", - "version": "3.3.3" - }, - { - "name": "pgrouting", - "version": "3.5.0" - }, - { - "name": "pgtap", - "version": "1.2.0" - }, - { - "name": "pg_cron", - "version": "1.5.2" - }, - { - "name": "pgaudit", - "version": "1.7.0" - }, - { - "name": "pgjwt", - "version": "unstable-2021-11-13" - }, - { - "name": "plpgsql_check", - "version": "2.3.4" - }, - { - "name": "pg-safeupdate", - "version": "1.4" - }, - { - "name": "timescaledb", - "version": "2.11.1" - }, - { - "name": "wal2json", - "version": "2.5" - }, - { - "name": "plv8", - "version": "3.1.5" - }, - { - "name": "rum", - "version": "1.3.13" - }, - { - "name": "pgvector", - "version": "0.4.4" - }, - { - "name": "pg_repack", - "version": "1.4.8" - }, - { - "name": "pgroonga", - "version": "3.0.8" - } - ], - "revision": "750fc50bfd132a44972aa15bb21937ae26303bc4" - }, - "psql-version": "15.3", - "receipt-version": "1", - "revision": "vcs=d250647+20230814" -} -``` diff --git a/nix/docs/references.md b/nix/docs/references.md deleted file mode 100644 index ee155db..0000000 --- a/nix/docs/references.md +++ /dev/null @@ -1,31 
+0,0 @@ -Nix references and other useful tools: - -- **Zero to Nix**: Start here to get your feet wet with how Nix works, and how - to use Nixpkgs: https://zero-to-nix.com/ -- `nix-installer`: My recommended way to install Nix - - https://github.com/DeterminateSystems/nix-installer -- Nix manual https://nixos.org/manual/nix/stable/ - - Useful primarily for option and command references -- Flake schema reference https://nixos.wiki/wiki/Flakes - - Useful to know what `flake.nix` is referring to -- Example pull requests for this repo: - - Adding smoke tests for an extension: - https://github.com/tealbase/nix-postgres/pull/2 - - Extension smoke tests, part 2: - https://github.com/tealbase/nix-postgres/pull/3 - - Adding an extension and a smoke test at once: - https://github.com/tealbase/nix-postgres/pull/4/files - - Updating an extension to trunk: - https://github.com/tealbase/nix-postgres/pull/7 - - Updating an extension to the latest release: - https://github.com/tealbase/nix-postgres/pull/9 -- Contributing to [nixpkgs](https://github.com/nixos/nixpkgs) - - Adding a PGRX-powered extension: - https://github.com/NixOS/nixpkgs/pull/246803 - - Adding a normal extension: https://github.com/NixOS/nixpkgs/pull/249000 - - Adding new PostgreSQL versions: https://github.com/NixOS/nixpkgs/pull/249030 -- NixOS Discourse: https://discourse.nixos.org/ - - Useful for community feedback, guidance, and help -- `nix-update`: https://github.com/Mic92/nix-update - - Used in this repository to help update extensions -- pgTAP for testing: https://pgtap.org/documentation.html diff --git a/nix/docs/start-client-server.md b/nix/docs/start-client-server.md deleted file mode 100644 index 20f51f3..0000000 --- a/nix/docs/start-client-server.md +++ /dev/null @@ -1,93 +0,0 @@ -## Running the server - -If you want to run a postgres server, just do this from the root of the -repository: - -``` -nix run .#start-server 15 -``` - -Replace the `15` with a `16`, and you'll be using a different version. 
Optionally you can specify a second argument for the port. - -You likely have a running postgres, so to not cause a conflict, this uses port 5435 by default. - -Actually, you don't even need the repository. You can do this from arbitrary -directories, if the left-hand side of the hash character (`.` in this case) is a -valid "flake reference": - -``` -# from any arbitrary directory -nix run github:tealbase/postgres#start-server 15 -``` - -### Arbitrary versions at arbitrary git revisions - -Let's say you want to use a PostgreSQL build from a specific version of the -repository. You can change the syntax of the above to use _any_ version of the -repository, at any time, by adding the commit hash after the repository name: - -``` -# use postgresql 15 build at commit -nix run github:tealbase/postgres/#start-server 15 -``` - -## Running the client - -All of the same rules apply, but try using `start-client` on the right-hand side -of the hash character, instead. For example: - -``` -nix run github:tealbase/postgres#start-server 15 & -sleep 5 -nix run github:tealbase/postgres#start-client 16 -``` - -## Running a server replica - -To start a replica you can use the `start-postgres-replica` command. 
- -- first argument: the master version -- second argument: the master port -- third argument: the replica server port - -First start a server and a couple of replicas: - -``` -$ start-postgres-server 15 5435 - -$ start-postgres-replica 15 5439 - -$ start-postgres-replica 15 5440 -``` - -Now check the master server: - -``` -$ start-postgres-client 15 5435 -``` - -```sql -SELECT client_addr, state -FROM pg_stat_replication; - client_addr | state --------------+----------- - ::1 | streaming - ::1 | streaming -(2 rows) - -create table items as select x::int from generate_series(1,100) x; -``` - -And a replica: - -``` -$ start-postgres-client 15 5439 -``` - -```sql -select count(*) from items; - count -------- - 100 -(1 row) -``` diff --git a/nix/docs/start-here.md b/nix/docs/start-here.md deleted file mode 100644 index acc3158..0000000 --- a/nix/docs/start-here.md +++ /dev/null @@ -1,70 +0,0 @@ -Let's go ahead and install Nix. To do that, we'll use the -**[nix-installer tool]** by Determinate Systems. This works on many platforms, -but most importantly it works on **aarch64 Linux** and **x86_64 Linux**. Use the -following command in your shell, **it should work on any Linux distro of your -choice**: - -[nix-installer tool]: https://github.com/DeterminateSystems/nix-installer - -```bash -curl \ - --proto '=https' --tlsv1.2 \ - -sSf -L https://install.determinate.systems/nix \ -| sh -s -- install -``` - -After you do this, **you must log in and log back out of your desktop -environment** to get a new login session. This is so that your shell can have -the Nix tools installed on `$PATH` and so that your user shell can see some -extra settings. 
- -You should now be able to do something like the following; try running these -same commands on your machine: - -``` -$ nix --version -nix (Nix) 2.16.1 -``` - -``` -$ nix run nixpkgs#nix-info -- -m - - system: `"x86_64-linux"` - - host os: `Linux 5.15.90.1-microsoft-standard-WSL2, Ubuntu, 22.04.2 LTS (Jammy Jellyfish), nobuild` - - multi-user?: `yes` - - sandbox: `yes` - - version: `nix-env (Nix) 2.16.1` - - channels(root): `"nixpkgs"` - - nixpkgs: `/nix/var/nix/profiles/per-user/root/channels/nixpkgs` -``` - -If the above worked, you're now cooking with gas! - -> _**NOTE**_: While there is an upstream tool to install Nix, written in Bash, -> we use the Determinate Systems installer — which will hopefully replace the -> original — because it's faster, and takes care of several extra edge cases -> that the original one couldn't handle, and makes several changes to the -> default installed configuration to make things more user friendly. Determinate -> Systems is staffed by many long-time Nix contributors and the creator of Nix, -> and is trustworthy. - -## Do some fun stuff - -One of the best things about Nix that requires _very little_ knowledge of it is -that it lets you install the latest and greatest versions of many tools _on any -Linux distribution_. We'll explain more about that later on. But just as a few -examples: - -- **Q**: I want the latest version of Deno. Can we get that? -- **A**: `nix profile install nixpkgs#deno`, and you're done! - - - -- **Q**: What about HTTPie? A nice Python application? -- **A**: Same idea: `nix profile install nixpkgs#httpie` - - - -- **Q**: What about my favorite Rust applications, like ripgrep and bat? -- **A.1**: `nix profile install nixpkgs#ripgrep` -- **A.2**: `nix profile install nixpkgs#bat` -- **A.3**: And yes, you also have exa, fd, hyperfine, and more! 
diff --git a/nix/docs/update-extension.md b/nix/docs/update-extension.md deleted file mode 100644 index febe61b..0000000 --- a/nix/docs/update-extension.md +++ /dev/null @@ -1,17 +0,0 @@ - -# Update an existing nix extension - - -1. Create a branch off of `develop` -2. For instance, if we were updating https://github.com/tealbase/postgres/blob/develop/nix/ext/supautils.nix we would: - 1. change the `version = "2.2.1";` to whatever our git tag release version is that we want to update to - 2. temporarily empty the `hash = "sha256-wSUEG0at00TPAoHv6+NMzuUE8mfW6fnHH0MNxvBdUiE=";` to `hash = "";` and save `supautils.nix` and `git add .` - 3. run `nix build .#psql_15/exts/supautils` or the name of the extension to update, nix will print the calculated sha256 value that you can add back the the `hash` variable, save the file again, and re-run nix build .#psql_15/exts/supautils. - 4. NOTE: This step is only necessary for `buildPgrxExtension` packages, which includes tealbase-wrappers, pg_jsonschema, and pg_graphql. Otherwise you can skip this step. For our packages that are build with `buildPgrxExtension` you will need to prepend the previous version to the `previousVersions` variable before updating the version in the package (for instance if you are updating `tealbase-wrappers` extension from `0.4.1` to `0.4.2` then you would prepend `0.4.1` to this line https://github.com/tealbase/postgres/blob/develop/nix/ext/wrappers/default.nix#L18 ). - 5. Add any needed migrations into the `tealbase/postgres` migrations directory - 6. update the version in `ansible/vars.yml` as usual - 7. You can then run the `nix flake check -L` tests locally to verify that the update of the package succeeded. - 8. Now it's ready for PR review. - 9. Once the PR is approved, if you want the change to go out in a release, update the common-nix.vars.yml file with the new version prior to merging. 
- - diff --git a/nix/docs/use-direnv.md b/nix/docs/use-direnv.md deleted file mode 100644 index cf34a23..0000000 --- a/nix/docs/use-direnv.md +++ /dev/null @@ -1,102 +0,0 @@ -Have you ever used a tool like `pip`'s `bin/activate` script, or `rbenv`? These -tools populate your shell environment with the right tools and scripts and -dependencies (e.g. `PYTHONPATH`) to run your software. - -What if I told you there was a magical tool that worked like that, and could do -it for arbitrary languages and tools? - -That tool is called **[direnv](https://direnv.net)**. - -## Install direnv and use it in your shell - -First, install `direnv`: - -``` -$ nix profile install nixpkgs#direnv -``` - -``` -$ which direnv -/home/austin/.nix-profile/bin/direnv -``` - -Now, you need to activate it in your shell by hooking into it. If you're using -**Bash**, try putting this in your `.bashrc` and starting up a new interactive -shell: - -``` -eval "$(direnv hook bash)" -``` - -Not using bash? Check the -[direnv hook documentation](https://direnv.net/docs/hook.html) for more. - -## Set up `nix-postgres` - -Let's go back to the `nix-postgres` source code. - -``` -cd $HOME/tmp-nix-postgres -``` - -Now, normally, direnv is going to look for a file called `.envrc` and load that -if it exists. But to be polite, we don't do that by default; we keep a file -named `.envrc.recommended` in the repository instead, and encourage people to do -this: - -``` -echo "source_env .envrc.recommended" >> .envrc -``` - -All this says is "Load the code from `.envrc.recommended` directly", just like a -normal bash script using `source`. The idea of this pattern is to allow users to -have their own customized `.envrc` and piggyback on the committed code for -utility — and `.envrc` is `.gitignore`'d, so you can put e.g. secret -tokens inside without fear of committing them. - -Run the above command, and then... - -## What just happened? - -Oops, a big red error appeared? 
- -``` -$ echo "source_env .envrc.recommended" >> .envrc -direnv: error /home/austin/work/nix-postgres/.envrc is blocked. Run `direnv allow` to approve its content -``` - -What happened? By default, as a security measure, `direnv` _does not_ load or -execute any code from an `.envrc` file, and instead it MUST be allowed -explicitly. - -## `direnv allow` - -Our `.envrc.recommended` file will integrate with Nix directly. So run -`direnv allow`, and you'll suddenly see the following: - -``` -$ direnv allow -direnv: loading ~/work/nix-postgres/.envrc -direnv: loading ~/work/nix-postgres/.envrc.recommended -direnv: loading https://raw.githubusercontent.com/nix-community/nix-direnv/2.3.0/direnvrc (sha256-Dmd+j63L84wuzgyjITIfSxSD57Tx7v51DMxVZOsiUD8=) -direnv: using flake -direnv: nix-direnv: renewed cache -direnv: export +AR +AS +CC +CONFIG_SHELL +CXX +DETERMINISTIC_BUILD +HOST_PATH +IN_NIX_SHELL +LD +NIX_BINTOOLS +NIX_BINTOOLS_WRAPPER_TARGET_HOST_x86_64_unknown_linux_gnu +NIX_BUILD_CORES +NIX_CC +NIX_CC_WRAPPER_TARGET_HOST_x86_64_unknown_linux_gnu +NIX_CFLAGS_COMPILE +NIX_ENFORCE_NO_NATIVE +NIX_HARDENING_ENABLE +NIX_LDFLAGS +NIX_STORE +NM +OBJCOPY +OBJDUMP +PYTHONHASHSEED +PYTHONNOUSERSITE +PYTHONPATH +RANLIB +READELF +SIZE +SOURCE_DATE_EPOCH +STRINGS +STRIP +_PYTHON_HOST_PLATFORM +_PYTHON_SYSCONFIGDATA_NAME +__structuredAttrs +buildInputs +buildPhase +builder +cmakeFlags +configureFlags +depsBuildBuild +depsBuildBuildPropagated +depsBuildTarget +depsBuildTargetPropagated +depsHostHost +depsHostHostPropagated +depsTargetTarget +depsTargetTargetPropagated +doCheck +doInstallCheck +dontAddDisableDepTrack +mesonFlags +name +nativeBuildInputs +out +outputs +patches +phases +preferLocalBuild +propagatedBuildInputs +propagatedNativeBuildInputs +shell +shellHook +stdenv +strictDeps +system ~PATH ~XDG_DATA_DIRS -``` - -What just happened is that we populated the ambient shell environment with tools -specified inside of `flake.nix` — we'll cover Flakes later. 
But for now, -your tools are provisioned! - - -## The power of `direnv` - -`direnv` with Nix is a frighteningly good development combination for many -purposes. This is its main power: you can use it to create on-demand developer -shells for any language, tool, or environment, and all you need to do is `cd` to -the right directory. - -This is the power of `direnv`: your projects always, on demand, will have the -right tools configured and available, no matter if you last worked on them a day -ago or a year ago, or it was done by your teammate, or you have a brand new -computer that you've never programmed on. diff --git a/nix/ext/0001-build-Allow-using-V8-from-system.patch b/nix/ext/0001-build-Allow-using-V8-from-system.patch deleted file mode 100644 index ab2c6f0..0000000 --- a/nix/ext/0001-build-Allow-using-V8-from-system.patch +++ /dev/null @@ -1,46 +0,0 @@ -diff --git a/Makefile b/Makefile -index 38879cc..6e78eeb 100644 ---- a/Makefile -+++ b/Makefile -@@ -20,6 +20,7 @@ OBJS = $(SRCS:.cc=.o) - MODULE_big = plv8-$(PLV8_VERSION) - EXTENSION = plv8 - PLV8_DATA = plv8.control plv8--$(PLV8_VERSION).sql -+USE_SYSTEM_V8 = 0 - - - # Platform detection -@@ -41,6 +42,7 @@ PGXS := $(shell $(PG_CONFIG) --pgxs) - PG_VERSION_NUM := $(shell cat `$(PG_CONFIG) --includedir-server`/pg_config*.h \ - | perl -ne 'print $$1 and exit if /PG_VERSION_NUM\s+(\d+)/') - -+ifeq ($(USE_SYSTEM_V8),0) - AUTOV8_DIR = build/v8 - AUTOV8_OUT = build/v8/out.gn/obj - AUTOV8_STATIC_LIBS = -lv8_libplatform -lv8_libbase -@@ -66,6 +68,7 @@ v8: - make -f Makefiles/Makefile.macos v8 - endif - endif -+endif - - # enable direct jsonb conversion by default - CCFLAGS += -DJSONB_DIRECT_CONVERSION -@@ -83,6 +86,7 @@ ifdef BIGINT_GRACEFUL - endif - - -+ifeq ($(USE_SYSTEM_V8),0) - # We're gonna build static link. 
Rip it out after include Makefile - SHLIB_LINK := $(filter-out -lv8, $(SHLIB_LINK)) - -@@ -101,6 +105,7 @@ else - SHLIB_LINK += -lrt -std=c++14 - endif - endif -+endif - - DATA = $(PLV8_DATA) - ifndef DISABLE_DIALECT --- -2.37.3 diff --git a/nix/ext/hypopg.nix b/nix/ext/hypopg.nix deleted file mode 100644 index 4fc00a8..0000000 --- a/nix/ext/hypopg.nix +++ /dev/null @@ -1,31 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, postgresql }: - -stdenv.mkDerivation rec { - pname = "hypopg"; - version = "1.4.1"; - - buildInputs = [ postgresql ]; - - src = fetchFromGitHub { - owner = "HypoPG"; - repo = pname; - rev = "refs/tags/${version}"; - hash = "sha256-88uKPSnITRZ2VkelI56jZ9GWazG/Rn39QlyHKJKSKMM="; - }; - - installPhase = '' - mkdir -p $out/{lib,share/postgresql/extension} - - cp *.so $out/lib - cp *.sql $out/share/postgresql/extension - cp *.control $out/share/postgresql/extension - ''; - - meta = with lib; { - description = "Hypothetical Indexes for PostgreSQL"; - homepage = "https://github.com/HypoPG/${pname}"; - maintainers = with maintainers; [ samrose ]; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - }; -} diff --git a/nix/ext/index_advisor.nix b/nix/ext/index_advisor.nix deleted file mode 100644 index 3ed5a5f..0000000 --- a/nix/ext/index_advisor.nix +++ /dev/null @@ -1,30 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, postgresql }: - -stdenv.mkDerivation rec { - pname = "index_advisor"; - version = "0.2.0"; - - buildInputs = [ postgresql ]; - - src = fetchFromGitHub { - owner = "olirice"; - repo = pname; - rev = "v${version}"; - hash = "sha256-G0eQk2bY5CNPMeokN/nb05g03CuiplRf902YXFVQFbs="; - }; - - installPhase = '' - mkdir -p $out/{lib,share/postgresql/extension} - - cp *.sql $out/share/postgresql/extension - cp *.control $out/share/postgresql/extension - ''; - - meta = with lib; { - description = "Recommend indexes to improve query performance in PostgreSQL"; - homepage = "https://github.com/olirice/index_advisor"; - maintainers = with 
maintainers; [ samrose ]; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - }; -} diff --git a/nix/ext/mecab-naist-jdic/default.nix b/nix/ext/mecab-naist-jdic/default.nix deleted file mode 100644 index e4f3962..0000000 --- a/nix/ext/mecab-naist-jdic/default.nix +++ /dev/null @@ -1,41 +0,0 @@ -{ lib, stdenv, fetchurl, mecab }: - -stdenv.mkDerivation rec { - pname = "mecab-naist-jdic"; - version = "0.6.3b-20111013"; - - src = fetchurl { - url = "https://github.com/tealbase/mecab-naist-jdic/raw/main/mecab-naist-jdic-${version}.tar.gz"; - sha256 = "sha256-yzdwDcmne5U/K/OxW0nP7NZ4SFMKLPirywm1lMpWKMw="; - }; - - buildInputs = [ mecab ]; - - configureFlags = [ - "--with-charset=utf8" - ]; - - buildPhase = '' - runHook preBuild - make - ${mecab}/libexec/mecab/mecab-dict-index -d . -o . -f UTF-8 -t utf-8 - runHook postBuild - ''; - - installPhase = '' - runHook preInstall - - mkdir -p $out/lib/mecab/dic/naist-jdic - cp *.dic *.bin *.def $out/lib/mecab/dic/naist-jdic/ - - runHook postInstall - ''; - - meta = with lib; { - description = "Naist Japanese Dictionary for MeCab"; - homepage = "https://taku910.github.io/mecab/"; - license = licenses.gpl2; - platforms = platforms.unix; - maintainers = with maintainers; [ samrose ]; - }; -} \ No newline at end of file diff --git a/nix/ext/orioledb.nix b/nix/ext/orioledb.nix deleted file mode 100644 index 4d8c51b..0000000 --- a/nix/ext/orioledb.nix +++ /dev/null @@ -1,32 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, curl, libkrb5, postgresql, python3, openssl }: - -stdenv.mkDerivation rec { - pname = "orioledb"; - name = pname; - src = fetchFromGitHub { - owner = "orioledb"; - repo = "orioledb"; - rev = "main"; - sha256 = "sha256-QbDp9S8JXO66sfaHZIQ3wFCVRxsAaaNSRgC6hvL3EKY="; - }; - version = "patches16_23"; - buildInputs = [ curl libkrb5 postgresql python3 openssl ]; - buildPhase = "make USE_PGXS=1 ORIOLEDB_PATCHSET_VERSION=23"; - installPhase = '' - runHook preInstall - mkdir -p 
$out/{lib,share/postgresql/extension} - - cp *.so $out/lib - cp *.sql $out/share/postgresql/extension - cp *.control $out/share/postgresql/extension - - runHook postInstall - ''; - doCheck = true; - meta = with lib; { - description = "orioledb"; - maintainers = with maintainers; [ samrose ]; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - }; -} diff --git a/nix/ext/pg-safeupdate.nix b/nix/ext/pg-safeupdate.nix deleted file mode 100644 index d24fab5..0000000 --- a/nix/ext/pg-safeupdate.nix +++ /dev/null @@ -1,29 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, postgresql }: - -stdenv.mkDerivation rec { - pname = "pg-safeupdate"; - version = "1.4"; - - buildInputs = [ postgresql ]; - - src = fetchFromGitHub { - owner = "eradman"; - repo = pname; - rev = version; - hash = "sha256-1cyvVEC9MQGMr7Tg6EUbsVBrMc8ahdFS3+CmDkmAq4Y="; - }; - - installPhase = '' - install -D safeupdate${postgresql.dlSuffix} -t $out/lib - ''; - - meta = with lib; { - description = "A simple extension to PostgreSQL that requires criteria for UPDATE and DELETE"; - homepage = "https://github.com/eradman/pg-safeupdate"; - changelog = "https://github.com/eradman/pg-safeupdate/raw/${src.rev}/NEWS"; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - broken = versionOlder postgresql.version "14"; - maintainers = with maintainers; [ samrose ]; - }; -} diff --git a/nix/ext/pg_backtrace.nix b/nix/ext/pg_backtrace.nix deleted file mode 100644 index b016912..0000000 --- a/nix/ext/pg_backtrace.nix +++ /dev/null @@ -1,33 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, postgresql }: - -stdenv.mkDerivation rec { - pname = "pg_backtrace"; - version = "1.1"; - - buildInputs = [ postgresql ]; - - src = fetchFromGitHub { - owner = "pashkinelfe"; - repo = pname; - rev = "d100bac815a7365e199263f5b3741baf71b14c70"; - hash = "sha256-IVCL4r4oj1Ams03D8y+XCFkckPFER/W9tQ68GkWQQMY="; - }; - - makeFlags = [ "USE_PGXS=1" ]; - - installPhase = '' - mkdir -p 
$out/{lib,share/postgresql/extension} - - cp *.so $out/lib - cp *.sql $out/share/postgresql/extension - cp *.control $out/share/postgresql/extension - ''; - - meta = with lib; { - description = "Updated fork of pg_backtrace"; - homepage = "https://github.com/pashkinelfe/pg_backtrace"; - maintainers = with maintainers; [ samrose ]; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - }; -} diff --git a/nix/ext/pg_cron.nix b/nix/ext/pg_cron.nix deleted file mode 100644 index 5c546c7..0000000 --- a/nix/ext/pg_cron.nix +++ /dev/null @@ -1,32 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, postgresql }: - -stdenv.mkDerivation rec { - pname = "pg_cron"; - version = "1.6.4"; - - buildInputs = [ postgresql ]; - - src = fetchFromGitHub { - owner = "citusdata"; - repo = pname; - rev = "v${version}"; - hash = "sha256-t1DpFkPiSfdoGG2NgNT7g1lkvSooZoRoUrix6cBID40="; - }; - - installPhase = '' - mkdir -p $out/{lib,share/postgresql/extension} - - cp *.so $out/lib - cp *.sql $out/share/postgresql/extension - cp *.control $out/share/postgresql/extension - ''; - - meta = with lib; { - description = "Run Cron jobs through PostgreSQL"; - homepage = "https://github.com/citusdata/pg_cron"; - changelog = "https://github.com/citusdata/pg_cron/raw/v${version}/CHANGELOG.md"; - maintainers = with maintainers; [ samrose ]; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - }; -} diff --git a/nix/ext/pg_graphql.nix b/nix/ext/pg_graphql.nix deleted file mode 100644 index d7129e6..0000000 --- a/nix/ext/pg_graphql.nix +++ /dev/null @@ -1,39 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, postgresql, buildPgrxExtension_0_11_3, cargo }: - -buildPgrxExtension_0_11_3 rec { - pname = "pg_graphql"; - version = "1.5.7"; - inherit postgresql; - - src = fetchFromGitHub { - owner = "tealbase"; - repo = pname; - rev = "v${version}"; - hash = "sha256-Q6XfcTKVOjo5pGy8QACc4QCHolKxEGU8e0TTC6Zg8go="; - }; - - nativeBuildInputs = [ cargo ]; - buildInputs = [ postgresql ]; - - 
CARGO="${cargo}/bin/cargo"; - #darwin env needs PGPORT to be unique for build to not clash with other pgrx extensions - env = lib.optionalAttrs stdenv.isDarwin { - POSTGRES_LIB = "${postgresql}/lib"; - RUSTFLAGS = "-C link-arg=-undefined -C link-arg=dynamic_lookup"; - PGPORT = "5434"; - }; - cargoHash = "sha256-WkHufMw8OvinMRYd06ZJACnVvY9OLi069nCgq3LSmMY="; - - # FIXME (aseipp): disable the tests since they try to install .control - # files into the wrong spot, aside from that the one main test seems - # to work, though - doCheck = false; - - meta = with lib; { - description = "GraphQL support for PostreSQL"; - homepage = "https://github.com/tealbase/${pname}"; - maintainers = with maintainers; [ samrose ]; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - }; -} diff --git a/nix/ext/pg_hashids.nix b/nix/ext/pg_hashids.nix deleted file mode 100644 index 41c3ba6..0000000 --- a/nix/ext/pg_hashids.nix +++ /dev/null @@ -1,31 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, postgresql }: - -stdenv.mkDerivation rec { - pname = "pg_hashids"; - version = "cd0e1b31d52b394a0df64079406a14a4f7387cd6"; - - buildInputs = [ postgresql ]; - - src = fetchFromGitHub { - owner = "iCyberon"; - repo = pname; - rev = "${version}"; - hash = "sha256-Nmb7XLqQflYZfqj0yrewfb1Hl5YgEB5wfjBunPwIuOU="; - }; - - installPhase = '' - mkdir -p $out/{lib,share/postgresql/extension} - - cp *.so $out/lib - cp *.sql $out/share/postgresql/extension - cp *.control $out/share/postgresql/extension - ''; - - meta = with lib; { - description = "Generate short unique IDs in PostgreSQL"; - homepage = "https://github.com/iCyberon/pg_hashids"; - maintainers = with maintainers; [ samrose ]; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - }; -} diff --git a/nix/ext/pg_jsonschema.nix b/nix/ext/pg_jsonschema.nix deleted file mode 100644 index 5105e6b..0000000 --- a/nix/ext/pg_jsonschema.nix +++ /dev/null @@ -1,66 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, postgresql, 
buildPgrxExtension_0_11_3, cargo }: - -buildPgrxExtension_0_11_3 rec { - pname = "pg_jsonschema"; - version = "0.3.1"; - inherit postgresql; - - src = fetchFromGitHub { - owner = "tealbase"; - repo = pname; - rev = "v${version}"; - hash = "sha256-YdKpOEiDIz60xE7C+EzpYjBcH0HabnDbtZl23CYls6g="; - }; - - nativeBuildInputs = [ cargo ]; - buildInputs = [ postgresql ]; - # update the following array when the pg_jsonschema version is updated - # required to ensure that extensions update scripts from previous versions are generated - - previousVersions = ["0.3.0" "0.2.0" "0.1.4" "0.1.4" "0.1.2" "0.1.1" "0.1.0"]; - CARGO="${cargo}/bin/cargo"; - #darwin env needs PGPORT to be unique for build to not clash with other pgrx extensions - env = lib.optionalAttrs stdenv.isDarwin { - POSTGRES_LIB = "${postgresql}/lib"; - RUSTFLAGS = "-C link-arg=-undefined -C link-arg=dynamic_lookup"; - PGPORT = "5433"; - }; - cargoHash = "sha256-VcS+efMDppofuFW2zNrhhsbC28By3lYekDFquHPta2g="; - - # FIXME (aseipp): testsuite tries to write files into /nix/store; we'll have - # to fix this a bit later. - doCheck = false; - - preBuild = '' - echo "Processing git tags..." - echo '${builtins.concatStringsSep "," previousVersions}' | sed 's/,/\n/g' > git_tags.txt - ''; - - postInstall = '' - echo "Creating SQL files for previous versions..." 
- current_version="${version}" - sql_file="$out/share/postgresql/extension/pg_jsonschema--$current_version.sql" - - if [ -f "$sql_file" ]; then - while read -r previous_version; do - if [ "$(printf '%s\n' "$previous_version" "$current_version" | sort -V | head -n1)" = "$previous_version" ] && [ "$previous_version" != "$current_version" ]; then - new_file="$out/share/postgresql/extension/pg_jsonschema--$previous_version--$current_version.sql" - echo "Creating $new_file" - cp "$sql_file" "$new_file" - fi - done < git_tags.txt - else - echo "Warning: $sql_file not found" - fi - rm git_tags.txt - ''; - - - meta = with lib; { - description = "JSON Schema Validation for PostgreSQL"; - homepage = "https://github.com/tealbase/${pname}"; - maintainers = with maintainers; [ samrose ]; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - }; -} \ No newline at end of file diff --git a/nix/ext/pg_net.nix b/nix/ext/pg_net.nix deleted file mode 100644 index 992e2c9..0000000 --- a/nix/ext/pg_net.nix +++ /dev/null @@ -1,33 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, curl, postgresql }: - -stdenv.mkDerivation rec { - pname = "pg_net"; - version = "0.13.0"; - - buildInputs = [ curl postgresql ]; - - src = fetchFromGitHub { - owner = "tealbase"; - repo = pname; - rev = "refs/tags/v${version}"; - hash = "sha256-FRaTZPCJQPYAFmsJg22hYJJ0+gH1tMdDQoCQgiqEnaA="; - }; - - env.NIX_CFLAGS_COMPILE = "-Wno-error"; - - installPhase = '' - mkdir -p $out/{lib,share/postgresql/extension} - - cp *${postgresql.dlSuffix} $out/lib - cp sql/*.sql $out/share/postgresql/extension - cp *.control $out/share/postgresql/extension - ''; - - meta = with lib; { - description = "Async networking for Postgres"; - homepage = "https://github.com/tealbase/pg_net"; - maintainers = with maintainers; [ samrose ]; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - }; -} diff --git a/nix/ext/pg_plan_filter.nix b/nix/ext/pg_plan_filter.nix deleted file mode 100644 index 
2d7d224..0000000 --- a/nix/ext/pg_plan_filter.nix +++ /dev/null @@ -1,30 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, postgresql }: - -stdenv.mkDerivation rec { - pname = "pg_plan_filter"; - version = "5081a7b5cb890876e67d8e7486b6a64c38c9a492"; - - buildInputs = [ postgresql ]; - - src = fetchFromGitHub { - owner = "pgexperts"; - repo = pname; - rev = "${version}"; - hash = "sha256-YNeIfmccT/DtOrwDmpYFCuV2/P6k3Zj23VWBDkOh6sw="; - }; - - installPhase = '' - mkdir -p $out/{lib,share/postgresql/extension} - - cp *.so $out/lib - cp *.sql $out/share/postgresql/extension - ''; - - meta = with lib; { - description = "Filter PostgreSQL statements by execution plans"; - homepage = "https://github.com/pgexperts/${pname}"; - maintainers = with maintainers; [ samrose ]; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - }; -} diff --git a/nix/ext/pg_regress.nix b/nix/ext/pg_regress.nix deleted file mode 100644 index 6e581c4..0000000 --- a/nix/ext/pg_regress.nix +++ /dev/null @@ -1,24 +0,0 @@ -{ lib -, stdenv -, postgresql -}: - -stdenv.mkDerivation { - pname = "pg_regress"; - version = postgresql.version; - - phases = [ "installPhase" ]; - - installPhase = '' - mkdir -p $out/bin - cp ${postgresql}/lib/pgxs/src/test/regress/pg_regress $out/bin/ - ''; - - meta = with lib; { - description = "Regression testing tool for PostgreSQL"; - homepage = "https://www.postgresql.org/"; - maintainers = with maintainers; [ samrose ]; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - }; -} \ No newline at end of file diff --git a/nix/ext/pg_repack.nix b/nix/ext/pg_repack.nix deleted file mode 100644 index f324737..0000000 --- a/nix/ext/pg_repack.nix +++ /dev/null @@ -1,66 +0,0 @@ -{ lib -, stdenv -, fetchFromGitHub -, openssl -, postgresql -, postgresqlTestHook -, readline -, testers -, zlib -}: - -stdenv.mkDerivation (finalAttrs: { - pname = "pg_repack"; - version = "1.5.0"; - - buildInputs = postgresql.buildInputs ++ [ postgresql ]; - - src = 
fetchFromGitHub { - owner = "reorg"; - repo = "pg_repack"; - rev = "ver_${finalAttrs.version}"; - hash = "sha256-do80phyMxwcRIkYyUt9z02z7byNQhK+pbSaCUmzG+4c="; - }; - - installPhase = '' - install -D bin/pg_repack -t $out/bin/ - install -D lib/pg_repack${postgresql.dlSuffix} -t $out/lib/ - install -D lib/{pg_repack--${finalAttrs.version}.sql,pg_repack.control} -t $out/share/postgresql/extension - ''; - - passthru.tests = { - version = testers.testVersion { - package = finalAttrs.finalPackage; - }; - extension = stdenv.mkDerivation { - name = "plpgsql-check-test"; - dontUnpack = true; - doCheck = true; - buildInputs = [ postgresqlTestHook ]; - nativeCheckInputs = [ (postgresql.withPackages (ps: [ ps.pg_repack ])) ]; - postgresqlTestUserOptions = "LOGIN SUPERUSER"; - failureHook = "postgresqlStop"; - checkPhase = '' - runHook preCheck - psql -a -v ON_ERROR_STOP=1 -c "CREATE EXTENSION pg_repack;" - runHook postCheck - ''; - installPhase = "touch $out"; - }; - }; - - meta = with lib; { - description = "Reorganize tables in PostgreSQL databases with minimal locks"; - longDescription = '' - pg_repack is a PostgreSQL extension which lets you remove bloat from tables and indexes, and optionally restore - the physical order of clustered indexes. Unlike CLUSTER and VACUUM FULL it works online, without holding an - exclusive lock on the processed tables during processing. pg_repack is efficient to boot, - with performance comparable to using CLUSTER directly. 
- ''; - homepage = "https://github.com/reorg/pg_repack"; - license = licenses.bsd3; - maintainers = with maintainers; [ samrose ]; - inherit (postgresql.meta) platforms; - mainProgram = "pg_repack"; - }; -}) diff --git a/nix/ext/pg_stat_monitor.nix b/nix/ext/pg_stat_monitor.nix deleted file mode 100644 index 8784067..0000000 --- a/nix/ext/pg_stat_monitor.nix +++ /dev/null @@ -1,49 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, postgresql }: - -let - # NOTE (aseipp): the 1.x series of pg_stat_monitor has some non-standard and - # weird build logic (Percona projects in general seem to have their own - # strange build harness) where it will try to pick the right .sql file to - # install into the extension dir based on the postgresql major version. for - # our purposes, we only need to support v13 and v14+, so just replicate this - # logic from the makefile and pick the right file here. - # - # this seems to all be cleaned up in version 2.0 of the extension, so ideally - # we could upgrade to it later on and nuke this. 
- # DEPRECATED sqlFilename = if lib.versionOlder postgresql.version "14" - # then "pg_stat_monitor--1.0.13.sql.in" - # else "pg_stat_monitor--1.0.14.sql.in"; - -in -stdenv.mkDerivation rec { - pname = "pg_stat_monitor"; - version = "2.1.0"; - - buildInputs = [ postgresql ]; - - src = fetchFromGitHub { - owner = "percona"; - repo = pname; - rev = "refs/tags/${version}"; - hash = "sha256-STJVvvrLVLe1JevNu6u6EftzAWv+X+J8lu66su7Or2s="; - }; - - makeFlags = [ "USE_PGXS=1" ]; - - installPhase = '' - mkdir -p $out/{lib,share/postgresql/extension} - - cp *.so $out/lib - cp *.sql $out/share/postgresql/extension - cp *.control $out/share/postgresql/extension - ''; - - meta = with lib; { - description = "Query Performance Monitoring Tool for PostgreSQL"; - homepage = "https://github.com/percona/${pname}"; - maintainers = with maintainers; [ samrose ]; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - broken = lib.versionOlder postgresql.version "15"; - }; -} diff --git a/nix/ext/pg_tle.nix b/nix/ext/pg_tle.nix deleted file mode 100644 index 6b1c7b1..0000000 --- a/nix/ext/pg_tle.nix +++ /dev/null @@ -1,36 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, postgresql, flex, openssl, libkrb5 }: - -stdenv.mkDerivation rec { - pname = "pg_tle"; - version = "1.4.0"; - - nativeBuildInputs = [ flex ]; - buildInputs = [ openssl postgresql libkrb5 ]; - - src = fetchFromGitHub { - owner = "aws"; - repo = pname; - rev = "refs/tags/v${version}"; - hash = "sha256-crxj5R9jblIv0h8lpqddAoYe2UqgUlnvbOajKTzVces="; - }; - - - makeFlags = [ "FLEX=flex" ]; - - - installPhase = '' - mkdir -p $out/{lib,share/postgresql/extension} - - cp *.so $out/lib - cp *.sql $out/share/postgresql/extension - cp *.control $out/share/postgresql/extension - ''; - - meta = with lib; { - description = "Framework for 'Trusted Language Extensions' in PostgreSQL"; - homepage = "https://github.com/aws/${pname}"; - maintainers = with maintainers; [ samrose ]; - platforms = postgresql.meta.platforms; - 
license = licenses.postgresql; - }; -} diff --git a/nix/ext/pgaudit.nix b/nix/ext/pgaudit.nix deleted file mode 100644 index f6c5d8b..0000000 --- a/nix/ext/pgaudit.nix +++ /dev/null @@ -1,44 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, libkrb5, openssl, postgresql }: -#adapted from https://github.com/NixOS/nixpkgs/blob/master/pkgs/servers/sql/postgresql/ext/pgaudit.nix -let - source = { - "16" = { - version = "16.0"; - hash = "sha256-8+tGOl1U5y9Zgu+9O5UDDE4bec4B0JC/BQ6GLhHzQzc="; - }; - "15" = { - version = "1.7.0"; - hash = "sha256-8pShPr4HJaJQPjW1iPJIpj3CutTx8Tgr+rOqoXtgCcw="; - }; - }.${lib.versions.major postgresql.version} or (throw "Source for pgaudit is not available for ${postgresql.version}"); -in -stdenv.mkDerivation { - pname = "pgaudit"; - inherit (source) version; - - src = fetchFromGitHub { - owner = "pgaudit"; - repo = "pgaudit"; - rev = source.version; - hash = source.hash; - }; - - buildInputs = [ libkrb5 openssl postgresql ]; - - makeFlags = [ "USE_PGXS=1" ]; - - installPhase = '' - install -D -t $out/lib pgaudit${postgresql.dlSuffix} - install -D -t $out/share/postgresql/extension *.sql - install -D -t $out/share/postgresql/extension *.control - ''; - - meta = with lib; { - description = "Open Source PostgreSQL Audit Logging"; - homepage = "https://github.com/pgaudit/pgaudit"; - changelog = "https://github.com/pgaudit/pgaudit/releases/tag/${source.version}"; - maintainers = with maintainers; [ samrose ]; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - }; -} diff --git a/nix/ext/pgjwt.nix b/nix/ext/pgjwt.nix deleted file mode 100644 index 2eb60f7..0000000 --- a/nix/ext/pgjwt.nix +++ /dev/null @@ -1,31 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, postgresql, unstableGitUpdater }: - -stdenv.mkDerivation rec { - pname = "pgjwt"; - version = "9742dab1b2f297ad3811120db7b21451bca2d3c9"; - - src = fetchFromGitHub { - owner = "michelp"; - repo = "pgjwt"; - rev = "${version}"; - hash = 
"sha256-Hw3R9bMGDmh+dMzjmqZSy/rT4mX8cPU969OJiARFg10="; - }; - - dontBuild = true; - installPhase = '' - mkdir -p $out/share/postgresql/extension - cp pg*sql *.control $out/share/postgresql/extension - ''; - - passthru.updateScript = unstableGitUpdater { }; - - meta = with lib; { - description = "PostgreSQL implementation of JSON Web Tokens"; - longDescription = '' - sign() and verify() functions to create and verify JSON Web Tokens. - ''; - license = licenses.mit; - platforms = postgresql.meta.platforms; - maintainers = with maintainers; [samrose]; - }; -} diff --git a/nix/ext/pgmq.nix b/nix/ext/pgmq.nix deleted file mode 100644 index 97a3c27..0000000 --- a/nix/ext/pgmq.nix +++ /dev/null @@ -1,33 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, postgresql }: - -stdenv.mkDerivation rec { - pname = "pgmq"; - version = "1.4.4"; - buildInputs = [ postgresql ]; - src = fetchFromGitHub { - owner = "tembo-io"; - repo = pname; - rev = "v${version}"; - hash = "sha256-z+8/BqIlHwlMnuIzMz6eylmYbSmhtsNt7TJf/CxbdVw="; - }; - - buildPhase = '' - cd pgmq-extension - ''; - - installPhase = '' - mkdir -p $out/{lib,share/postgresql/extension} - - mv sql/pgmq.sql $out/share/postgresql/extension/pgmq--${version}.sql - cp sql/*.sql $out/share/postgresql/extension - cp *.control $out/share/postgresql/extension - ''; - - meta = with lib; { - description = "A lightweight message queue. 
Like AWS SQS and RSMQ but on Postgres."; - homepage = "https://github.com/tembo-io/pgmq"; - maintainers = with maintainers; [ olirice ]; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - }; -} diff --git a/nix/ext/pgroonga.nix b/nix/ext/pgroonga.nix deleted file mode 100644 index 78cc064..0000000 --- a/nix/ext/pgroonga.nix +++ /dev/null @@ -1,61 +0,0 @@ -{ lib, stdenv, fetchurl, pkg-config, postgresql, msgpack-c, callPackage, mecab, makeWrapper }: -let - tealbase-groonga = callPackage ../tealbase-groonga.nix { }; -in -stdenv.mkDerivation rec { - pname = "pgroonga"; - version = "3.0.7"; - src = fetchurl { - url = "https://packages.groonga.org/source/${pname}/${pname}-${version}.tar.gz"; - sha256 = "sha256-iF/zh4zDDpAw5fxW1WG8i2bfPt4VYsnYArwOoE/lwgM="; - }; - nativeBuildInputs = [ pkg-config makeWrapper ]; - buildInputs = [ postgresql msgpack-c tealbase-groonga mecab ]; - propagatedBuildInputs = [ tealbase-groonga ]; - configureFlags = [ - "--with-mecab=${mecab}" - "--enable-mecab" - "--with-groonga=${tealbase-groonga}" - "--with-groonga-plugin-dir=${tealbase-groonga}/lib/groonga/plugins" - ]; - - makeFlags = [ - "HAVE_MSGPACK=1" - "MSGPACK_PACKAGE_NAME=msgpack-c" - "HAVE_MECAB=1" - ]; - - preConfigure = '' - export GROONGA_LIBS="-L${tealbase-groonga}/lib -lgroonga" - export GROONGA_CFLAGS="-I${tealbase-groonga}/include" - export MECAB_CONFIG="${mecab}/bin/mecab-config" - ''; - - installPhase = '' - mkdir -p $out/lib $out/share/postgresql/extension $out/bin - install -D pgroonga${postgresql.dlSuffix} -t $out/lib/ - install -D pgroonga.control -t $out/share/postgresql/extension - install -D data/pgroonga-*.sql -t $out/share/postgresql/extension - install -D pgroonga_database${postgresql.dlSuffix} -t $out/lib/ - install -D pgroonga_database.control -t $out/share/postgresql/extension - install -D data/pgroonga_database-*.sql -t $out/share/postgresql/extension - - echo "Debug: Groonga plugins directory contents:" - ls -l 
${tealbase-groonga}/lib/groonga/plugins/tokenizers/ - ''; - - meta = with lib; { - description = "A PostgreSQL extension to use Groonga as the index"; - longDescription = '' - PGroonga is a PostgreSQL extension to use Groonga as the index. - PostgreSQL supports full text search against languages that use only alphabet and digit. - It means that PostgreSQL doesn't support full text search against Japanese, Chinese and so on. - You can use super fast full text search feature against all languages by installing PGroonga into your PostgreSQL. - ''; - homepage = "https://pgroonga.github.io/"; - changelog = "https://github.com/pgroonga/pgroonga/releases/tag/${version}"; - license = licenses.postgresql; - platforms = postgresql.meta.platforms; - maintainers = with maintainers; [ samrose ]; - }; -} \ No newline at end of file diff --git a/nix/ext/pgrouting.nix b/nix/ext/pgrouting.nix deleted file mode 100644 index 36edf34..0000000 --- a/nix/ext/pgrouting.nix +++ /dev/null @@ -1,31 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, postgresql, perl, cmake, boost }: - -stdenv.mkDerivation rec { - pname = "pgrouting"; - version = "3.4.1"; - - nativeBuildInputs = [ cmake perl ]; - buildInputs = [ postgresql boost ]; - - src = fetchFromGitHub { - owner = "pgRouting"; - repo = pname; - rev = "v${version}"; - hash = "sha256-QC77AnPGpPQGEWi6JtJdiNsB2su5+aV2pKg5ImR2B0k="; - }; - - installPhase = '' - install -D lib/*.so -t $out/lib - install -D sql/pgrouting--${version}.sql -t $out/share/postgresql/extension - install -D sql/common/pgrouting.control -t $out/share/postgresql/extension - ''; - - meta = with lib; { - description = "A PostgreSQL/PostGIS extension that provides geospatial routing functionality"; - homepage = "https://pgrouting.org/"; - changelog = "https://github.com/pgRouting/pgrouting/releases/tag/v${version}"; - maintainers = with maintainers; [ steve-chavez samrose ]; - platforms = postgresql.meta.platforms; - license = licenses.gpl2Plus; - }; -} diff --git 
a/nix/ext/pgsodium.nix b/nix/ext/pgsodium.nix deleted file mode 100644 index e3b0da2..0000000 --- a/nix/ext/pgsodium.nix +++ /dev/null @@ -1,31 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, libsodium, postgresql }: - -stdenv.mkDerivation rec { - pname = "pgsodium"; - version = "3.1.8"; - - buildInputs = [ libsodium postgresql ]; - - src = fetchFromGitHub { - owner = "michelp"; - repo = pname; - rev = "refs/tags/v${version}"; - hash = "sha256-j5F1PPdwfQRbV8XJ8Mloi8FvZF0MTl4eyIJcBYQy1E4="; - }; - - installPhase = '' - mkdir -p $out/{lib,share/postgresql/extension} - - cp *.so $out/lib - cp sql/*.sql $out/share/postgresql/extension - cp *.control $out/share/postgresql/extension - ''; - - meta = with lib; { - description = "Modern cryptography for PostgreSQL"; - homepage = "https://github.com/michelp/${pname}"; - maintainers = with maintainers; [ samrose ]; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - }; -} diff --git a/nix/ext/pgsql-http.nix b/nix/ext/pgsql-http.nix deleted file mode 100644 index 2edaa9d..0000000 --- a/nix/ext/pgsql-http.nix +++ /dev/null @@ -1,31 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, curl, postgresql }: - -stdenv.mkDerivation rec { - pname = "pgsql-http"; - version = "1.6.0"; - - buildInputs = [ curl postgresql ]; - - src = fetchFromGitHub { - owner = "pramsey"; - repo = pname; - rev = "refs/tags/v${version}"; - hash = "sha256-CPHfx7vhWfxkXsoKTzyFuTt47BPMvzi/pi1leGcuD60="; - }; - - installPhase = '' - mkdir -p $out/{lib,share/postgresql/extension} - - cp *.so $out/lib - cp *.sql $out/share/postgresql/extension - cp *.control $out/share/postgresql/extension - ''; - - meta = with lib; { - description = "HTTP client for Postgres"; - homepage = "https://github.com/pramsey/${pname}"; - maintainers = with maintainers; [ samrose ]; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - }; -} diff --git a/nix/ext/pgtap.nix b/nix/ext/pgtap.nix deleted file mode 100644 index c5a17c9..0000000 --- 
a/nix/ext/pgtap.nix +++ /dev/null @@ -1,33 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, postgresql, perl, perlPackages, which }: - -stdenv.mkDerivation rec { - pname = "pgtap"; - version = "1.2.0"; - - src = fetchFromGitHub { - owner = "theory"; - repo = "pgtap"; - rev = "v${version}"; - hash = "sha256-lb0PRffwo6J5a6Hqw1ggvn0cW7gPZ02OEcLPi9ineI8="; - }; - - nativeBuildInputs = [ postgresql perl perlPackages.TAPParserSourceHandlerpgTAP which ]; - - installPhase = '' - install -D {sql/pgtap--${version}.sql,pgtap.control} -t $out/share/postgresql/extension - ''; - - meta = with lib; { - description = "A unit testing framework for PostgreSQL"; - longDescription = '' - pgTAP is a unit testing framework for PostgreSQL written in PL/pgSQL and PL/SQL. - It includes a comprehensive collection of TAP-emitting assertion functions, - as well as the ability to integrate with other TAP-emitting test frameworks. - It can also be used in the xUnit testing style. - ''; - maintainers = with maintainers; [ samrose ]; - homepage = "https://pgtap.org"; - inherit (postgresql.meta) platforms; - license = licenses.mit; - }; -} diff --git a/nix/ext/pgvector.nix b/nix/ext/pgvector.nix deleted file mode 100644 index 965be36..0000000 --- a/nix/ext/pgvector.nix +++ /dev/null @@ -1,31 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, postgresql }: - -stdenv.mkDerivation rec { - pname = "pgvector"; - version = "0.8.0"; - - buildInputs = [ postgresql ]; - - src = fetchFromGitHub { - owner = "pgvector"; - repo = pname; - rev = "refs/tags/v${version}"; - hash = "sha256-JsZV+I4eRMypXTjGmjCtMBXDVpqTIPHQa28ogXncE/Q="; - }; - - installPhase = '' - mkdir -p $out/{lib,share/postgresql/extension} - - cp *.so $out/lib - cp sql/*.sql $out/share/postgresql/extension - cp *.control $out/share/postgresql/extension - ''; - - meta = with lib; { - description = "Open-source vector similarity search for Postgres"; - homepage = "https://github.com/${src.owner}/${src.repo}"; - maintainers = with maintainers; [ olirice ]; - 
platforms = postgresql.meta.platforms; - license = licenses.postgresql; - }; -} diff --git a/nix/ext/pljava.nix b/nix/ext/pljava.nix deleted file mode 100644 index 16f8a59..0000000 --- a/nix/ext/pljava.nix +++ /dev/null @@ -1,51 +0,0 @@ -{ stdenv, lib, fetchFromGitHub, openssl, openjdk, maven, postgresql, libkrb5, makeWrapper, gcc, pkg-config, which }: - -maven.buildMavenPackage rec { - pname = "pljava"; - - version = "1.6.7"; - - src = fetchFromGitHub { - owner = "tada"; - repo = "pljava"; - rev = "V1_6_7"; - sha256 = "sha256-M17adSLsw47KZ2BoUwxyWkXKRD8TcexDAy61Yfw4fNU="; - - }; - - mvnParameters = "clean install -Dmaven.test.skip -DskipTests -Dmaven.javadoc.skip=true"; - mvnHash = "sha256-lcxRduh/nKcPL6YQIVTsNH0L4ga0LgJpQKgX5IPkRzs="; - - nativeBuildInputs = [ makeWrapper maven openjdk postgresql openssl postgresql gcc libkrb5 pkg-config ]; - buildInputs = [ stdenv.cc.cc.lib which]; - buildPhase = '' - export PATH=$(lib.makeBinPath [ postgresql ]):$PATH - - ''; - buildOffline = true; - - installPhase = '' - mkdir -p $out/pljavabuild - cp -r * $out/pljavabuild - mkdir -p $out/share/postgresql/extension/pljava - mkdir -p $out/share/postgresql/pljava - mkdir -p $out/lib - mkdir -p $out/etc - java -Dpgconfig=${postgresql}/bin/pg_config \ - -Dpgconfig.sharedir=$out/share \ - -Dpgconfig.sysconfdir=$out/etc/pljava.policy \ - -Dpgconfig.pkglibdir=$out/lib \ - -jar $out/pljavabuild/pljava-packaging/target/pljava-pg15.jar - cp $out/share/pljava/* $out/share/postgresql/extension/pljava - cp $out/share/pljava/* $out/share/postgresql/pljava - cp $out/share/extension/*.control $out/share/postgresql/extension - rm -r $out/pljavabuild - ''; - - meta = with lib; { - description = "PL/Java extension for PostgreSQL"; - homepage = https://github.com/tada/pljava; - license = licenses.bsd3; - maintainers = [ maintainers.samrose ]; # Update with actual maintainer info - }; -} diff --git a/nix/ext/plpgsql-check.nix b/nix/ext/plpgsql-check.nix deleted file mode 100644 index 
7be2aac..0000000 --- a/nix/ext/plpgsql-check.nix +++ /dev/null @@ -1,46 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, postgresql, postgresqlTestHook }: - -stdenv.mkDerivation rec { - pname = "plpgsql-check"; - version = "2.7.11"; - - src = fetchFromGitHub { - owner = "okbob"; - repo = "plpgsql_check"; - rev = "v${version}"; - hash = "sha256-vR3MvfmUP2QEAtXFpq0NCCKck3wZPD+H3QleHtyVQJs="; - }; - - buildInputs = [ postgresql ]; - - installPhase = '' - install -D -t $out/lib *${postgresql.dlSuffix} - install -D -t $out/share/postgresql/extension *.sql - install -D -t $out/share/postgresql/extension *.control - ''; - - passthru.tests.extension = stdenv.mkDerivation { - name = "plpgsql-check-test"; - dontUnpack = true; - doCheck = true; - buildInputs = [ postgresqlTestHook ]; - nativeCheckInputs = [ (postgresql.withPackages (ps: [ ps.plpgsql_check ])) ]; - postgresqlTestUserOptions = "LOGIN SUPERUSER"; - failureHook = "postgresqlStop"; - checkPhase = '' - runHook preCheck - psql -a -v ON_ERROR_STOP=1 -c "CREATE EXTENSION plpgsql_check;" - runHook postCheck - ''; - installPhase = "touch $out"; - }; - - meta = with lib; { - description = "Linter tool for language PL/pgSQL"; - homepage = "https://github.com/okbob/plpgsql_check"; - changelog = "https://github.com/okbob/plpgsql_check/releases/tag/v${version}"; - platforms = postgresql.meta.platforms; - license = licenses.mit; - maintainers = [ maintainers.marsam ]; - }; -} diff --git a/nix/ext/plv8.nix b/nix/ext/plv8.nix deleted file mode 100644 index 338bba6..0000000 --- a/nix/ext/plv8.nix +++ /dev/null @@ -1,194 +0,0 @@ -{ stdenv -, lib -, fetchFromGitHub -, v8 -, perl -, postgresql -# For passthru test on various systems, and local development on macos -# not we are not currently using passthru tests but retaining for possible contrib -# to nixpkgs -, runCommand -, coreutils -, gnugrep -, clang -, xcbuild -, darwin -, patchelf -}: - -stdenv.mkDerivation (finalAttrs: { - pname = "plv8"; - version = "3.1.10"; - - src = 
fetchFromGitHub { - owner = "plv8"; - repo = "plv8"; - rev = "v${finalAttrs.version}"; - hash = "sha256-g1A/XPC0dX2360Gzvmo9/FSQnM6Wt2K4eR0pH0p9fz4="; - }; - - patches = [ - # Allow building with system v8. - # https://github.com/plv8/plv8/pull/505 (rejected) - ./0001-build-Allow-using-V8-from-system.patch - ]; - - nativeBuildInputs = [ - perl - ] ++ lib.optionals stdenv.isDarwin [ - clang - xcbuild - ]; - - buildInputs = [ - v8 - postgresql - ] ++ lib.optionals stdenv.isDarwin [ - darwin.apple_sdk.frameworks.CoreFoundation - darwin.apple_sdk.frameworks.Kerberos - ]; - - buildFlags = [ "all" ]; - - makeFlags = [ - # Nixpkgs build a v8 monolith instead of separate v8_libplatform. - "USE_SYSTEM_V8=1" - "V8_OUTDIR=${v8}/lib" - "PG_CONFIG=${postgresql}/bin/pg_config" - ] ++ lib.optionals stdenv.isDarwin [ - "CC=${clang}/bin/clang" - "CXX=${clang}/bin/clang++" - "SHLIB_LINK=-L${v8}/lib -lv8_monolith -Wl,-rpath,${v8}/lib" - ] ++ lib.optionals (!stdenv.isDarwin) [ - "SHLIB_LINK=-lv8" - ]; - - NIX_LDFLAGS = (lib.optionals stdenv.isDarwin [ - "-L${postgresql}/lib" - "-L${v8}/lib" - "-lv8_monolith" - "-lpq" - "-lpgcommon" - "-lpgport" - "-F${darwin.apple_sdk.frameworks.CoreFoundation}/Library/Frameworks" - "-framework" "CoreFoundation" - "-F${darwin.apple_sdk.frameworks.Kerberos}/Library/Frameworks" - "-framework" "Kerberos" - "-undefined" "dynamic_lookup" - "-flat_namespace" - ]); - - installFlags = [ - # PGXS only supports installing to postgresql prefix so we need to redirect this - "DESTDIR=${placeholder "out"}" - ]; - - # No configure script. - dontConfigure = true; - - postPatch = '' - patchShebangs ./generate_upgrade.sh - substituteInPlace generate_upgrade.sh \ - --replace " 2.3.10 " " 2.3.10 2.3.11 2.3.12 2.3.13 2.3.14 2.3.15 " - - ${lib.optionalString stdenv.isDarwin '' - # Replace g++ with clang++ in Makefile - sed -i 's/g++/clang++/g' Makefile - ''} - ''; - - postInstall = '' - # Move the redirected to proper directory. 
- # There appear to be no references to the install directories - # so changing them does not cause issues. - mv "$out/nix/store"/*/* "$out" - rmdir "$out/nix/store"/* "$out/nix/store" "$out/nix" - mv "$out/lib/plv8-${finalAttrs.version}.so" "$out/lib/plv8.so" - ln -s "$out/lib/plv8.so" "$out/lib/plv8-${finalAttrs.version}.so" - sed -i 's|module_pathname = '"'"'$libdir/plv8-[0-9.]*'"'"'|module_pathname = '"'"'$libdir/plv8'"'"'|' "$out/share/postgresql/extension/plv8.control" - sed -i 's|module_pathname = '"'"'$libdir/plv8-[0-9.]*'"'"'|module_pathname = '"'"'$libdir/plv8'"'"'|' "$out/share/postgresql/extension/plcoffee.control" - sed -i 's|module_pathname = '"'"'$libdir/plv8-[0-9.]*'"'"'|module_pathname = '"'"'$libdir/plv8'"'"'|' "$out/share/postgresql/extension/plls.control" - ${lib.optionalString stdenv.isDarwin '' - install_name_tool -add_rpath "${v8}/lib" $out/lib/plv8.so - install_name_tool -add_rpath "${postgresql}/lib" $out/lib/plv8.so - install_name_tool -add_rpath "${stdenv.cc.cc.lib}/lib" $out/lib/plv8.so - install_name_tool -change @rpath/libv8_monolith.dylib ${v8}/lib/libv8_monolith.dylib $out/lib/plv8.so - ''} - - ${lib.optionalString (!stdenv.isDarwin) '' - ${patchelf}/bin/patchelf --set-rpath "${v8}/lib:${postgresql}/lib:${stdenv.cc.cc.lib}/lib" $out/lib/plv8.so - ''} - ''; - - passthru = { - tests = - let - postgresqlWithSelf = postgresql.withPackages (_: [ - finalAttrs.finalPackage - ]); - in { - smoke = runCommand "plv8-smoke-test" {} '' - export PATH=${lib.makeBinPath [ - postgresqlWithSelf - coreutils - gnugrep - ]} - db="$PWD/testdb" - initdb "$db" - postgres -k "$db" -D "$db" & - pid="$!" - - for i in $(seq 1 100); do - if psql -h "$db" -d postgres -c "" 2>/dev/null; then - break - elif ! 
kill -0 "$pid"; then - exit 1 - else - sleep 0.1 - fi - done - - psql -h "$db" -d postgres -c 'CREATE EXTENSION plv8; DO $$ plv8.elog(NOTICE, plv8.version); $$ LANGUAGE plv8;' 2> "$out" - grep -q "${finalAttrs.version}" "$out" - kill -0 "$pid" - ''; - - regression = stdenv.mkDerivation { - name = "plv8-regression"; - inherit (finalAttrs) src patches nativeBuildInputs buildInputs dontConfigure; - - buildPhase = '' - runHook preBuild - - # The regression tests need to be run in the order specified in the Makefile. - echo -e "include Makefile\nprint_regress_files:\n\t@echo \$(REGRESS)" > Makefile.regress - REGRESS_TESTS=$(make -f Makefile.regress print_regress_files) - - ${postgresql}/lib/pgxs/src/test/regress/pg_regress \ - --bindir='${postgresqlWithSelf}/bin' \ - --temp-instance=regress-instance \ - --dbname=contrib_regression \ - $REGRESS_TESTS - - runHook postBuild - ''; - - installPhase = '' - runHook preInstall - - touch "$out" - - runHook postInstall - ''; - }; - }; - }; - - meta = with lib; { - description = "V8 Engine Javascript Procedural Language add-on for PostgreSQL"; - homepage = "https://plv8.github.io/"; - maintainers = with maintainers; [ samrose ]; - platforms = [ "x86_64-linux" "aarch64-linux" "aarch64-darwin" ]; - license = licenses.postgresql; - }; -}) \ No newline at end of file diff --git a/nix/ext/postgis.nix b/nix/ext/postgis.nix deleted file mode 100644 index e0b6dfb..0000000 --- a/nix/ext/postgis.nix +++ /dev/null @@ -1,87 +0,0 @@ -{ fetchurl -, lib, stdenv -, perl -, libxml2 -, postgresql -, geos -, proj -, gdalMinimal -, json_c -, pkg-config -, file -, protobufc -, libiconv -, pcre2 -, nixosTests -, callPackage -}: - -let - gdal = gdalMinimal; - sfcgal = callPackage ./sfcgal/sfcgal.nix { }; -in -stdenv.mkDerivation rec { - pname = "postgis"; - version = "3.3.2"; - - outputs = [ "out" "doc" ]; - - src = fetchurl { - url = "https://download.osgeo.org/postgis/source/postgis-${version}.tar.gz"; - sha256 = 
"sha256-miohnaAFoXMKOdGVmhx87GGbHvsAm2W+gP/CW60pkGg="; - }; - - buildInputs = [ libxml2 postgresql geos proj gdal json_c protobufc pcre2.dev sfcgal ] - ++ lib.optional stdenv.isDarwin libiconv; - nativeBuildInputs = [ perl pkg-config ]; - dontDisableStatic = true; - - # postgis config directory assumes /include /lib from the same root for json-c library - env.NIX_LDFLAGS = "-L${lib.getLib json_c}/lib"; - - - preConfigure = '' - sed -i 's@/usr/bin/file@${file}/bin/file@' configure - configureFlags="--datadir=$out/share/postgresql --datarootdir=$out/share/postgresql --bindir=$out/bin --docdir=$doc/share/doc/${pname} --with-gdalconfig=${gdal}/bin/gdal-config --with-jsondir=${json_c.dev} --disable-extension-upgrades-install --with-sfcgal" - - makeFlags="PERL=${perl}/bin/perl datadir=$out/share/postgresql pkglibdir=$out/lib bindir=$out/bin docdir=$doc/share/doc/${pname}" - ''; - postConfigure = '' - sed -i "s|@mkdir -p \$(DESTDIR)\$(PGSQL_BINDIR)||g ; - s|\$(DESTDIR)\$(PGSQL_BINDIR)|$prefix/bin|g - " \ - "raster/loader/Makefile"; - sed -i "s|\$(DESTDIR)\$(PGSQL_BINDIR)|$prefix/bin|g - " \ - "raster/scripts/python/Makefile"; - mkdir -p $out/bin - - # postgis' build system assumes it is being installed to the same place as postgresql, and looks - # for the postgres binary relative to $PREFIX. We gently support this system using an illusion. - ln -s ${postgresql}/bin/postgres $out/bin/postgres - ''; - - # create aliases for all commands adding version information - postInstall = '' - # Teardown the illusory postgres used for building; see postConfigure. 
- rm $out/bin/postgres - - for prog in $out/bin/*; do # */ - ln -s $prog $prog-${version} - done - - mkdir -p $doc/share/doc/postgis - mv doc/* $doc/share/doc/postgis/ - ''; - - passthru.tests.postgis = nixosTests.postgis; - - meta = with lib; { - description = "Geographic Objects for PostgreSQL"; - homepage = "https://postgis.net/"; - changelog = "https://git.osgeo.org/gitea/postgis/postgis/raw/tag/${version}/NEWS"; - license = licenses.gpl2; - maintainers = with maintainers; [ samrose ]; - inherit (postgresql.meta) platforms; - }; -} diff --git a/nix/ext/rum.nix b/nix/ext/rum.nix deleted file mode 100644 index 16bf106..0000000 --- a/nix/ext/rum.nix +++ /dev/null @@ -1,31 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, postgresql }: - -stdenv.mkDerivation rec { - pname = "rum"; - version = "1.3.13"; - - src = fetchFromGitHub { - owner = "postgrespro"; - repo = "rum"; - rev = version; - hash = "sha256-yy2xeDnk3fENN+En0st4mv60nZlqPafIzwf68jwJ5fE="; - }; - - buildInputs = [ postgresql ]; - - makeFlags = [ "USE_PGXS=1" ]; - - installPhase = '' - install -D -t $out/lib *${postgresql.dlSuffix} - install -D -t $out/share/postgresql/extension *.control - install -D -t $out/share/postgresql/extension *.sql - ''; - - meta = with lib; { - description = "Full text search index method for PostgreSQL"; - homepage = "https://github.com/postgrespro/rum"; - license = licenses.postgresql; - platforms = postgresql.meta.platforms; - maintainers = with maintainers; [ samrose ]; - }; -} diff --git a/nix/ext/sfcgal/sfcgal.nix b/nix/ext/sfcgal/sfcgal.nix deleted file mode 100644 index 54d7b52..0000000 --- a/nix/ext/sfcgal/sfcgal.nix +++ /dev/null @@ -1,31 +0,0 @@ -{ lib, stdenv, fetchFromGitLab, cgal, cmake, pkg-config, gmp, mpfr, boost }: - -stdenv.mkDerivation rec { - pname = "sfcgal"; - version = "61f3b08ade49493b56c6bafa98c7c1f84addbc10"; - - src = fetchFromGitLab { - owner = "sfcgal"; - repo = "SFCGAL"; - rev = "${version}"; - hash = "sha256-nKSqiFyMkZAYptIeShb1zFg9lYSny3kcGJfxdeTFqxw="; - 
}; - - nativeBuildInputs = [ cmake pkg-config cgal gmp mpfr boost ]; - - cmakeFlags = [ "-DCGAL_DIR=${cgal}" "-DCMAKE_PREFIX_PATH=${cgal}" ]; - - - postPatch = '' - substituteInPlace sfcgal.pc.in \ - --replace '$'{prefix}/@CMAKE_INSTALL_LIBDIR@ @CMAKE_INSTALL_FULL_LIBDIR@ - ''; - - meta = with lib; { - description = "A wrapper around CGAL that intents to implement 2D and 3D operations on OGC standards models"; - homepage = "https://sfcgal.gitlab.io/SFCGAL/"; - license = with licenses; [ gpl3Plus lgpl3Plus]; - platforms = platforms.all; - maintainers = with maintainers; [ samrose ]; - }; -} diff --git a/nix/ext/supautils.nix b/nix/ext/supautils.nix deleted file mode 100644 index baf806d..0000000 --- a/nix/ext/supautils.nix +++ /dev/null @@ -1,29 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, postgresql }: - -stdenv.mkDerivation rec { - pname = "supautils"; - version = "2.5.0"; - - buildInputs = [ postgresql ]; - - src = fetchFromGitHub { - owner = "tealbase"; - repo = pname; - rev = "refs/tags/v${version}"; - hash = "sha256-NyAk+QpQEdibmFY4yceO/FzMOhRYhKXf4XUw9XJ5rOY="; - }; - - installPhase = '' - mkdir -p $out/lib - - install -D supautils${postgresql.dlSuffix} -t $out/lib - ''; - - meta = with lib; { - description = "PostgreSQL extension for enhanced security"; - homepage = "https://github.com/tealbase/${pname}"; - maintainers = with maintainers; [ steve-chavez ]; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - }; -} diff --git a/nix/ext/timescaledb-2.9.1.nix b/nix/ext/timescaledb-2.9.1.nix deleted file mode 100644 index ad955e8..0000000 --- a/nix/ext/timescaledb-2.9.1.nix +++ /dev/null @@ -1,51 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, cmake, postgresql, openssl, libkrb5 }: - -stdenv.mkDerivation rec { - pname = "timescaledb-apache"; - version = "2.9.1"; - - nativeBuildInputs = [ cmake ]; - buildInputs = [ postgresql openssl libkrb5 ]; - - src = fetchFromGitHub { - owner = "timescale"; - repo = "timescaledb"; - rev = version; - hash = 
"sha256-fvVSxDiGZAewyuQ2vZDb0I6tmlDXl6trjZp8+qDBtb8="; - }; - - cmakeFlags = [ "-DSEND_TELEMETRY_DEFAULT=OFF" "-DREGRESS_CHECKS=OFF" "-DTAP_CHECKS=OFF" "-DAPACHE_ONLY=1" ] - ++ lib.optionals stdenv.isDarwin [ "-DLINTER=OFF" ]; - - # Fix the install phase which tries to install into the pgsql extension dir, - # and cannot be manually overridden. This is rather fragile but works OK. - postPatch = '' - for x in CMakeLists.txt sql/CMakeLists.txt; do - substituteInPlace "$x" \ - --replace 'DESTINATION "''${PG_SHAREDIR}/extension"' "DESTINATION \"$out/share/postgresql/extension\"" - done - - for x in src/CMakeLists.txt src/loader/CMakeLists.txt tsl/src/CMakeLists.txt; do - substituteInPlace "$x" \ - --replace 'DESTINATION ''${PG_PKGLIBDIR}' "DESTINATION \"$out/lib\"" - done - ''; - - - # timescaledb-2.9.1.so already exists in the lib directory - # we have no need for the timescaledb.so or control file - postInstall = '' - rm $out/lib/timescaledb.so - rm $out/share/postgresql/extension/timescaledb.control - ''; - - meta = with lib; { - description = "Scales PostgreSQL for time-series data via automatic partitioning across time and space"; - homepage = "https://www.timescale.com/"; - changelog = "https://github.com/timescale/timescaledb/blob/${version}/CHANGELOG.md"; - maintainers = with maintainers; [ samrose ]; - platforms = postgresql.meta.platforms; - license = licenses.asl20; - broken = versionOlder postgresql.version "13"; - }; -} diff --git a/nix/ext/timescaledb.nix b/nix/ext/timescaledb.nix deleted file mode 100644 index 1c87916..0000000 --- a/nix/ext/timescaledb.nix +++ /dev/null @@ -1,43 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, cmake, postgresql, openssl, libkrb5 }: - -stdenv.mkDerivation rec { - pname = "timescaledb-apache"; - version = "2.16.1"; - - nativeBuildInputs = [ cmake ]; - buildInputs = [ postgresql openssl libkrb5 ]; - - src = fetchFromGitHub { - owner = "timescale"; - repo = "timescaledb"; - rev = version; - hash = 
"sha256-sLxWdBmih9mgiO51zLLxn9uwJVYc5JVHJjSWoADoJ+w="; - }; - - cmakeFlags = [ "-DSEND_TELEMETRY_DEFAULT=OFF" "-DREGRESS_CHECKS=OFF" "-DTAP_CHECKS=OFF" "-DAPACHE_ONLY=1" ] - ++ lib.optionals stdenv.isDarwin [ "-DLINTER=OFF" ]; - - # Fix the install phase which tries to install into the pgsql extension dir, - # and cannot be manually overridden. This is rather fragile but works OK. - postPatch = '' - for x in CMakeLists.txt sql/CMakeLists.txt; do - substituteInPlace "$x" \ - --replace 'DESTINATION "''${PG_SHAREDIR}/extension"' "DESTINATION \"$out/share/postgresql/extension\"" - done - - for x in src/CMakeLists.txt src/loader/CMakeLists.txt tsl/src/CMakeLists.txt; do - substituteInPlace "$x" \ - --replace 'DESTINATION ''${PG_PKGLIBDIR}' "DESTINATION \"$out/lib\"" - done - ''; - - meta = with lib; { - description = "Scales PostgreSQL for time-series data via automatic partitioning across time and space"; - homepage = "https://www.timescale.com/"; - changelog = "https://github.com/timescale/timescaledb/blob/${version}/CHANGELOG.md"; - maintainers = with maintainers; [ samrose ]; - platforms = postgresql.meta.platforms; - license = licenses.asl20; - broken = versionOlder postgresql.version "13"; - }; -} diff --git a/nix/ext/use-system-groonga.patch b/nix/ext/use-system-groonga.patch deleted file mode 100644 index 6d3042b..0000000 --- a/nix/ext/use-system-groonga.patch +++ /dev/null @@ -1,21 +0,0 @@ -diff --git a/CMakeLists.txt b/CMakeLists.txt -index 33b34477..f4ffefe5 100644 ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -12,7 +12,6 @@ if(MSVC_VERSION LESS 1800) - message(FATAL_ERROR "PGroonga supports only MSVC 2013 or later") - endif() - --add_subdirectory(vendor/groonga) - - set(PGRN_POSTGRESQL_DIR "${CMAKE_INSTALL_PREFIX}" - CACHE PATH "PostgreSQL binary directory") -@@ -52,8 +51,6 @@ string(REGEX REPLACE "([0-9]+)\\.([0-9]+)\\.([0-9]+)" "\\3" - string(REGEX REPLACE ".*comment = '([^']+)'.*" "\\1" - PGRN_DESCRIPTION "${PGRN_CONTROL}") - --file(READ 
"${CMAKE_CURRENT_SOURCE_DIR}/vendor/groonga/bundled_message_pack_version" -- PGRN_BUNDLED_MESSAGE_PACK_VERSION) - string(STRIP - "${PGRN_BUNDLED_MESSAGE_PACK_VERSION}" - PGRN_BUNDLED_MESSAGE_PACK_VERSION) \ No newline at end of file diff --git a/nix/ext/vault.nix b/nix/ext/vault.nix deleted file mode 100644 index b840771..0000000 --- a/nix/ext/vault.nix +++ /dev/null @@ -1,30 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, postgresql }: - -stdenv.mkDerivation rec { - pname = "vault"; - version = "0.2.9"; - - buildInputs = [ postgresql ]; - - src = fetchFromGitHub { - owner = "tealbase"; - repo = pname; - rev = "refs/tags/v${version}"; - hash = "sha256-kXTngBW4K6FkZM8HvJG2Jha6OQqbejhnk7tchxy031I="; - }; - - installPhase = '' - mkdir -p $out/{lib,share/postgresql/extension} - - cp sql/*.sql $out/share/postgresql/extension - cp *.control $out/share/postgresql/extension - ''; - - meta = with lib; { - description = "Store encrypted secrets in PostgreSQL"; - homepage = "https://github.com/tealbase/${pname}"; - maintainers = with maintainers; [ samrose ]; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - }; -} diff --git a/nix/ext/wal2json.nix b/nix/ext/wal2json.nix deleted file mode 100644 index 751eb64..0000000 --- a/nix/ext/wal2json.nix +++ /dev/null @@ -1,31 +0,0 @@ -{ lib, stdenv, fetchFromGitHub, postgresql }: - -stdenv.mkDerivation rec { - pname = "wal2json"; - version = "2_5"; - - src = fetchFromGitHub { - owner = "eulerto"; - repo = "wal2json"; - rev = "wal2json_${builtins.replaceStrings ["."] ["_"] version}"; - hash = "sha256-Gpc9uDKrs/dmVSFgdgHM453+TaEnhRh9t0gDbSn8FUI="; - }; - - buildInputs = [ postgresql ]; - - makeFlags = [ "USE_PGXS=1" ]; - - installPhase = '' - install -D -t $out/lib *${postgresql.dlSuffix} - install -D -t $out/share/postgresql/extension sql/*.sql - ''; - - meta = with lib; { - description = "PostgreSQL JSON output plugin for changeset extraction"; - homepage = "https://github.com/eulerto/wal2json"; - changelog = 
"https://github.com/eulerto/wal2json/releases/tag/wal2json_${version}"; - maintainers = with maintainers; [ samrose ]; - platforms = postgresql.meta.platforms; - license = licenses.bsd3; - }; -} diff --git a/nix/ext/wrappers/default.nix b/nix/ext/wrappers/default.nix deleted file mode 100644 index c9eacbb..0000000 --- a/nix/ext/wrappers/default.nix +++ /dev/null @@ -1,121 +0,0 @@ -{ lib -, stdenv -, fetchFromGitHub -, openssl -, pkg-config -, postgresql -, buildPgrxExtension_0_11_3 -, cargo -, darwin -, jq -}: - -buildPgrxExtension_0_11_3 rec { - pname = "tealbase-wrappers"; - version = "0.4.2"; - # update the following array when the wrappers version is updated - # required to ensure that extensions update scripts from previous versions are generated - previousVersions = ["0.4.1" "0.4.0" "0.3.1" "0.3.0" "0.2.0" "0.1.19" "0.1.18" "0.1.17" "0.1.16" "0.1.15" "0.1.14" "0.1.12" "0.1.11" "0.1.10" "0.1.9" "0.1.8" "0.1.7" "0.1.6" "0.1.5" "0.1.4" "0.1.1" "0.1.0"]; - inherit postgresql; - src = fetchFromGitHub { - owner = "tealbase"; - repo = "wrappers"; - rev = "v${version}"; - hash = "sha256-ut3IQED6ANXgabiHoEUdfSrwkuuYYSpRoeWdtBvSe64="; - }; - nativeBuildInputs = [ pkg-config cargo ]; - buildInputs = [ openssl ] ++ lib.optionals (stdenv.isDarwin) [ - darwin.apple_sdk.frameworks.CoreFoundation - darwin.apple_sdk.frameworks.Security - darwin.apple_sdk.frameworks.SystemConfiguration - ]; - OPENSSL_NO_VENDOR = 1; - #need to set this to 2 to avoid cpu starvation - CARGO_BUILD_JOBS = "2"; - CARGO="${cargo}/bin/cargo"; - cargoLock = { - lockFile = "${src}/Cargo.lock"; - outputHashes = { - "clickhouse-rs-1.0.0-alpha.1" = "sha256-0zmoUo/GLyCKDLkpBsnLAyGs1xz6cubJhn+eVqMEMaw="; - }; - }; - postPatch = "cp ${cargoLock.lockFile} Cargo.lock"; - buildAndTestSubdir = "wrappers"; - buildFeatures = [ - "helloworld_fdw" - "bigquery_fdw" - "clickhouse_fdw" - "stripe_fdw" - "firebase_fdw" - "s3_fdw" - "airtable_fdw" - "logflare_fdw" - "auth0_fdw" - "mssql_fdw" - "redis_fdw" - "cognito_fdw" - 
"wasm_fdw" - ]; - doCheck = false; - - preBuild = '' - echo "Processing git tags..." - echo '${builtins.concatStringsSep "," previousVersions}' | sed 's/,/\n/g' > git_tags.txt - ''; - - postInstall = '' - echo "Modifying main SQL file to use unversioned library name..." - current_version="${version}" - main_sql_file="$out/share/postgresql/extension/wrappers--$current_version.sql" - if [ -f "$main_sql_file" ]; then - sed -i 's|$libdir/wrappers-[0-9.]*|$libdir/wrappers|g' "$main_sql_file" - echo "Modified $main_sql_file" - else - echo "Warning: $main_sql_file not found" - fi - echo "Creating and modifying SQL files for previous versions..." - - if [ -f "$main_sql_file" ]; then - while read -r previous_version; do - if [ "$(printf '%s\n' "$previous_version" "$current_version" | sort -V | head -n1)" = "$previous_version" ] && [ "$previous_version" != "$current_version" ]; then - new_file="$out/share/postgresql/extension/wrappers--$previous_version--$current_version.sql" - echo "Creating $new_file" - cp "$main_sql_file" "$new_file" - sed -i 's|$libdir/wrappers-[0-9.]*|$libdir/wrappers|g' "$new_file" - echo "Modified $new_file" - fi - done < git_tags.txt - else - echo "Warning: $main_sql_file not found" - fi - mv $out/lib/wrappers-${version}.so $out/lib/wrappers.so - ln -s $out/lib/wrappers.so $out/lib/wrappers-${version}.so - - echo "Creating wrappers.so symlinks to support pg_upgrade..." 
- if [ -f "$out/lib/wrappers.so" ]; then - while read -r previous_version; do - if [ "$(printf '%s\n' "$previous_version" "$current_version" | sort -V | head -n1)" = "$previous_version" ] && [ "$previous_version" != "$current_version" ]; then - new_file="$out/lib/wrappers-$previous_version.so" - echo "Creating $new_file" - ln -s "$out/lib/wrappers.so" "$new_file" - fi - done < git_tags.txt - else - echo "Warning: $out/lib/wrappers.so not found" - fi - - rm git_tags.txt - echo "Contents of updated wrappers.control:" - cat "$out/share/postgresql/extension/wrappers.control" - echo "List of generated SQL files:" - ls -l $out/share/postgresql/extension/wrappers--*.sql - ''; - - meta = with lib; { - description = "Various Foreign Data Wrappers (FDWs) for PostreSQL"; - homepage = "https://github.com/tealbase/wrappers"; - maintainers = with maintainers; [ samrose ]; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - }; -} diff --git a/nix/fix-cmake-install-path.patch b/nix/fix-cmake-install-path.patch deleted file mode 100644 index 1fe317b..0000000 --- a/nix/fix-cmake-install-path.patch +++ /dev/null @@ -1,21 +0,0 @@ -Fix CMake install path - ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -1141,11 +1141,11 @@ - - set(prefix "${CMAKE_INSTALL_PREFIX}") - set(exec_prefix "\${prefix}") --set(bindir "\${exec_prefix}/${CMAKE_INSTALL_BINDIR}") --set(sbindir "\${exec_prefix}/${CMAKE_INSTALL_SBINDIR}") --set(libdir "\${prefix}/${CMAKE_INSTALL_LIBDIR}") --set(includedir "\${prefix}/${CMAKE_INSTALL_INCLUDEDIR}") --set(datarootdir "\${prefix}/${CMAKE_INSTALL_DATAROOTDIR}") -+set(bindir "${CMAKE_INSTALL_FULL_BINDIR}") -+set(sbindir "${CMAKE_INSTALL_FULL_SBINDIR}") -+set(libdir "${CMAKE_INSTALL_FULL_LIBDIR}") -+set(includedir "${CMAKE_INSTALL_FULL_INCLUDEDIR}") -+set(datarootdir "${CMAKE_INSTALL_FULL_DATAROOTDIR}") - set(datadir "\${datarootdir}") - set(expanded_pluginsdir "${GRN_PLUGINS_DIR}") - set(GRN_EXPANDED_DEFAULT_DOCUMENT_ROOT 
"${GRN_DEFAULT_DOCUMENT_ROOT}") \ No newline at end of file diff --git a/nix/init.sh b/nix/init.sh deleted file mode 100755 index 3e872e4..0000000 --- a/nix/init.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -# shellcheck shell=bash - -export PGUSER=tealbase_admin -export PGDATA=$PWD/postgres_data -export PGHOST=$PWD/postgres -export PGPORT=5432 -export PGPASS=postgres -export LOG_PATH=$PGHOST/LOG -export PGDATABASE=testdb -export DATABASE_URL="postgresql:///$PGDATABASE?host=$PGHOST&port=$PGPORT" -mkdir -p $PGHOST -if [ ! -d $PGDATA ]; then - echo 'Initializing postgresql database...' - initdb $PGDATA --locale=C --username $PGUSER -A md5 --pwfile=<(echo $PGPASS) --auth=trust - echo "listen_addresses='*'" >> $PGDATA/postgresql.conf - echo "unix_socket_directories='$PGHOST'" >> $PGDATA/postgresql.conf - echo "unix_socket_permissions=0700" >> $PGDATA/postgresql.conf -fi -chmod o-rwx $PGDATA diff --git a/nix/overlays/cargo-pgrx-0-11-3.nix b/nix/overlays/cargo-pgrx-0-11-3.nix deleted file mode 100644 index 41ba97d..0000000 --- a/nix/overlays/cargo-pgrx-0-11-3.nix +++ /dev/null @@ -1,7 +0,0 @@ -final: prev: { - #cargo-pgrx_0_11_3 = cargo-pgrx.cargo-pgrx_0_11_3; - - buildPgrxExtension_0_11_3 = prev.buildPgrxExtension.override { - cargo-pgrx = final.cargo-pgrx_0_11_3; - }; -} diff --git a/nix/overlays/gdal-small.nix b/nix/overlays/gdal-small.nix deleted file mode 100644 index 18be8a5..0000000 --- a/nix/overlays/gdal-small.nix +++ /dev/null @@ -1,14 +0,0 @@ -final: prev: { - # override the version of gdal used with postgis with the small version. - # significantly reduces overall closure size - gdal = prev.gdalMinimal.override { - /* other features can be enabled, reference: - https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/libraries/gdal/default.nix - */ - - # useHDF = true; - # useArrow = true; - # useLibHEIF = true; - # ... 
- }; -} diff --git a/nix/overlays/psql_16-oriole.nix b/nix/overlays/psql_16-oriole.nix deleted file mode 100644 index d55af10..0000000 --- a/nix/overlays/psql_16-oriole.nix +++ /dev/null @@ -1,21 +0,0 @@ -final: prev: { - postgresql_16 = prev.postgresql_16.overrideAttrs (old: { - pname = "postgresql_16"; - version = "16_23"; - src = prev.fetchurl { - url = "https://github.com/orioledb/postgres/archive/refs/tags/patches16_23.tar.gz"; - sha256 = "sha256-xWmcqn3DYyBG0FsBNqPWTFzUidSJZgoPWI6Rt0N9oJ4="; - }; - buildInputs = old.buildInputs ++ [ - prev.bison - prev.docbook5 - prev.docbook_xsl - prev.docbook_xsl_ns - prev.docbook_xml_dtd_45 - prev.flex - prev.libxslt - prev.perl - ]; - }); - postgresql_orioledb_16 = final.postgresql_16; -} diff --git a/nix/postgresql/15.nix b/nix/postgresql/15.nix deleted file mode 100644 index 00dfc0c..0000000 --- a/nix/postgresql/15.nix +++ /dev/null @@ -1,4 +0,0 @@ -import ./generic.nix { - version = "15.6"; - hash = "sha256-hFUUbtnGnJOlfelUrq0DAsr60DXCskIXXWqh4X68svs="; -} diff --git a/nix/postgresql/default.nix b/nix/postgresql/default.nix deleted file mode 100644 index 6ee0452..0000000 --- a/nix/postgresql/default.nix +++ /dev/null @@ -1,20 +0,0 @@ -self: -let - #adapted from the postgresql nixpkgs package - versions = { - postgresql_15 = ./15.nix; - }; - - mkAttributes = jitSupport: - self.lib.mapAttrs' (version: path: - let - attrName = if jitSupport then "${version}_jit" else version; - in - self.lib.nameValuePair attrName (import path { - inherit jitSupport self; - }) - ) versions; - -in -# variations without and with JIT -(mkAttributes false) // (mkAttributes true) diff --git a/nix/postgresql/generic.nix b/nix/postgresql/generic.nix deleted file mode 100644 index 323b651..0000000 --- a/nix/postgresql/generic.nix +++ /dev/null @@ -1,309 +0,0 @@ -let - - generic = - # adapted from the nixpkgs postgresql package - # dependencies - { stdenv, lib, fetchurl, fetchpatch, makeWrapper - , glibc, zlib, readline, openssl, icu, lz4, zstd, 
systemd, libossp_uuid - , pkg-config, libxml2, tzdata, libkrb5, substituteAll, darwin - , linux-pam - - # This is important to obtain a version of `libpq` that does not depend on systemd. - , systemdSupport ? lib.meta.availableOn stdenv.hostPlatform systemd && !stdenv.hostPlatform.isStatic - , enableSystemd ? null - , gssSupport ? with stdenv.hostPlatform; !isWindows && !isStatic - - # for postgresql.pkgs - , self, newScope, buildEnv - - # source specification - , version, hash, muslPatches ? {} - - # for tests - , testers - - # JIT - , jitSupport - , nukeReferences, patchelf, llvmPackages - - # PL/Python - , pythonSupport ? false - , python3 - - # detection of crypt fails when using llvm stdenv, so we add it manually - # for <13 (where it got removed: https://github.com/postgres/postgres/commit/c45643d618e35ec2fe91438df15abd4f3c0d85ca) - , libxcrypt - } @args: - let - atLeast = lib.versionAtLeast version; - olderThan = lib.versionOlder version; - lz4Enabled = atLeast "14"; - zstdEnabled = atLeast "15"; - - systemdSupport' = if enableSystemd == null then systemdSupport else (lib.warn "postgresql: argument enableSystemd is deprecated, please use systemdSupport instead." 
enableSystemd); - - pname = "postgresql"; - - stdenv' = if jitSupport then llvmPackages.stdenv else stdenv; - in stdenv'.mkDerivation (finalAttrs: { - inherit version; - pname = pname + lib.optionalString jitSupport "-jit"; - - src = fetchurl { - url = "mirror://postgresql/source/v${version}/${pname}-${version}.tar.bz2"; - inherit hash; - }; - - hardeningEnable = lib.optionals (!stdenv'.cc.isClang) [ "pie" ]; - - outputs = [ "out" "lib" "doc" "man" ]; - setOutputFlags = false; # $out retains configureFlags :-/ - - buildInputs = [ - zlib - readline - openssl - (libxml2.override {python = python3;}) - icu - ] - ++ lib.optionals (olderThan "13") [ libxcrypt ] - ++ lib.optionals jitSupport [ llvmPackages.llvm ] - ++ lib.optionals lz4Enabled [ lz4 ] - ++ lib.optionals zstdEnabled [ zstd ] - ++ lib.optionals systemdSupport' [ systemd ] - ++ lib.optionals pythonSupport [ python3 ] - ++ lib.optionals gssSupport [ libkrb5 ] - ++ lib.optionals stdenv'.isLinux [ linux-pam ] - ++ lib.optionals (!stdenv'.isDarwin) [ libossp_uuid ]; - - nativeBuildInputs = [ - makeWrapper - pkg-config - ] - ++ lib.optionals jitSupport [ llvmPackages.llvm.dev nukeReferences patchelf ]; - - enableParallelBuilding = true; - - separateDebugInfo = true; - - buildFlags = [ "world" ]; - - # Makes cross-compiling work when xml2-config can't be executed on the host. 
- # Fixed upstream in https://github.com/postgres/postgres/commit/0bc8cebdb889368abdf224aeac8bc197fe4c9ae6 - env.NIX_CFLAGS_COMPILE = lib.optionalString (olderThan "13") "-I${libxml2.dev}/include/libxml2"; - - configureFlags = [ - "--with-openssl" - "--with-libxml" - "--with-icu" - "--sysconfdir=/etc" - "--libdir=$(lib)/lib" - "--with-system-tzdata=${tzdata}/share/zoneinfo" - "--enable-debug" - (lib.optionalString systemdSupport' "--with-systemd") - (if stdenv'.isDarwin then "--with-uuid=e2fs" else "--with-ossp-uuid") - ] ++ lib.optionals lz4Enabled [ "--with-lz4" ] - ++ lib.optionals zstdEnabled [ "--with-zstd" ] - ++ lib.optionals gssSupport [ "--with-gssapi" ] - ++ lib.optionals pythonSupport [ "--with-python" ] - ++ lib.optionals jitSupport [ "--with-llvm" ] - ++ lib.optionals stdenv'.isLinux [ "--with-pam" ]; - - patches = [ - (if atLeast "16" then ./patches/relative-to-symlinks-16+.patch else ./patches/relative-to-symlinks.patch) - ./patches/less-is-more.patch - ./patches/paths-for-split-outputs.patch - ./patches/specify_pkglibdir_at_runtime.patch - ./patches/paths-with-postgresql-suffix.patch - - (substituteAll { - src = ./patches/locale-binary-path.patch; - locale = "${if stdenv.isDarwin then darwin.adv_cmds else lib.getBin stdenv.cc.libc}/bin/locale"; - }) - ] ++ lib.optionals stdenv'.hostPlatform.isMusl ( - # Using fetchurl instead of fetchpatch on purpose: https://github.com/NixOS/nixpkgs/issues/240141 - map fetchurl (lib.attrValues muslPatches) - ) ++ lib.optionals stdenv'.isLinux [ - (if atLeast "13" then ./patches/socketdir-in-run-13+.patch else ./patches/socketdir-in-run.patch) - ]; - - installTargets = [ "install-world" ]; - - postPatch = '' - # Hardcode the path to pgxs so pg_config returns the path in $out - substituteInPlace "src/common/config_info.c" --subst-var out - '' + lib.optionalString jitSupport '' - # Force lookup of jit stuff in $out instead of $lib - substituteInPlace src/backend/jit/jit.c --replace pkglib_path \"$out/lib\" - 
substituteInPlace src/backend/jit/llvm/llvmjit.c --replace pkglib_path \"$out/lib\" - substituteInPlace src/backend/jit/llvm/llvmjit_inline.cpp --replace pkglib_path \"$out/lib\" - ''; - - postInstall = - '' - moveToOutput "lib/pgxs" "$out" # looks strange, but not deleting it - moveToOutput "lib/libpgcommon*.a" "$out" - moveToOutput "lib/libpgport*.a" "$out" - moveToOutput "lib/libecpg*" "$out" - - # Prevent a retained dependency on gcc-wrapper. - substituteInPlace "$out/lib/pgxs/src/Makefile.global" --replace ${stdenv'.cc}/bin/ld ld - - if [ -z "''${dontDisableStatic:-}" ]; then - # Remove static libraries in case dynamic are available. - for i in $out/lib/*.a $lib/lib/*.a; do - name="$(basename "$i")" - ext="${stdenv'.hostPlatform.extensions.sharedLibrary}" - if [ -e "$lib/lib/''${name%.a}$ext" ] || [ -e "''${i%.a}$ext" ]; then - rm "$i" - fi - done - fi - '' + lib.optionalString jitSupport '' - # Move the bitcode and libllvmjit.so library out of $lib; otherwise, every client that - # depends on libpq.so will also have libLLVM.so in its closure too, bloating it - moveToOutput "lib/bitcode" "$out" - moveToOutput "lib/llvmjit*" "$out" - - # In the case of JIT support, prevent a retained dependency on clang-wrapper - substituteInPlace "$out/lib/pgxs/src/Makefile.global" --replace ${stdenv'.cc}/bin/clang clang - nuke-refs $out/lib/llvmjit_types.bc $(find $out/lib/bitcode -type f) - - # Stop out depending on the default output of llvm - substituteInPlace $out/lib/pgxs/src/Makefile.global \ - --replace ${llvmPackages.llvm.out}/bin "" \ - --replace '$(LLVM_BINPATH)/' "" - - # Stop out depending on the -dev output of llvm - substituteInPlace $out/lib/pgxs/src/Makefile.global \ - --replace ${llvmPackages.llvm.dev}/bin/llvm-config llvm-config \ - --replace -I${llvmPackages.llvm.dev}/include "" - - ${lib.optionalString (!stdenv'.isDarwin) '' - # Stop lib depending on the -dev output of llvm - rpath=$(patchelf --print-rpath $out/lib/llvmjit.so) - nuke-refs -e $out 
$out/lib/llvmjit.so - # Restore the correct rpath - patchelf $out/lib/llvmjit.so --set-rpath "$rpath" - ''} - ''; - - postFixup = lib.optionalString (!stdenv'.isDarwin && stdenv'.hostPlatform.libc == "glibc") - '' - # initdb needs access to "locale" command from glibc. - wrapProgram $out/bin/initdb --prefix PATH ":" ${glibc.bin}/bin - ''; - - doCheck = !stdenv'.isDarwin; - # autodetection doesn't seem to able to find this, but it's there. - checkTarget = "check"; - - disallowedReferences = [ stdenv'.cc ]; - - passthru = let - this = self.callPackage generic args; - jitToggle = this.override { - jitSupport = !jitSupport; - }; - in - { - psqlSchema = lib.versions.major version; - - withJIT = if jitSupport then this else jitToggle; - withoutJIT = if jitSupport then jitToggle else this; - - dlSuffix = if olderThan "16" then ".so" else stdenv.hostPlatform.extensions.sharedLibrary; - - pkgs = let - scope = { - inherit jitSupport; - inherit (llvmPackages) llvm; - postgresql = this; - stdenv = stdenv'; - }; - newSelf = self // scope; - newSuper = { callPackage = newScope (scope // this.pkgs); }; - in import ./ext newSelf newSuper; - - withPackages = postgresqlWithPackages { - inherit makeWrapper buildEnv; - postgresql = this; - } - this.pkgs; - - tests = { - postgresql-wal-receiver = import ../../../../nixos/tests/postgresql-wal-receiver.nix { - inherit (stdenv) system; - pkgs = self; - package = this; - }; - pkg-config = testers.testMetaPkgConfig finalAttrs.finalPackage; - } // lib.optionalAttrs jitSupport { - postgresql-jit = import ../../../../nixos/tests/postgresql-jit.nix { - inherit (stdenv) system; - pkgs = self; - package = this; - }; - }; - }; - - meta = with lib; { - homepage = "https://www.postgresql.org"; - description = "Powerful, open source object-relational database system"; - license = licenses.postgresql; - changelog = "https://www.postgresql.org/docs/release/${finalAttrs.version}/"; - maintainers = with maintainers; [ thoughtpolice danbst globin ivan 
ma27 wolfgangwalther ]; - pkgConfigModules = [ "libecpg" "libecpg_compat" "libpgtypes" "libpq" ]; - platforms = platforms.unix; - - # JIT support doesn't work with cross-compilation. It is attempted to build LLVM-bytecode - # (`%.bc` is the corresponding `make(1)`-rule) for each sub-directory in `backend/` for - # the JIT apparently, but with a $(CLANG) that can produce binaries for the build, not the - # host-platform. - # - # I managed to get a cross-build with JIT support working with - # `depsBuildBuild = [ llvmPackages.clang ] ++ buildInputs`, but considering that the - # resulting LLVM IR isn't platform-independent this doesn't give you much. - # In fact, I tried to test the result in a VM-test, but as soon as JIT was used to optimize - # a query, postgres would coredump with `Illegal instruction`. - broken = (jitSupport && stdenv.hostPlatform != stdenv.buildPlatform) - # Allmost all tests fail FATAL errors for v12 and v13 - || (jitSupport && stdenv.hostPlatform.isMusl && olderThan "14"); - }; - }); - - postgresqlWithPackages = { postgresql, makeWrapper, buildEnv }: pkgs: f: buildEnv { - name = "postgresql-and-plugins-${postgresql.version}"; - paths = f pkgs ++ [ - postgresql - postgresql.lib - postgresql.man # in case user installs this into environment - ]; - nativeBuildInputs = [ makeWrapper ]; - - - # We include /bin to ensure the $out/bin directory is created, which is - # needed because we'll be removing the files from that directory in postBuild - # below. See #22653 - pathsToLink = ["/" "/bin"]; - - # Note: the duplication of executables is about 4MB size. - # So a nicer solution was patching postgresql to allow setting the - # libdir explicitly. 
- postBuild = '' - mkdir -p $out/bin - rm $out/bin/{pg_config,postgres,pg_ctl} - cp --target-directory=$out/bin ${postgresql}/bin/{postgres,pg_config,pg_ctl} - wrapProgram $out/bin/postgres --set NIX_PGLIBDIR $out/lib - ''; - - passthru.version = postgresql.version; - passthru.psqlSchema = postgresql.psqlSchema; - }; - -in -# passed by .nix -versionArgs: -# passed by default.nix -{ self, ... } @defaultArgs: -self.callPackage generic (defaultArgs // versionArgs) diff --git a/nix/postgresql/patches/less-is-more.patch b/nix/postgresql/patches/less-is-more.patch deleted file mode 100644 index a72d1a2..0000000 --- a/nix/postgresql/patches/less-is-more.patch +++ /dev/null @@ -1,11 +0,0 @@ ---- a/src/include/fe_utils/print.h -+++ b/src/include/fe_utils/print.h -@@ -18,7 +18,7 @@ - - /* This is not a particularly great place for this ... */ - #ifndef __CYGWIN__ --#define DEFAULT_PAGER "more" -+#define DEFAULT_PAGER "less" - #else - #define DEFAULT_PAGER "less" - #endif diff --git a/nix/postgresql/patches/locale-binary-path.patch b/nix/postgresql/patches/locale-binary-path.patch deleted file mode 100644 index 8068683..0000000 --- a/nix/postgresql/patches/locale-binary-path.patch +++ /dev/null @@ -1,11 +0,0 @@ ---- a/src/backend/commands/collationcmds.c -+++ b/src/backend/commands/collationcmds.c -@@ -611,7 +611,7 @@ pg_import_system_collations(PG_FUNCTION_ARGS) - aliases = (CollAliasData *) palloc(maxaliases * sizeof(CollAliasData)); - naliases = 0; - -- locale_a_handle = OpenPipeStream("locale -a", "r"); -+ locale_a_handle = OpenPipeStream("@locale@ -a", "r"); - if (locale_a_handle == NULL) - ereport(ERROR, - (errcode_for_file_access(), diff --git a/nix/postgresql/patches/paths-for-split-outputs.patch b/nix/postgresql/patches/paths-for-split-outputs.patch deleted file mode 100644 index 2134f7e..0000000 --- a/nix/postgresql/patches/paths-for-split-outputs.patch +++ /dev/null @@ -1,11 +0,0 @@ ---- a/src/common/config_info.c -+++ b/src/common/config_info.c -@@ -118,7 +118,7 
@@ - i++; - - configdata[i].name = pstrdup("PGXS"); -+ strlcpy(path, "@out@/lib", sizeof(path)); -- get_pkglib_path(my_exec_path, path); - strlcat(path, "/pgxs/src/makefiles/pgxs.mk", sizeof(path)); - cleanup_path(path); - configdata[i].setting = pstrdup(path); diff --git a/nix/postgresql/patches/paths-with-postgresql-suffix.patch b/nix/postgresql/patches/paths-with-postgresql-suffix.patch deleted file mode 100644 index 04d2f55..0000000 --- a/nix/postgresql/patches/paths-with-postgresql-suffix.patch +++ /dev/null @@ -1,41 +0,0 @@ -Nix outputs put the `name' in each store path like -/nix/store/...-. This was confusing the Postgres make script -because it thought its data directory already had postgresql in its -directory. This lead to Postgres installing all of its fils in -$out/share. To fix this, we just look for postgres or psql in the part -after the / using make's notdir. - ---- ---- a/src/Makefile.global.in -+++ b/src/Makefile.global.in -@@ -102,15 +102,15 @@ datarootdir := @datarootdir@ - bindir := @bindir@ - - datadir := @datadir@ --ifeq "$(findstring pgsql, $(datadir))" "" --ifeq "$(findstring postgres, $(datadir))" "" -+ifeq "$(findstring pgsql, $(notdir $(datadir)))" "" -+ifeq "$(findstring postgres, $(notdir $(datadir)))" "" - override datadir := $(datadir)/postgresql - endif - endif - - sysconfdir := @sysconfdir@ --ifeq "$(findstring pgsql, $(sysconfdir))" "" --ifeq "$(findstring postgres, $(sysconfdir))" "" -+ifeq "$(findstring pgsql, $(notdir $(sysconfdir)))" "" -+ifeq "$(findstring postgres, $(notdir $(sysconfdir)))" "" - override sysconfdir := $(sysconfdir)/postgresql - endif - endif -@@ -136,8 +136,8 @@ endif - mandir := @mandir@ - - docdir := @docdir@ --ifeq "$(findstring pgsql, $(docdir))" "" --ifeq "$(findstring postgres, $(docdir))" "" -+ifeq "$(findstring pgsql, $(notdir $(docdir)))" "" -+ifeq "$(findstring postgres, $(notdir $(docdir)))" "" - override docdir := $(docdir)/postgresql - endif - endif diff --git 
a/nix/postgresql/patches/relative-to-symlinks-16+.patch b/nix/postgresql/patches/relative-to-symlinks-16+.patch deleted file mode 100644 index 996072e..0000000 --- a/nix/postgresql/patches/relative-to-symlinks-16+.patch +++ /dev/null @@ -1,13 +0,0 @@ -On NixOS we *want* stuff relative to symlinks. ---- ---- a/src/common/exec.c -+++ b/src/common/exec.c -@@ -238,6 +238,8 @@ - static int - normalize_exec_path(char *path) - { -+ return 0; -+ - /* - * We used to do a lot of work ourselves here, but now we just let - * realpath(3) do all the heavy lifting. diff --git a/nix/postgresql/patches/relative-to-symlinks.patch b/nix/postgresql/patches/relative-to-symlinks.patch deleted file mode 100644 index c9b199b..0000000 --- a/nix/postgresql/patches/relative-to-symlinks.patch +++ /dev/null @@ -1,13 +0,0 @@ -On NixOS we *want* stuff relative to symlinks. ---- ---- a/src/common/exec.c -+++ b/src/common/exec.c -@@ -218,6 +218,8 @@ - static int - resolve_symlinks(char *path) - { -+ return 0; -+ - #ifdef HAVE_READLINK - struct stat buf; - char orig_wd[MAXPGPATH], diff --git a/nix/postgresql/patches/socketdir-in-run-13+.patch b/nix/postgresql/patches/socketdir-in-run-13+.patch deleted file mode 100644 index fd808b6..0000000 --- a/nix/postgresql/patches/socketdir-in-run-13+.patch +++ /dev/null @@ -1,11 +0,0 @@ ---- a/src/include/pg_config_manual.h -+++ b/src/include/pg_config_manual.h -@@ -201,7 +201,7 @@ - * support them yet. - */ - #ifndef WIN32 --#define DEFAULT_PGSOCKET_DIR "/tmp" -+#define DEFAULT_PGSOCKET_DIR "/run/postgresql" - #else - #define DEFAULT_PGSOCKET_DIR "" - #endif diff --git a/nix/postgresql/patches/socketdir-in-run.patch b/nix/postgresql/patches/socketdir-in-run.patch deleted file mode 100644 index 4932ef6..0000000 --- a/nix/postgresql/patches/socketdir-in-run.patch +++ /dev/null @@ -1,11 +0,0 @@ ---- a/src/include/pg_config_manual.h -+++ b/src/include/pg_config_manual.h -@@ -179,7 +179,7 @@ - * here's where to twiddle it. 
You can also override this at runtime - * with the postmaster's -k switch. - */ --#define DEFAULT_PGSOCKET_DIR "/tmp" -+#define DEFAULT_PGSOCKET_DIR "/run/postgresql" - - /* - * This is the default event source for Windows event log. diff --git a/nix/postgresql/patches/specify_pkglibdir_at_runtime.patch b/nix/postgresql/patches/specify_pkglibdir_at_runtime.patch deleted file mode 100644 index b94fc9e..0000000 --- a/nix/postgresql/patches/specify_pkglibdir_at_runtime.patch +++ /dev/null @@ -1,28 +0,0 @@ ---- a/src/port/path.c -+++ b/src/port/path.c -@@ -714,7 +714,11 @@ - void - get_lib_path(const char *my_exec_path, char *ret_path) - { -- make_relative_path(ret_path, LIBDIR, PGBINDIR, my_exec_path); -+ char const * const nix_pglibdir = getenv("NIX_PGLIBDIR"); -+ if(nix_pglibdir == NULL) -+ make_relative_path(ret_path, LIBDIR, PGBINDIR, my_exec_path); -+ else -+ make_relative_path(ret_path, nix_pglibdir, PGBINDIR, my_exec_path); - } - - /* -@@ -723,7 +727,11 @@ - void - get_pkglib_path(const char *my_exec_path, char *ret_path) - { -- make_relative_path(ret_path, PKGLIBDIR, PGBINDIR, my_exec_path); -+ char const * const nix_pglibdir = getenv("NIX_PGLIBDIR"); -+ if(nix_pglibdir == NULL) -+ make_relative_path(ret_path, PKGLIBDIR, PGBINDIR, my_exec_path); -+ else -+ make_relative_path(ret_path, nix_pglibdir, PGBINDIR, my_exec_path); - } - - /* diff --git a/nix/tealbase-groonga.nix b/nix/tealbase-groonga.nix deleted file mode 100644 index 7b50b8e..0000000 --- a/nix/tealbase-groonga.nix +++ /dev/null @@ -1,75 +0,0 @@ -{ lib, stdenv, cmake, fetchurl, kytea, msgpack-c, mecab, pkg-config, rapidjson -, testers, xxHash, zstd, postgresqlPackages, makeWrapper, suggestSupport ? false -, zeromq, libevent, openssl, lz4Support ? false, lz4, zlibSupport ? 
true, zlib -, writeShellScriptBin, callPackage }: -let mecab-naist-jdic = callPackage ./ext/mecab-naist-jdic { }; -in stdenv.mkDerivation (finalAttrs: { - pname = "tealbase-groonga"; - version = "14.0.5"; - src = fetchurl { - url = - "https://packages.groonga.org/source/groonga/groonga-${finalAttrs.version}.tar.gz"; - hash = "sha256-y4UGnv8kK0z+br8wXpPf57NMXkdEJHcLCuTvYiubnIc="; - }; - patches = - [ ./fix-cmake-install-path.patch ./do-not-use-vendored-libraries.patch ]; - nativeBuildInputs = [ cmake pkg-config makeWrapper ]; - buildInputs = [ rapidjson xxHash zstd mecab kytea msgpack-c ] - ++ lib.optionals lz4Support [ lz4 ] ++ lib.optional zlibSupport [ zlib ] - ++ lib.optionals suggestSupport [ zeromq libevent ]; - cmakeFlags = [ - "-DWITH_MECAB=ON" - "-DMECAB_DICDIR=${mecab-naist-jdic}/lib/mecab/dic/naist-jdic" - "-DMECAB_CONFIG=${mecab}/bin/mecab-config" - "-DENABLE_MECAB_TOKENIZER=ON" - "-DMECAB_INCLUDE_DIR=${mecab}/include" - "-DMECAB_LIBRARY=${mecab}/lib/libmecab.so" - "-DGROONGA_ENABLE_TOKENIZER_MECAB=YES" - "-DGRN_WITH_MECAB=YES" - ]; - preConfigure = '' - export MECAB_DICDIR=${mecab-naist-jdic}/lib/mecab/dic/naist-jdic - echo "MeCab dictionary directory is: $MECAB_DICDIR" - ''; - buildPhase = '' - cmake --build . 
-- VERBOSE=1 - grep -i mecab CMakeCache.txt || (echo "MeCab not detected in CMake cache" && exit 1) - echo "CMake cache contents related to MeCab:" - grep -i mecab CMakeCache.txt - ''; - - # installPhase = '' - # mkdir -p $out/bin $out/lib/groonga/plugins - # cp -r lib/groonga/plugins/* $out/lib/groonga/plugins - # cp -r bin/* $out/bin - # echo "Installed Groonga plugins:" - # ls -l $out/lib/groonga/plugins - # ''; - - postInstall = '' - echo "Searching for MeCab-related files:" - find $out -name "*mecab*" - - echo "Checking Groonga plugins directory:" - ls -l $out/lib/groonga/plugins - - echo "Wrapping Groonga binary:" - wrapProgram $out/bin/groonga \ - --set GRN_PLUGINS_DIR $out/lib/groonga/plugins - - ''; - env.NIX_CFLAGS_COMPILE = - lib.optionalString zlibSupport "-I${zlib.dev}/include"; - - meta = with lib; { - homepage = "https://groonga.org/"; - description = "Open-source fulltext search engine and column store"; - license = licenses.lgpl21; - maintainers = [ maintainers.samrose ]; - platforms = platforms.all; - longDescription = '' - Groonga is an open-source fulltext search engine and column store. - It lets you write high-performance applications that requires fulltext search. - ''; - }; -}) diff --git a/nix/tests/expected/extensions_sql_interface.out b/nix/tests/expected/extensions_sql_interface.out deleted file mode 100644 index 5ab593d..0000000 --- a/nix/tests/expected/extensions_sql_interface.out +++ /dev/null @@ -1,6312 +0,0 @@ -/* - -The purpose of this test is to monitor the SQL interface exposed -by Postgres extensions so we have to manually review/approve any difference -that emerge as versions change. 
- -*/ -/* - -List all extensions that are not enabled -If a new entry shows up in this list, that means a new extension has been -added and you should `create extension ...` to enable it in ./nix/tests/prime - -*/ -select - name -from - pg_available_extensions -where - installed_version is null -order by - name asc; - name ---------- - pg_cron -(1 row) - -/* - -Monitor relocatability and config of each extension -- lesson learned from pg_cron - -*/ -select - extname as extension_name, - extrelocatable as is_relocatable -from - pg_extension -order by - extname asc; - extension_name | is_relocatable -------------------------------+---------------- - address_standardizer | t - address_standardizer_data_us | t - adminpack | f - amcheck | t - autoinc | t - bloom | t - btree_gin | t - btree_gist | t - citext | t - cube | t - dblink | t - dict_int | t - dict_xsyn | t - earthdistance | t - file_fdw | t - fuzzystrmatch | t - hstore | t - http | f - hypopg | t - index_advisor | t - insert_username | t - intagg | t - intarray | t - isn | t - lo | t - ltree | t - moddatetime | t - old_snapshot | t - pageinspect | t - pg_backtrace | t - pg_buffercache | t - pg_freespacemap | t - pg_graphql | f - pg_hashids | t - pg_jsonschema | f - pg_net | f - pg_prewarm | t - pg_repack | f - pg_stat_monitor | t - pg_stat_statements | t - pg_surgery | t - pg_tle | f - pg_trgm | t - pg_visibility | t - pg_walinspect | t - pgaudit | t - pgcrypto | t - pgjwt | f - pgmq | f - pgroonga | f - pgroonga_database | f - pgrouting | t - pgrowlocks | t - pgsodium | f - pgstattuple | t - pgtap | t - plcoffee | f - plls | f - plpgsql | f - plpgsql_check | f - plv8 | f - postgis | f - postgis_raster | f - postgis_sfcgal | t - postgis_tiger_geocoder | f - postgis_topology | f - postgres_fdw | t - refint | t - rum | t - seg | t - sslinfo | t - tealbase_vault | f - tablefunc | t - tcn | t - timescaledb | f - tsm_system_rows | t - tsm_system_time | t - unaccent | t - uuid-ossp | t - vector | t - wrappers | f - 
xml2 | f -(82 rows) - -/* - -Monitor extension public function interface - -*/ -select - e.extname as extension_name, - n.nspname as schema_name, - p.proname as function_name, - pg_catalog.pg_get_function_identity_arguments(p.oid) as argument_types, - pg_catalog.pg_get_function_result(p.oid) as return_type -from - pg_catalog.pg_proc p - join pg_catalog.pg_namespace n - on n.oid = p.pronamespace - join pg_catalog.pg_depend d - on d.objid = p.oid - join pg_catalog.pg_extension e - on e.oid = d.refobjid -where - d.deptype = 'e' -order by - e.extname, - n.nspname, - p.proname, - pg_catalog.pg_get_function_identity_arguments(p.oid); - extension_name | schema_name | function_name | argument_types | return_type -------------------------+--------------------------+--------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - address_standardizer | public | parse_address | text, OUT num text, OUT street text, OUT street2 text, OUT address1 text, OUT city text, OUT state text, OUT zip text, OUT zipplus text, OUT country text | record - address_standardizer | public | standardize_address | lextab text, gaztab text, rultab text, address text | stdaddr - address_standardizer | public | standardize_address | lextab text, gaztab text, rultab text, micro text, macro text | stdaddr - adminpack | pg_catalog | pg_file_rename | text, text | boolean - adminpack | pg_catalog | pg_file_rename | text, text, text | boolean - adminpack | pg_catalog | pg_file_sync | text | void - adminpack | pg_catalog | pg_file_unlink | 
text | boolean - adminpack | pg_catalog | pg_file_write | text, text, boolean | bigint - adminpack | pg_catalog | pg_logdir_ls | | SETOF record - amcheck | public | bt_index_check | index regclass | void - amcheck | public | bt_index_check | index regclass, heapallindexed boolean | void - amcheck | public | bt_index_parent_check | index regclass | void - amcheck | public | bt_index_parent_check | index regclass, heapallindexed boolean | void - amcheck | public | bt_index_parent_check | index regclass, heapallindexed boolean, rootdescend boolean | void - amcheck | public | verify_heapam | relation regclass, on_error_stop boolean, check_toast boolean, skip text, startblock bigint, endblock bigint, OUT blkno bigint, OUT offnum integer, OUT attnum integer, OUT msg text | SETOF record - autoinc | public | autoinc | | trigger - bloom | public | blhandler | internal | index_am_handler - btree_gin | public | gin_btree_consistent | internal, smallint, anyelement, integer, internal, internal | boolean - btree_gin | public | gin_compare_prefix_anyenum | anyenum, anyenum, smallint, internal | integer - btree_gin | public | gin_compare_prefix_bit | bit, bit, smallint, internal | integer - btree_gin | public | gin_compare_prefix_bool | boolean, boolean, smallint, internal | integer - btree_gin | public | gin_compare_prefix_bpchar | character, character, smallint, internal | integer - btree_gin | public | gin_compare_prefix_bytea | bytea, bytea, smallint, internal | integer - btree_gin | public | gin_compare_prefix_char | "char", "char", smallint, internal | integer - btree_gin | public | gin_compare_prefix_cidr | cidr, cidr, smallint, internal | integer - btree_gin | public | gin_compare_prefix_date | date, date, smallint, internal | integer - btree_gin | public | gin_compare_prefix_float4 | real, real, smallint, internal | integer - btree_gin | public | gin_compare_prefix_float8 | double precision, double precision, smallint, internal | integer - btree_gin | public | 
gin_compare_prefix_inet | inet, inet, smallint, internal | integer - btree_gin | public | gin_compare_prefix_int2 | smallint, smallint, smallint, internal | integer - btree_gin | public | gin_compare_prefix_int4 | integer, integer, smallint, internal | integer - btree_gin | public | gin_compare_prefix_int8 | bigint, bigint, smallint, internal | integer - btree_gin | public | gin_compare_prefix_interval | interval, interval, smallint, internal | integer - btree_gin | public | gin_compare_prefix_macaddr | macaddr, macaddr, smallint, internal | integer - btree_gin | public | gin_compare_prefix_macaddr8 | macaddr8, macaddr8, smallint, internal | integer - btree_gin | public | gin_compare_prefix_money | money, money, smallint, internal | integer - btree_gin | public | gin_compare_prefix_name | name, name, smallint, internal | integer - btree_gin | public | gin_compare_prefix_numeric | numeric, numeric, smallint, internal | integer - btree_gin | public | gin_compare_prefix_oid | oid, oid, smallint, internal | integer - btree_gin | public | gin_compare_prefix_text | text, text, smallint, internal | integer - btree_gin | public | gin_compare_prefix_time | time without time zone, time without time zone, smallint, internal | integer - btree_gin | public | gin_compare_prefix_timestamp | timestamp without time zone, timestamp without time zone, smallint, internal | integer - btree_gin | public | gin_compare_prefix_timestamptz | timestamp with time zone, timestamp with time zone, smallint, internal | integer - btree_gin | public | gin_compare_prefix_timetz | time with time zone, time with time zone, smallint, internal | integer - btree_gin | public | gin_compare_prefix_uuid | uuid, uuid, smallint, internal | integer - btree_gin | public | gin_compare_prefix_varbit | bit varying, bit varying, smallint, internal | integer - btree_gin | public | gin_enum_cmp | anyenum, anyenum | integer - btree_gin | public | gin_extract_query_anyenum | anyenum, internal, smallint, internal, 
internal | internal - btree_gin | public | gin_extract_query_bit | bit, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_bool | boolean, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_bpchar | character, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_bytea | bytea, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_char | "char", internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_cidr | cidr, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_date | date, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_float4 | real, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_float8 | double precision, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_inet | inet, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_int2 | smallint, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_int4 | integer, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_int8 | bigint, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_interval | interval, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_macaddr | macaddr, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_macaddr8 | macaddr8, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_money | money, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_name | name, internal, smallint, internal, internal | internal - btree_gin | public | 
gin_extract_query_numeric | numeric, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_oid | oid, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_text | text, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_time | time without time zone, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_timestamp | timestamp without time zone, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_timestamptz | timestamp with time zone, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_timetz | time with time zone, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_uuid | uuid, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_query_varbit | bit varying, internal, smallint, internal, internal | internal - btree_gin | public | gin_extract_value_anyenum | anyenum, internal | internal - btree_gin | public | gin_extract_value_bit | bit, internal | internal - btree_gin | public | gin_extract_value_bool | boolean, internal | internal - btree_gin | public | gin_extract_value_bpchar | character, internal | internal - btree_gin | public | gin_extract_value_bytea | bytea, internal | internal - btree_gin | public | gin_extract_value_char | "char", internal | internal - btree_gin | public | gin_extract_value_cidr | cidr, internal | internal - btree_gin | public | gin_extract_value_date | date, internal | internal - btree_gin | public | gin_extract_value_float4 | real, internal | internal - btree_gin | public | gin_extract_value_float8 | double precision, internal | internal - btree_gin | public | gin_extract_value_inet | inet, internal | internal - btree_gin | public | gin_extract_value_int2 | smallint, internal | internal - btree_gin | public | gin_extract_value_int4 | integer, 
internal | internal - btree_gin | public | gin_extract_value_int8 | bigint, internal | internal - btree_gin | public | gin_extract_value_interval | interval, internal | internal - btree_gin | public | gin_extract_value_macaddr | macaddr, internal | internal - btree_gin | public | gin_extract_value_macaddr8 | macaddr8, internal | internal - btree_gin | public | gin_extract_value_money | money, internal | internal - btree_gin | public | gin_extract_value_name | name, internal | internal - btree_gin | public | gin_extract_value_numeric | numeric, internal | internal - btree_gin | public | gin_extract_value_oid | oid, internal | internal - btree_gin | public | gin_extract_value_text | text, internal | internal - btree_gin | public | gin_extract_value_time | time without time zone, internal | internal - btree_gin | public | gin_extract_value_timestamp | timestamp without time zone, internal | internal - btree_gin | public | gin_extract_value_timestamptz | timestamp with time zone, internal | internal - btree_gin | public | gin_extract_value_timetz | time with time zone, internal | internal - btree_gin | public | gin_extract_value_uuid | uuid, internal | internal - btree_gin | public | gin_extract_value_varbit | bit varying, internal | internal - btree_gin | public | gin_numeric_cmp | numeric, numeric | integer - btree_gist | public | cash_dist | money, money | money - btree_gist | public | date_dist | date, date | integer - btree_gist | public | float4_dist | real, real | real - btree_gist | public | float8_dist | double precision, double precision | double precision - btree_gist | public | gbt_bit_compress | internal | internal - btree_gist | public | gbt_bit_consistent | internal, bit, smallint, oid, internal | boolean - btree_gist | public | gbt_bit_penalty | internal, internal, internal | internal - btree_gist | public | gbt_bit_picksplit | internal, internal | internal - btree_gist | public | gbt_bit_same | gbtreekey_var, gbtreekey_var, internal | internal - 
btree_gist | public | gbt_bit_union | internal, internal | gbtreekey_var - btree_gist | public | gbt_bool_compress | internal | internal - btree_gist | public | gbt_bool_consistent | internal, boolean, smallint, oid, internal | boolean - btree_gist | public | gbt_bool_fetch | internal | internal - btree_gist | public | gbt_bool_penalty | internal, internal, internal | internal - btree_gist | public | gbt_bool_picksplit | internal, internal | internal - btree_gist | public | gbt_bool_same | gbtreekey2, gbtreekey2, internal | internal - btree_gist | public | gbt_bool_union | internal, internal | gbtreekey2 - btree_gist | public | gbt_bpchar_compress | internal | internal - btree_gist | public | gbt_bpchar_consistent | internal, character, smallint, oid, internal | boolean - btree_gist | public | gbt_bytea_compress | internal | internal - btree_gist | public | gbt_bytea_consistent | internal, bytea, smallint, oid, internal | boolean - btree_gist | public | gbt_bytea_penalty | internal, internal, internal | internal - btree_gist | public | gbt_bytea_picksplit | internal, internal | internal - btree_gist | public | gbt_bytea_same | gbtreekey_var, gbtreekey_var, internal | internal - btree_gist | public | gbt_bytea_union | internal, internal | gbtreekey_var - btree_gist | public | gbt_cash_compress | internal | internal - btree_gist | public | gbt_cash_consistent | internal, money, smallint, oid, internal | boolean - btree_gist | public | gbt_cash_distance | internal, money, smallint, oid, internal | double precision - btree_gist | public | gbt_cash_fetch | internal | internal - btree_gist | public | gbt_cash_penalty | internal, internal, internal | internal - btree_gist | public | gbt_cash_picksplit | internal, internal | internal - btree_gist | public | gbt_cash_same | gbtreekey16, gbtreekey16, internal | internal - btree_gist | public | gbt_cash_union | internal, internal | gbtreekey16 - btree_gist | public | gbt_date_compress | internal | internal - btree_gist | 
public | gbt_date_consistent | internal, date, smallint, oid, internal | boolean - btree_gist | public | gbt_date_distance | internal, date, smallint, oid, internal | double precision - btree_gist | public | gbt_date_fetch | internal | internal - btree_gist | public | gbt_date_penalty | internal, internal, internal | internal - btree_gist | public | gbt_date_picksplit | internal, internal | internal - btree_gist | public | gbt_date_same | gbtreekey8, gbtreekey8, internal | internal - btree_gist | public | gbt_date_union | internal, internal | gbtreekey8 - btree_gist | public | gbt_decompress | internal | internal - btree_gist | public | gbt_enum_compress | internal | internal - btree_gist | public | gbt_enum_consistent | internal, anyenum, smallint, oid, internal | boolean - btree_gist | public | gbt_enum_fetch | internal | internal - btree_gist | public | gbt_enum_penalty | internal, internal, internal | internal - btree_gist | public | gbt_enum_picksplit | internal, internal | internal - btree_gist | public | gbt_enum_same | gbtreekey8, gbtreekey8, internal | internal - btree_gist | public | gbt_enum_union | internal, internal | gbtreekey8 - btree_gist | public | gbt_float4_compress | internal | internal - btree_gist | public | gbt_float4_consistent | internal, real, smallint, oid, internal | boolean - btree_gist | public | gbt_float4_distance | internal, real, smallint, oid, internal | double precision - btree_gist | public | gbt_float4_fetch | internal | internal - btree_gist | public | gbt_float4_penalty | internal, internal, internal | internal - btree_gist | public | gbt_float4_picksplit | internal, internal | internal - btree_gist | public | gbt_float4_same | gbtreekey8, gbtreekey8, internal | internal - btree_gist | public | gbt_float4_union | internal, internal | gbtreekey8 - btree_gist | public | gbt_float8_compress | internal | internal - btree_gist | public | gbt_float8_consistent | internal, double precision, smallint, oid, internal | boolean - 
btree_gist | public | gbt_float8_distance | internal, double precision, smallint, oid, internal | double precision - btree_gist | public | gbt_float8_fetch | internal | internal - btree_gist | public | gbt_float8_penalty | internal, internal, internal | internal - btree_gist | public | gbt_float8_picksplit | internal, internal | internal - btree_gist | public | gbt_float8_same | gbtreekey16, gbtreekey16, internal | internal - btree_gist | public | gbt_float8_union | internal, internal | gbtreekey16 - btree_gist | public | gbt_inet_compress | internal | internal - btree_gist | public | gbt_inet_consistent | internal, inet, smallint, oid, internal | boolean - btree_gist | public | gbt_inet_penalty | internal, internal, internal | internal - btree_gist | public | gbt_inet_picksplit | internal, internal | internal - btree_gist | public | gbt_inet_same | gbtreekey16, gbtreekey16, internal | internal - btree_gist | public | gbt_inet_union | internal, internal | gbtreekey16 - btree_gist | public | gbt_int2_compress | internal | internal - btree_gist | public | gbt_int2_consistent | internal, smallint, smallint, oid, internal | boolean - btree_gist | public | gbt_int2_distance | internal, smallint, smallint, oid, internal | double precision - btree_gist | public | gbt_int2_fetch | internal | internal - btree_gist | public | gbt_int2_penalty | internal, internal, internal | internal - btree_gist | public | gbt_int2_picksplit | internal, internal | internal - btree_gist | public | gbt_int2_same | gbtreekey4, gbtreekey4, internal | internal - btree_gist | public | gbt_int2_union | internal, internal | gbtreekey4 - btree_gist | public | gbt_int4_compress | internal | internal - btree_gist | public | gbt_int4_consistent | internal, integer, smallint, oid, internal | boolean - btree_gist | public | gbt_int4_distance | internal, integer, smallint, oid, internal | double precision - btree_gist | public | gbt_int4_fetch | internal | internal - btree_gist | public | gbt_int4_penalty 
| internal, internal, internal | internal - btree_gist | public | gbt_int4_picksplit | internal, internal | internal - btree_gist | public | gbt_int4_same | gbtreekey8, gbtreekey8, internal | internal - btree_gist | public | gbt_int4_union | internal, internal | gbtreekey8 - btree_gist | public | gbt_int8_compress | internal | internal - btree_gist | public | gbt_int8_consistent | internal, bigint, smallint, oid, internal | boolean - btree_gist | public | gbt_int8_distance | internal, bigint, smallint, oid, internal | double precision - btree_gist | public | gbt_int8_fetch | internal | internal - btree_gist | public | gbt_int8_penalty | internal, internal, internal | internal - btree_gist | public | gbt_int8_picksplit | internal, internal | internal - btree_gist | public | gbt_int8_same | gbtreekey16, gbtreekey16, internal | internal - btree_gist | public | gbt_int8_union | internal, internal | gbtreekey16 - btree_gist | public | gbt_intv_compress | internal | internal - btree_gist | public | gbt_intv_consistent | internal, interval, smallint, oid, internal | boolean - btree_gist | public | gbt_intv_decompress | internal | internal - btree_gist | public | gbt_intv_distance | internal, interval, smallint, oid, internal | double precision - btree_gist | public | gbt_intv_fetch | internal | internal - btree_gist | public | gbt_intv_penalty | internal, internal, internal | internal - btree_gist | public | gbt_intv_picksplit | internal, internal | internal - btree_gist | public | gbt_intv_same | gbtreekey32, gbtreekey32, internal | internal - btree_gist | public | gbt_intv_union | internal, internal | gbtreekey32 - btree_gist | public | gbt_macad8_compress | internal | internal - btree_gist | public | gbt_macad8_consistent | internal, macaddr8, smallint, oid, internal | boolean - btree_gist | public | gbt_macad8_fetch | internal | internal - btree_gist | public | gbt_macad8_penalty | internal, internal, internal | internal - btree_gist | public | gbt_macad8_picksplit | 
internal, internal | internal - btree_gist | public | gbt_macad8_same | gbtreekey16, gbtreekey16, internal | internal - btree_gist | public | gbt_macad8_union | internal, internal | gbtreekey16 - btree_gist | public | gbt_macad_compress | internal | internal - btree_gist | public | gbt_macad_consistent | internal, macaddr, smallint, oid, internal | boolean - btree_gist | public | gbt_macad_fetch | internal | internal - btree_gist | public | gbt_macad_penalty | internal, internal, internal | internal - btree_gist | public | gbt_macad_picksplit | internal, internal | internal - btree_gist | public | gbt_macad_same | gbtreekey16, gbtreekey16, internal | internal - btree_gist | public | gbt_macad_union | internal, internal | gbtreekey16 - btree_gist | public | gbt_numeric_compress | internal | internal - btree_gist | public | gbt_numeric_consistent | internal, numeric, smallint, oid, internal | boolean - btree_gist | public | gbt_numeric_penalty | internal, internal, internal | internal - btree_gist | public | gbt_numeric_picksplit | internal, internal | internal - btree_gist | public | gbt_numeric_same | gbtreekey_var, gbtreekey_var, internal | internal - btree_gist | public | gbt_numeric_union | internal, internal | gbtreekey_var - btree_gist | public | gbt_oid_compress | internal | internal - btree_gist | public | gbt_oid_consistent | internal, oid, smallint, oid, internal | boolean - btree_gist | public | gbt_oid_distance | internal, oid, smallint, oid, internal | double precision - btree_gist | public | gbt_oid_fetch | internal | internal - btree_gist | public | gbt_oid_penalty | internal, internal, internal | internal - btree_gist | public | gbt_oid_picksplit | internal, internal | internal - btree_gist | public | gbt_oid_same | gbtreekey8, gbtreekey8, internal | internal - btree_gist | public | gbt_oid_union | internal, internal | gbtreekey8 - btree_gist | public | gbt_text_compress | internal | internal - btree_gist | public | gbt_text_consistent | internal, 
text, smallint, oid, internal | boolean - btree_gist | public | gbt_text_penalty | internal, internal, internal | internal - btree_gist | public | gbt_text_picksplit | internal, internal | internal - btree_gist | public | gbt_text_same | gbtreekey_var, gbtreekey_var, internal | internal - btree_gist | public | gbt_text_union | internal, internal | gbtreekey_var - btree_gist | public | gbt_time_compress | internal | internal - btree_gist | public | gbt_time_consistent | internal, time without time zone, smallint, oid, internal | boolean - btree_gist | public | gbt_time_distance | internal, time without time zone, smallint, oid, internal | double precision - btree_gist | public | gbt_time_fetch | internal | internal - btree_gist | public | gbt_time_penalty | internal, internal, internal | internal - btree_gist | public | gbt_time_picksplit | internal, internal | internal - btree_gist | public | gbt_time_same | gbtreekey16, gbtreekey16, internal | internal - btree_gist | public | gbt_time_union | internal, internal | gbtreekey16 - btree_gist | public | gbt_timetz_compress | internal | internal - btree_gist | public | gbt_timetz_consistent | internal, time with time zone, smallint, oid, internal | boolean - btree_gist | public | gbt_ts_compress | internal | internal - btree_gist | public | gbt_ts_consistent | internal, timestamp without time zone, smallint, oid, internal | boolean - btree_gist | public | gbt_ts_distance | internal, timestamp without time zone, smallint, oid, internal | double precision - btree_gist | public | gbt_ts_fetch | internal | internal - btree_gist | public | gbt_ts_penalty | internal, internal, internal | internal - btree_gist | public | gbt_ts_picksplit | internal, internal | internal - btree_gist | public | gbt_ts_same | gbtreekey16, gbtreekey16, internal | internal - btree_gist | public | gbt_ts_union | internal, internal | gbtreekey16 - btree_gist | public | gbt_tstz_compress | internal | internal - btree_gist | public | 
gbt_tstz_consistent | internal, timestamp with time zone, smallint, oid, internal | boolean - btree_gist | public | gbt_tstz_distance | internal, timestamp with time zone, smallint, oid, internal | double precision - btree_gist | public | gbt_uuid_compress | internal | internal - btree_gist | public | gbt_uuid_consistent | internal, uuid, smallint, oid, internal | boolean - btree_gist | public | gbt_uuid_fetch | internal | internal - btree_gist | public | gbt_uuid_penalty | internal, internal, internal | internal - btree_gist | public | gbt_uuid_picksplit | internal, internal | internal - btree_gist | public | gbt_uuid_same | gbtreekey32, gbtreekey32, internal | internal - btree_gist | public | gbt_uuid_union | internal, internal | gbtreekey32 - btree_gist | public | gbt_var_decompress | internal | internal - btree_gist | public | gbt_var_fetch | internal | internal - btree_gist | public | gbtreekey16_in | cstring | gbtreekey16 - btree_gist | public | gbtreekey16_out | gbtreekey16 | cstring - btree_gist | public | gbtreekey2_in | cstring | gbtreekey2 - btree_gist | public | gbtreekey2_out | gbtreekey2 | cstring - btree_gist | public | gbtreekey32_in | cstring | gbtreekey32 - btree_gist | public | gbtreekey32_out | gbtreekey32 | cstring - btree_gist | public | gbtreekey4_in | cstring | gbtreekey4 - btree_gist | public | gbtreekey4_out | gbtreekey4 | cstring - btree_gist | public | gbtreekey8_in | cstring | gbtreekey8 - btree_gist | public | gbtreekey8_out | gbtreekey8 | cstring - btree_gist | public | gbtreekey_var_in | cstring | gbtreekey_var - btree_gist | public | gbtreekey_var_out | gbtreekey_var | cstring - btree_gist | public | int2_dist | smallint, smallint | smallint - btree_gist | public | int4_dist | integer, integer | integer - btree_gist | public | int8_dist | bigint, bigint | bigint - btree_gist | public | interval_dist | interval, interval | interval - btree_gist | public | oid_dist | oid, oid | oid - btree_gist | public | time_dist | time without time 
zone, time without time zone | interval - btree_gist | public | ts_dist | timestamp without time zone, timestamp without time zone | interval - btree_gist | public | tstz_dist | timestamp with time zone, timestamp with time zone | interval - citext | public | citext | boolean | citext - citext | public | citext | character | citext - citext | public | citext | inet | citext - citext | public | citext_cmp | citext, citext | integer - citext | public | citext_eq | citext, citext | boolean - citext | public | citext_ge | citext, citext | boolean - citext | public | citext_gt | citext, citext | boolean - citext | public | citext_hash | citext | integer - citext | public | citext_hash_extended | citext, bigint | bigint - citext | public | citext_larger | citext, citext | citext - citext | public | citext_le | citext, citext | boolean - citext | public | citext_lt | citext, citext | boolean - citext | public | citext_ne | citext, citext | boolean - citext | public | citext_pattern_cmp | citext, citext | integer - citext | public | citext_pattern_ge | citext, citext | boolean - citext | public | citext_pattern_gt | citext, citext | boolean - citext | public | citext_pattern_le | citext, citext | boolean - citext | public | citext_pattern_lt | citext, citext | boolean - citext | public | citext_smaller | citext, citext | citext - citext | public | citextin | cstring | citext - citext | public | citextout | citext | cstring - citext | public | citextrecv | internal | citext - citext | public | citextsend | citext | bytea - citext | public | max | citext | citext - citext | public | min | citext | citext - citext | public | regexp_match | citext, citext | text[] - citext | public | regexp_match | citext, citext, text | text[] - citext | public | regexp_matches | citext, citext | SETOF text[] - citext | public | regexp_matches | citext, citext, text | SETOF text[] - citext | public | regexp_replace | citext, citext, text | text - citext | public | regexp_replace | citext, 
citext, text, text | text - citext | public | regexp_split_to_array | citext, citext | text[] - citext | public | regexp_split_to_array | citext, citext, text | text[] - citext | public | regexp_split_to_table | citext, citext | SETOF text - citext | public | regexp_split_to_table | citext, citext, text | SETOF text - citext | public | replace | citext, citext, citext | text - citext | public | split_part | citext, citext, integer | text - citext | public | strpos | citext, citext | integer - citext | public | texticlike | citext, citext | boolean - citext | public | texticlike | citext, text | boolean - citext | public | texticnlike | citext, citext | boolean - citext | public | texticnlike | citext, text | boolean - citext | public | texticregexeq | citext, citext | boolean - citext | public | texticregexeq | citext, text | boolean - citext | public | texticregexne | citext, citext | boolean - citext | public | texticregexne | citext, text | boolean - citext | public | translate | citext, citext, text | text - cube | public | cube | cube, double precision | cube - cube | public | cube | cube, double precision, double precision | cube - cube | public | cube | double precision | cube - cube | public | cube | double precision, double precision | cube - cube | public | cube | double precision[] | cube - cube | public | cube | double precision[], double precision[] | cube - cube | public | cube_cmp | cube, cube | integer - cube | public | cube_contained | cube, cube | boolean - cube | public | cube_contains | cube, cube | boolean - cube | public | cube_coord | cube, integer | double precision - cube | public | cube_coord_llur | cube, integer | double precision - cube | public | cube_dim | cube | integer - cube | public | cube_distance | cube, cube | double precision - cube | public | cube_enlarge | cube, double precision, integer | cube - cube | public | cube_eq | cube, cube | boolean - cube | public | cube_ge | cube, cube | boolean - cube | public | cube_gt | cube, 
cube | boolean - cube | public | cube_in | cstring | cube - cube | public | cube_inter | cube, cube | cube - cube | public | cube_is_point | cube | boolean - cube | public | cube_le | cube, cube | boolean - cube | public | cube_ll_coord | cube, integer | double precision - cube | public | cube_lt | cube, cube | boolean - cube | public | cube_ne | cube, cube | boolean - cube | public | cube_out | cube | cstring - cube | public | cube_overlap | cube, cube | boolean - cube | public | cube_recv | internal | cube - cube | public | cube_send | cube | bytea - cube | public | cube_size | cube | double precision - cube | public | cube_subset | cube, integer[] | cube - cube | public | cube_union | cube, cube | cube - cube | public | cube_ur_coord | cube, integer | double precision - cube | public | distance_chebyshev | cube, cube | double precision - cube | public | distance_taxicab | cube, cube | double precision - cube | public | g_cube_consistent | internal, cube, smallint, oid, internal | boolean - cube | public | g_cube_distance | internal, cube, smallint, oid, internal | double precision - cube | public | g_cube_penalty | internal, internal, internal | internal - cube | public | g_cube_picksplit | internal, internal | internal - cube | public | g_cube_same | cube, cube, internal | internal - cube | public | g_cube_union | internal, internal | cube - dblink | public | dblink | text | SETOF record - dblink | public | dblink | text, boolean | SETOF record - dblink | public | dblink | text, text | SETOF record - dblink | public | dblink | text, text, boolean | SETOF record - dblink | public | dblink_build_sql_delete | text, int2vector, integer, text[] | text - dblink | public | dblink_build_sql_insert | text, int2vector, integer, text[], text[] | text - dblink | public | dblink_build_sql_update | text, int2vector, integer, text[], text[] | text - dblink | public | dblink_cancel_query | text | text - dblink | public | dblink_close | text | text - dblink | public | 
dblink_close | text, boolean | text - dblink | public | dblink_close | text, text | text - dblink | public | dblink_close | text, text, boolean | text - dblink | public | dblink_connect | text | text - dblink | public | dblink_connect | text, text | text - dblink | public | dblink_connect_u | text | text - dblink | public | dblink_connect_u | text, text | text - dblink | public | dblink_current_query | | text - dblink | public | dblink_disconnect | | text - dblink | public | dblink_disconnect | text | text - dblink | public | dblink_error_message | text | text - dblink | public | dblink_exec | text | text - dblink | public | dblink_exec | text, boolean | text - dblink | public | dblink_exec | text, text | text - dblink | public | dblink_exec | text, text, boolean | text - dblink | public | dblink_fdw_validator | options text[], catalog oid | void - dblink | public | dblink_fetch | text, integer | SETOF record - dblink | public | dblink_fetch | text, integer, boolean | SETOF record - dblink | public | dblink_fetch | text, text, integer | SETOF record - dblink | public | dblink_fetch | text, text, integer, boolean | SETOF record - dblink | public | dblink_get_connections | | text[] - dblink | public | dblink_get_notify | OUT notify_name text, OUT be_pid integer, OUT extra text | SETOF record - dblink | public | dblink_get_notify | conname text, OUT notify_name text, OUT be_pid integer, OUT extra text | SETOF record - dblink | public | dblink_get_pkey | text | SETOF dblink_pkey_results - dblink | public | dblink_get_result | text | SETOF record - dblink | public | dblink_get_result | text, boolean | SETOF record - dblink | public | dblink_is_busy | text | integer - dblink | public | dblink_open | text, text | text - dblink | public | dblink_open | text, text, boolean | text - dblink | public | dblink_open | text, text, text | text - dblink | public | dblink_open | text, text, text, boolean | text - dblink | public | dblink_send_query | text, text | integer - dict_int 
| public | dintdict_init | internal | internal - dict_int | public | dintdict_lexize | internal, internal, internal, internal | internal - dict_xsyn | public | dxsyn_init | internal | internal - dict_xsyn | public | dxsyn_lexize | internal, internal, internal, internal | internal - earthdistance | public | earth | | double precision - earthdistance | public | earth_box | earth, double precision | cube - earthdistance | public | earth_distance | earth, earth | double precision - earthdistance | public | gc_to_sec | double precision | double precision - earthdistance | public | geo_distance | point, point | double precision - earthdistance | public | latitude | earth | double precision - earthdistance | public | ll_to_earth | double precision, double precision | earth - earthdistance | public | longitude | earth | double precision - earthdistance | public | sec_to_gc | double precision | double precision - file_fdw | public | file_fdw_handler | | fdw_handler - file_fdw | public | file_fdw_validator | text[], oid | void - fuzzystrmatch | public | difference | text, text | integer - fuzzystrmatch | public | dmetaphone | text | text - fuzzystrmatch | public | dmetaphone_alt | text | text - fuzzystrmatch | public | levenshtein | text, text | integer - fuzzystrmatch | public | levenshtein | text, text, integer, integer, integer | integer - fuzzystrmatch | public | levenshtein_less_equal | text, text, integer | integer - fuzzystrmatch | public | levenshtein_less_equal | text, text, integer, integer, integer, integer | integer - fuzzystrmatch | public | metaphone | text, integer | text - fuzzystrmatch | public | soundex | text | text - fuzzystrmatch | public | text_soundex | text | text - hstore | public | akeys | hstore | text[] - hstore | public | avals | hstore | text[] - hstore | public | defined | hstore, text | boolean - hstore | public | delete | hstore, hstore | hstore - hstore | public | delete | hstore, text | hstore - hstore | public | delete | hstore, text[] | 
hstore - hstore | public | each | hs hstore, OUT key text, OUT value text | SETOF record - hstore | public | exist | hstore, text | boolean - hstore | public | exists_all | hstore, text[] | boolean - hstore | public | exists_any | hstore, text[] | boolean - hstore | public | fetchval | hstore, text | text - hstore | public | ghstore_compress | internal | internal - hstore | public | ghstore_consistent | internal, hstore, smallint, oid, internal | boolean - hstore | public | ghstore_decompress | internal | internal - hstore | public | ghstore_in | cstring | ghstore - hstore | public | ghstore_options | internal | void - hstore | public | ghstore_out | ghstore | cstring - hstore | public | ghstore_penalty | internal, internal, internal | internal - hstore | public | ghstore_picksplit | internal, internal | internal - hstore | public | ghstore_same | ghstore, ghstore, internal | internal - hstore | public | ghstore_union | internal, internal | ghstore - hstore | public | gin_consistent_hstore | internal, smallint, hstore, integer, internal, internal | boolean - hstore | public | gin_extract_hstore | hstore, internal | internal - hstore | public | gin_extract_hstore_query | hstore, internal, smallint, internal, internal | internal - hstore | public | hs_concat | hstore, hstore | hstore - hstore | public | hs_contained | hstore, hstore | boolean - hstore | public | hs_contains | hstore, hstore | boolean - hstore | public | hstore | record | hstore - hstore | public | hstore | text, text | hstore - hstore | public | hstore | text[] | hstore - hstore | public | hstore | text[], text[] | hstore - hstore | public | hstore_cmp | hstore, hstore | integer - hstore | public | hstore_eq | hstore, hstore | boolean - hstore | public | hstore_ge | hstore, hstore | boolean - hstore | public | hstore_gt | hstore, hstore | boolean - hstore | public | hstore_hash | hstore | integer - hstore | public | hstore_hash_extended | hstore, bigint | bigint - hstore | public | hstore_in | 
cstring | hstore - hstore | public | hstore_le | hstore, hstore | boolean - hstore | public | hstore_lt | hstore, hstore | boolean - hstore | public | hstore_ne | hstore, hstore | boolean - hstore | public | hstore_out | hstore | cstring - hstore | public | hstore_recv | internal | hstore - hstore | public | hstore_send | hstore | bytea - hstore | public | hstore_subscript_handler | internal | internal - hstore | public | hstore_to_array | hstore | text[] - hstore | public | hstore_to_json | hstore | json - hstore | public | hstore_to_json_loose | hstore | json - hstore | public | hstore_to_jsonb | hstore | jsonb - hstore | public | hstore_to_jsonb_loose | hstore | jsonb - hstore | public | hstore_to_matrix | hstore | text[] - hstore | public | hstore_version_diag | hstore | integer - hstore | public | isdefined | hstore, text | boolean - hstore | public | isexists | hstore, text | boolean - hstore | public | populate_record | anyelement, hstore | anyelement - hstore | public | skeys | hstore | SETOF text - hstore | public | slice | hstore, text[] | hstore - hstore | public | slice_array | hstore, text[] | text[] - hstore | public | svals | hstore | SETOF text - hstore | public | tconvert | text, text | hstore - http | public | bytea_to_text | data bytea | text - http | public | http | request http_request | http_response - http | public | http_delete | uri character varying | http_response - http | public | http_delete | uri character varying, content character varying, content_type character varying | http_response - http | public | http_get | uri character varying | http_response - http | public | http_get | uri character varying, data jsonb | http_response - http | public | http_head | uri character varying | http_response - http | public | http_header | field character varying, value character varying | http_header - http | public | http_list_curlopt | | TABLE(curlopt text, value text) - http | public | http_patch | uri character varying, content character 
varying, content_type character varying | http_response - http | public | http_post | uri character varying, content character varying, content_type character varying | http_response - http | public | http_post | uri character varying, data jsonb | http_response - http | public | http_put | uri character varying, content character varying, content_type character varying | http_response - http | public | http_reset_curlopt | | boolean - http | public | http_set_curlopt | curlopt character varying, value character varying | boolean - http | public | text_to_bytea | data text | bytea - http | public | urlencode | data jsonb | text - http | public | urlencode | string bytea | text - http | public | urlencode | string character varying | text - hypopg | public | hypopg | OUT indexname text, OUT indexrelid oid, OUT indrelid oid, OUT innatts integer, OUT indisunique boolean, OUT indkey int2vector, OUT indcollation oidvector, OUT indclass oidvector, OUT indoption oidvector, OUT indexprs pg_node_tree, OUT indpred pg_node_tree, OUT amid oid | SETOF record - hypopg | public | hypopg_create_index | sql_order text, OUT indexrelid oid, OUT indexname text | SETOF record - hypopg | public | hypopg_drop_index | indexid oid | boolean - hypopg | public | hypopg_get_indexdef | indexid oid | text - hypopg | public | hypopg_hidden_indexes | | TABLE(indexid oid) - hypopg | public | hypopg_hide_index | indexid oid | boolean - hypopg | public | hypopg_relation_size | indexid oid | bigint - hypopg | public | hypopg_reset | | void - hypopg | public | hypopg_reset_index | | void - hypopg | public | hypopg_unhide_all_indexes | | void - hypopg | public | hypopg_unhide_index | indexid oid | boolean - index_advisor | public | index_advisor | query text | TABLE(startup_cost_before jsonb, startup_cost_after jsonb, total_cost_before jsonb, total_cost_after jsonb, index_statements text[], errors text[]) - insert_username | public | insert_username | | trigger - intagg | public | int_agg_final_array | 
internal | integer[] - intagg | public | int_agg_state | internal, integer | internal - intagg | public | int_array_aggregate | integer | integer[] - intagg | public | int_array_enum | integer[] | SETOF integer - intarray | public | _int_contained | integer[], integer[] | boolean - intarray | public | _int_contained_joinsel | internal, oid, internal, smallint, internal | double precision - intarray | public | _int_contained_sel | internal, oid, internal, integer | double precision - intarray | public | _int_contains | integer[], integer[] | boolean - intarray | public | _int_contains_joinsel | internal, oid, internal, smallint, internal | double precision - intarray | public | _int_contains_sel | internal, oid, internal, integer | double precision - intarray | public | _int_different | integer[], integer[] | boolean - intarray | public | _int_inter | integer[], integer[] | integer[] - intarray | public | _int_matchsel | internal, oid, internal, integer | double precision - intarray | public | _int_overlap | integer[], integer[] | boolean - intarray | public | _int_overlap_joinsel | internal, oid, internal, smallint, internal | double precision - intarray | public | _int_overlap_sel | internal, oid, internal, integer | double precision - intarray | public | _int_same | integer[], integer[] | boolean - intarray | public | _int_union | integer[], integer[] | integer[] - intarray | public | _intbig_in | cstring | intbig_gkey - intarray | public | _intbig_out | intbig_gkey | cstring - intarray | public | boolop | integer[], query_int | boolean - intarray | public | bqarr_in | cstring | query_int - intarray | public | bqarr_out | query_int | cstring - intarray | public | g_int_compress | internal | internal - intarray | public | g_int_consistent | internal, integer[], smallint, oid, internal | boolean - intarray | public | g_int_decompress | internal | internal - intarray | public | g_int_options | internal | void - intarray | public | g_int_penalty | internal, internal, 
internal | internal - intarray | public | g_int_picksplit | internal, internal | internal - intarray | public | g_int_same | integer[], integer[], internal | internal - intarray | public | g_int_union | internal, internal | integer[] - intarray | public | g_intbig_compress | internal | internal - intarray | public | g_intbig_consistent | internal, integer[], smallint, oid, internal | boolean - intarray | public | g_intbig_decompress | internal | internal - intarray | public | g_intbig_options | internal | void - intarray | public | g_intbig_penalty | internal, internal, internal | internal - intarray | public | g_intbig_picksplit | internal, internal | internal - intarray | public | g_intbig_same | intbig_gkey, intbig_gkey, internal | internal - intarray | public | g_intbig_union | internal, internal | intbig_gkey - intarray | public | ginint4_consistent | internal, smallint, integer[], integer, internal, internal, internal, internal | boolean - intarray | public | ginint4_queryextract | integer[], internal, smallint, internal, internal, internal, internal | internal - intarray | public | icount | integer[] | integer - intarray | public | idx | integer[], integer | integer - intarray | public | intarray_del_elem | integer[], integer | integer[] - intarray | public | intarray_push_array | integer[], integer[] | integer[] - intarray | public | intarray_push_elem | integer[], integer | integer[] - intarray | public | intset | integer | integer[] - intarray | public | intset_subtract | integer[], integer[] | integer[] - intarray | public | intset_union_elem | integer[], integer | integer[] - intarray | public | querytree | query_int | text - intarray | public | rboolop | query_int, integer[] | boolean - intarray | public | sort | integer[] | integer[] - intarray | public | sort | integer[], text | integer[] - intarray | public | sort_asc | integer[] | integer[] - intarray | public | sort_desc | integer[] | integer[] - intarray | public | subarray | integer[], integer | 
integer[] - intarray | public | subarray | integer[], integer, integer | integer[] - intarray | public | uniq | integer[] | integer[] - isn | public | btean13cmp | ean13, ean13 | integer - isn | public | btean13cmp | ean13, isbn | integer - isn | public | btean13cmp | ean13, isbn13 | integer - isn | public | btean13cmp | ean13, ismn | integer - isn | public | btean13cmp | ean13, ismn13 | integer - isn | public | btean13cmp | ean13, issn | integer - isn | public | btean13cmp | ean13, issn13 | integer - isn | public | btean13cmp | ean13, upc | integer - isn | public | btisbn13cmp | isbn13, ean13 | integer - isn | public | btisbn13cmp | isbn13, isbn | integer - isn | public | btisbn13cmp | isbn13, isbn13 | integer - isn | public | btisbncmp | isbn, ean13 | integer - isn | public | btisbncmp | isbn, isbn | integer - isn | public | btisbncmp | isbn, isbn13 | integer - isn | public | btismn13cmp | ismn13, ean13 | integer - isn | public | btismn13cmp | ismn13, ismn | integer - isn | public | btismn13cmp | ismn13, ismn13 | integer - isn | public | btismncmp | ismn, ean13 | integer - isn | public | btismncmp | ismn, ismn | integer - isn | public | btismncmp | ismn, ismn13 | integer - isn | public | btissn13cmp | issn13, ean13 | integer - isn | public | btissn13cmp | issn13, issn | integer - isn | public | btissn13cmp | issn13, issn13 | integer - isn | public | btissncmp | issn, ean13 | integer - isn | public | btissncmp | issn, issn | integer - isn | public | btissncmp | issn, issn13 | integer - isn | public | btupccmp | upc, ean13 | integer - isn | public | btupccmp | upc, upc | integer - isn | public | ean13_in | cstring | ean13 - isn | public | ean13_out | ean13 | cstring - isn | public | ean13_out | isbn13 | cstring - isn | public | ean13_out | ismn13 | cstring - isn | public | ean13_out | issn13 | cstring - isn | public | hashean13 | ean13 | integer - isn | public | hashisbn | isbn | integer - isn | public | hashisbn13 | isbn13 | integer - isn | public | hashismn | 
ismn | integer - isn | public | hashismn13 | ismn13 | integer - isn | public | hashissn | issn | integer - isn | public | hashissn13 | issn13 | integer - isn | public | hashupc | upc | integer - isn | public | is_valid | ean13 | boolean - isn | public | is_valid | isbn | boolean - isn | public | is_valid | isbn13 | boolean - isn | public | is_valid | ismn | boolean - isn | public | is_valid | ismn13 | boolean - isn | public | is_valid | issn | boolean - isn | public | is_valid | issn13 | boolean - isn | public | is_valid | upc | boolean - isn | public | isbn | ean13 | isbn - isn | public | isbn13 | ean13 | isbn13 - isn | public | isbn13_in | cstring | isbn13 - isn | public | isbn_in | cstring | isbn - isn | public | ismn | ean13 | ismn - isn | public | ismn13 | ean13 | ismn13 - isn | public | ismn13_in | cstring | ismn13 - isn | public | ismn_in | cstring | ismn - isn | public | isn_out | isbn | cstring - isn | public | isn_out | ismn | cstring - isn | public | isn_out | issn | cstring - isn | public | isn_out | upc | cstring - isn | public | isn_weak | | boolean - isn | public | isn_weak | boolean | boolean - isn | public | isneq | ean13, ean13 | boolean - isn | public | isneq | ean13, isbn | boolean - isn | public | isneq | ean13, isbn13 | boolean - isn | public | isneq | ean13, ismn | boolean - isn | public | isneq | ean13, ismn13 | boolean - isn | public | isneq | ean13, issn | boolean - isn | public | isneq | ean13, issn13 | boolean - isn | public | isneq | ean13, upc | boolean - isn | public | isneq | isbn, ean13 | boolean - isn | public | isneq | isbn, isbn | boolean - isn | public | isneq | isbn, isbn13 | boolean - isn | public | isneq | isbn13, ean13 | boolean - isn | public | isneq | isbn13, isbn | boolean - isn | public | isneq | isbn13, isbn13 | boolean - isn | public | isneq | ismn, ean13 | boolean - isn | public | isneq | ismn, ismn | boolean - isn | public | isneq | ismn, ismn13 | boolean - isn | public | isneq | ismn13, ean13 | boolean - isn | 
public | isneq | ismn13, ismn | boolean - isn | public | isneq | ismn13, ismn13 | boolean - isn | public | isneq | issn, ean13 | boolean - isn | public | isneq | issn, issn | boolean - isn | public | isneq | issn, issn13 | boolean - isn | public | isneq | issn13, ean13 | boolean - isn | public | isneq | issn13, issn | boolean - isn | public | isneq | issn13, issn13 | boolean - isn | public | isneq | upc, ean13 | boolean - isn | public | isneq | upc, upc | boolean - isn | public | isnge | ean13, ean13 | boolean - isn | public | isnge | ean13, isbn | boolean - isn | public | isnge | ean13, isbn13 | boolean - isn | public | isnge | ean13, ismn | boolean - isn | public | isnge | ean13, ismn13 | boolean - isn | public | isnge | ean13, issn | boolean - isn | public | isnge | ean13, issn13 | boolean - isn | public | isnge | ean13, upc | boolean - isn | public | isnge | isbn, ean13 | boolean - isn | public | isnge | isbn, isbn | boolean - isn | public | isnge | isbn, isbn13 | boolean - isn | public | isnge | isbn13, ean13 | boolean - isn | public | isnge | isbn13, isbn | boolean - isn | public | isnge | isbn13, isbn13 | boolean - isn | public | isnge | ismn, ean13 | boolean - isn | public | isnge | ismn, ismn | boolean - isn | public | isnge | ismn, ismn13 | boolean - isn | public | isnge | ismn13, ean13 | boolean - isn | public | isnge | ismn13, ismn | boolean - isn | public | isnge | ismn13, ismn13 | boolean - isn | public | isnge | issn, ean13 | boolean - isn | public | isnge | issn, issn | boolean - isn | public | isnge | issn, issn13 | boolean - isn | public | isnge | issn13, ean13 | boolean - isn | public | isnge | issn13, issn | boolean - isn | public | isnge | issn13, issn13 | boolean - isn | public | isnge | upc, ean13 | boolean - isn | public | isnge | upc, upc | boolean - isn | public | isngt | ean13, ean13 | boolean - isn | public | isngt | ean13, isbn | boolean - isn | public | isngt | ean13, isbn13 | boolean - isn | public | isngt | ean13, ismn | boolean - 
isn | public | isngt | ean13, ismn13 | boolean - isn | public | isngt | ean13, issn | boolean - isn | public | isngt | ean13, issn13 | boolean - isn | public | isngt | ean13, upc | boolean - isn | public | isngt | isbn, ean13 | boolean - isn | public | isngt | isbn, isbn | boolean - isn | public | isngt | isbn, isbn13 | boolean - isn | public | isngt | isbn13, ean13 | boolean - isn | public | isngt | isbn13, isbn | boolean - isn | public | isngt | isbn13, isbn13 | boolean - isn | public | isngt | ismn, ean13 | boolean - isn | public | isngt | ismn, ismn | boolean - isn | public | isngt | ismn, ismn13 | boolean - isn | public | isngt | ismn13, ean13 | boolean - isn | public | isngt | ismn13, ismn | boolean - isn | public | isngt | ismn13, ismn13 | boolean - isn | public | isngt | issn, ean13 | boolean - isn | public | isngt | issn, issn | boolean - isn | public | isngt | issn, issn13 | boolean - isn | public | isngt | issn13, ean13 | boolean - isn | public | isngt | issn13, issn | boolean - isn | public | isngt | issn13, issn13 | boolean - isn | public | isngt | upc, ean13 | boolean - isn | public | isngt | upc, upc | boolean - isn | public | isnle | ean13, ean13 | boolean - isn | public | isnle | ean13, isbn | boolean - isn | public | isnle | ean13, isbn13 | boolean - isn | public | isnle | ean13, ismn | boolean - isn | public | isnle | ean13, ismn13 | boolean - isn | public | isnle | ean13, issn | boolean - isn | public | isnle | ean13, issn13 | boolean - isn | public | isnle | ean13, upc | boolean - isn | public | isnle | isbn, ean13 | boolean - isn | public | isnle | isbn, isbn | boolean - isn | public | isnle | isbn, isbn13 | boolean - isn | public | isnle | isbn13, ean13 | boolean - isn | public | isnle | isbn13, isbn | boolean - isn | public | isnle | isbn13, isbn13 | boolean - isn | public | isnle | ismn, ean13 | boolean - isn | public | isnle | ismn, ismn | boolean - isn | public | isnle | ismn, ismn13 | boolean - isn | public | isnle | ismn13, ean13 | 
boolean - isn | public | isnle | ismn13, ismn | boolean - isn | public | isnle | ismn13, ismn13 | boolean - isn | public | isnle | issn, ean13 | boolean - isn | public | isnle | issn, issn | boolean - isn | public | isnle | issn, issn13 | boolean - isn | public | isnle | issn13, ean13 | boolean - isn | public | isnle | issn13, issn | boolean - isn | public | isnle | issn13, issn13 | boolean - isn | public | isnle | upc, ean13 | boolean - isn | public | isnle | upc, upc | boolean - isn | public | isnlt | ean13, ean13 | boolean - isn | public | isnlt | ean13, isbn | boolean - isn | public | isnlt | ean13, isbn13 | boolean - isn | public | isnlt | ean13, ismn | boolean - isn | public | isnlt | ean13, ismn13 | boolean - isn | public | isnlt | ean13, issn | boolean - isn | public | isnlt | ean13, issn13 | boolean - isn | public | isnlt | ean13, upc | boolean - isn | public | isnlt | isbn, ean13 | boolean - isn | public | isnlt | isbn, isbn | boolean - isn | public | isnlt | isbn, isbn13 | boolean - isn | public | isnlt | isbn13, ean13 | boolean - isn | public | isnlt | isbn13, isbn | boolean - isn | public | isnlt | isbn13, isbn13 | boolean - isn | public | isnlt | ismn, ean13 | boolean - isn | public | isnlt | ismn, ismn | boolean - isn | public | isnlt | ismn, ismn13 | boolean - isn | public | isnlt | ismn13, ean13 | boolean - isn | public | isnlt | ismn13, ismn | boolean - isn | public | isnlt | ismn13, ismn13 | boolean - isn | public | isnlt | issn, ean13 | boolean - isn | public | isnlt | issn, issn | boolean - isn | public | isnlt | issn, issn13 | boolean - isn | public | isnlt | issn13, ean13 | boolean - isn | public | isnlt | issn13, issn | boolean - isn | public | isnlt | issn13, issn13 | boolean - isn | public | isnlt | upc, ean13 | boolean - isn | public | isnlt | upc, upc | boolean - isn | public | isnne | ean13, ean13 | boolean - isn | public | isnne | ean13, isbn | boolean - isn | public | isnne | ean13, isbn13 | boolean - isn | public | isnne | ean13, 
ismn | boolean - isn | public | isnne | ean13, ismn13 | boolean - isn | public | isnne | ean13, issn | boolean - isn | public | isnne | ean13, issn13 | boolean - isn | public | isnne | ean13, upc | boolean - isn | public | isnne | isbn, ean13 | boolean - isn | public | isnne | isbn, isbn | boolean - isn | public | isnne | isbn, isbn13 | boolean - isn | public | isnne | isbn13, ean13 | boolean - isn | public | isnne | isbn13, isbn | boolean - isn | public | isnne | isbn13, isbn13 | boolean - isn | public | isnne | ismn, ean13 | boolean - isn | public | isnne | ismn, ismn | boolean - isn | public | isnne | ismn, ismn13 | boolean - isn | public | isnne | ismn13, ean13 | boolean - isn | public | isnne | ismn13, ismn | boolean - isn | public | isnne | ismn13, ismn13 | boolean - isn | public | isnne | issn, ean13 | boolean - isn | public | isnne | issn, issn | boolean - isn | public | isnne | issn, issn13 | boolean - isn | public | isnne | issn13, ean13 | boolean - isn | public | isnne | issn13, issn | boolean - isn | public | isnne | issn13, issn13 | boolean - isn | public | isnne | upc, ean13 | boolean - isn | public | isnne | upc, upc | boolean - isn | public | issn | ean13 | issn - isn | public | issn13 | ean13 | issn13 - isn | public | issn13_in | cstring | issn13 - isn | public | issn_in | cstring | issn - isn | public | make_valid | ean13 | ean13 - isn | public | make_valid | isbn | isbn - isn | public | make_valid | isbn13 | isbn13 - isn | public | make_valid | ismn | ismn - isn | public | make_valid | ismn13 | ismn13 - isn | public | make_valid | issn | issn - isn | public | make_valid | issn13 | issn13 - isn | public | make_valid | upc | upc - isn | public | upc | ean13 | upc - isn | public | upc_in | cstring | upc - lo | public | lo_manage | | trigger - lo | public | lo_oid | lo | oid - ltree | public | _lt_q_regex | ltree[], lquery[] | boolean - ltree | public | _lt_q_rregex | lquery[], ltree[] | boolean - ltree | public | _ltq_extract_regex | ltree[], lquery 
| ltree - ltree | public | _ltq_regex | ltree[], lquery | boolean - ltree | public | _ltq_rregex | lquery, ltree[] | boolean - ltree | public | _ltree_compress | internal | internal - ltree | public | _ltree_consistent | internal, ltree[], smallint, oid, internal | boolean - ltree | public | _ltree_extract_isparent | ltree[], ltree | ltree - ltree | public | _ltree_extract_risparent | ltree[], ltree | ltree - ltree | public | _ltree_gist_options | internal | void - ltree | public | _ltree_isparent | ltree[], ltree | boolean - ltree | public | _ltree_penalty | internal, internal, internal | internal - ltree | public | _ltree_picksplit | internal, internal | internal - ltree | public | _ltree_r_isparent | ltree, ltree[] | boolean - ltree | public | _ltree_r_risparent | ltree, ltree[] | boolean - ltree | public | _ltree_risparent | ltree[], ltree | boolean - ltree | public | _ltree_same | ltree_gist, ltree_gist, internal | internal - ltree | public | _ltree_union | internal, internal | ltree_gist - ltree | public | _ltxtq_exec | ltree[], ltxtquery | boolean - ltree | public | _ltxtq_extract_exec | ltree[], ltxtquery | ltree - ltree | public | _ltxtq_rexec | ltxtquery, ltree[] | boolean - ltree | public | index | ltree, ltree | integer - ltree | public | index | ltree, ltree, integer | integer - ltree | public | lca | ltree, ltree | ltree - ltree | public | lca | ltree, ltree, ltree | ltree - ltree | public | lca | ltree, ltree, ltree, ltree | ltree - ltree | public | lca | ltree, ltree, ltree, ltree, ltree | ltree - ltree | public | lca | ltree, ltree, ltree, ltree, ltree, ltree | ltree - ltree | public | lca | ltree, ltree, ltree, ltree, ltree, ltree, ltree | ltree - ltree | public | lca | ltree, ltree, ltree, ltree, ltree, ltree, ltree, ltree | ltree - ltree | public | lca | ltree[] | ltree - ltree | public | lquery_in | cstring | lquery - ltree | public | lquery_out | lquery | cstring - ltree | public | lquery_recv | internal | lquery - ltree | public | lquery_send 
| lquery | bytea - ltree | public | lt_q_regex | ltree, lquery[] | boolean - ltree | public | lt_q_rregex | lquery[], ltree | boolean - ltree | public | ltq_regex | ltree, lquery | boolean - ltree | public | ltq_rregex | lquery, ltree | boolean - ltree | public | ltree2text | ltree | text - ltree | public | ltree_addltree | ltree, ltree | ltree - ltree | public | ltree_addtext | ltree, text | ltree - ltree | public | ltree_cmp | ltree, ltree | integer - ltree | public | ltree_compress | internal | internal - ltree | public | ltree_consistent | internal, ltree, smallint, oid, internal | boolean - ltree | public | ltree_decompress | internal | internal - ltree | public | ltree_eq | ltree, ltree | boolean - ltree | public | ltree_ge | ltree, ltree | boolean - ltree | public | ltree_gist_in | cstring | ltree_gist - ltree | public | ltree_gist_options | internal | void - ltree | public | ltree_gist_out | ltree_gist | cstring - ltree | public | ltree_gt | ltree, ltree | boolean - ltree | public | ltree_in | cstring | ltree - ltree | public | ltree_isparent | ltree, ltree | boolean - ltree | public | ltree_le | ltree, ltree | boolean - ltree | public | ltree_lt | ltree, ltree | boolean - ltree | public | ltree_ne | ltree, ltree | boolean - ltree | public | ltree_out | ltree | cstring - ltree | public | ltree_penalty | internal, internal, internal | internal - ltree | public | ltree_picksplit | internal, internal | internal - ltree | public | ltree_recv | internal | ltree - ltree | public | ltree_risparent | ltree, ltree | boolean - ltree | public | ltree_same | ltree_gist, ltree_gist, internal | internal - ltree | public | ltree_send | ltree | bytea - ltree | public | ltree_textadd | text, ltree | ltree - ltree | public | ltree_union | internal, internal | ltree_gist - ltree | public | ltreeparentsel | internal, oid, internal, integer | double precision - ltree | public | ltxtq_exec | ltree, ltxtquery | boolean - ltree | public | ltxtq_in | cstring | ltxtquery - ltree | 
public | ltxtq_out | ltxtquery | cstring - ltree | public | ltxtq_recv | internal | ltxtquery - ltree | public | ltxtq_rexec | ltxtquery, ltree | boolean - ltree | public | ltxtq_send | ltxtquery | bytea - ltree | public | nlevel | ltree | integer - ltree | public | subltree | ltree, integer, integer | ltree - ltree | public | subpath | ltree, integer | ltree - ltree | public | subpath | ltree, integer, integer | ltree - ltree | public | text2ltree | text | ltree - moddatetime | public | moddatetime | | trigger - old_snapshot | public | pg_old_snapshot_time_mapping | OUT array_offset integer, OUT end_timestamp timestamp with time zone, OUT newest_xmin xid | SETOF record - pageinspect | public | brin_metapage_info | page bytea, OUT magic text, OUT version integer, OUT pagesperrange integer, OUT lastrevmappage bigint | record - pageinspect | public | brin_page_items | page bytea, index_oid regclass, OUT itemoffset integer, OUT blknum bigint, OUT attnum integer, OUT allnulls boolean, OUT hasnulls boolean, OUT placeholder boolean, OUT value text | SETOF record - pageinspect | public | brin_page_type | page bytea | text - pageinspect | public | brin_revmap_data | page bytea, OUT pages tid | SETOF tid - pageinspect | public | bt_metap | relname text, OUT magic integer, OUT version integer, OUT root bigint, OUT level bigint, OUT fastroot bigint, OUT fastlevel bigint, OUT last_cleanup_num_delpages bigint, OUT last_cleanup_num_tuples double precision, OUT allequalimage boolean | record - pageinspect | public | bt_page_items | page bytea, OUT itemoffset smallint, OUT ctid tid, OUT itemlen smallint, OUT nulls boolean, OUT vars boolean, OUT data text, OUT dead boolean, OUT htid tid, OUT tids tid[] | SETOF record - pageinspect | public | bt_page_items | relname text, blkno bigint, OUT itemoffset smallint, OUT ctid tid, OUT itemlen smallint, OUT nulls boolean, OUT vars boolean, OUT data text, OUT dead boolean, OUT htid tid, OUT tids tid[] | SETOF record - pageinspect | public | 
bt_page_stats | relname text, blkno bigint, OUT blkno bigint, OUT type "char", OUT live_items integer, OUT dead_items integer, OUT avg_item_size integer, OUT page_size integer, OUT free_size integer, OUT btpo_prev bigint, OUT btpo_next bigint, OUT btpo_level bigint, OUT btpo_flags integer | record - pageinspect | public | fsm_page_contents | page bytea | text - pageinspect | public | get_raw_page | text, bigint | bytea - pageinspect | public | get_raw_page | text, text, bigint | bytea - pageinspect | public | gin_leafpage_items | page bytea, OUT first_tid tid, OUT nbytes smallint, OUT tids tid[] | SETOF record - pageinspect | public | gin_metapage_info | page bytea, OUT pending_head bigint, OUT pending_tail bigint, OUT tail_free_size integer, OUT n_pending_pages bigint, OUT n_pending_tuples bigint, OUT n_total_pages bigint, OUT n_entry_pages bigint, OUT n_data_pages bigint, OUT n_entries bigint, OUT version integer | record - pageinspect | public | gin_page_opaque_info | page bytea, OUT rightlink bigint, OUT maxoff integer, OUT flags text[] | record - pageinspect | public | gist_page_items | page bytea, index_oid regclass, OUT itemoffset smallint, OUT ctid tid, OUT itemlen smallint, OUT dead boolean, OUT keys text | SETOF record - pageinspect | public | gist_page_items_bytea | page bytea, OUT itemoffset smallint, OUT ctid tid, OUT itemlen smallint, OUT dead boolean, OUT key_data bytea | SETOF record - pageinspect | public | gist_page_opaque_info | page bytea, OUT lsn pg_lsn, OUT nsn pg_lsn, OUT rightlink bigint, OUT flags text[] | record - pageinspect | public | hash_bitmap_info | index_oid regclass, blkno bigint, OUT bitmapblkno bigint, OUT bitmapbit integer, OUT bitstatus boolean | SETOF record - pageinspect | public | hash_metapage_info | page bytea, OUT magic bigint, OUT version bigint, OUT ntuples double precision, OUT ffactor integer, OUT bsize integer, OUT bmsize integer, OUT bmshift integer, OUT maxbucket bigint, OUT highmask bigint, OUT lowmask bigint, OUT 
ovflpoint bigint, OUT firstfree bigint, OUT nmaps bigint, OUT procid oid, OUT spares bigint[], OUT mapp bigint[] | record - pageinspect | public | hash_page_items | page bytea, OUT itemoffset integer, OUT ctid tid, OUT data bigint | SETOF record - pageinspect | public | hash_page_stats | page bytea, OUT live_items integer, OUT dead_items integer, OUT page_size integer, OUT free_size integer, OUT hasho_prevblkno bigint, OUT hasho_nextblkno bigint, OUT hasho_bucket bigint, OUT hasho_flag integer, OUT hasho_page_id integer | record - pageinspect | public | hash_page_type | page bytea | text - pageinspect | public | heap_page_item_attrs | page bytea, rel_oid regclass, OUT lp smallint, OUT lp_off smallint, OUT lp_flags smallint, OUT lp_len smallint, OUT t_xmin xid, OUT t_xmax xid, OUT t_field3 integer, OUT t_ctid tid, OUT t_infomask2 integer, OUT t_infomask integer, OUT t_hoff smallint, OUT t_bits text, OUT t_oid oid, OUT t_attrs bytea[] | SETOF record - pageinspect | public | heap_page_item_attrs | page bytea, rel_oid regclass, do_detoast boolean, OUT lp smallint, OUT lp_off smallint, OUT lp_flags smallint, OUT lp_len smallint, OUT t_xmin xid, OUT t_xmax xid, OUT t_field3 integer, OUT t_ctid tid, OUT t_infomask2 integer, OUT t_infomask integer, OUT t_hoff smallint, OUT t_bits text, OUT t_oid oid, OUT t_attrs bytea[] | SETOF record - pageinspect | public | heap_page_items | page bytea, OUT lp smallint, OUT lp_off smallint, OUT lp_flags smallint, OUT lp_len smallint, OUT t_xmin xid, OUT t_xmax xid, OUT t_field3 integer, OUT t_ctid tid, OUT t_infomask2 integer, OUT t_infomask integer, OUT t_hoff smallint, OUT t_bits text, OUT t_oid oid, OUT t_data bytea | SETOF record - pageinspect | public | heap_tuple_infomask_flags | t_infomask integer, t_infomask2 integer, OUT raw_flags text[], OUT combined_flags text[] | record - pageinspect | public | page_checksum | page bytea, blkno bigint | smallint - pageinspect | public | page_header | page bytea, OUT lsn pg_lsn, OUT checksum 
smallint, OUT flags smallint, OUT lower integer, OUT upper integer, OUT special integer, OUT pagesize integer, OUT version smallint, OUT prune_xid xid | record - pageinspect | public | tuple_data_split | rel_oid oid, t_data bytea, t_infomask integer, t_infomask2 integer, t_bits text | bytea[] - pageinspect | public | tuple_data_split | rel_oid oid, t_data bytea, t_infomask integer, t_infomask2 integer, t_bits text, do_detoast boolean | bytea[] - pg_backtrace | public | pg_backtrace_init | | void - pg_buffercache | public | pg_buffercache_pages | | SETOF record - pg_freespacemap | public | pg_freespace | regclass, bigint | smallint - pg_freespacemap | public | pg_freespace | rel regclass, OUT blkno bigint, OUT avail smallint | SETOF record - pg_graphql | graphql | _internal_resolve | query text, variables jsonb, "operationName" text, extensions jsonb | jsonb - pg_graphql | graphql | comment_directive | comment_ text | jsonb - pg_graphql | graphql | exception | message text | text - pg_graphql | graphql | get_schema_version | | integer - pg_graphql | graphql | increment_schema_version | | event_trigger - pg_graphql | graphql | resolve | query text, variables jsonb, "operationName" text, extensions jsonb | jsonb - pg_hashids | public | hash_decode | text, text, integer | integer - pg_hashids | public | hash_encode | bigint | text - pg_hashids | public | hash_encode | bigint, text | text - pg_hashids | public | hash_encode | bigint, text, integer | text - pg_hashids | public | id_decode | text | bigint[] - pg_hashids | public | id_decode | text, text | bigint[] - pg_hashids | public | id_decode | text, text, integer | bigint[] - pg_hashids | public | id_decode | text, text, integer, text | bigint[] - pg_hashids | public | id_decode_once | text | bigint - pg_hashids | public | id_decode_once | text, text | bigint - pg_hashids | public | id_decode_once | text, text, integer | bigint - pg_hashids | public | id_decode_once | text, text, integer, text | bigint - pg_hashids 
| public | id_encode | bigint | text - pg_hashids | public | id_encode | bigint, text | text - pg_hashids | public | id_encode | bigint, text, integer | text - pg_hashids | public | id_encode | bigint, text, integer, text | text - pg_hashids | public | id_encode | bigint[] | text - pg_hashids | public | id_encode | bigint[], text | text - pg_hashids | public | id_encode | bigint[], text, integer | text - pg_hashids | public | id_encode | bigint[], text, integer, text | text - pg_jsonschema | public | json_matches_schema | schema json, instance json | boolean - pg_jsonschema | public | jsonb_matches_schema | schema json, instance jsonb | boolean - pg_jsonschema | public | jsonschema_is_valid | schema json | boolean - pg_jsonschema | public | jsonschema_validation_errors | schema json, instance json | text[] - pg_net | net | _await_response | request_id bigint | boolean - pg_net | net | _encode_url_with_params_array | url text, params_array text[] | text - pg_net | net | _http_collect_response | request_id bigint, async boolean | net.http_response_result - pg_net | net | _urlencode_string | string character varying | text - pg_net | net | check_worker_is_up | | void - pg_net | net | http_collect_response | request_id bigint, async boolean | net.http_response_result - pg_net | net | http_delete | url text, params jsonb, headers jsonb, timeout_milliseconds integer | bigint - pg_net | net | http_get | url text, params jsonb, headers jsonb, timeout_milliseconds integer | bigint - pg_net | net | http_post | url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer | bigint - pg_net | net | worker_restart | | boolean - pg_prewarm | public | autoprewarm_dump_now | | bigint - pg_prewarm | public | autoprewarm_start_worker | | void - pg_prewarm | public | pg_prewarm | regclass, mode text, fork text, first_block bigint, last_block bigint | bigint - pg_repack | repack | conflicted_triggers | oid | SETOF name - pg_repack | repack | create_index_type | oid, 
oid | void - pg_repack | repack | create_log_table | oid | void - pg_repack | repack | create_table | oid, name | void - pg_repack | repack | disable_autovacuum | regclass | void - pg_repack | repack | get_alter_col_storage | oid | text - pg_repack | repack | get_assign | oid, text | text - pg_repack | repack | get_columns_for_create_as | oid | text - pg_repack | repack | get_compare_pkey | oid, text | text - pg_repack | repack | get_create_index_type | oid, name | text - pg_repack | repack | get_create_trigger | relid oid, pkid oid | text - pg_repack | repack | get_drop_columns | oid, text | text - pg_repack | repack | get_enable_trigger | relid oid | text - pg_repack | repack | get_index_columns | oid | text - pg_repack | repack | get_order_by | oid, oid | text - pg_repack | repack | get_storage_param | oid | text - pg_repack | repack | get_table_and_inheritors | regclass | regclass[] - pg_repack | repack | oid2text | oid | text - pg_repack | repack | repack_apply | sql_peek cstring, sql_insert cstring, sql_delete cstring, sql_update cstring, sql_pop cstring, count integer | integer - pg_repack | repack | repack_drop | oid, integer | void - pg_repack | repack | repack_index_swap | oid | void - pg_repack | repack | repack_indexdef | oid, oid, name, boolean | text - pg_repack | repack | repack_swap | oid | void - pg_repack | repack | repack_trigger | | trigger - pg_repack | repack | version | | text - pg_repack | repack | version_sql | | text - pg_stat_monitor | public | decode_error_level | elevel integer | text - pg_stat_monitor | public | get_cmd_type | cmd_type integer | text - pg_stat_monitor | public | get_histogram_timings | | text - pg_stat_monitor | public | histogram | _bucket integer, _quryid bigint | SETOF record - pg_stat_monitor | public | pg_stat_monitor_internal | showtext boolean, OUT bucket bigint, OUT userid oid, OUT username text, OUT dbid oid, OUT datname text, OUT client_ip bigint, OUT queryid bigint, OUT planid bigint, OUT query text, OUT 
query_plan text, OUT pgsm_query_id bigint, OUT top_queryid bigint, OUT top_query text, OUT application_name text, OUT relations text, OUT cmd_type integer, OUT elevel integer, OUT sqlcode text, OUT message text, OUT bucket_start_time timestamp with time zone, OUT calls bigint, OUT total_exec_time double precision, OUT min_exec_time double precision, OUT max_exec_time double precision, OUT mean_exec_time double precision, OUT stddev_exec_time double precision, OUT rows bigint, OUT plans bigint, OUT total_plan_time double precision, OUT min_plan_time double precision, OUT max_plan_time double precision, OUT mean_plan_time double precision, OUT stddev_plan_time double precision, OUT shared_blks_hit bigint, OUT shared_blks_read bigint, OUT shared_blks_dirtied bigint, OUT shared_blks_written bigint, OUT local_blks_hit bigint, OUT local_blks_read bigint, OUT local_blks_dirtied bigint, OUT local_blks_written bigint, OUT temp_blks_read bigint, OUT temp_blks_written bigint, OUT shared_blk_read_time double precision, OUT shared_blk_write_time double precision, OUT local_blk_read_time double precision, OUT local_blk_write_time double precision, OUT temp_blk_read_time double precision, OUT temp_blk_write_time double precision, OUT resp_calls text, OUT cpu_user_time double precision, OUT cpu_sys_time double precision, OUT wal_records bigint, OUT wal_fpi bigint, OUT wal_bytes numeric, OUT comments text, OUT jit_functions bigint, OUT jit_generation_time double precision, OUT jit_inlining_count bigint, OUT jit_inlining_time double precision, OUT jit_optimization_count bigint, OUT jit_optimization_time double precision, OUT jit_emission_count bigint, OUT jit_emission_time double precision, OUT jit_deform_count bigint, OUT jit_deform_time double precision, OUT stats_since timestamp with time zone, OUT minmax_stats_since timestamp with time zone, OUT toplevel boolean, OUT bucket_done boolean | SETOF record - pg_stat_monitor | public | pg_stat_monitor_reset | | void - pg_stat_monitor 
| public | pg_stat_monitor_version | | text - pg_stat_monitor | public | pgsm_create_11_view | | integer - pg_stat_monitor | public | pgsm_create_13_view | | integer - pg_stat_monitor | public | pgsm_create_14_view | | integer - pg_stat_monitor | public | pgsm_create_15_view | | integer - pg_stat_monitor | public | pgsm_create_17_view | | integer - pg_stat_monitor | public | pgsm_create_view | | integer - pg_stat_monitor | public | range | | text[] - pg_stat_statements | public | pg_stat_statements | showtext boolean, OUT userid oid, OUT dbid oid, OUT toplevel boolean, OUT queryid bigint, OUT query text, OUT plans bigint, OUT total_plan_time double precision, OUT min_plan_time double precision, OUT max_plan_time double precision, OUT mean_plan_time double precision, OUT stddev_plan_time double precision, OUT calls bigint, OUT total_exec_time double precision, OUT min_exec_time double precision, OUT max_exec_time double precision, OUT mean_exec_time double precision, OUT stddev_exec_time double precision, OUT rows bigint, OUT shared_blks_hit bigint, OUT shared_blks_read bigint, OUT shared_blks_dirtied bigint, OUT shared_blks_written bigint, OUT local_blks_hit bigint, OUT local_blks_read bigint, OUT local_blks_dirtied bigint, OUT local_blks_written bigint, OUT temp_blks_read bigint, OUT temp_blks_written bigint, OUT blk_read_time double precision, OUT blk_write_time double precision, OUT temp_blk_read_time double precision, OUT temp_blk_write_time double precision, OUT wal_records bigint, OUT wal_fpi bigint, OUT wal_bytes numeric, OUT jit_functions bigint, OUT jit_generation_time double precision, OUT jit_inlining_count bigint, OUT jit_inlining_time double precision, OUT jit_optimization_count bigint, OUT jit_optimization_time double precision, OUT jit_emission_count bigint, OUT jit_emission_time double precision | SETOF record - pg_stat_statements | public | pg_stat_statements_info | OUT dealloc bigint, OUT stats_reset timestamp with time zone | record - 
pg_stat_statements | public | pg_stat_statements_reset | userid oid, dbid oid, queryid bigint | void - pg_surgery | public | heap_force_freeze | reloid regclass, tids tid[] | void - pg_surgery | public | heap_force_kill | reloid regclass, tids tid[] | void - pg_tle | pgtle | available_extension_versions | OUT name name, OUT version text, OUT superuser boolean, OUT trusted boolean, OUT relocatable boolean, OUT schema name, OUT requires name[], OUT comment text | SETOF record - pg_tle | pgtle | available_extensions | OUT name name, OUT default_version text, OUT comment text | SETOF record - pg_tle | pgtle | create_base_type | typenamespace regnamespace, typename name, infunc regprocedure, outfunc regprocedure, internallength integer, alignment text, storage text | void - pg_tle | pgtle | create_base_type_if_not_exists | typenamespace regnamespace, typename name, infunc regprocedure, outfunc regprocedure, internallength integer, alignment text, storage text | boolean - pg_tle | pgtle | create_operator_func | typenamespace regnamespace, typename name, opfunc regprocedure | void - pg_tle | pgtle | create_operator_func_if_not_exists | typenamespace regnamespace, typename name, opfunc regprocedure | boolean - pg_tle | pgtle | create_shell_type | typenamespace regnamespace, typename name | void - pg_tle | pgtle | create_shell_type_if_not_exists | typenamespace regnamespace, typename name | boolean - pg_tle | pgtle | extension_update_paths | name name, OUT source text, OUT target text, OUT path text | SETOF record - pg_tle | pgtle | install_extension | name text, version text, description text, ext text, requires text[] | boolean - pg_tle | pgtle | install_extension_version_sql | name text, version text, ext text | boolean - pg_tle | pgtle | install_update_path | name text, fromvers text, tovers text, ext text | boolean - pg_tle | pgtle | pg_tle_feature_info_sql_drop | | event_trigger - pg_tle | pgtle | register_feature | proc regproc, feature pgtle.pg_tle_features | void - 
pg_tle | pgtle | register_feature_if_not_exists | proc regproc, feature pgtle.pg_tle_features | boolean - pg_tle | pgtle | set_default_version | name text, version text | boolean - pg_tle | pgtle | uninstall_extension | extname text | boolean - pg_tle | pgtle | uninstall_extension | extname text, version text | boolean - pg_tle | pgtle | uninstall_extension_if_exists | extname text | boolean - pg_tle | pgtle | uninstall_update_path | extname text, fromvers text, tovers text | boolean - pg_tle | pgtle | uninstall_update_path_if_exists | extname text, fromvers text, tovers text | boolean - pg_tle | pgtle | unregister_feature | proc regproc, feature pgtle.pg_tle_features | void - pg_tle | pgtle | unregister_feature_if_exists | proc regproc, feature pgtle.pg_tle_features | boolean - pg_trgm | public | gin_extract_query_trgm | text, internal, smallint, internal, internal, internal, internal | internal - pg_trgm | public | gin_extract_value_trgm | text, internal | internal - pg_trgm | public | gin_trgm_consistent | internal, smallint, text, integer, internal, internal, internal, internal | boolean - pg_trgm | public | gin_trgm_triconsistent | internal, smallint, text, integer, internal, internal, internal | "char" - pg_trgm | public | gtrgm_compress | internal | internal - pg_trgm | public | gtrgm_consistent | internal, text, smallint, oid, internal | boolean - pg_trgm | public | gtrgm_decompress | internal | internal - pg_trgm | public | gtrgm_distance | internal, text, smallint, oid, internal | double precision - pg_trgm | public | gtrgm_in | cstring | gtrgm - pg_trgm | public | gtrgm_options | internal | void - pg_trgm | public | gtrgm_out | gtrgm | cstring - pg_trgm | public | gtrgm_penalty | internal, internal, internal | internal - pg_trgm | public | gtrgm_picksplit | internal, internal | internal - pg_trgm | public | gtrgm_same | gtrgm, gtrgm, internal | internal - pg_trgm | public | gtrgm_union | internal, internal | gtrgm - pg_trgm | public | set_limit | real | 
real - pg_trgm | public | show_limit | | real - pg_trgm | public | show_trgm | text | text[] - pg_trgm | public | similarity | text, text | real - pg_trgm | public | similarity_dist | text, text | real - pg_trgm | public | similarity_op | text, text | boolean - pg_trgm | public | strict_word_similarity | text, text | real - pg_trgm | public | strict_word_similarity_commutator_op | text, text | boolean - pg_trgm | public | strict_word_similarity_dist_commutator_op | text, text | real - pg_trgm | public | strict_word_similarity_dist_op | text, text | real - pg_trgm | public | strict_word_similarity_op | text, text | boolean - pg_trgm | public | word_similarity | text, text | real - pg_trgm | public | word_similarity_commutator_op | text, text | boolean - pg_trgm | public | word_similarity_dist_commutator_op | text, text | real - pg_trgm | public | word_similarity_dist_op | text, text | real - pg_trgm | public | word_similarity_op | text, text | boolean - pg_visibility | public | pg_check_frozen | regclass, OUT t_ctid tid | SETOF tid - pg_visibility | public | pg_check_visible | regclass, OUT t_ctid tid | SETOF tid - pg_visibility | public | pg_truncate_visibility_map | regclass | void - pg_visibility | public | pg_visibility | regclass, OUT blkno bigint, OUT all_visible boolean, OUT all_frozen boolean, OUT pd_all_visible boolean | SETOF record - pg_visibility | public | pg_visibility | regclass, blkno bigint, OUT all_visible boolean, OUT all_frozen boolean, OUT pd_all_visible boolean | record - pg_visibility | public | pg_visibility_map | regclass, OUT blkno bigint, OUT all_visible boolean, OUT all_frozen boolean | SETOF record - pg_visibility | public | pg_visibility_map | regclass, blkno bigint, OUT all_visible boolean, OUT all_frozen boolean | record - pg_visibility | public | pg_visibility_map_summary | regclass, OUT all_visible bigint, OUT all_frozen bigint | record - pg_walinspect | public | pg_get_wal_record_info | in_lsn pg_lsn, OUT start_lsn pg_lsn, OUT 
end_lsn pg_lsn, OUT prev_lsn pg_lsn, OUT xid xid, OUT resource_manager text, OUT record_type text, OUT record_length integer, OUT main_data_length integer, OUT fpi_length integer, OUT description text, OUT block_ref text | record - pg_walinspect | public | pg_get_wal_records_info | start_lsn pg_lsn, end_lsn pg_lsn, OUT start_lsn pg_lsn, OUT end_lsn pg_lsn, OUT prev_lsn pg_lsn, OUT xid xid, OUT resource_manager text, OUT record_type text, OUT record_length integer, OUT main_data_length integer, OUT fpi_length integer, OUT description text, OUT block_ref text | SETOF record - pg_walinspect | public | pg_get_wal_records_info_till_end_of_wal | start_lsn pg_lsn, OUT start_lsn pg_lsn, OUT end_lsn pg_lsn, OUT prev_lsn pg_lsn, OUT xid xid, OUT resource_manager text, OUT record_type text, OUT record_length integer, OUT main_data_length integer, OUT fpi_length integer, OUT description text, OUT block_ref text | SETOF record - pg_walinspect | public | pg_get_wal_stats | start_lsn pg_lsn, end_lsn pg_lsn, per_record boolean, OUT "resource_manager/record_type" text, OUT count bigint, OUT count_percentage double precision, OUT record_size bigint, OUT record_size_percentage double precision, OUT fpi_size bigint, OUT fpi_size_percentage double precision, OUT combined_size bigint, OUT combined_size_percentage double precision | SETOF record - pg_walinspect | public | pg_get_wal_stats_till_end_of_wal | start_lsn pg_lsn, per_record boolean, OUT "resource_manager/record_type" text, OUT count bigint, OUT count_percentage double precision, OUT record_size bigint, OUT record_size_percentage double precision, OUT fpi_size bigint, OUT fpi_size_percentage double precision, OUT combined_size bigint, OUT combined_size_percentage double precision | SETOF record - pgaudit | public | pgaudit_ddl_command_end | | event_trigger - pgaudit | public | pgaudit_sql_drop | | event_trigger - pgcrypto | public | armor | bytea | text - pgcrypto | public | armor | bytea, text[], text[] | text - pgcrypto | 
public | crypt | text, text | text - pgcrypto | public | dearmor | text | bytea - pgcrypto | public | decrypt | bytea, bytea, text | bytea - pgcrypto | public | decrypt_iv | bytea, bytea, bytea, text | bytea - pgcrypto | public | digest | bytea, text | bytea - pgcrypto | public | digest | text, text | bytea - pgcrypto | public | encrypt | bytea, bytea, text | bytea - pgcrypto | public | encrypt_iv | bytea, bytea, bytea, text | bytea - pgcrypto | public | gen_random_bytes | integer | bytea - pgcrypto | public | gen_random_uuid | | uuid - pgcrypto | public | gen_salt | text | text - pgcrypto | public | gen_salt | text, integer | text - pgcrypto | public | hmac | bytea, bytea, text | bytea - pgcrypto | public | hmac | text, text, text | bytea - pgcrypto | public | pgp_armor_headers | text, OUT key text, OUT value text | SETOF record - pgcrypto | public | pgp_key_id | bytea | text - pgcrypto | public | pgp_pub_decrypt | bytea, bytea | text - pgcrypto | public | pgp_pub_decrypt | bytea, bytea, text | text - pgcrypto | public | pgp_pub_decrypt | bytea, bytea, text, text | text - pgcrypto | public | pgp_pub_decrypt_bytea | bytea, bytea | bytea - pgcrypto | public | pgp_pub_decrypt_bytea | bytea, bytea, text | bytea - pgcrypto | public | pgp_pub_decrypt_bytea | bytea, bytea, text, text | bytea - pgcrypto | public | pgp_pub_encrypt | text, bytea | bytea - pgcrypto | public | pgp_pub_encrypt | text, bytea, text | bytea - pgcrypto | public | pgp_pub_encrypt_bytea | bytea, bytea | bytea - pgcrypto | public | pgp_pub_encrypt_bytea | bytea, bytea, text | bytea - pgcrypto | public | pgp_sym_decrypt | bytea, text | text - pgcrypto | public | pgp_sym_decrypt | bytea, text, text | text - pgcrypto | public | pgp_sym_decrypt_bytea | bytea, text | bytea - pgcrypto | public | pgp_sym_decrypt_bytea | bytea, text, text | bytea - pgcrypto | public | pgp_sym_encrypt | text, text | bytea - pgcrypto | public | pgp_sym_encrypt | text, text, text | bytea - pgcrypto | public | 
pgp_sym_encrypt_bytea | bytea, text | bytea - pgcrypto | public | pgp_sym_encrypt_bytea | bytea, text, text | bytea - pgjwt | public | algorithm_sign | signables text, secret text, algorithm text | text - pgjwt | public | sign | payload json, secret text, algorithm text | text - pgjwt | public | try_cast_double | inp text | double precision - pgjwt | public | url_decode | data text | bytea - pgjwt | public | url_encode | data bytea | text - pgjwt | public | verify | token text, secret text, algorithm text | TABLE(header json, payload json, valid boolean) - pgmq | pgmq | _belongs_to_pgmq | table_name text | boolean - pgmq | pgmq | _ensure_pg_partman_installed | | void - pgmq | pgmq | _get_partition_col | partition_interval text | text - pgmq | pgmq | _get_pg_partman_major_version | | integer - pgmq | pgmq | _get_pg_partman_schema | | text - pgmq | pgmq | archive | queue_name text, msg_id bigint | boolean - pgmq | pgmq | archive | queue_name text, msg_ids bigint[] | SETOF bigint - pgmq | pgmq | convert_archive_partitioned | table_name text, partition_interval text, retention_interval text, leading_partition integer | void - pgmq | pgmq | create | queue_name text | void - pgmq | pgmq | create_non_partitioned | queue_name text | void - pgmq | pgmq | create_partitioned | queue_name text, partition_interval text, retention_interval text | void - pgmq | pgmq | create_unlogged | queue_name text | void - pgmq | pgmq | delete | queue_name text, msg_id bigint | boolean - pgmq | pgmq | delete | queue_name text, msg_ids bigint[] | SETOF bigint - pgmq | pgmq | detach_archive | queue_name text | void - pgmq | pgmq | drop_queue | queue_name text, partitioned boolean | boolean - pgmq | pgmq | format_table_name | queue_name text, prefix text | text - pgmq | pgmq | list_queues | | SETOF pgmq.queue_record - pgmq | pgmq | metrics | queue_name text | pgmq.metrics_result - pgmq | pgmq | metrics_all | | SETOF pgmq.metrics_result - pgmq | pgmq | pop | queue_name text | SETOF 
pgmq.message_record - pgmq | pgmq | purge_queue | queue_name text | bigint - pgmq | pgmq | read | queue_name text, vt integer, qty integer | SETOF pgmq.message_record - pgmq | pgmq | read_with_poll | queue_name text, vt integer, qty integer, max_poll_seconds integer, poll_interval_ms integer | SETOF pgmq.message_record - pgmq | pgmq | send | queue_name text, msg jsonb, delay integer | SETOF bigint - pgmq | pgmq | send_batch | queue_name text, msgs jsonb[], delay integer | SETOF bigint - pgmq | pgmq | set_vt | queue_name text, msg_id bigint, vt integer | SETOF pgmq.message_record - pgmq | pgmq | validate_queue_name | queue_name text | void - pgroonga | pgroonga | command | groongacommand text | text - pgroonga | pgroonga | command | groongacommand text, arguments text[] | text - pgroonga | pgroonga | command_escape_value | value text | text - pgroonga | pgroonga | contain_varchar_array | character varying[], character varying | boolean - pgroonga | pgroonga | escape | value bigint | text - pgroonga | pgroonga | escape | value boolean | text - pgroonga | pgroonga | escape | value double precision | text - pgroonga | pgroonga | escape | value integer | text - pgroonga | pgroonga | escape | value real | text - pgroonga | pgroonga | escape | value smallint | text - pgroonga | pgroonga | escape | value text | text - pgroonga | pgroonga | escape | value text, special_characters text | text - pgroonga | pgroonga | escape | value timestamp with time zone | text - pgroonga | pgroonga | escape | value timestamp without time zone | text - pgroonga | pgroonga | flush | indexname cstring | boolean - pgroonga | pgroonga | highlight_html | target text, keywords text[] | text - pgroonga | pgroonga | match_in_text | text, text[] | boolean - pgroonga | pgroonga | match_in_text_array | text[], text[] | boolean - pgroonga | pgroonga | match_in_varchar | character varying, character varying[] | boolean - pgroonga | pgroonga | match_jsonb | jsonb, text | boolean - pgroonga | pgroonga | 
match_positions_byte | target text, keywords text[] | integer[] - pgroonga | pgroonga | match_positions_character | target text, keywords text[] | integer[] - pgroonga | pgroonga | match_query | character varying, character varying | boolean - pgroonga | pgroonga | match_query | text, text | boolean - pgroonga | pgroonga | match_query | text[], text | boolean - pgroonga | pgroonga | match_regexp | character varying, character varying | boolean - pgroonga | pgroonga | match_regexp | text, text | boolean - pgroonga | pgroonga | match_script_jsonb | jsonb, text | boolean - pgroonga | pgroonga | match_term | target character varying, term character varying | boolean - pgroonga | pgroonga | match_term | target character varying[], term character varying | boolean - pgroonga | pgroonga | match_term | target text, term text | boolean - pgroonga | pgroonga | match_term | target text[], term text | boolean - pgroonga | pgroonga | match_text | text, text | boolean - pgroonga | pgroonga | match_text_array | text[], text | boolean - pgroonga | pgroonga | match_varchar | character varying, character varying | boolean - pgroonga | pgroonga | prefix_in_text | text, text[] | boolean - pgroonga | pgroonga | prefix_in_text_array | text[], text[] | boolean - pgroonga | pgroonga | prefix_rk_in_text | text, text[] | boolean - pgroonga | pgroonga | prefix_rk_in_text_array | text[], text[] | boolean - pgroonga | pgroonga | prefix_rk_text | text, text | boolean - pgroonga | pgroonga | prefix_rk_text_array | text[], text | boolean - pgroonga | pgroonga | prefix_text | text, text | boolean - pgroonga | pgroonga | prefix_text_array | text[], text | boolean - pgroonga | pgroonga | query_escape | query text | text - pgroonga | pgroonga | query_expand | tablename cstring, termcolumnname text, synonymscolumnname text, query text | text - pgroonga | pgroonga | query_extract_keywords | query text | text[] - pgroonga | pgroonga | query_in_text | text, text[] | boolean - pgroonga | pgroonga | 
query_in_text_array | text[], text[] | boolean - pgroonga | pgroonga | query_in_varchar | character varying, character varying[] | boolean - pgroonga | pgroonga | query_jsonb | jsonb, text | boolean - pgroonga | pgroonga | query_text | text, text | boolean - pgroonga | pgroonga | query_text_array | text[], text | boolean - pgroonga | pgroonga | query_varchar | character varying, character varying | boolean - pgroonga | pgroonga | regexp_text | text, text | boolean - pgroonga | pgroonga | regexp_varchar | character varying, character varying | boolean - pgroonga | pgroonga | score | "row" record | double precision - pgroonga | pgroonga | script_jsonb | jsonb, text | boolean - pgroonga | pgroonga | script_text | text, text | boolean - pgroonga | pgroonga | script_text_array | text[], text | boolean - pgroonga | pgroonga | script_varchar | character varying, character varying | boolean - pgroonga | pgroonga | similar_text | text, text | boolean - pgroonga | pgroonga | similar_text_array | text[], text | boolean - pgroonga | pgroonga | similar_varchar | character varying, character varying | boolean - pgroonga | pgroonga | snippet_html | target text, keywords text[], width integer | text[] - pgroonga | pgroonga | table_name | indexname cstring | text - pgroonga | public | pgroonga_command | groongacommand text | text - pgroonga | public | pgroonga_command | groongacommand text, arguments text[] | text - pgroonga | public | pgroonga_command_escape_value | value text | text - pgroonga | public | pgroonga_contain_varchar_array | character varying[], character varying | boolean - pgroonga | public | pgroonga_equal_query_text_array | targets text[], query text | boolean - pgroonga | public | pgroonga_equal_query_varchar_array | targets character varying[], query text | boolean - pgroonga | public | pgroonga_equal_text | target text, other text | boolean - pgroonga | public | pgroonga_equal_text_condition | target text, condition pgroonga_full_text_search_condition | boolean 
- pgroonga | public | pgroonga_equal_varchar | target character varying, other character varying | boolean - pgroonga | public | pgroonga_equal_varchar_condition | target character varying, condition pgroonga_full_text_search_condition | boolean - pgroonga | public | pgroonga_escape | value bigint | text - pgroonga | public | pgroonga_escape | value boolean | text - pgroonga | public | pgroonga_escape | value double precision | text - pgroonga | public | pgroonga_escape | value integer | text - pgroonga | public | pgroonga_escape | value real | text - pgroonga | public | pgroonga_escape | value smallint | text - pgroonga | public | pgroonga_escape | value text | text - pgroonga | public | pgroonga_escape | value text, special_characters text | text - pgroonga | public | pgroonga_escape | value timestamp with time zone | text - pgroonga | public | pgroonga_escape | value timestamp without time zone | text - pgroonga | public | pgroonga_flush | indexname cstring | boolean - pgroonga | public | pgroonga_handler | internal | index_am_handler - pgroonga | public | pgroonga_highlight_html | target text, keywords text[] | text - pgroonga | public | pgroonga_highlight_html | target text, keywords text[], indexname cstring | text - pgroonga | public | pgroonga_highlight_html | targets text[], keywords text[] | text[] - pgroonga | public | pgroonga_highlight_html | targets text[], keywords text[], indexname cstring | text[] - pgroonga | public | pgroonga_index_column_name | indexname cstring, columnindex integer | text - pgroonga | public | pgroonga_index_column_name | indexname cstring, columnname text | text - pgroonga | public | pgroonga_is_writable | | boolean - pgroonga | public | pgroonga_match_in_text | text, text[] | boolean - pgroonga | public | pgroonga_match_in_text_array | text[], text[] | boolean - pgroonga | public | pgroonga_match_in_varchar | character varying, character varying[] | boolean - pgroonga | public | pgroonga_match_jsonb | jsonb, text | boolean - 
pgroonga | public | pgroonga_match_positions_byte | target text, keywords text[] | integer[] - pgroonga | public | pgroonga_match_positions_byte | target text, keywords text[], indexname cstring | integer[] - pgroonga | public | pgroonga_match_positions_character | target text, keywords text[] | integer[] - pgroonga | public | pgroonga_match_positions_character | target text, keywords text[], indexname cstring | integer[] - pgroonga | public | pgroonga_match_query | character varying, character varying | boolean - pgroonga | public | pgroonga_match_query | text, text | boolean - pgroonga | public | pgroonga_match_query | text[], text | boolean - pgroonga | public | pgroonga_match_regexp | character varying, character varying | boolean - pgroonga | public | pgroonga_match_regexp | text, text | boolean - pgroonga | public | pgroonga_match_script_jsonb | jsonb, text | boolean - pgroonga | public | pgroonga_match_term | target character varying, term character varying | boolean - pgroonga | public | pgroonga_match_term | target character varying[], term character varying | boolean - pgroonga | public | pgroonga_match_term | target text, term text | boolean - pgroonga | public | pgroonga_match_term | target text[], term text | boolean - pgroonga | public | pgroonga_match_text | text, text | boolean - pgroonga | public | pgroonga_match_text_array | text[], text | boolean - pgroonga | public | pgroonga_match_text_array_condition | target text[], condition pgroonga_full_text_search_condition | boolean - pgroonga | public | pgroonga_match_text_array_condition_with_scorers | target text[], condition pgroonga_full_text_search_condition_with_scorers | boolean - pgroonga | public | pgroonga_match_text_condition | target text, condition pgroonga_full_text_search_condition | boolean - pgroonga | public | pgroonga_match_text_condition_with_scorers | target text, condition pgroonga_full_text_search_condition_with_scorers | boolean - pgroonga | public | pgroonga_match_varchar | 
character varying, character varying | boolean - pgroonga | public | pgroonga_match_varchar_condition | target character varying, condition pgroonga_full_text_search_condition | boolean - pgroonga | public | pgroonga_match_varchar_condition_with_scorers | target character varying, condition pgroonga_full_text_search_condition_with_scorers | boolean - pgroonga | public | pgroonga_normalize | target text | text - pgroonga | public | pgroonga_normalize | target text, normalizername text | text - pgroonga | public | pgroonga_not_prefix_in_text | text, text[] | boolean - pgroonga | public | pgroonga_prefix_in_text | text, text[] | boolean - pgroonga | public | pgroonga_prefix_in_text_array | text[], text[] | boolean - pgroonga | public | pgroonga_prefix_in_varchar | character varying, character varying[] | boolean - pgroonga | public | pgroonga_prefix_in_varchar_array | character varying[], character varying[] | boolean - pgroonga | public | pgroonga_prefix_rk_in_text | text, text[] | boolean - pgroonga | public | pgroonga_prefix_rk_in_text_array | text[], text[] | boolean - pgroonga | public | pgroonga_prefix_rk_in_varchar | character varying, character varying[] | boolean - pgroonga | public | pgroonga_prefix_rk_in_varchar_array | character varying[], character varying[] | boolean - pgroonga | public | pgroonga_prefix_rk_text | text, text | boolean - pgroonga | public | pgroonga_prefix_rk_text_array | text[], text | boolean - pgroonga | public | pgroonga_prefix_rk_varchar | character varying, character varying | boolean - pgroonga | public | pgroonga_prefix_rk_varchar_array | character varying[], character varying | boolean - pgroonga | public | pgroonga_prefix_text | text, text | boolean - pgroonga | public | pgroonga_prefix_text_array | text[], text | boolean - pgroonga | public | pgroonga_prefix_text_condition | text, condition pgroonga_full_text_search_condition | boolean - pgroonga | public | pgroonga_prefix_varchar | character varying, character varying | 
boolean - pgroonga | public | pgroonga_prefix_varchar_array | character varying[], character varying | boolean - pgroonga | public | pgroonga_prefix_varchar_condition | target character varying, conditoin pgroonga_full_text_search_condition | boolean - pgroonga | public | pgroonga_query_escape | query text | text - pgroonga | public | pgroonga_query_expand | tablename cstring, termcolumnname text, synonymscolumnname text, query text | text - pgroonga | public | pgroonga_query_extract_keywords | query text, index_name text | text[] - pgroonga | public | pgroonga_query_in_text | text, text[] | boolean - pgroonga | public | pgroonga_query_in_text_array | text[], text[] | boolean - pgroonga | public | pgroonga_query_in_varchar | character varying, character varying[] | boolean - pgroonga | public | pgroonga_query_jsonb | jsonb, text | boolean - pgroonga | public | pgroonga_query_text | text, text | boolean - pgroonga | public | pgroonga_query_text_array | text[], text | boolean - pgroonga | public | pgroonga_query_text_array_condition | targets text[], condition pgroonga_full_text_search_condition | boolean - pgroonga | public | pgroonga_query_text_array_condition_with_scorers | targets text[], condition pgroonga_full_text_search_condition_with_scorers | boolean - pgroonga | public | pgroonga_query_text_condition | target text, condition pgroonga_full_text_search_condition | boolean - pgroonga | public | pgroonga_query_text_condition_with_scorers | target text, condition pgroonga_full_text_search_condition_with_scorers | boolean - pgroonga | public | pgroonga_query_varchar | character varying, character varying | boolean - pgroonga | public | pgroonga_query_varchar_condition | target character varying, condition pgroonga_full_text_search_condition | boolean - pgroonga | public | pgroonga_query_varchar_condition_with_scorers | target character varying, condition pgroonga_full_text_search_condition_with_scorers | boolean - pgroonga | public | pgroonga_regexp_in_text | 
text, text[] | boolean - pgroonga | public | pgroonga_regexp_in_varchar | character varying, character varying[] | boolean - pgroonga | public | pgroonga_regexp_text | text, text | boolean - pgroonga | public | pgroonga_regexp_varchar | character varying, character varying | boolean - pgroonga | public | pgroonga_result_to_jsonb_objects | result jsonb | jsonb - pgroonga | public | pgroonga_result_to_recordset | result jsonb | SETOF record - pgroonga | public | pgroonga_score | "row" record | double precision - pgroonga | public | pgroonga_score | tableoid oid, ctid tid | double precision - pgroonga | public | pgroonga_script_jsonb | jsonb, text | boolean - pgroonga | public | pgroonga_script_text | text, text | boolean - pgroonga | public | pgroonga_script_text_array | text[], text | boolean - pgroonga | public | pgroonga_script_varchar | character varying, character varying | boolean - pgroonga | public | pgroonga_set_writable | newwritable boolean | boolean - pgroonga | public | pgroonga_similar_text | text, text | boolean - pgroonga | public | pgroonga_similar_text_array | text[], text | boolean - pgroonga | public | pgroonga_similar_varchar | character varying, character varying | boolean - pgroonga | public | pgroonga_snippet_html | target text, keywords text[], width integer | text[] - pgroonga | public | pgroonga_table_name | indexname cstring | text - pgroonga | public | pgroonga_tokenize | target text, VARIADIC options text[] | json[] - pgroonga | public | pgroonga_vacuum | | boolean - pgroonga | public | pgroonga_wal_apply | | bigint - pgroonga | public | pgroonga_wal_apply | indexname cstring | bigint - pgroonga | public | pgroonga_wal_set_applied_position | | boolean - pgroonga | public | pgroonga_wal_set_applied_position | block bigint, "offset" bigint | boolean - pgroonga | public | pgroonga_wal_set_applied_position | indexname cstring | boolean - pgroonga | public | pgroonga_wal_set_applied_position | indexname cstring, block bigint, "offset" bigint 
| boolean - pgroonga | public | pgroonga_wal_status | | TABLE(name text, oid oid, current_block bigint, current_offset bigint, current_size bigint, last_block bigint, last_offset bigint, last_size bigint) - pgroonga | public | pgroonga_wal_truncate | | bigint - pgroonga | public | pgroonga_wal_truncate | indexname cstring | bigint - pgroonga_database | public | pgroonga_database_remove | | boolean - pgrouting | public | _pgr_alphashape | text, alpha double precision, OUT seq1 bigint, OUT textgeom text | SETOF record - pgrouting | public | _pgr_array_reverse | anyarray | anyarray - pgrouting | public | _pgr_articulationpoints | edges_sql text, OUT seq integer, OUT node bigint | SETOF record - pgrouting | public | _pgr_astar | edges_sql text, combinations_sql text, directed boolean, heuristic integer, factor double precision, epsilon double precision, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_astar | edges_sql text, start_vids anyarray, end_vids anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, only_cost boolean, normal boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_bdastar | text, anyarray, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_bdastar | text, text, directed boolean, heuristic integer, factor double precision, epsilon double precision, only_cost boolean, OUT seq 
integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_bddijkstra | text, anyarray, anyarray, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_bddijkstra | text, text, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_bellmanford | edges_sql text, combinations_sql text, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_bellmanford | edges_sql text, from_vids anyarray, to_vids anyarray, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_biconnectedcomponents | edges_sql text, OUT seq bigint, OUT component bigint, OUT edge bigint | SETOF record - pgrouting | public | _pgr_binarybreadthfirstsearch | edges_sql text, combinations_sql text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_binarybreadthfirstsearch | edges_sql text, from_vids anyarray, to_vids anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT 
end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_bipartite | edges_sql text, OUT node bigint, OUT color bigint | SETOF record - pgrouting | public | _pgr_boost_version | | text - pgrouting | public | _pgr_breadthfirstsearch | edges_sql text, from_vids anyarray, max_depth bigint, directed boolean, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_bridges | edges_sql text, OUT seq integer, OUT edge bigint | SETOF record - pgrouting | public | _pgr_build_type | | text - pgrouting | public | _pgr_checkcolumn | text, text, text, is_optional boolean, dryrun boolean | boolean - pgrouting | public | _pgr_checkquery | text | text - pgrouting | public | _pgr_checkverttab | vertname text, columnsarr text[], reporterrs integer, fnname text, OUT sname text, OUT vname text | record - pgrouting | public | _pgr_chinesepostman | edges_sql text, only_cost boolean, OUT seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_compilation_date | | text - pgrouting | public | _pgr_compiler_version | | text - pgrouting | public | _pgr_connectedcomponents | edges_sql text, OUT seq bigint, OUT component bigint, OUT node bigint | SETOF record - pgrouting | public | _pgr_contraction | edges_sql text, contraction_order bigint[], max_cycles integer, forbidden_vertices bigint[], directed boolean, OUT type text, OUT id bigint, OUT contracted_vertices bigint[], OUT source bigint, OUT target bigint, OUT cost double precision | SETOF record - pgrouting | public | _pgr_createindex | sname text, tname text, colname text, indext text, reporterrs integer, fnname text | void - pgrouting | public | _pgr_createindex | tabname text, colname text, indext text, reporterrs integer, 
fnname text | void - pgrouting | public | _pgr_cuthillmckeeordering | text, OUT seq bigint, OUT node bigint | SETOF record - pgrouting | public | _pgr_dagshortestpath | text, anyarray, anyarray, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_dagshortestpath | text, text, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_depthfirstsearch | edges_sql text, root_vids anyarray, directed boolean, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_dijkstra | edges_sql text, combinations_sql text, directed boolean, only_cost boolean, n_goals bigint, global boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_dijkstra | edges_sql text, combinations_sql text, directed boolean, only_cost boolean, normal boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_dijkstra | edges_sql text, start_vids anyarray, end_vids anyarray, directed boolean, only_cost boolean, normal boolean, n_goals bigint, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_dijkstra | edges_sql text, start_vids anyarray, end_vids anyarray, 
directed boolean, only_cost boolean, normal boolean, n_goals bigint, global boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_dijkstranear | text, anyarray, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_dijkstranear | text, anyarray, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_dijkstranear | text, bigint, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_dijkstravia | edges_sql text, via_vids anyarray, directed boolean, strict boolean, u_turn_on_edge boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record - pgrouting | public | _pgr_drivingdistance | edges_sql text, start_vids anyarray, distance double precision, directed boolean, equicost boolean, OUT seq integer, OUT from_v bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_edgecoloring | edges_sql text, OUT edge_id bigint, OUT color_id bigint | SETOF record - pgrouting | public | _pgr_edgedisjointpaths | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq 
integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_edgedisjointpaths | text, text, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_edwardmoore | edges_sql text, combinations_sql text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_edwardmoore | edges_sql text, from_vids anyarray, to_vids anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_endpoint | g geometry | geometry - pgrouting | public | _pgr_floydwarshall | edges_sql text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_get_statement | o_sql text | text - pgrouting | public | _pgr_getcolumnname | sname text, tname text, col text, reporterrs integer, fnname text | text - pgrouting | public | _pgr_getcolumnname | tab text, col text, reporterrs integer, fnname text | text - pgrouting | public | _pgr_getcolumntype | sname text, tname text, cname text, reporterrs integer, fnname text | text - pgrouting | public | _pgr_getcolumntype | tab text, col text, reporterrs integer, fnname text | text - pgrouting | public | _pgr_gettablename | tab text, reporterrs integer, fnname text, OUT sname text, OUT tname text | record - pgrouting | public | _pgr_git_hash | | text - pgrouting | public | _pgr_hawickcircuits | text, OUT seq integer, 
OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_iscolumnindexed | sname text, tname text, cname text, reporterrs integer, fnname text | boolean - pgrouting | public | _pgr_iscolumnindexed | tab text, col text, reporterrs integer, fnname text | boolean - pgrouting | public | _pgr_iscolumnintable | tab text, col text | boolean - pgrouting | public | _pgr_isplanar | text | boolean - pgrouting | public | _pgr_johnson | edges_sql text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_kruskal | text, anyarray, fn_suffix text, max_depth bigint, distance double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_ksp | edges_sql text, start_vid bigint, end_vid bigint, k integer, directed boolean, heap_paths boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_lengauertarjandominatortree | edges_sql text, root_vid bigint, OUT seq integer, OUT vid bigint, OUT idom bigint | SETOF record - pgrouting | public | _pgr_lib_version | | text - pgrouting | public | _pgr_linegraph | text, directed boolean, OUT seq integer, OUT source bigint, OUT target bigint, OUT cost double precision, OUT reverse_cost double precision | SETOF record - pgrouting | public | _pgr_linegraphfull | text, OUT seq integer, OUT source bigint, OUT target bigint, OUT cost double precision, OUT edge bigint | SETOF record - pgrouting | public | _pgr_makeconnected | text, OUT seq bigint, OUT start_vid bigint, OUT end_vid bigint | SETOF record - pgrouting | public | 
_pgr_maxcardinalitymatch | edges_sql text, directed boolean, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint | SETOF record - pgrouting | public | _pgr_maxflow | edges_sql text, combinations_sql text, algorithm integer, only_flow boolean, OUT seq integer, OUT edge_id bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record - pgrouting | public | _pgr_maxflow | edges_sql text, sources anyarray, targets anyarray, algorithm integer, only_flow boolean, OUT seq integer, OUT edge_id bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record - pgrouting | public | _pgr_maxflowmincost | edges_sql text, combinations_sql text, only_cost boolean, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_maxflowmincost | edges_sql text, sources anyarray, targets anyarray, only_cost boolean, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_msg | msgkind integer, fnname text, msg text | void - pgrouting | public | _pgr_onerror | errcond boolean, reporterrs integer, fnname text, msgerr text, hinto text, msgok text | void - pgrouting | public | _pgr_operating_system | | text - pgrouting | public | _pgr_parameter_check | fn text, sql text, big boolean | boolean - pgrouting | public | _pgr_pgsql_version | | text - pgrouting | public | _pgr_pickdeliver | text, text, text, factor double precision, max_cycles integer, initial_sol integer, OUT seq integer, OUT vehicle_seq integer, OUT vehicle_id bigint, OUT stop_seq integer, OUT stop_type integer, OUT stop_id bigint, OUT order_id bigint, OUT cargo double precision, OUT travel_time double 
precision, OUT arrival_time double precision, OUT wait_time double precision, OUT service_time double precision, OUT departure_time double precision | SETOF record - pgrouting | public | _pgr_pickdelivereuclidean | text, text, factor double precision, max_cycles integer, initial_sol integer, OUT seq integer, OUT vehicle_seq integer, OUT vehicle_id bigint, OUT stop_seq integer, OUT stop_type integer, OUT order_id bigint, OUT cargo double precision, OUT travel_time double precision, OUT arrival_time double precision, OUT wait_time double precision, OUT service_time double precision, OUT departure_time double precision | SETOF record - pgrouting | public | _pgr_pointtoid | point geometry, tolerance double precision, vertname text, srid integer | bigint - pgrouting | public | _pgr_prim | text, anyarray, order_by text, max_depth bigint, distance double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_quote_ident | idname text | text - pgrouting | public | _pgr_sequentialvertexcoloring | edges_sql text, OUT vertex_id bigint, OUT color_id bigint | SETOF record - pgrouting | public | _pgr_startpoint | g geometry | geometry - pgrouting | public | _pgr_stoerwagner | edges_sql text, OUT seq integer, OUT edge bigint, OUT cost double precision, OUT mincut double precision | SETOF record - pgrouting | public | _pgr_strongcomponents | edges_sql text, OUT seq bigint, OUT component bigint, OUT node bigint | SETOF record - pgrouting | public | _pgr_topologicalsort | edges_sql text, OUT seq integer, OUT sorted_v bigint | SETOF record - pgrouting | public | _pgr_transitiveclosure | edges_sql text, OUT seq integer, OUT vid bigint, OUT target_array bigint[] | SETOF record - pgrouting | public | _pgr_trsp | sql text, source_eid integer, source_pos double precision, target_eid integer, target_pos double precision, directed boolean, 
has_reverse_cost boolean, turn_restrict_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT cost double precision | SETOF record - pgrouting | public | _pgr_trsp | text, text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_trsp | text, text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_trsp | text, text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_trsp | text, text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_trsp_withpoints | text, text, text, anyarray, anyarray, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT departure bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_trsp_withpoints | text, text, text, text, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT departure bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_trspvia | text, text, anyarray, boolean, boolean, boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge 
bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record - pgrouting | public | _pgr_trspvia_withpoints | text, text, text, anyarray, boolean, boolean, boolean, character, boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record - pgrouting | public | _pgr_trspviavertices | sql text, vids integer[], directed boolean, has_rcost boolean, turn_restrict_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT id3 integer, OUT cost double precision | SETOF record - pgrouting | public | _pgr_tsp | matrix_row_sql text, start_id bigint, end_id bigint, max_processing_time double precision, tries_per_temperature integer, max_changes_per_temperature integer, max_consecutive_non_changes integer, initial_temperature double precision, final_temperature double precision, cooling_factor double precision, randomize boolean, OUT seq integer, OUT node bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_tspeuclidean | coordinates_sql text, start_id bigint, end_id bigint, max_processing_time double precision, tries_per_temperature integer, max_changes_per_temperature integer, max_consecutive_non_changes integer, initial_temperature double precision, final_temperature double precision, cooling_factor double precision, randomize boolean, OUT seq integer, OUT node bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_turnrestrictedpath | text, text, bigint, bigint, integer, directed boolean, heap_paths boolean, stop_on_first boolean, strict boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - 
pgrouting | public | _pgr_versionless | v1 text, v2 text | boolean - pgrouting | public | _pgr_vrponedepot | text, text, text, integer, OUT seq integer, OUT vehicle_seq integer, OUT vehicle_id bigint, OUT stop_seq integer, OUT stop_type integer, OUT stop_id bigint, OUT order_id bigint, OUT cargo double precision, OUT travel_time double precision, OUT arrival_time double precision, OUT wait_time double precision, OUT service_time double precision, OUT departure_time double precision | SETOF record - pgrouting | public | _pgr_withpoints | edges_sql text, points_sql text, combinations_sql text, directed boolean, driving_side character, details boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_pid bigint, OUT end_pid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_withpoints | edges_sql text, points_sql text, start_pids anyarray, end_pids anyarray, directed boolean, driving_side character, details boolean, only_cost boolean, normal boolean, OUT seq integer, OUT path_seq integer, OUT start_pid bigint, OUT end_pid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_withpointsdd | edges_sql text, points_sql text, start_pid anyarray, distance double precision, directed boolean, driving_side character, details boolean, equicost boolean, OUT seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _pgr_withpointsksp | edges_sql text, points_sql text, start_pid bigint, end_pid bigint, k integer, directed boolean, heap_paths boolean, driving_side character, details boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | 
_pgr_withpointsvia | sql text, via_edges bigint[], fraction double precision[], directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record - pgrouting | public | _pgr_withpointsvia | text, text, anyarray, boolean, boolean, boolean, character, boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record - pgrouting | public | _trsp | text, text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _v4trsp | text, text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | _v4trsp | text, text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_alphashape | geometry, alpha double precision | geometry - pgrouting | public | pgr_analyzegraph | text, double precision, the_geom text, id text, source text, target text, rows_where text | character varying - pgrouting | public | pgr_analyzeoneway | text, text[], text[], text[], text[], two_way_if_null boolean, oneway text, source text, target text | text - pgrouting | public | pgr_articulationpoints | text, OUT node bigint | SETOF bigint - pgrouting | public | pgr_astar 
| text, anyarray, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_astar | text, anyarray, bigint, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_astar | text, bigint, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_astar | text, bigint, bigint, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_astar | text, text, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_astarcost | text, anyarray, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_astarcost | text, anyarray, bigint, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double 
precision | SETOF record - pgrouting | public | pgr_astarcost | text, bigint, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_astarcost | text, bigint, bigint, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_astarcost | text, text, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_astarcostmatrix | text, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bdastar | text, anyarray, anyarray, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bdastar | text, anyarray, bigint, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bdastar | text, bigint, anyarray, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bdastar | text, bigint, bigint, directed boolean, heuristic integer, factor numeric, epsilon 
numeric, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bdastar | text, text, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bdastarcost | text, anyarray, anyarray, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bdastarcost | text, anyarray, bigint, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bdastarcost | text, bigint, anyarray, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bdastarcost | text, bigint, bigint, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bdastarcost | text, text, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bdastarcostmatrix | text, anyarray, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bddijkstra | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double 
precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bddijkstra | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bddijkstra | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bddijkstra | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bddijkstra | text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bddijkstracost | text, anyarray, anyarray, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bddijkstracost | text, anyarray, bigint, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bddijkstracost | text, bigint, anyarray, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bddijkstracost | text, bigint, bigint, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bddijkstracost | text, text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bddijkstracostmatrix | text, anyarray, directed boolean, OUT start_vid bigint, OUT 
end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bellmanford | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bellmanford | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bellmanford | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bellmanford | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bellmanford | text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_biconnectedcomponents | text, OUT seq bigint, OUT component bigint, OUT edge bigint | SETOF record - pgrouting | public | pgr_binarybreadthfirstsearch | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_binarybreadthfirstsearch | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - 
pgrouting | public | pgr_binarybreadthfirstsearch | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_binarybreadthfirstsearch | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_binarybreadthfirstsearch | text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bipartite | text, OUT vertex_id bigint, OUT color_id bigint | SETOF record - pgrouting | public | pgr_boykovkolmogorov | text, anyarray, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record - pgrouting | public | pgr_boykovkolmogorov | text, anyarray, bigint, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record - pgrouting | public | pgr_boykovkolmogorov | text, bigint, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record - pgrouting | public | pgr_boykovkolmogorov | text, bigint, bigint, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record - pgrouting | public | pgr_boykovkolmogorov | text, text, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record - pgrouting | public | pgr_breadthfirstsearch | text, anyarray, max_depth bigint, directed boolean, OUT 
seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_breadthfirstsearch | text, bigint, max_depth bigint, directed boolean, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_bridges | text, OUT edge bigint | SETOF bigint - pgrouting | public | pgr_chinesepostman | text, OUT seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_chinesepostmancost | text | double precision - pgrouting | public | pgr_connectedcomponents | text, OUT seq bigint, OUT component bigint, OUT node bigint | SETOF record - pgrouting | public | pgr_contraction | text, bigint[], max_cycles integer, forbidden_vertices bigint[], directed boolean, OUT type text, OUT id bigint, OUT contracted_vertices bigint[], OUT source bigint, OUT target bigint, OUT cost double precision | SETOF record - pgrouting | public | pgr_createtopology | text, double precision, the_geom text, id text, source text, target text, rows_where text, clean boolean | character varying - pgrouting | public | pgr_createverticestable | text, the_geom text, source text, target text, rows_where text | text - pgrouting | public | pgr_cuthillmckeeordering | text, OUT seq bigint, OUT node bigint | SETOF record - pgrouting | public | pgr_dagshortestpath | text, anyarray, anyarray, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dagshortestpath | text, anyarray, bigint, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dagshortestpath | text, 
bigint, anyarray, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dagshortestpath | text, bigint, bigint, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dagshortestpath | text, text, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_degree | text, text, dryrun boolean, OUT node bigint, OUT degree bigint | SETOF record - pgrouting | public | pgr_depthfirstsearch | text, anyarray, directed boolean, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_depthfirstsearch | text, bigint, directed boolean, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dijkstra | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dijkstra | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dijkstra | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | 
pgr_dijkstra | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dijkstra | text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dijkstracost | text, anyarray, anyarray, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dijkstracost | text, anyarray, bigint, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dijkstracost | text, bigint, anyarray, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dijkstracost | text, bigint, bigint, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dijkstracost | text, text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dijkstracostmatrix | text, anyarray, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dijkstranear | text, anyarray, anyarray, directed boolean, cap bigint, global boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dijkstranear | text, anyarray, bigint, directed boolean, cap bigint, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double 
precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dijkstranear | text, bigint, anyarray, directed boolean, cap bigint, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dijkstranear | text, text, directed boolean, cap bigint, global boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dijkstranearcost | text, anyarray, anyarray, directed boolean, cap bigint, global boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dijkstranearcost | text, anyarray, bigint, directed boolean, cap bigint, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dijkstranearcost | text, bigint, anyarray, directed boolean, cap bigint, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dijkstranearcost | text, text, directed boolean, cap bigint, global boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_dijkstravia | text, anyarray, directed boolean, strict boolean, u_turn_on_edge boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record - pgrouting | public | pgr_drivingdistance | text, anyarray, double precision, directed boolean, equicost boolean, OUT seq integer, OUT from_v bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | 
SETOF record - pgrouting | public | pgr_drivingdistance | text, bigint, double precision, directed boolean, OUT seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_edgecoloring | text, OUT edge_id bigint, OUT color_id bigint | SETOF record - pgrouting | public | pgr_edgedisjointpaths | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_edgedisjointpaths | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_edgedisjointpaths | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_edgedisjointpaths | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_edgedisjointpaths | text, text, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_edmondskarp | text, anyarray, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record - pgrouting | public | pgr_edmondskarp | text, anyarray, bigint, OUT seq 
integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record - pgrouting | public | pgr_edmondskarp | text, bigint, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record - pgrouting | public | pgr_edmondskarp | text, bigint, bigint, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record - pgrouting | public | pgr_edmondskarp | text, text, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record - pgrouting | public | pgr_edwardmoore | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_edwardmoore | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_edwardmoore | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_edwardmoore | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_edwardmoore | text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | 
pgr_extractvertices | text, dryrun boolean, OUT id bigint, OUT in_edges bigint[], OUT out_edges bigint[], OUT x double precision, OUT y double precision, OUT geom geometry | SETOF record - pgrouting | public | pgr_findcloseedges | text, geometry, double precision, cap integer, partial boolean, dryrun boolean, OUT edge_id bigint, OUT fraction double precision, OUT side character, OUT distance double precision, OUT geom geometry, OUT edge geometry | SETOF record - pgrouting | public | pgr_findcloseedges | text, geometry[], double precision, cap integer, partial boolean, dryrun boolean, OUT edge_id bigint, OUT fraction double precision, OUT side character, OUT distance double precision, OUT geom geometry, OUT edge geometry | SETOF record - pgrouting | public | pgr_floydwarshall | text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_full_version | OUT version text, OUT build_type text, OUT compile_date text, OUT library text, OUT system text, OUT postgresql text, OUT compiler text, OUT boost text, OUT hash text | record - pgrouting | public | pgr_hawickcircuits | text, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_isplanar | text | boolean - pgrouting | public | pgr_johnson | text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_kruskal | text, OUT edge bigint, OUT cost double precision | SETOF record - pgrouting | public | pgr_kruskalbfs | text, anyarray, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_kruskalbfs | text, bigint, max_depth bigint, OUT seq 
bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_kruskaldd | text, anyarray, double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_kruskaldd | text, anyarray, numeric, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_kruskaldd | text, bigint, double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_kruskaldd | text, bigint, numeric, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_kruskaldfs | text, anyarray, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_kruskaldfs | text, bigint, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_ksp | text, bigint, bigint, integer, directed boolean, heap_paths boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_lengauertarjandominatortree | text, bigint, OUT seq integer, OUT vertex_id bigint, OUT idom bigint | SETOF record - pgrouting | public | pgr_linegraph | text, 
directed boolean, OUT seq integer, OUT source bigint, OUT target bigint, OUT cost double precision, OUT reverse_cost double precision | SETOF record - pgrouting | public | pgr_linegraphfull | text, OUT seq integer, OUT source bigint, OUT target bigint, OUT cost double precision, OUT edge bigint | SETOF record - pgrouting | public | pgr_makeconnected | text, OUT seq bigint, OUT start_vid bigint, OUT end_vid bigint | SETOF record - pgrouting | public | pgr_maxcardinalitymatch | text, OUT edge bigint | SETOF bigint - pgrouting | public | pgr_maxcardinalitymatch | text, directed boolean, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint | SETOF record - pgrouting | public | pgr_maxflow | text, anyarray, anyarray | bigint - pgrouting | public | pgr_maxflow | text, anyarray, bigint | bigint - pgrouting | public | pgr_maxflow | text, bigint, anyarray | bigint - pgrouting | public | pgr_maxflow | text, bigint, bigint | bigint - pgrouting | public | pgr_maxflow | text, text | bigint - pgrouting | public | pgr_maxflowmincost | text, anyarray, anyarray, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_maxflowmincost | text, anyarray, bigint, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_maxflowmincost | text, bigint, anyarray, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_maxflowmincost | text, bigint, bigint, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double 
precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_maxflowmincost | text, text, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_maxflowmincost_cost | text, anyarray, anyarray | double precision - pgrouting | public | pgr_maxflowmincost_cost | text, anyarray, bigint | double precision - pgrouting | public | pgr_maxflowmincost_cost | text, bigint, anyarray | double precision - pgrouting | public | pgr_maxflowmincost_cost | text, bigint, bigint | double precision - pgrouting | public | pgr_maxflowmincost_cost | text, text | double precision - pgrouting | public | pgr_nodenetwork | text, double precision, id text, the_geom text, table_ending text, rows_where text, outall boolean | text - pgrouting | public | pgr_pickdeliver | text, text, text, factor double precision, max_cycles integer, initial_sol integer, OUT seq integer, OUT vehicle_seq integer, OUT vehicle_id bigint, OUT stop_seq integer, OUT stop_type integer, OUT stop_id bigint, OUT order_id bigint, OUT cargo double precision, OUT travel_time double precision, OUT arrival_time double precision, OUT wait_time double precision, OUT service_time double precision, OUT departure_time double precision | SETOF record - pgrouting | public | pgr_pickdelivereuclidean | text, text, factor double precision, max_cycles integer, initial_sol integer, OUT seq integer, OUT vehicle_seq integer, OUT vehicle_id bigint, OUT stop_seq integer, OUT stop_type integer, OUT order_id bigint, OUT cargo double precision, OUT travel_time double precision, OUT arrival_time double precision, OUT wait_time double precision, OUT service_time double precision, OUT departure_time double precision | SETOF record - pgrouting | public | pgr_prim | text, OUT edge bigint, OUT cost double precision | SETOF record - pgrouting | public | pgr_primbfs | text, 
anyarray, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_primbfs | text, bigint, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_primdd | text, anyarray, double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_primdd | text, anyarray, numeric, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_primdd | text, bigint, double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_primdd | text, bigint, numeric, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_primdfs | text, anyarray, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_primdfs | text, bigint, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_pushrelabel | text, anyarray, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record - 
pgrouting | public | pgr_pushrelabel | text, anyarray, bigint, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record - pgrouting | public | pgr_pushrelabel | text, bigint, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record - pgrouting | public | pgr_pushrelabel | text, bigint, bigint, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record - pgrouting | public | pgr_pushrelabel | text, text, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record - pgrouting | public | pgr_sequentialvertexcoloring | text, OUT vertex_id bigint, OUT color_id bigint | SETOF record - pgrouting | public | pgr_stoerwagner | text, OUT seq integer, OUT edge bigint, OUT cost double precision, OUT mincut double precision | SETOF record - pgrouting | public | pgr_strongcomponents | text, OUT seq bigint, OUT component bigint, OUT node bigint | SETOF record - pgrouting | public | pgr_topologicalsort | text, OUT seq integer, OUT sorted_v bigint | SETOF record - pgrouting | public | pgr_transitiveclosure | text, OUT seq integer, OUT vid bigint, OUT target_array bigint[] | SETOF record - pgrouting | public | pgr_trsp | text, integer, double precision, integer, double precision, boolean, boolean, turn_restrict_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT cost double precision | SETOF record - pgrouting | public | pgr_trsp | text, integer, integer, boolean, boolean, restrictions_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT cost double precision | SETOF record - pgrouting | public | pgr_trsp | text, text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT 
node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_trsp | text, text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_trsp | text, text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_trsp | text, text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_trsp | text, text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_trsp_withpoints | text, text, text, anyarray, anyarray, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_trsp_withpoints | text, text, text, anyarray, bigint, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_trsp_withpoints | text, text, text, bigint, anyarray, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, 
OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_trsp_withpoints | text, text, text, bigint, bigint, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_trsp_withpoints | text, text, text, text, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_trspvia | text, text, anyarray, directed boolean, strict boolean, u_turn_on_edge boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record - pgrouting | public | pgr_trspvia_withpoints | text, text, text, anyarray, directed boolean, strict boolean, u_turn_on_edge boolean, driving_side character, details boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record - pgrouting | public | pgr_trspviaedges | text, integer[], double precision[], boolean, boolean, turn_restrict_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT id3 integer, OUT cost double precision | SETOF record - pgrouting | public | pgr_trspviavertices | text, anyarray, boolean, boolean, restrictions_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT id3 integer, OUT cost double precision | 
SETOF record - pgrouting | public | pgr_tsp | text, start_id bigint, end_id bigint, max_processing_time double precision, tries_per_temperature integer, max_changes_per_temperature integer, max_consecutive_non_changes integer, initial_temperature double precision, final_temperature double precision, cooling_factor double precision, randomize boolean, OUT seq integer, OUT node bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_tspeuclidean | text, start_id bigint, end_id bigint, max_processing_time double precision, tries_per_temperature integer, max_changes_per_temperature integer, max_consecutive_non_changes integer, initial_temperature double precision, final_temperature double precision, cooling_factor double precision, randomize boolean, OUT seq integer, OUT node bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_turnrestrictedpath | text, text, bigint, bigint, integer, directed boolean, heap_paths boolean, stop_on_first boolean, strict boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_version | | text - pgrouting | public | pgr_vrponedepot | text, text, text, integer, OUT oid integer, OUT opos integer, OUT vid integer, OUT tarrival integer, OUT tdepart integer | SETOF record - pgrouting | public | pgr_withpoints | text, text, anyarray, anyarray, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_pid bigint, OUT end_pid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_withpoints | text, text, anyarray, bigint, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_pid bigint, OUT node bigint, OUT edge 
bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_withpoints | text, text, bigint, anyarray, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT end_pid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_withpoints | text, text, bigint, bigint, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_withpoints | text, text, text, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_pid bigint, OUT end_pid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_withpointscost | text, text, anyarray, anyarray, directed boolean, driving_side character, OUT start_pid bigint, OUT end_pid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_withpointscost | text, text, anyarray, bigint, directed boolean, driving_side character, OUT start_pid bigint, OUT end_pid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_withpointscost | text, text, bigint, anyarray, directed boolean, driving_side character, OUT start_pid bigint, OUT end_pid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_withpointscost | text, text, bigint, bigint, directed boolean, driving_side character, OUT start_pid bigint, OUT end_pid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_withpointscost | text, text, text, directed boolean, driving_side character, OUT start_pid bigint, OUT end_pid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | 
pgr_withpointscostmatrix | text, text, anyarray, directed boolean, driving_side character, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_withpointsdd | text, text, anyarray, double precision, directed boolean, driving_side character, details boolean, equicost boolean, OUT seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_withpointsdd | text, text, bigint, double precision, directed boolean, driving_side character, details boolean, OUT seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_withpointsksp | text, text, bigint, bigint, integer, directed boolean, heap_paths boolean, driving_side character, details boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record - pgrouting | public | pgr_withpointsvia | text, text, anyarray, directed boolean, strict boolean, u_turn_on_edge boolean, driving_side character, details boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record - pgrowlocks | public | pgrowlocks | relname text, OUT locked_row tid, OUT locker xid, OUT multi boolean, OUT xids xid[], OUT modes text[], OUT pids integer[] | SETOF record - pgsodium | pgsodium | create_key | key_type pgsodium.key_type, name text, raw_key bytea, raw_key_nonce bytea, parent_key uuid, key_context bytea, expires timestamp with time zone, associated_data text | pgsodium.valid_key - pgsodium | pgsodium | create_mask_view | relid oid, debug boolean | void - pgsodium | pgsodium | create_mask_view | relid 
oid, subid integer, debug boolean | void - pgsodium | pgsodium | crypto_aead_det_decrypt | ciphertext bytea, additional bytea, key bytea, nonce bytea | bytea - pgsodium | pgsodium | crypto_aead_det_decrypt | message bytea, additional bytea, key_id bigint, context bytea, nonce bytea | bytea - pgsodium | pgsodium | crypto_aead_det_decrypt | message bytea, additional bytea, key_uuid uuid | bytea - pgsodium | pgsodium | crypto_aead_det_decrypt | message bytea, additional bytea, key_uuid uuid, nonce bytea | bytea - pgsodium | pgsodium | crypto_aead_det_encrypt | message bytea, additional bytea, key bytea, nonce bytea | bytea - pgsodium | pgsodium | crypto_aead_det_encrypt | message bytea, additional bytea, key_id bigint, context bytea, nonce bytea | bytea - pgsodium | pgsodium | crypto_aead_det_encrypt | message bytea, additional bytea, key_uuid uuid | bytea - pgsodium | pgsodium | crypto_aead_det_encrypt | message bytea, additional bytea, key_uuid uuid, nonce bytea | bytea - pgsodium | pgsodium | crypto_aead_det_keygen | | bytea - pgsodium | pgsodium | crypto_aead_det_noncegen | | bytea - pgsodium | pgsodium | crypto_aead_ietf_decrypt | message bytea, additional bytea, nonce bytea, key bytea | bytea - pgsodium | pgsodium | crypto_aead_ietf_decrypt | message bytea, additional bytea, nonce bytea, key_id bigint, context bytea | bytea - pgsodium | pgsodium | crypto_aead_ietf_decrypt | message bytea, additional bytea, nonce bytea, key_uuid uuid | bytea - pgsodium | pgsodium | crypto_aead_ietf_encrypt | message bytea, additional bytea, nonce bytea, key bytea | bytea - pgsodium | pgsodium | crypto_aead_ietf_encrypt | message bytea, additional bytea, nonce bytea, key_id bigint, context bytea | bytea - pgsodium | pgsodium | crypto_aead_ietf_encrypt | message bytea, additional bytea, nonce bytea, key_uuid uuid | bytea - pgsodium | pgsodium | crypto_aead_ietf_keygen | | bytea - pgsodium | pgsodium | crypto_aead_ietf_noncegen | | bytea - pgsodium | pgsodium | crypto_auth | message 
bytea, key bytea | bytea - pgsodium | pgsodium | crypto_auth | message bytea, key_id bigint, context bytea | bytea - pgsodium | pgsodium | crypto_auth | message bytea, key_uuid uuid | bytea - pgsodium | pgsodium | crypto_auth_hmacsha256 | message bytea, key_id bigint, context bytea | bytea - pgsodium | pgsodium | crypto_auth_hmacsha256 | message bytea, key_uuid uuid | bytea - pgsodium | pgsodium | crypto_auth_hmacsha256 | message bytea, secret bytea | bytea - pgsodium | pgsodium | crypto_auth_hmacsha256_keygen | | bytea - pgsodium | pgsodium | crypto_auth_hmacsha256_verify | hash bytea, message bytea, key_id bigint, context bytea | boolean - pgsodium | pgsodium | crypto_auth_hmacsha256_verify | hash bytea, message bytea, secret bytea | boolean - pgsodium | pgsodium | crypto_auth_hmacsha256_verify | signature bytea, message bytea, key_uuid uuid | boolean - pgsodium | pgsodium | crypto_auth_hmacsha512 | message bytea, key_id bigint, context bytea | bytea - pgsodium | pgsodium | crypto_auth_hmacsha512 | message bytea, key_uuid uuid | bytea - pgsodium | pgsodium | crypto_auth_hmacsha512 | message bytea, secret bytea | bytea - pgsodium | pgsodium | crypto_auth_hmacsha512_keygen | | bytea - pgsodium | pgsodium | crypto_auth_hmacsha512_verify | hash bytea, message bytea, key_id bigint, context bytea | boolean - pgsodium | pgsodium | crypto_auth_hmacsha512_verify | hash bytea, message bytea, secret bytea | boolean - pgsodium | pgsodium | crypto_auth_hmacsha512_verify | signature bytea, message bytea, key_uuid uuid | boolean - pgsodium | pgsodium | crypto_auth_keygen | | bytea - pgsodium | pgsodium | crypto_auth_verify | mac bytea, message bytea, key bytea | boolean - pgsodium | pgsodium | crypto_auth_verify | mac bytea, message bytea, key_id bigint, context bytea | boolean - pgsodium | pgsodium | crypto_auth_verify | mac bytea, message bytea, key_uuid uuid | boolean - pgsodium | pgsodium | crypto_box | message bytea, nonce bytea, public bytea, secret bytea | bytea - 
pgsodium | pgsodium | crypto_box_new_keypair | | pgsodium.crypto_box_keypair - pgsodium | pgsodium | crypto_box_new_seed | | bytea - pgsodium | pgsodium | crypto_box_noncegen | | bytea - pgsodium | pgsodium | crypto_box_open | ciphertext bytea, nonce bytea, public bytea, secret bytea | bytea - pgsodium | pgsodium | crypto_box_seal | message bytea, public_key bytea | bytea - pgsodium | pgsodium | crypto_box_seal_open | ciphertext bytea, public_key bytea, secret_key bytea | bytea - pgsodium | pgsodium | crypto_box_seed_new_keypair | seed bytea | pgsodium.crypto_box_keypair - pgsodium | pgsodium | crypto_cmp | text, text | boolean - pgsodium | pgsodium | crypto_generichash | message bytea, key bigint, context bytea | bytea - pgsodium | pgsodium | crypto_generichash | message bytea, key bytea | bytea - pgsodium | pgsodium | crypto_generichash | message bytea, key_uuid uuid | bytea - pgsodium | pgsodium | crypto_generichash_keygen | | bytea - pgsodium | pgsodium | crypto_hash_sha256 | message bytea | bytea - pgsodium | pgsodium | crypto_hash_sha512 | message bytea | bytea - pgsodium | pgsodium | crypto_kdf_derive_from_key | subkey_size bigint, subkey_id bigint, context bytea, primary_key bytea | bytea - pgsodium | pgsodium | crypto_kdf_derive_from_key | subkey_size integer, subkey_id bigint, context bytea, primary_key uuid | bytea - pgsodium | pgsodium | crypto_kdf_keygen | | bytea - pgsodium | pgsodium | crypto_kx_client_session_keys | client_pk bytea, client_sk bytea, server_pk bytea | pgsodium.crypto_kx_session - pgsodium | pgsodium | crypto_kx_new_keypair | | pgsodium.crypto_kx_keypair - pgsodium | pgsodium | crypto_kx_new_seed | | bytea - pgsodium | pgsodium | crypto_kx_seed_new_keypair | seed bytea | pgsodium.crypto_kx_keypair - pgsodium | pgsodium | crypto_kx_server_session_keys | server_pk bytea, server_sk bytea, client_pk bytea | pgsodium.crypto_kx_session - pgsodium | pgsodium | crypto_pwhash | password bytea, salt bytea | bytea - pgsodium | pgsodium | 
crypto_pwhash_saltgen | | bytea - pgsodium | pgsodium | crypto_pwhash_str | password bytea | bytea - pgsodium | pgsodium | crypto_pwhash_str_verify | hashed_password bytea, password bytea | boolean - pgsodium | pgsodium | crypto_secretbox | message bytea, nonce bytea, key bytea | bytea - pgsodium | pgsodium | crypto_secretbox | message bytea, nonce bytea, key_id bigint, context bytea | bytea - pgsodium | pgsodium | crypto_secretbox | message bytea, nonce bytea, key_uuid uuid | bytea - pgsodium | pgsodium | crypto_secretbox_keygen | | bytea - pgsodium | pgsodium | crypto_secretbox_noncegen | | bytea - pgsodium | pgsodium | crypto_secretbox_open | ciphertext bytea, nonce bytea, key bytea | bytea - pgsodium | pgsodium | crypto_secretbox_open | message bytea, nonce bytea, key_id bigint, context bytea | bytea - pgsodium | pgsodium | crypto_secretbox_open | message bytea, nonce bytea, key_uuid uuid | bytea - pgsodium | pgsodium | crypto_secretstream_keygen | | bytea - pgsodium | pgsodium | crypto_shorthash | message bytea, key bigint, context bytea | bytea - pgsodium | pgsodium | crypto_shorthash | message bytea, key bytea | bytea - pgsodium | pgsodium | crypto_shorthash | message bytea, key_uuid uuid | bytea - pgsodium | pgsodium | crypto_shorthash_keygen | | bytea - pgsodium | pgsodium | crypto_sign | message bytea, key bytea | bytea - pgsodium | pgsodium | crypto_sign_detached | message bytea, key bytea | bytea - pgsodium | pgsodium | crypto_sign_final_create | state bytea, key bytea | bytea - pgsodium | pgsodium | crypto_sign_final_verify | state bytea, signature bytea, key bytea | boolean - pgsodium | pgsodium | crypto_sign_init | | bytea - pgsodium | pgsodium | crypto_sign_new_keypair | | pgsodium.crypto_sign_keypair - pgsodium | pgsodium | crypto_sign_new_seed | | bytea - pgsodium | pgsodium | crypto_sign_open | signed_message bytea, key bytea | bytea - pgsodium | pgsodium | crypto_sign_seed_new_keypair | seed bytea | pgsodium.crypto_sign_keypair - pgsodium | 
pgsodium | crypto_sign_update | state bytea, message bytea | bytea - pgsodium | pgsodium | crypto_sign_update_agg | message bytea | bytea - pgsodium | pgsodium | crypto_sign_update_agg | state bytea, message bytea | bytea - pgsodium | pgsodium | crypto_sign_update_agg1 | state bytea, message bytea | bytea - pgsodium | pgsodium | crypto_sign_update_agg2 | cur_state bytea, initial_state bytea, message bytea | bytea - pgsodium | pgsodium | crypto_sign_verify_detached | sig bytea, message bytea, key bytea | boolean - pgsodium | pgsodium | crypto_signcrypt_new_keypair | | pgsodium.crypto_signcrypt_keypair - pgsodium | pgsodium | crypto_signcrypt_sign_after | state bytea, sender_sk bytea, ciphertext bytea | bytea - pgsodium | pgsodium | crypto_signcrypt_sign_before | sender bytea, recipient bytea, sender_sk bytea, recipient_pk bytea, additional bytea | pgsodium.crypto_signcrypt_state_key - pgsodium | pgsodium | crypto_signcrypt_verify_after | state bytea, signature bytea, sender_pk bytea, ciphertext bytea | boolean - pgsodium | pgsodium | crypto_signcrypt_verify_before | signature bytea, sender bytea, recipient bytea, additional bytea, sender_pk bytea, recipient_sk bytea | pgsodium.crypto_signcrypt_state_key - pgsodium | pgsodium | crypto_signcrypt_verify_public | signature bytea, sender bytea, recipient bytea, additional bytea, sender_pk bytea, ciphertext bytea | boolean - pgsodium | pgsodium | crypto_stream_xchacha20 | bigint, bytea, bigint, context bytea | bytea - pgsodium | pgsodium | crypto_stream_xchacha20 | bigint, bytea, bytea | bytea - pgsodium | pgsodium | crypto_stream_xchacha20_keygen | | bytea - pgsodium | pgsodium | crypto_stream_xchacha20_noncegen | | bytea - pgsodium | pgsodium | crypto_stream_xchacha20_xor | bytea, bytea, bigint, context bytea | bytea - pgsodium | pgsodium | crypto_stream_xchacha20_xor | bytea, bytea, bytea | bytea - pgsodium | pgsodium | crypto_stream_xchacha20_xor_ic | bytea, bytea, bigint, bigint, context bytea | bytea - pgsodium | 
pgsodium | crypto_stream_xchacha20_xor_ic | bytea, bytea, bigint, bytea | bytea - pgsodium | pgsodium | decrypted_columns | relid oid | text - pgsodium | pgsodium | derive_key | key_id bigint, key_len integer, context bytea | bytea - pgsodium | pgsodium | disable_security_label_trigger | | void - pgsodium | pgsodium | enable_security_label_trigger | | void - pgsodium | pgsodium | encrypted_column | relid oid, m record | text - pgsodium | pgsodium | encrypted_columns | relid oid | text - pgsodium | pgsodium | get_key_by_id | uuid | pgsodium.valid_key - pgsodium | pgsodium | get_key_by_name | text | pgsodium.valid_key - pgsodium | pgsodium | get_named_keys | filter text | SETOF pgsodium.valid_key - pgsodium | pgsodium | has_mask | role regrole, source_name text | boolean - pgsodium | pgsodium | key_encrypt_secret_raw_key | | trigger - pgsodium | pgsodium | mask_columns | source_relid oid | TABLE(attname name, key_id text, key_id_column text, associated_column text, nonce_column text, format_type text) - pgsodium | pgsodium | mask_role | masked_role regrole, source_name text, view_name text | void - pgsodium | pgsodium | pgsodium_derive | key_id bigint, key_len integer, context bytea | bytea - pgsodium | pgsodium | quote_assoc | text, boolean | text - pgsodium | pgsodium | randombytes_buf | size integer | bytea - pgsodium | pgsodium | randombytes_buf_deterministic | size integer, seed bytea | bytea - pgsodium | pgsodium | randombytes_new_seed | | bytea - pgsodium | pgsodium | randombytes_random | | integer - pgsodium | pgsodium | randombytes_uniform | upper_bound integer | integer - pgsodium | pgsodium | sodium_base642bin | base64 text | bytea - pgsodium | pgsodium | sodium_bin2base64 | bin bytea | text - pgsodium | pgsodium | trg_mask_update | | event_trigger - pgsodium | pgsodium | update_mask | target oid, debug boolean | void - pgsodium | pgsodium | update_masks | debug boolean | void - pgsodium | pgsodium | version | | text - pgstattuple | public | pg_relpages | 
relname regclass | bigint - pgstattuple | public | pg_relpages | relname text | bigint - pgstattuple | public | pgstatginindex | relname regclass, OUT version integer, OUT pending_pages integer, OUT pending_tuples bigint | record - pgstattuple | public | pgstathashindex | relname regclass, OUT version integer, OUT bucket_pages bigint, OUT overflow_pages bigint, OUT bitmap_pages bigint, OUT unused_pages bigint, OUT live_items bigint, OUT dead_items bigint, OUT free_percent double precision | record - pgstattuple | public | pgstatindex | relname regclass, OUT version integer, OUT tree_level integer, OUT index_size bigint, OUT root_block_no bigint, OUT internal_pages bigint, OUT leaf_pages bigint, OUT empty_pages bigint, OUT deleted_pages bigint, OUT avg_leaf_density double precision, OUT leaf_fragmentation double precision | record - pgstattuple | public | pgstatindex | relname text, OUT version integer, OUT tree_level integer, OUT index_size bigint, OUT root_block_no bigint, OUT internal_pages bigint, OUT leaf_pages bigint, OUT empty_pages bigint, OUT deleted_pages bigint, OUT avg_leaf_density double precision, OUT leaf_fragmentation double precision | record - pgstattuple | public | pgstattuple | relname text, OUT table_len bigint, OUT tuple_count bigint, OUT tuple_len bigint, OUT tuple_percent double precision, OUT dead_tuple_count bigint, OUT dead_tuple_len bigint, OUT dead_tuple_percent double precision, OUT free_space bigint, OUT free_percent double precision | record - pgstattuple | public | pgstattuple | reloid regclass, OUT table_len bigint, OUT tuple_count bigint, OUT tuple_len bigint, OUT tuple_percent double precision, OUT dead_tuple_count bigint, OUT dead_tuple_len bigint, OUT dead_tuple_percent double precision, OUT free_space bigint, OUT free_percent double precision | record - pgstattuple | public | pgstattuple_approx | reloid regclass, OUT table_len bigint, OUT scanned_percent double precision, OUT approx_tuple_count bigint, OUT approx_tuple_len 
bigint, OUT approx_tuple_percent double precision, OUT dead_tuple_count bigint, OUT dead_tuple_len bigint, OUT dead_tuple_percent double precision, OUT approx_free_space bigint, OUT approx_free_percent double precision | record - pgtap | public | _add | text, integer | integer - pgtap | public | _add | text, integer, text | integer - pgtap | public | _alike | boolean, anyelement, text, text | text - pgtap | public | _ancestor_of | name, name, integer | boolean - pgtap | public | _ancestor_of | name, name, name, name, integer | boolean - pgtap | public | _are | text, name[], name[], text | text - pgtap | public | _areni | text, text[], text[], text | text - pgtap | public | _array_to_sorted_string | name[], text | text - pgtap | public | _assets_are | text, text[], text[], text | text - pgtap | public | _cast_exists | name, name | boolean - pgtap | public | _cast_exists | name, name, name | boolean - pgtap | public | _cast_exists | name, name, name, name | boolean - pgtap | public | _cdi | name, name, anyelement | text - pgtap | public | _cdi | name, name, anyelement, text | text - pgtap | public | _cdi | name, name, name, anyelement, text | text - pgtap | public | _cexists | name, name | boolean - pgtap | public | _cexists | name, name, name | boolean - pgtap | public | _ckeys | name, character | name[] - pgtap | public | _ckeys | name, name, character | name[] - pgtap | public | _cleanup | | boolean - pgtap | public | _cmp_types | oid, name | boolean - pgtap | public | _col_is_null | name, name, name, text, boolean | text - pgtap | public | _col_is_null | name, name, text, boolean | text - pgtap | public | _constraint | name, character, name[], text, text | text - pgtap | public | _constraint | name, name, character, name[], text, text | text - pgtap | public | _contract_on | text | "char" - pgtap | public | _currtest | | integer - pgtap | public | _db_privs | | name[] - pgtap | public | _def_is | text, text, anyelement, text | text - pgtap | public | _definer | 
name | boolean - pgtap | public | _definer | name, name | boolean - pgtap | public | _definer | name, name, name[] | boolean - pgtap | public | _definer | name, name[] | boolean - pgtap | public | _dexists | name | boolean - pgtap | public | _dexists | name, name | boolean - pgtap | public | _do_ne | text, text, text, text | text - pgtap | public | _docomp | text, text, text, text | text - pgtap | public | _error_diag | text, text, text, text, text, text, text, text, text, text | text - pgtap | public | _expand_context | character | text - pgtap | public | _expand_on | character | text - pgtap | public | _expand_vol | character | text - pgtap | public | _ext_exists | name | boolean - pgtap | public | _ext_exists | name, name | boolean - pgtap | public | _extensions | | SETOF name - pgtap | public | _extensions | name | SETOF name - pgtap | public | _extras | character, name, name[] | name[] - pgtap | public | _extras | character, name[] | name[] - pgtap | public | _extras | character[], name, name[] | name[] - pgtap | public | _extras | character[], name[] | name[] - pgtap | public | _finish | integer, integer, integer, boolean | SETOF text - pgtap | public | _fkexists | name, name, name[] | boolean - pgtap | public | _fkexists | name, name[] | boolean - pgtap | public | _fprivs_are | text, name, name[], text | text - pgtap | public | _func_compare | name, name, anyelement, anyelement, text | text - pgtap | public | _func_compare | name, name, boolean, text | text - pgtap | public | _func_compare | name, name, name[], anyelement, anyelement, text | text - pgtap | public | _func_compare | name, name, name[], boolean, text | text - pgtap | public | _funkargs | name[] | text - pgtap | public | _get | text | integer - pgtap | public | _get_ac_privs | name, text | text[] - pgtap | public | _get_col_ns_type | name, name, name | text - pgtap | public | _get_col_privs | name, text, name | text[] - pgtap | public | _get_col_type | name, name | text - pgtap | public | 
_get_col_type | name, name, name | text - pgtap | public | _get_context | name, name | "char" - pgtap | public | _get_db_owner | name | name - pgtap | public | _get_db_privs | name, text | text[] - pgtap | public | _get_dtype | name | text - pgtap | public | _get_dtype | name, text, boolean | text - pgtap | public | _get_fdw_privs | name, text | text[] - pgtap | public | _get_func_owner | name, name, name[] | name - pgtap | public | _get_func_owner | name, name[] | name - pgtap | public | _get_func_privs | text, text | text[] - pgtap | public | _get_index_owner | name, name | name - pgtap | public | _get_index_owner | name, name, name | name - pgtap | public | _get_lang_privs | name, text | text[] - pgtap | public | _get_language_owner | name | name - pgtap | public | _get_latest | text | integer[] - pgtap | public | _get_latest | text, integer | integer - pgtap | public | _get_note | integer | text - pgtap | public | _get_note | text | text - pgtap | public | _get_opclass_owner | name | name - pgtap | public | _get_opclass_owner | name, name | name - pgtap | public | _get_rel_owner | character, name | name - pgtap | public | _get_rel_owner | character, name, name | name - pgtap | public | _get_rel_owner | character[], name | name - pgtap | public | _get_rel_owner | character[], name, name | name - pgtap | public | _get_rel_owner | name | name - pgtap | public | _get_rel_owner | name, name | name - pgtap | public | _get_schema_owner | name | name - pgtap | public | _get_schema_privs | name, text | text[] - pgtap | public | _get_sequence_privs | name, text | text[] - pgtap | public | _get_server_privs | name, text | text[] - pgtap | public | _get_table_privs | name, text | text[] - pgtap | public | _get_tablespace_owner | name | name - pgtap | public | _get_tablespaceprivs | name, text | text[] - pgtap | public | _get_type_owner | name | name - pgtap | public | _get_type_owner | name, name | name - pgtap | public | _got_func | name | boolean - pgtap | public | 
_got_func | name, name | boolean - pgtap | public | _got_func | name, name, name[] | boolean - pgtap | public | _got_func | name, name[] | boolean - pgtap | public | _grolist | name | oid[] - pgtap | public | _has_def | name, name | boolean - pgtap | public | _has_def | name, name, name | boolean - pgtap | public | _has_group | name | boolean - pgtap | public | _has_role | name | boolean - pgtap | public | _has_type | name, character[] | boolean - pgtap | public | _has_type | name, name, character[] | boolean - pgtap | public | _has_user | name | boolean - pgtap | public | _hasc | name, character | boolean - pgtap | public | _hasc | name, name, character | boolean - pgtap | public | _have_index | name, name | boolean - pgtap | public | _have_index | name, name, name | boolean - pgtap | public | _ident_array_to_sorted_string | name[], text | text - pgtap | public | _ident_array_to_string | name[], text | text - pgtap | public | _ikeys | name, name | text[] - pgtap | public | _ikeys | name, name, name | text[] - pgtap | public | _inherited | name | boolean - pgtap | public | _inherited | name, name | boolean - pgtap | public | _is_indexed | name, name, text[] | boolean - pgtap | public | _is_instead | name, name | boolean - pgtap | public | _is_instead | name, name, name | boolean - pgtap | public | _is_schema | name | boolean - pgtap | public | _is_super | name | boolean - pgtap | public | _is_trusted | name | boolean - pgtap | public | _is_verbose | | boolean - pgtap | public | _keys | name, character | SETOF name[] - pgtap | public | _keys | name, name, character | SETOF name[] - pgtap | public | _lang | name | name - pgtap | public | _lang | name, name | name - pgtap | public | _lang | name, name, name[] | name - pgtap | public | _lang | name, name[] | name - pgtap | public | _missing | character, name, name[] | name[] - pgtap | public | _missing | character, name[] | name[] - pgtap | public | _missing | character[], name, name[] | name[] - pgtap | public | 
_missing | character[], name[] | name[] - pgtap | public | _nosuch | name, name, name[] | text - pgtap | public | _op_exists | name, name, name | boolean - pgtap | public | _op_exists | name, name, name, name | boolean - pgtap | public | _op_exists | name, name, name, name, name | boolean - pgtap | public | _opc_exists | name | boolean - pgtap | public | _opc_exists | name, name | boolean - pgtap | public | _partof | name, name | boolean - pgtap | public | _partof | name, name, name, name | boolean - pgtap | public | _parts | name | SETOF name - pgtap | public | _parts | name, name | SETOF name - pgtap | public | _pg_sv_column_array | oid, smallint[] | name[] - pgtap | public | _pg_sv_table_accessible | oid, oid | boolean - pgtap | public | _pg_sv_type_array | oid[] | name[] - pgtap | public | _prokind | p_oid oid | "char" - pgtap | public | _query | text | text - pgtap | public | _quote_ident_like | text, text | text - pgtap | public | _refine_vol | text | text - pgtap | public | _relcomp | text, anyarray, text, text | text - pgtap | public | _relcomp | text, text, text, text | text - pgtap | public | _relcomp | text, text, text, text, text | text - pgtap | public | _relexists | name | boolean - pgtap | public | _relexists | name, name | boolean - pgtap | public | _relne | text, anyarray, text, text | text - pgtap | public | _relne | text, text, text, text | text - pgtap | public | _returns | name | text - pgtap | public | _returns | name, name | text - pgtap | public | _returns | name, name, name[] | text - pgtap | public | _returns | name, name[] | text - pgtap | public | _rexists | character, name | boolean - pgtap | public | _rexists | character, name, name | boolean - pgtap | public | _rexists | character[], name | boolean - pgtap | public | _rexists | character[], name, name | boolean - pgtap | public | _rule_on | name, name | "char" - pgtap | public | _rule_on | name, name, name | "char" - pgtap | public | _runem | text[], boolean | SETOF text - pgtap | 
public | _runner | text[], text[], text[], text[], text[] | SETOF text - pgtap | public | _set | integer, integer | integer - pgtap | public | _set | text, integer | integer - pgtap | public | _set | text, integer, text | integer - pgtap | public | _strict | name | boolean - pgtap | public | _strict | name, name | boolean - pgtap | public | _strict | name, name, name[] | boolean - pgtap | public | _strict | name, name[] | boolean - pgtap | public | _table_privs | | name[] - pgtap | public | _temptable | anyarray, text | text - pgtap | public | _temptable | text, text | text - pgtap | public | _temptypes | text | text - pgtap | public | _time_trials | text, integer, numeric | SETOF _time_trial_type - pgtap | public | _tlike | boolean, text, text, text | text - pgtap | public | _todo | | text - pgtap | public | _trig | name, name | boolean - pgtap | public | _trig | name, name, name | boolean - pgtap | public | _type_func | "char", name | boolean - pgtap | public | _type_func | "char", name, name | boolean - pgtap | public | _type_func | "char", name, name, name[] | boolean - pgtap | public | _type_func | "char", name, name[] | boolean - pgtap | public | _types_are | name, name[], text, character[] | text - pgtap | public | _types_are | name[], text, character[] | text - pgtap | public | _unalike | boolean, anyelement, text, text | text - pgtap | public | _vol | name | text - pgtap | public | _vol | name, name | text - pgtap | public | _vol | name, name, name[] | text - pgtap | public | _vol | name, name[] | text - pgtap | public | add_result | boolean, boolean, text, text, text | integer - pgtap | public | alike | anyelement, text | text - pgtap | public | alike | anyelement, text, text | text - pgtap | public | any_column_privs_are | name, name, name, name[] | text - pgtap | public | any_column_privs_are | name, name, name, name[], text | text - pgtap | public | any_column_privs_are | name, name, name[] | text - pgtap | public | any_column_privs_are | name, name, 
name[], text | text - pgtap | public | bag_eq | text, anyarray | text - pgtap | public | bag_eq | text, anyarray, text | text - pgtap | public | bag_eq | text, text | text - pgtap | public | bag_eq | text, text, text | text - pgtap | public | bag_has | text, text | text - pgtap | public | bag_has | text, text, text | text - pgtap | public | bag_hasnt | text, text | text - pgtap | public | bag_hasnt | text, text, text | text - pgtap | public | bag_ne | text, anyarray | text - pgtap | public | bag_ne | text, anyarray, text | text - pgtap | public | bag_ne | text, text | text - pgtap | public | bag_ne | text, text, text | text - pgtap | public | can | name, name[] | text - pgtap | public | can | name, name[], text | text - pgtap | public | can | name[] | text - pgtap | public | can | name[], text | text - pgtap | public | cast_context_is | name, name, text | text - pgtap | public | cast_context_is | name, name, text, text | text - pgtap | public | casts_are | text[] | text - pgtap | public | casts_are | text[], text | text - pgtap | public | check_test | text, boolean | SETOF text - pgtap | public | check_test | text, boolean, text | SETOF text - pgtap | public | check_test | text, boolean, text, text | SETOF text - pgtap | public | check_test | text, boolean, text, text, text | SETOF text - pgtap | public | check_test | text, boolean, text, text, text, boolean | SETOF text - pgtap | public | cmp_ok | anyelement, text, anyelement | text - pgtap | public | cmp_ok | anyelement, text, anyelement, text | text - pgtap | public | col_default_is | name, name, anyelement | text - pgtap | public | col_default_is | name, name, anyelement, text | text - pgtap | public | col_default_is | name, name, name, anyelement, text | text - pgtap | public | col_default_is | name, name, name, text, text | text - pgtap | public | col_default_is | name, name, text | text - pgtap | public | col_default_is | name, name, text, text | text - pgtap | public | col_has_check | name, name | text - 
pgtap | public | col_has_check | name, name, name, text | text - pgtap | public | col_has_check | name, name, name[], text | text - pgtap | public | col_has_check | name, name, text | text - pgtap | public | col_has_check | name, name[] | text - pgtap | public | col_has_check | name, name[], text | text - pgtap | public | col_has_default | name, name | text - pgtap | public | col_has_default | name, name, name, text | text - pgtap | public | col_has_default | name, name, text | text - pgtap | public | col_hasnt_default | name, name | text - pgtap | public | col_hasnt_default | name, name, name, text | text - pgtap | public | col_hasnt_default | name, name, text | text - pgtap | public | col_is_fk | name, name | text - pgtap | public | col_is_fk | name, name, name, text | text - pgtap | public | col_is_fk | name, name, name[], text | text - pgtap | public | col_is_fk | name, name, text | text - pgtap | public | col_is_fk | name, name[] | text - pgtap | public | col_is_fk | name, name[], text | text - pgtap | public | col_is_null | schema_name name, table_name name, column_name name, description text | text - pgtap | public | col_is_null | table_name name, column_name name, description text | text - pgtap | public | col_is_pk | name, name | text - pgtap | public | col_is_pk | name, name, name, text | text - pgtap | public | col_is_pk | name, name, name[], text | text - pgtap | public | col_is_pk | name, name, text | text - pgtap | public | col_is_pk | name, name[] | text - pgtap | public | col_is_pk | name, name[], text | text - pgtap | public | col_is_unique | name, name | text - pgtap | public | col_is_unique | name, name, name | text - pgtap | public | col_is_unique | name, name, name, text | text - pgtap | public | col_is_unique | name, name, name[] | text - pgtap | public | col_is_unique | name, name, name[], text | text - pgtap | public | col_is_unique | name, name, text | text - pgtap | public | col_is_unique | name, name[] | text - pgtap | public | 
col_is_unique | name, name[], text | text - pgtap | public | col_isnt_fk | name, name | text - pgtap | public | col_isnt_fk | name, name, name, text | text - pgtap | public | col_isnt_fk | name, name, name[], text | text - pgtap | public | col_isnt_fk | name, name, text | text - pgtap | public | col_isnt_fk | name, name[] | text - pgtap | public | col_isnt_fk | name, name[], text | text - pgtap | public | col_isnt_pk | name, name | text - pgtap | public | col_isnt_pk | name, name, name, text | text - pgtap | public | col_isnt_pk | name, name, name[], text | text - pgtap | public | col_isnt_pk | name, name, text | text - pgtap | public | col_isnt_pk | name, name[] | text - pgtap | public | col_isnt_pk | name, name[], text | text - pgtap | public | col_not_null | schema_name name, table_name name, column_name name, description text | text - pgtap | public | col_not_null | table_name name, column_name name, description text | text - pgtap | public | col_type_is | name, name, name, name, text | text - pgtap | public | col_type_is | name, name, name, name, text, text | text - pgtap | public | col_type_is | name, name, name, text | text - pgtap | public | col_type_is | name, name, name, text, text | text - pgtap | public | col_type_is | name, name, text | text - pgtap | public | col_type_is | name, name, text, text | text - pgtap | public | collect_tap | VARIADIC text[] | text - pgtap | public | collect_tap | character varying[] | text - pgtap | public | column_privs_are | name, name, name, name, name[] | text - pgtap | public | column_privs_are | name, name, name, name, name[], text | text - pgtap | public | column_privs_are | name, name, name, name[] | text - pgtap | public | column_privs_are | name, name, name, name[], text | text - pgtap | public | columns_are | name, name, name[] | text - pgtap | public | columns_are | name, name, name[], text | text - pgtap | public | columns_are | name, name[] | text - pgtap | public | columns_are | name, name[], text | text - 
pgtap | public | composite_owner_is | name, name | text - pgtap | public | composite_owner_is | name, name, name | text - pgtap | public | composite_owner_is | name, name, name, text | text - pgtap | public | composite_owner_is | name, name, text | text - pgtap | public | database_privs_are | name, name, name[] | text - pgtap | public | database_privs_are | name, name, name[], text | text - pgtap | public | db_owner_is | name, name | text - pgtap | public | db_owner_is | name, name, text | text - pgtap | public | diag | VARIADIC anyarray | text - pgtap | public | diag | VARIADIC text[] | text - pgtap | public | diag | msg anyelement | text - pgtap | public | diag | msg text | text - pgtap | public | diag_test_name | text | text - pgtap | public | display_oper | name, oid | text - pgtap | public | do_tap | | SETOF text - pgtap | public | do_tap | name | SETOF text - pgtap | public | do_tap | name, text | SETOF text - pgtap | public | do_tap | text | SETOF text - pgtap | public | doesnt_imatch | anyelement, text | text - pgtap | public | doesnt_imatch | anyelement, text, text | text - pgtap | public | doesnt_match | anyelement, text | text - pgtap | public | doesnt_match | anyelement, text, text | text - pgtap | public | domain_type_is | name, text, name, text | text - pgtap | public | domain_type_is | name, text, name, text, text | text - pgtap | public | domain_type_is | name, text, text | text - pgtap | public | domain_type_is | name, text, text, text | text - pgtap | public | domain_type_is | text, text | text - pgtap | public | domain_type_is | text, text, text | text - pgtap | public | domain_type_isnt | name, text, name, text | text - pgtap | public | domain_type_isnt | name, text, name, text, text | text - pgtap | public | domain_type_isnt | name, text, text | text - pgtap | public | domain_type_isnt | name, text, text, text | text - pgtap | public | domain_type_isnt | text, text | text - pgtap | public | domain_type_isnt | text, text, text | text - pgtap | 
public | domains_are | name, name[] | text - pgtap | public | domains_are | name, name[], text | text - pgtap | public | domains_are | name[] | text - pgtap | public | domains_are | name[], text | text - pgtap | public | enum_has_labels | name, name, name[] | text - pgtap | public | enum_has_labels | name, name, name[], text | text - pgtap | public | enum_has_labels | name, name[] | text - pgtap | public | enum_has_labels | name, name[], text | text - pgtap | public | enums_are | name, name[] | text - pgtap | public | enums_are | name, name[], text | text - pgtap | public | enums_are | name[] | text - pgtap | public | enums_are | name[], text | text - pgtap | public | extensions_are | name, name[] | text - pgtap | public | extensions_are | name, name[], text | text - pgtap | public | extensions_are | name[] | text - pgtap | public | extensions_are | name[], text | text - pgtap | public | fail | | text - pgtap | public | fail | text | text - pgtap | public | fdw_privs_are | name, name, name[] | text - pgtap | public | fdw_privs_are | name, name, name[], text | text - pgtap | public | findfuncs | name, text | text[] - pgtap | public | findfuncs | name, text, text | text[] - pgtap | public | findfuncs | text | text[] - pgtap | public | findfuncs | text, text | text[] - pgtap | public | finish | exception_on_failure boolean | SETOF text - pgtap | public | fk_ok | name, name, name, name | text - pgtap | public | fk_ok | name, name, name, name, name, name, text | text - pgtap | public | fk_ok | name, name, name, name, name, text | text - pgtap | public | fk_ok | name, name, name, name, text | text - pgtap | public | fk_ok | name, name, name[], name, name, name[] | text - pgtap | public | fk_ok | name, name, name[], name, name, name[], text | text - pgtap | public | fk_ok | name, name[], name, name[] | text - pgtap | public | fk_ok | name, name[], name, name[], text | text - pgtap | public | foreign_table_owner_is | name, name | text - pgtap | public | 
foreign_table_owner_is | name, name, name | text - pgtap | public | foreign_table_owner_is | name, name, name, text | text - pgtap | public | foreign_table_owner_is | name, name, text | text - pgtap | public | foreign_tables_are | name, name[] | text - pgtap | public | foreign_tables_are | name, name[], text | text - pgtap | public | foreign_tables_are | name[] | text - pgtap | public | foreign_tables_are | name[], text | text - pgtap | public | function_lang_is | name, name | text - pgtap | public | function_lang_is | name, name, name | text - pgtap | public | function_lang_is | name, name, name, text | text - pgtap | public | function_lang_is | name, name, name[], name | text - pgtap | public | function_lang_is | name, name, name[], name, text | text - pgtap | public | function_lang_is | name, name, text | text - pgtap | public | function_lang_is | name, name[], name | text - pgtap | public | function_lang_is | name, name[], name, text | text - pgtap | public | function_owner_is | name, name, name[], name | text - pgtap | public | function_owner_is | name, name, name[], name, text | text - pgtap | public | function_owner_is | name, name[], name | text - pgtap | public | function_owner_is | name, name[], name, text | text - pgtap | public | function_privs_are | name, name, name[], name, name[] | text - pgtap | public | function_privs_are | name, name, name[], name, name[], text | text - pgtap | public | function_privs_are | name, name[], name, name[] | text - pgtap | public | function_privs_are | name, name[], name, name[], text | text - pgtap | public | function_returns | name, name, name[], text | text - pgtap | public | function_returns | name, name, name[], text, text | text - pgtap | public | function_returns | name, name, text | text - pgtap | public | function_returns | name, name, text, text | text - pgtap | public | function_returns | name, name[], text | text - pgtap | public | function_returns | name, name[], text, text | text - pgtap | public | 
function_returns | name, text | text - pgtap | public | function_returns | name, text, text | text - pgtap | public | functions_are | name, name[] | text - pgtap | public | functions_are | name, name[], text | text - pgtap | public | functions_are | name[] | text - pgtap | public | functions_are | name[], text | text - pgtap | public | groups_are | name[] | text - pgtap | public | groups_are | name[], text | text - pgtap | public | has_cast | name, name | text - pgtap | public | has_cast | name, name, name | text - pgtap | public | has_cast | name, name, name, name | text - pgtap | public | has_cast | name, name, name, name, text | text - pgtap | public | has_cast | name, name, name, text | text - pgtap | public | has_cast | name, name, text | text - pgtap | public | has_check | name | text - pgtap | public | has_check | name, name, text | text - pgtap | public | has_check | name, text | text - pgtap | public | has_column | name, name | text - pgtap | public | has_column | name, name, name, text | text - pgtap | public | has_column | name, name, text | text - pgtap | public | has_composite | name | text - pgtap | public | has_composite | name, name, text | text - pgtap | public | has_composite | name, text | text - pgtap | public | has_domain | name | text - pgtap | public | has_domain | name, name | text - pgtap | public | has_domain | name, name, text | text - pgtap | public | has_domain | name, text | text - pgtap | public | has_enum | name | text - pgtap | public | has_enum | name, name | text - pgtap | public | has_enum | name, name, text | text - pgtap | public | has_enum | name, text | text - pgtap | public | has_extension | name | text - pgtap | public | has_extension | name, name | text - pgtap | public | has_extension | name, name, text | text - pgtap | public | has_extension | name, text | text - pgtap | public | has_fk | name | text - pgtap | public | has_fk | name, name, text | text - pgtap | public | has_fk | name, text | text - pgtap | public | 
has_foreign_table | name | text - pgtap | public | has_foreign_table | name, name | text - pgtap | public | has_foreign_table | name, name, text | text - pgtap | public | has_foreign_table | name, text | text - pgtap | public | has_function | name | text - pgtap | public | has_function | name, name | text - pgtap | public | has_function | name, name, name[] | text - pgtap | public | has_function | name, name, name[], text | text - pgtap | public | has_function | name, name, text | text - pgtap | public | has_function | name, name[] | text - pgtap | public | has_function | name, name[], text | text - pgtap | public | has_function | name, text | text - pgtap | public | has_group | name | text - pgtap | public | has_group | name, text | text - pgtap | public | has_index | name, name | text - pgtap | public | has_index | name, name, name | text - pgtap | public | has_index | name, name, name, name | text - pgtap | public | has_index | name, name, name, name, text | text - pgtap | public | has_index | name, name, name, name[] | text - pgtap | public | has_index | name, name, name, name[], text | text - pgtap | public | has_index | name, name, name, text | text - pgtap | public | has_index | name, name, name[] | text - pgtap | public | has_index | name, name, name[], text | text - pgtap | public | has_index | name, name, text | text - pgtap | public | has_inherited_tables | name | text - pgtap | public | has_inherited_tables | name, name | text - pgtap | public | has_inherited_tables | name, name, text | text - pgtap | public | has_inherited_tables | name, text | text - pgtap | public | has_language | name | text - pgtap | public | has_language | name, text | text - pgtap | public | has_leftop | name, name | text - pgtap | public | has_leftop | name, name, name | text - pgtap | public | has_leftop | name, name, name, name | text - pgtap | public | has_leftop | name, name, name, name, text | text - pgtap | public | has_leftop | name, name, name, text | text - pgtap | 
public | has_leftop | name, name, text | text - pgtap | public | has_materialized_view | name | text - pgtap | public | has_materialized_view | name, name, text | text - pgtap | public | has_materialized_view | name, text | text - pgtap | public | has_opclass | name | text - pgtap | public | has_opclass | name, name | text - pgtap | public | has_opclass | name, name, text | text - pgtap | public | has_opclass | name, text | text - pgtap | public | has_operator | name, name, name | text - pgtap | public | has_operator | name, name, name, name | text - pgtap | public | has_operator | name, name, name, name, name | text - pgtap | public | has_operator | name, name, name, name, name, text | text - pgtap | public | has_operator | name, name, name, name, text | text - pgtap | public | has_operator | name, name, name, text | text - pgtap | public | has_pk | name | text - pgtap | public | has_pk | name, name, text | text - pgtap | public | has_pk | name, text | text - pgtap | public | has_relation | name | text - pgtap | public | has_relation | name, name, text | text - pgtap | public | has_relation | name, text | text - pgtap | public | has_rightop | name, name | text - pgtap | public | has_rightop | name, name, name | text - pgtap | public | has_rightop | name, name, name, name | text - pgtap | public | has_rightop | name, name, name, name, text | text - pgtap | public | has_rightop | name, name, name, text | text - pgtap | public | has_rightop | name, name, text | text - pgtap | public | has_role | name | text - pgtap | public | has_role | name, text | text - pgtap | public | has_rule | name, name | text - pgtap | public | has_rule | name, name, name | text - pgtap | public | has_rule | name, name, name, text | text - pgtap | public | has_rule | name, name, text | text - pgtap | public | has_schema | name | text - pgtap | public | has_schema | name, text | text - pgtap | public | has_sequence | name | text - pgtap | public | has_sequence | name, name | text - pgtap | 
public | has_sequence | name, name, text | text - pgtap | public | has_sequence | name, text | text - pgtap | public | has_table | name | text - pgtap | public | has_table | name, name | text - pgtap | public | has_table | name, name, text | text - pgtap | public | has_table | name, text | text - pgtap | public | has_tablespace | name | text - pgtap | public | has_tablespace | name, text | text - pgtap | public | has_tablespace | name, text, text | text - pgtap | public | has_trigger | name, name | text - pgtap | public | has_trigger | name, name, name | text - pgtap | public | has_trigger | name, name, name, text | text - pgtap | public | has_trigger | name, name, text | text - pgtap | public | has_type | name | text - pgtap | public | has_type | name, name | text - pgtap | public | has_type | name, name, text | text - pgtap | public | has_type | name, text | text - pgtap | public | has_unique | text | text - pgtap | public | has_unique | text, text | text - pgtap | public | has_unique | text, text, text | text - pgtap | public | has_user | name | text - pgtap | public | has_user | name, text | text - pgtap | public | has_view | name | text - pgtap | public | has_view | name, name | text - pgtap | public | has_view | name, name, text | text - pgtap | public | has_view | name, text | text - pgtap | public | hasnt_cast | name, name | text - pgtap | public | hasnt_cast | name, name, name | text - pgtap | public | hasnt_cast | name, name, name, name | text - pgtap | public | hasnt_cast | name, name, name, name, text | text - pgtap | public | hasnt_cast | name, name, name, text | text - pgtap | public | hasnt_cast | name, name, text | text - pgtap | public | hasnt_column | name, name | text - pgtap | public | hasnt_column | name, name, name, text | text - pgtap | public | hasnt_column | name, name, text | text - pgtap | public | hasnt_composite | name | text - pgtap | public | hasnt_composite | name, name, text | text - pgtap | public | hasnt_composite | name, text | 
text - pgtap | public | hasnt_domain | name | text - pgtap | public | hasnt_domain | name, name | text - pgtap | public | hasnt_domain | name, name, text | text - pgtap | public | hasnt_domain | name, text | text - pgtap | public | hasnt_enum | name | text - pgtap | public | hasnt_enum | name, name | text - pgtap | public | hasnt_enum | name, name, text | text - pgtap | public | hasnt_enum | name, text | text - pgtap | public | hasnt_extension | name | text - pgtap | public | hasnt_extension | name, name | text - pgtap | public | hasnt_extension | name, name, text | text - pgtap | public | hasnt_extension | name, text | text - pgtap | public | hasnt_fk | name | text - pgtap | public | hasnt_fk | name, name, text | text - pgtap | public | hasnt_fk | name, text | text - pgtap | public | hasnt_foreign_table | name | text - pgtap | public | hasnt_foreign_table | name, name | text - pgtap | public | hasnt_foreign_table | name, name, text | text - pgtap | public | hasnt_foreign_table | name, text | text - pgtap | public | hasnt_function | name | text - pgtap | public | hasnt_function | name, name | text - pgtap | public | hasnt_function | name, name, name[] | text - pgtap | public | hasnt_function | name, name, name[], text | text - pgtap | public | hasnt_function | name, name, text | text - pgtap | public | hasnt_function | name, name[] | text - pgtap | public | hasnt_function | name, name[], text | text - pgtap | public | hasnt_function | name, text | text - pgtap | public | hasnt_group | name | text - pgtap | public | hasnt_group | name, text | text - pgtap | public | hasnt_index | name, name | text - pgtap | public | hasnt_index | name, name, name | text - pgtap | public | hasnt_index | name, name, name, text | text - pgtap | public | hasnt_index | name, name, text | text - pgtap | public | hasnt_inherited_tables | name | text - pgtap | public | hasnt_inherited_tables | name, name | text - pgtap | public | hasnt_inherited_tables | name, name, text | text - pgtap | 
public | hasnt_inherited_tables | name, text | text - pgtap | public | hasnt_language | name | text - pgtap | public | hasnt_language | name, text | text - pgtap | public | hasnt_leftop | name, name | text - pgtap | public | hasnt_leftop | name, name, name | text - pgtap | public | hasnt_leftop | name, name, name, name | text - pgtap | public | hasnt_leftop | name, name, name, name, text | text - pgtap | public | hasnt_leftop | name, name, name, text | text - pgtap | public | hasnt_leftop | name, name, text | text - pgtap | public | hasnt_materialized_view | name | text - pgtap | public | hasnt_materialized_view | name, name, text | text - pgtap | public | hasnt_materialized_view | name, text | text - pgtap | public | hasnt_opclass | name | text - pgtap | public | hasnt_opclass | name, name | text - pgtap | public | hasnt_opclass | name, name, text | text - pgtap | public | hasnt_opclass | name, text | text - pgtap | public | hasnt_operator | name, name, name | text - pgtap | public | hasnt_operator | name, name, name, name | text - pgtap | public | hasnt_operator | name, name, name, name, name | text - pgtap | public | hasnt_operator | name, name, name, name, name, text | text - pgtap | public | hasnt_operator | name, name, name, name, text | text - pgtap | public | hasnt_operator | name, name, name, text | text - pgtap | public | hasnt_pk | name | text - pgtap | public | hasnt_pk | name, name, text | text - pgtap | public | hasnt_pk | name, text | text - pgtap | public | hasnt_relation | name | text - pgtap | public | hasnt_relation | name, name, text | text - pgtap | public | hasnt_relation | name, text | text - pgtap | public | hasnt_rightop | name, name | text - pgtap | public | hasnt_rightop | name, name, name | text - pgtap | public | hasnt_rightop | name, name, name, name | text - pgtap | public | hasnt_rightop | name, name, name, name, text | text - pgtap | public | hasnt_rightop | name, name, name, text | text - pgtap | public | hasnt_rightop | name, 
name, text | text - pgtap | public | hasnt_role | name | text - pgtap | public | hasnt_role | name, text | text - pgtap | public | hasnt_rule | name, name | text - pgtap | public | hasnt_rule | name, name, name | text - pgtap | public | hasnt_rule | name, name, name, text | text - pgtap | public | hasnt_rule | name, name, text | text - pgtap | public | hasnt_schema | name | text - pgtap | public | hasnt_schema | name, text | text - pgtap | public | hasnt_sequence | name | text - pgtap | public | hasnt_sequence | name, name, text | text - pgtap | public | hasnt_sequence | name, text | text - pgtap | public | hasnt_table | name | text - pgtap | public | hasnt_table | name, name | text - pgtap | public | hasnt_table | name, name, text | text - pgtap | public | hasnt_table | name, text | text - pgtap | public | hasnt_tablespace | name | text - pgtap | public | hasnt_tablespace | name, text | text - pgtap | public | hasnt_trigger | name, name | text - pgtap | public | hasnt_trigger | name, name, name | text - pgtap | public | hasnt_trigger | name, name, name, text | text - pgtap | public | hasnt_trigger | name, name, text | text - pgtap | public | hasnt_type | name | text - pgtap | public | hasnt_type | name, name | text - pgtap | public | hasnt_type | name, name, text | text - pgtap | public | hasnt_type | name, text | text - pgtap | public | hasnt_user | name | text - pgtap | public | hasnt_user | name, text | text - pgtap | public | hasnt_view | name | text - pgtap | public | hasnt_view | name, name | text - pgtap | public | hasnt_view | name, name, text | text - pgtap | public | hasnt_view | name, text | text - pgtap | public | ialike | anyelement, text | text - pgtap | public | ialike | anyelement, text, text | text - pgtap | public | imatches | anyelement, text | text - pgtap | public | imatches | anyelement, text, text | text - pgtap | public | in_todo | | boolean - pgtap | public | index_is_primary | name | text - pgtap | public | index_is_primary | name, name | 
text - pgtap | public | index_is_primary | name, name, name | text - pgtap | public | index_is_primary | name, name, name, text | text - pgtap | public | index_is_type | name, name | text - pgtap | public | index_is_type | name, name, name | text - pgtap | public | index_is_type | name, name, name, name | text - pgtap | public | index_is_type | name, name, name, name, text | text - pgtap | public | index_is_unique | name | text - pgtap | public | index_is_unique | name, name | text - pgtap | public | index_is_unique | name, name, name | text - pgtap | public | index_is_unique | name, name, name, text | text - pgtap | public | index_owner_is | name, name, name | text - pgtap | public | index_owner_is | name, name, name, name | text - pgtap | public | index_owner_is | name, name, name, name, text | text - pgtap | public | index_owner_is | name, name, name, text | text - pgtap | public | indexes_are | name, name, name[] | text - pgtap | public | indexes_are | name, name, name[], text | text - pgtap | public | indexes_are | name, name[] | text - pgtap | public | indexes_are | name, name[], text | text - pgtap | public | is | anyelement, anyelement | text - pgtap | public | is | anyelement, anyelement, text | text - pgtap | public | is_aggregate | name | text - pgtap | public | is_aggregate | name, name | text - pgtap | public | is_aggregate | name, name, name[] | text - pgtap | public | is_aggregate | name, name, name[], text | text - pgtap | public | is_aggregate | name, name, text | text - pgtap | public | is_aggregate | name, name[] | text - pgtap | public | is_aggregate | name, name[], text | text - pgtap | public | is_aggregate | name, text | text - pgtap | public | is_ancestor_of | name, name | text - pgtap | public | is_ancestor_of | name, name, integer | text - pgtap | public | is_ancestor_of | name, name, integer, text | text - pgtap | public | is_ancestor_of | name, name, name, name | text - pgtap | public | is_ancestor_of | name, name, name, name, integer | 
text - pgtap | public | is_ancestor_of | name, name, name, name, integer, text | text - pgtap | public | is_ancestor_of | name, name, name, name, text | text - pgtap | public | is_ancestor_of | name, name, text | text - pgtap | public | is_clustered | name | text - pgtap | public | is_clustered | name, name | text - pgtap | public | is_clustered | name, name, name | text - pgtap | public | is_clustered | name, name, name, text | text - pgtap | public | is_definer | name | text - pgtap | public | is_definer | name, name | text - pgtap | public | is_definer | name, name, name[] | text - pgtap | public | is_definer | name, name, name[], text | text - pgtap | public | is_definer | name, name, text | text - pgtap | public | is_definer | name, name[] | text - pgtap | public | is_definer | name, name[], text | text - pgtap | public | is_definer | name, text | text - pgtap | public | is_descendent_of | name, name | text - pgtap | public | is_descendent_of | name, name, integer | text - pgtap | public | is_descendent_of | name, name, integer, text | text - pgtap | public | is_descendent_of | name, name, name, name | text - pgtap | public | is_descendent_of | name, name, name, name, integer | text - pgtap | public | is_descendent_of | name, name, name, name, integer, text | text - pgtap | public | is_descendent_of | name, name, name, name, text | text - pgtap | public | is_descendent_of | name, name, text | text - pgtap | public | is_empty | text | text - pgtap | public | is_empty | text, text | text - pgtap | public | is_indexed | name, name | text - pgtap | public | is_indexed | name, name, name | text - pgtap | public | is_indexed | name, name, name, text | text - pgtap | public | is_indexed | name, name, name[] | text - pgtap | public | is_indexed | name, name, name[], text | text - pgtap | public | is_indexed | name, name[] | text - pgtap | public | is_indexed | name, name[], text | text - pgtap | public | is_member_of | name, name | text - pgtap | public | is_member_of 
| name, name, text | text - pgtap | public | is_member_of | name, name[] | text - pgtap | public | is_member_of | name, name[], text | text - pgtap | public | is_normal_function | name | text - pgtap | public | is_normal_function | name, name | text - pgtap | public | is_normal_function | name, name, name[] | text - pgtap | public | is_normal_function | name, name, name[], text | text - pgtap | public | is_normal_function | name, name, text | text - pgtap | public | is_normal_function | name, name[] | text - pgtap | public | is_normal_function | name, name[], text | text - pgtap | public | is_normal_function | name, text | text - pgtap | public | is_partition_of | name, name | text - pgtap | public | is_partition_of | name, name, name, name | text - pgtap | public | is_partition_of | name, name, name, name, text | text - pgtap | public | is_partition_of | name, name, text | text - pgtap | public | is_partitioned | name | text - pgtap | public | is_partitioned | name, name | text - pgtap | public | is_partitioned | name, name, text | text - pgtap | public | is_partitioned | name, text | text - pgtap | public | is_procedure | name | text - pgtap | public | is_procedure | name, name | text - pgtap | public | is_procedure | name, name, name[] | text - pgtap | public | is_procedure | name, name, name[], text | text - pgtap | public | is_procedure | name, name, text | text - pgtap | public | is_procedure | name, name[] | text - pgtap | public | is_procedure | name, name[], text | text - pgtap | public | is_procedure | name, text | text - pgtap | public | is_strict | name | text - pgtap | public | is_strict | name, name | text - pgtap | public | is_strict | name, name, name[] | text - pgtap | public | is_strict | name, name, name[], text | text - pgtap | public | is_strict | name, name, text | text - pgtap | public | is_strict | name, name[] | text - pgtap | public | is_strict | name, name[], text | text - pgtap | public | is_strict | name, text | text - pgtap | public | 
is_superuser | name | text - pgtap | public | is_superuser | name, text | text - pgtap | public | is_window | name | text - pgtap | public | is_window | name, name | text - pgtap | public | is_window | name, name, name[] | text - pgtap | public | is_window | name, name, name[], text | text - pgtap | public | is_window | name, name, text | text - pgtap | public | is_window | name, name[] | text - pgtap | public | is_window | name, name[], text | text - pgtap | public | is_window | name, text | text - pgtap | public | isa_ok | anyelement, regtype | text - pgtap | public | isa_ok | anyelement, regtype, text | text - pgtap | public | isnt | anyelement, anyelement | text - pgtap | public | isnt | anyelement, anyelement, text | text - pgtap | public | isnt_aggregate | name | text - pgtap | public | isnt_aggregate | name, name | text - pgtap | public | isnt_aggregate | name, name, name[] | text - pgtap | public | isnt_aggregate | name, name, name[], text | text - pgtap | public | isnt_aggregate | name, name, text | text - pgtap | public | isnt_aggregate | name, name[] | text - pgtap | public | isnt_aggregate | name, name[], text | text - pgtap | public | isnt_aggregate | name, text | text - pgtap | public | isnt_ancestor_of | name, name | text - pgtap | public | isnt_ancestor_of | name, name, integer | text - pgtap | public | isnt_ancestor_of | name, name, integer, text | text - pgtap | public | isnt_ancestor_of | name, name, name, name | text - pgtap | public | isnt_ancestor_of | name, name, name, name, integer | text - pgtap | public | isnt_ancestor_of | name, name, name, name, integer, text | text - pgtap | public | isnt_ancestor_of | name, name, name, name, text | text - pgtap | public | isnt_ancestor_of | name, name, text | text - pgtap | public | isnt_definer | name | text - pgtap | public | isnt_definer | name, name | text - pgtap | public | isnt_definer | name, name, name[] | text - pgtap | public | isnt_definer | name, name, name[], text | text - pgtap | public | 
isnt_definer | name, name, text | text - pgtap | public | isnt_definer | name, name[] | text - pgtap | public | isnt_definer | name, name[], text | text - pgtap | public | isnt_definer | name, text | text - pgtap | public | isnt_descendent_of | name, name | text - pgtap | public | isnt_descendent_of | name, name, integer | text - pgtap | public | isnt_descendent_of | name, name, integer, text | text - pgtap | public | isnt_descendent_of | name, name, name, name | text - pgtap | public | isnt_descendent_of | name, name, name, name, integer | text - pgtap | public | isnt_descendent_of | name, name, name, name, integer, text | text - pgtap | public | isnt_descendent_of | name, name, name, name, text | text - pgtap | public | isnt_descendent_of | name, name, text | text - pgtap | public | isnt_empty | text | text - pgtap | public | isnt_empty | text, text | text - pgtap | public | isnt_member_of | name, name | text - pgtap | public | isnt_member_of | name, name, text | text - pgtap | public | isnt_member_of | name, name[] | text - pgtap | public | isnt_member_of | name, name[], text | text - pgtap | public | isnt_normal_function | name | text - pgtap | public | isnt_normal_function | name, name | text - pgtap | public | isnt_normal_function | name, name, name[] | text - pgtap | public | isnt_normal_function | name, name, name[], text | text - pgtap | public | isnt_normal_function | name, name, text | text - pgtap | public | isnt_normal_function | name, name[] | text - pgtap | public | isnt_normal_function | name, name[], text | text - pgtap | public | isnt_normal_function | name, text | text - pgtap | public | isnt_partitioned | name | text - pgtap | public | isnt_partitioned | name, name | text - pgtap | public | isnt_partitioned | name, name, text | text - pgtap | public | isnt_partitioned | name, text | text - pgtap | public | isnt_procedure | name | text - pgtap | public | isnt_procedure | name, name | text - pgtap | public | isnt_procedure | name, name, name[] | 
text - pgtap | public | isnt_procedure | name, name, name[], text | text - pgtap | public | isnt_procedure | name, name, text | text - pgtap | public | isnt_procedure | name, name[] | text - pgtap | public | isnt_procedure | name, name[], text | text - pgtap | public | isnt_procedure | name, text | text - pgtap | public | isnt_strict | name | text - pgtap | public | isnt_strict | name, name | text - pgtap | public | isnt_strict | name, name, name[] | text - pgtap | public | isnt_strict | name, name, name[], text | text - pgtap | public | isnt_strict | name, name, text | text - pgtap | public | isnt_strict | name, name[] | text - pgtap | public | isnt_strict | name, name[], text | text - pgtap | public | isnt_strict | name, text | text - pgtap | public | isnt_superuser | name | text - pgtap | public | isnt_superuser | name, text | text - pgtap | public | isnt_window | name | text - pgtap | public | isnt_window | name, name | text - pgtap | public | isnt_window | name, name, name[] | text - pgtap | public | isnt_window | name, name, name[], text | text - pgtap | public | isnt_window | name, name, text | text - pgtap | public | isnt_window | name, name[] | text - pgtap | public | isnt_window | name, name[], text | text - pgtap | public | isnt_window | name, text | text - pgtap | public | language_is_trusted | name | text - pgtap | public | language_is_trusted | name, text | text - pgtap | public | language_owner_is | name, name | text - pgtap | public | language_owner_is | name, name, text | text - pgtap | public | language_privs_are | name, name, name[] | text - pgtap | public | language_privs_are | name, name, name[], text | text - pgtap | public | languages_are | name[] | text - pgtap | public | languages_are | name[], text | text - pgtap | public | lives_ok | text | text - pgtap | public | lives_ok | text, text | text - pgtap | public | matches | anyelement, text | text - pgtap | public | matches | anyelement, text, text | text - pgtap | public | 
materialized_view_owner_is | name, name | text - pgtap | public | materialized_view_owner_is | name, name, name | text - pgtap | public | materialized_view_owner_is | name, name, name, text | text - pgtap | public | materialized_view_owner_is | name, name, text | text - pgtap | public | materialized_views_are | name, name[] | text - pgtap | public | materialized_views_are | name, name[], text | text - pgtap | public | materialized_views_are | name[] | text - pgtap | public | materialized_views_are | name[], text | text - pgtap | public | no_plan | | SETOF boolean - pgtap | public | num_failed | | integer - pgtap | public | ok | boolean | text - pgtap | public | ok | boolean, text | text - pgtap | public | opclass_owner_is | name, name | text - pgtap | public | opclass_owner_is | name, name, name | text - pgtap | public | opclass_owner_is | name, name, name, text | text - pgtap | public | opclass_owner_is | name, name, text | text - pgtap | public | opclasses_are | name, name[] | text - pgtap | public | opclasses_are | name, name[], text | text - pgtap | public | opclasses_are | name[] | text - pgtap | public | opclasses_are | name[], text | text - pgtap | public | operators_are | name, text[] | text - pgtap | public | operators_are | name, text[], text | text - pgtap | public | operators_are | text[] | text - pgtap | public | operators_are | text[], text | text - pgtap | public | os_name | | text - pgtap | public | partitions_are | name, name, name[] | text - pgtap | public | partitions_are | name, name, name[], text | text - pgtap | public | partitions_are | name, name[] | text - pgtap | public | partitions_are | name, name[], text | text - pgtap | public | pass | | text - pgtap | public | pass | text | text - pgtap | public | performs_ok | text, numeric | text - pgtap | public | performs_ok | text, numeric, text | text - pgtap | public | performs_within | text, numeric, numeric | text - pgtap | public | performs_within | text, numeric, numeric, integer | text - 
pgtap | public | performs_within | text, numeric, numeric, integer, text | text - pgtap | public | performs_within | text, numeric, numeric, text | text - pgtap | public | pg_version | | text - pgtap | public | pg_version_num | | integer - pgtap | public | pgtap_version | | numeric - pgtap | public | plan | integer | text - pgtap | public | policies_are | name, name, name[] | text - pgtap | public | policies_are | name, name, name[], text | text - pgtap | public | policies_are | name, name[] | text - pgtap | public | policies_are | name, name[], text | text - pgtap | public | policy_cmd_is | name, name, name, text | text - pgtap | public | policy_cmd_is | name, name, name, text, text | text - pgtap | public | policy_cmd_is | name, name, text | text - pgtap | public | policy_cmd_is | name, name, text, text | text - pgtap | public | policy_roles_are | name, name, name, name[] | text - pgtap | public | policy_roles_are | name, name, name, name[], text | text - pgtap | public | policy_roles_are | name, name, name[] | text - pgtap | public | policy_roles_are | name, name, name[], text | text - pgtap | public | relation_owner_is | name, name | text - pgtap | public | relation_owner_is | name, name, name | text - pgtap | public | relation_owner_is | name, name, name, text | text - pgtap | public | relation_owner_is | name, name, text | text - pgtap | public | results_eq | refcursor, anyarray | text - pgtap | public | results_eq | refcursor, anyarray, text | text - pgtap | public | results_eq | refcursor, refcursor | text - pgtap | public | results_eq | refcursor, refcursor, text | text - pgtap | public | results_eq | refcursor, text | text - pgtap | public | results_eq | refcursor, text, text | text - pgtap | public | results_eq | text, anyarray | text - pgtap | public | results_eq | text, anyarray, text | text - pgtap | public | results_eq | text, refcursor | text - pgtap | public | results_eq | text, refcursor, text | text - pgtap | public | results_eq | text, text | 
text - pgtap | public | results_eq | text, text, text | text - pgtap | public | results_ne | refcursor, anyarray | text - pgtap | public | results_ne | refcursor, anyarray, text | text - pgtap | public | results_ne | refcursor, refcursor | text - pgtap | public | results_ne | refcursor, refcursor, text | text - pgtap | public | results_ne | refcursor, text | text - pgtap | public | results_ne | refcursor, text, text | text - pgtap | public | results_ne | text, anyarray | text - pgtap | public | results_ne | text, anyarray, text | text - pgtap | public | results_ne | text, refcursor | text - pgtap | public | results_ne | text, refcursor, text | text - pgtap | public | results_ne | text, text | text - pgtap | public | results_ne | text, text, text | text - pgtap | public | roles_are | name[] | text - pgtap | public | roles_are | name[], text | text - pgtap | public | row_eq | text, anyelement | text - pgtap | public | row_eq | text, anyelement, text | text - pgtap | public | rule_is_instead | name, name | text - pgtap | public | rule_is_instead | name, name, name | text - pgtap | public | rule_is_instead | name, name, name, text | text - pgtap | public | rule_is_instead | name, name, text | text - pgtap | public | rule_is_on | name, name, name, text | text - pgtap | public | rule_is_on | name, name, name, text, text | text - pgtap | public | rule_is_on | name, name, text | text - pgtap | public | rule_is_on | name, name, text, text | text - pgtap | public | rules_are | name, name, name[] | text - pgtap | public | rules_are | name, name, name[], text | text - pgtap | public | rules_are | name, name[] | text - pgtap | public | rules_are | name, name[], text | text - pgtap | public | runtests | | SETOF text - pgtap | public | runtests | name | SETOF text - pgtap | public | runtests | name, text | SETOF text - pgtap | public | runtests | text | SETOF text - pgtap | public | schema_owner_is | name, name | text - pgtap | public | schema_owner_is | name, name, text | text - 
pgtap | public | schema_privs_are | name, name, name[] | text - pgtap | public | schema_privs_are | name, name, name[], text | text - pgtap | public | schemas_are | name[] | text - pgtap | public | schemas_are | name[], text | text - pgtap | public | sequence_owner_is | name, name | text - pgtap | public | sequence_owner_is | name, name, name | text - pgtap | public | sequence_owner_is | name, name, name, text | text - pgtap | public | sequence_owner_is | name, name, text | text - pgtap | public | sequence_privs_are | name, name, name, name[] | text - pgtap | public | sequence_privs_are | name, name, name, name[], text | text - pgtap | public | sequence_privs_are | name, name, name[] | text - pgtap | public | sequence_privs_are | name, name, name[], text | text - pgtap | public | sequences_are | name, name[] | text - pgtap | public | sequences_are | name, name[], text | text - pgtap | public | sequences_are | name[] | text - pgtap | public | sequences_are | name[], text | text - pgtap | public | server_privs_are | name, name, name[] | text - pgtap | public | server_privs_are | name, name, name[], text | text - pgtap | public | set_eq | text, anyarray | text - pgtap | public | set_eq | text, anyarray, text | text - pgtap | public | set_eq | text, text | text - pgtap | public | set_eq | text, text, text | text - pgtap | public | set_has | text, text | text - pgtap | public | set_has | text, text, text | text - pgtap | public | set_hasnt | text, text | text - pgtap | public | set_hasnt | text, text, text | text - pgtap | public | set_ne | text, anyarray | text - pgtap | public | set_ne | text, anyarray, text | text - pgtap | public | set_ne | text, text | text - pgtap | public | set_ne | text, text, text | text - pgtap | public | skip | integer | text - pgtap | public | skip | integer, text | text - pgtap | public | skip | text | text - pgtap | public | skip | why text, how_many integer | text - pgtap | public | table_owner_is | name, name | text - pgtap | public | 
table_owner_is | name, name, name | text - pgtap | public | table_owner_is | name, name, name, text | text - pgtap | public | table_owner_is | name, name, text | text - pgtap | public | table_privs_are | name, name, name, name[] | text - pgtap | public | table_privs_are | name, name, name, name[], text | text - pgtap | public | table_privs_are | name, name, name[] | text - pgtap | public | table_privs_are | name, name, name[], text | text - pgtap | public | tables_are | name, name[] | text - pgtap | public | tables_are | name, name[], text | text - pgtap | public | tables_are | name[] | text - pgtap | public | tables_are | name[], text | text - pgtap | public | tablespace_owner_is | name, name | text - pgtap | public | tablespace_owner_is | name, name, text | text - pgtap | public | tablespace_privs_are | name, name, name[] | text - pgtap | public | tablespace_privs_are | name, name, name[], text | text - pgtap | public | tablespaces_are | name[] | text - pgtap | public | tablespaces_are | name[], text | text - pgtap | public | throws_ilike | text, text | text - pgtap | public | throws_ilike | text, text, text | text - pgtap | public | throws_imatching | text, text | text - pgtap | public | throws_imatching | text, text, text | text - pgtap | public | throws_like | text, text | text - pgtap | public | throws_like | text, text, text | text - pgtap | public | throws_matching | text, text | text - pgtap | public | throws_matching | text, text, text | text - pgtap | public | throws_ok | text | text - pgtap | public | throws_ok | text, character, text, text | text - pgtap | public | throws_ok | text, integer | text - pgtap | public | throws_ok | text, integer, text | text - pgtap | public | throws_ok | text, integer, text, text | text - pgtap | public | throws_ok | text, text | text - pgtap | public | throws_ok | text, text, text | text - pgtap | public | todo | how_many integer | SETOF boolean - pgtap | public | todo | how_many integer, why text | SETOF boolean - pgtap 
| public | todo | why text | SETOF boolean - pgtap | public | todo | why text, how_many integer | SETOF boolean - pgtap | public | todo_end | | SETOF boolean - pgtap | public | todo_start | | SETOF boolean - pgtap | public | todo_start | text | SETOF boolean - pgtap | public | trigger_is | name, name, name | text - pgtap | public | trigger_is | name, name, name, name, name | text - pgtap | public | trigger_is | name, name, name, name, name, text | text - pgtap | public | trigger_is | name, name, name, text | text - pgtap | public | triggers_are | name, name, name[] | text - pgtap | public | triggers_are | name, name, name[], text | text - pgtap | public | triggers_are | name, name[] | text - pgtap | public | triggers_are | name, name[], text | text - pgtap | public | type_owner_is | name, name | text - pgtap | public | type_owner_is | name, name, name | text - pgtap | public | type_owner_is | name, name, name, text | text - pgtap | public | type_owner_is | name, name, text | text - pgtap | public | types_are | name, name[] | text - pgtap | public | types_are | name, name[], text | text - pgtap | public | types_are | name[] | text - pgtap | public | types_are | name[], text | text - pgtap | public | unalike | anyelement, text | text - pgtap | public | unalike | anyelement, text, text | text - pgtap | public | unialike | anyelement, text | text - pgtap | public | unialike | anyelement, text, text | text - pgtap | public | users_are | name[] | text - pgtap | public | users_are | name[], text | text - pgtap | public | view_owner_is | name, name | text - pgtap | public | view_owner_is | name, name, name | text - pgtap | public | view_owner_is | name, name, name, text | text - pgtap | public | view_owner_is | name, name, text | text - pgtap | public | views_are | name, name[] | text - pgtap | public | views_are | name, name[], text | text - pgtap | public | views_are | name[] | text - pgtap | public | views_are | name[], text | text - pgtap | public | volatility_is | 
name, name, name[], text | text - pgtap | public | volatility_is | name, name, name[], text, text | text - pgtap | public | volatility_is | name, name, text | text - pgtap | public | volatility_is | name, name, text, text | text - pgtap | public | volatility_is | name, name[], text | text - pgtap | public | volatility_is | name, name[], text, text | text - pgtap | public | volatility_is | name, text | text - pgtap | public | volatility_is | name, text, text | text - plcoffee | pg_catalog | plcoffee_call_handler | | language_handler - plcoffee | pg_catalog | plcoffee_call_validator | oid | void - plcoffee | pg_catalog | plcoffee_inline_handler | internal | void - plls | pg_catalog | plls_call_handler | | language_handler - plls | pg_catalog | plls_call_validator | oid | void - plls | pg_catalog | plls_inline_handler | internal | void - plpgsql | pg_catalog | plpgsql_call_handler | | language_handler - plpgsql | pg_catalog | plpgsql_inline_handler | internal | void - plpgsql | pg_catalog | plpgsql_validator | oid | void - plpgsql_check | public | __plpgsql_show_dependency_tb | funcoid regprocedure, relid regclass, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype | TABLE(type text, oid oid, schema text, name text, params text) - plpgsql_check | public | __plpgsql_show_dependency_tb | name text, relid regclass, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype | TABLE(type text, oid oid, schema text, name text, params text) - plpgsql_check | public | plpgsql_check_function | funcoid regprocedure, relid regclass, format text, fatal_errors boolean, other_warnings boolean, performance_warnings boolean, extra_warnings boolean, security_warnings boolean, compatibility_warnings boolean, oldtable name, newtable name, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, 
anycompatiblerangetype regtype, without_warnings boolean, all_warnings boolean, use_incomment_options boolean, incomment_options_usage_warning boolean, constant_tracing boolean | SETOF text - plpgsql_check | public | plpgsql_check_function | name text, relid regclass, format text, fatal_errors boolean, other_warnings boolean, performance_warnings boolean, extra_warnings boolean, security_warnings boolean, compatibility_warnings boolean, oldtable name, newtable name, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype, without_warnings boolean, all_warnings boolean, use_incomment_options boolean, incomment_options_usage_warning boolean, constant_tracing boolean | SETOF text - plpgsql_check | public | plpgsql_check_function_tb | funcoid regprocedure, relid regclass, fatal_errors boolean, other_warnings boolean, performance_warnings boolean, extra_warnings boolean, security_warnings boolean, compatibility_warnings boolean, oldtable name, newtable name, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype, without_warnings boolean, all_warnings boolean, use_incomment_options boolean, incomment_options_usage_warning boolean, constant_tracing boolean | TABLE(functionid regproc, lineno integer, statement text, sqlstate text, message text, detail text, hint text, level text, "position" integer, query text, context text) - plpgsql_check | public | plpgsql_check_function_tb | name text, relid regclass, fatal_errors boolean, other_warnings boolean, performance_warnings boolean, extra_warnings boolean, security_warnings boolean, compatibility_warnings boolean, oldtable name, newtable name, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype, without_warnings boolean, all_warnings boolean, use_incomment_options boolean, incomment_options_usage_warning boolean, 
constant_tracing boolean | TABLE(functionid regproc, lineno integer, statement text, sqlstate text, message text, detail text, hint text, level text, "position" integer, query text, context text) - plpgsql_check | public | plpgsql_check_pragma | VARIADIC name text[] | integer - plpgsql_check | public | plpgsql_check_profiler | enable boolean | boolean - plpgsql_check | public | plpgsql_check_tracer | enable boolean, verbosity text | boolean - plpgsql_check | public | plpgsql_coverage_branches | funcoid regprocedure | double precision - plpgsql_check | public | plpgsql_coverage_branches | name text | double precision - plpgsql_check | public | plpgsql_coverage_statements | funcoid regprocedure | double precision - plpgsql_check | public | plpgsql_coverage_statements | name text | double precision - plpgsql_check | public | plpgsql_profiler_function_statements_tb | funcoid regprocedure | TABLE(stmtid integer, parent_stmtid integer, parent_note text, block_num integer, lineno integer, queryid bigint, exec_stmts bigint, exec_stmts_err bigint, total_time double precision, avg_time double precision, max_time double precision, processed_rows bigint, stmtname text) - plpgsql_check | public | plpgsql_profiler_function_statements_tb | name text | TABLE(stmtid integer, parent_stmtid integer, parent_note text, block_num integer, lineno integer, queryid bigint, exec_stmts bigint, exec_stmts_err bigint, total_time double precision, avg_time double precision, max_time double precision, processed_rows bigint, stmtname text) - plpgsql_check | public | plpgsql_profiler_function_tb | funcoid regprocedure | TABLE(lineno integer, stmt_lineno integer, queryids bigint[], cmds_on_row integer, exec_stmts bigint, exec_stmts_err bigint, total_time double precision, avg_time double precision, max_time double precision[], processed_rows bigint[], source text) - plpgsql_check | public | plpgsql_profiler_function_tb | name text | TABLE(lineno integer, stmt_lineno integer, queryids bigint[], 
cmds_on_row integer, exec_stmts bigint, exec_stmts_err bigint, total_time double precision, avg_time double precision, max_time double precision[], processed_rows bigint[], source text) - plpgsql_check | public | plpgsql_profiler_functions_all | | TABLE(funcoid regprocedure, exec_count bigint, exec_stmts_err bigint, total_time double precision, avg_time double precision, stddev_time double precision, min_time double precision, max_time double precision) - plpgsql_check | public | plpgsql_profiler_install_fake_queryid_hook | | void - plpgsql_check | public | plpgsql_profiler_remove_fake_queryid_hook | | void - plpgsql_check | public | plpgsql_profiler_reset | funcoid regprocedure | void - plpgsql_check | public | plpgsql_profiler_reset_all | | void - plpgsql_check | public | plpgsql_show_dependency_tb | fnname text, relid regclass, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype | TABLE(type text, oid oid, schema text, name text, params text) - plpgsql_check | public | plpgsql_show_dependency_tb | funcoid regprocedure, relid regclass, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype | TABLE(type text, oid oid, schema text, name text, params text) - plv8 | pg_catalog | plv8_call_handler | | language_handler - plv8 | pg_catalog | plv8_call_validator | oid | void - plv8 | pg_catalog | plv8_info | | json - plv8 | pg_catalog | plv8_inline_handler | internal | void - plv8 | pg_catalog | plv8_reset | | void - plv8 | pg_catalog | plv8_version | | text - postgis | public | _postgis_deprecate | oldname text, newname text, version text | void - postgis | public | _postgis_index_extent | tbl regclass, col text | box2d - postgis | public | _postgis_join_selectivity | regclass, text, regclass, text, text | double precision - postgis | public | _postgis_pgsql_version | | text - postgis | public | _postgis_scripts_pgsql_version | | 
text - postgis | public | _postgis_selectivity | tbl regclass, att_name text, geom geometry, mode text | double precision - postgis | public | _postgis_stats | tbl regclass, att_name text, text | text - postgis | public | _st_3ddfullywithin | geom1 geometry, geom2 geometry, double precision | boolean - postgis | public | _st_3ddwithin | geom1 geometry, geom2 geometry, double precision | boolean - postgis | public | _st_3dintersects | geom1 geometry, geom2 geometry | boolean - postgis | public | _st_asgml | integer, geometry, integer, integer, text, text | text - postgis | public | _st_asx3d | integer, geometry, integer, integer, text | text - postgis | public | _st_bestsrid | geography | integer - postgis | public | _st_bestsrid | geography, geography | integer - postgis | public | _st_contains | geom1 geometry, geom2 geometry | boolean - postgis | public | _st_containsproperly | geom1 geometry, geom2 geometry | boolean - postgis | public | _st_coveredby | geog1 geography, geog2 geography | boolean - postgis | public | _st_coveredby | geom1 geometry, geom2 geometry | boolean - postgis | public | _st_covers | geog1 geography, geog2 geography | boolean - postgis | public | _st_covers | geom1 geometry, geom2 geometry | boolean - postgis | public | _st_crosses | geom1 geometry, geom2 geometry | boolean - postgis | public | _st_dfullywithin | geom1 geometry, geom2 geometry, double precision | boolean - postgis | public | _st_distancetree | geography, geography | double precision - postgis | public | _st_distancetree | geography, geography, double precision, boolean | double precision - postgis | public | _st_distanceuncached | geography, geography | double precision - postgis | public | _st_distanceuncached | geography, geography, boolean | double precision - postgis | public | _st_distanceuncached | geography, geography, double precision, boolean | double precision - postgis | public | _st_dwithin | geog1 geography, geog2 geography, tolerance double precision, 
use_spheroid boolean | boolean - postgis | public | _st_dwithin | geom1 geometry, geom2 geometry, double precision | boolean - postgis | public | _st_dwithinuncached | geography, geography, double precision | boolean - postgis | public | _st_dwithinuncached | geography, geography, double precision, boolean | boolean - postgis | public | _st_equals | geom1 geometry, geom2 geometry | boolean - postgis | public | _st_expand | geography, double precision | geography - postgis | public | _st_geomfromgml | text, integer | geometry - postgis | public | _st_intersects | geom1 geometry, geom2 geometry | boolean - postgis | public | _st_linecrossingdirection | line1 geometry, line2 geometry | integer - postgis | public | _st_longestline | geom1 geometry, geom2 geometry | geometry - postgis | public | _st_maxdistance | geom1 geometry, geom2 geometry | double precision - postgis | public | _st_orderingequals | geom1 geometry, geom2 geometry | boolean - postgis | public | _st_overlaps | geom1 geometry, geom2 geometry | boolean - postgis | public | _st_pointoutside | geography | geography - postgis | public | _st_sortablehash | geom geometry | bigint - postgis | public | _st_touches | geom1 geometry, geom2 geometry | boolean - postgis | public | _st_voronoi | g1 geometry, clip geometry, tolerance double precision, return_polygons boolean | geometry - postgis | public | _st_within | geom1 geometry, geom2 geometry | boolean - postgis | public | addauth | text | boolean - postgis | public | addgeometrycolumn | catalog_name character varying, schema_name character varying, table_name character varying, column_name character varying, new_srid_in integer, new_type character varying, new_dim integer, use_typmod boolean | text - postgis | public | addgeometrycolumn | schema_name character varying, table_name character varying, column_name character varying, new_srid integer, new_type character varying, new_dim integer, use_typmod boolean | text - postgis | public | addgeometrycolumn | 
table_name character varying, column_name character varying, new_srid integer, new_type character varying, new_dim integer, use_typmod boolean | text - postgis | public | box | box3d | box - postgis | public | box | geometry | box - postgis | public | box2d | box3d | box2d - postgis | public | box2d | geometry | box2d - postgis | public | box2d_in | cstring | box2d - postgis | public | box2d_out | box2d | cstring - postgis | public | box2df_in | cstring | box2df - postgis | public | box2df_out | box2df | cstring - postgis | public | box3d | box2d | box3d - postgis | public | box3d | geometry | box3d - postgis | public | box3d_in | cstring | box3d - postgis | public | box3d_out | box3d | cstring - postgis | public | box3dtobox | box3d | box - postgis | public | bytea | geography | bytea - postgis | public | bytea | geometry | bytea - postgis | public | checkauth | text, text | integer - postgis | public | checkauth | text, text, text | integer - postgis | public | checkauthtrigger | | trigger - postgis | public | contains_2d | box2df, box2df | boolean - postgis | public | contains_2d | box2df, geometry | boolean - postgis | public | contains_2d | geometry, box2df | boolean - postgis | public | disablelongtransactions | | text - postgis | public | dropgeometrycolumn | catalog_name character varying, schema_name character varying, table_name character varying, column_name character varying | text - postgis | public | dropgeometrycolumn | schema_name character varying, table_name character varying, column_name character varying | text - postgis | public | dropgeometrycolumn | table_name character varying, column_name character varying | text - postgis | public | dropgeometrytable | catalog_name character varying, schema_name character varying, table_name character varying | text - postgis | public | dropgeometrytable | schema_name character varying, table_name character varying | text - postgis | public | dropgeometrytable | table_name character varying | text - 
postgis | public | enablelongtransactions | | text - postgis | public | equals | geom1 geometry, geom2 geometry | boolean - postgis | public | find_srid | character varying, character varying, character varying | integer - postgis | public | geog_brin_inclusion_add_value | internal, internal, internal, internal | boolean - postgis | public | geography | bytea | geography - postgis | public | geography | geography, integer, boolean | geography - postgis | public | geography | geometry | geography - postgis | public | geography_analyze | internal | boolean - postgis | public | geography_cmp | geography, geography | integer - postgis | public | geography_distance_knn | geography, geography | double precision - postgis | public | geography_eq | geography, geography | boolean - postgis | public | geography_ge | geography, geography | boolean - postgis | public | geography_gist_compress | internal | internal - postgis | public | geography_gist_consistent | internal, geography, integer | boolean - postgis | public | geography_gist_decompress | internal | internal - postgis | public | geography_gist_distance | internal, geography, integer | double precision - postgis | public | geography_gist_penalty | internal, internal, internal | internal - postgis | public | geography_gist_picksplit | internal, internal | internal - postgis | public | geography_gist_same | box2d, box2d, internal | internal - postgis | public | geography_gist_union | bytea, internal | internal - postgis | public | geography_gt | geography, geography | boolean - postgis | public | geography_in | cstring, oid, integer | geography - postgis | public | geography_le | geography, geography | boolean - postgis | public | geography_lt | geography, geography | boolean - postgis | public | geography_out | geography | cstring - postgis | public | geography_overlaps | geography, geography | boolean - postgis | public | geography_recv | internal, oid, integer | geography - postgis | public | geography_send | 
geography | bytea - postgis | public | geography_spgist_choose_nd | internal, internal | void - postgis | public | geography_spgist_compress_nd | internal | internal - postgis | public | geography_spgist_config_nd | internal, internal | void - postgis | public | geography_spgist_inner_consistent_nd | internal, internal | void - postgis | public | geography_spgist_leaf_consistent_nd | internal, internal | boolean - postgis | public | geography_spgist_picksplit_nd | internal, internal | void - postgis | public | geography_typmod_in | cstring[] | integer - postgis | public | geography_typmod_out | integer | cstring - postgis | public | geom2d_brin_inclusion_add_value | internal, internal, internal, internal | boolean - postgis | public | geom3d_brin_inclusion_add_value | internal, internal, internal, internal | boolean - postgis | public | geom4d_brin_inclusion_add_value | internal, internal, internal, internal | boolean - postgis | public | geometry | box2d | geometry - postgis | public | geometry | box3d | geometry - postgis | public | geometry | bytea | geometry - postgis | public | geometry | geography | geometry - postgis | public | geometry | geometry, integer, boolean | geometry - postgis | public | geometry | path | geometry - postgis | public | geometry | point | geometry - postgis | public | geometry | polygon | geometry - postgis | public | geometry | text | geometry - postgis | public | geometry_above | geom1 geometry, geom2 geometry | boolean - postgis | public | geometry_analyze | internal | boolean - postgis | public | geometry_below | geom1 geometry, geom2 geometry | boolean - postgis | public | geometry_cmp | geom1 geometry, geom2 geometry | integer - postgis | public | geometry_contained_3d | geom1 geometry, geom2 geometry | boolean - postgis | public | geometry_contains | geom1 geometry, geom2 geometry | boolean - postgis | public | geometry_contains_3d | geom1 geometry, geom2 geometry | boolean - postgis | public | geometry_contains_nd | geometry, 
geometry | boolean - postgis | public | geometry_distance_box | geom1 geometry, geom2 geometry | double precision - postgis | public | geometry_distance_centroid | geom1 geometry, geom2 geometry | double precision - postgis | public | geometry_distance_centroid_nd | geometry, geometry | double precision - postgis | public | geometry_distance_cpa | geometry, geometry | double precision - postgis | public | geometry_eq | geom1 geometry, geom2 geometry | boolean - postgis | public | geometry_ge | geom1 geometry, geom2 geometry | boolean - postgis | public | geometry_gist_compress_2d | internal | internal - postgis | public | geometry_gist_compress_nd | internal | internal - postgis | public | geometry_gist_consistent_2d | internal, geometry, integer | boolean - postgis | public | geometry_gist_consistent_nd | internal, geometry, integer | boolean - postgis | public | geometry_gist_decompress_2d | internal | internal - postgis | public | geometry_gist_decompress_nd | internal | internal - postgis | public | geometry_gist_distance_2d | internal, geometry, integer | double precision - postgis | public | geometry_gist_distance_nd | internal, geometry, integer | double precision - postgis | public | geometry_gist_penalty_2d | internal, internal, internal | internal - postgis | public | geometry_gist_penalty_nd | internal, internal, internal | internal - postgis | public | geometry_gist_picksplit_2d | internal, internal | internal - postgis | public | geometry_gist_picksplit_nd | internal, internal | internal - postgis | public | geometry_gist_same_2d | geom1 geometry, geom2 geometry, internal | internal - postgis | public | geometry_gist_same_nd | geometry, geometry, internal | internal - postgis | public | geometry_gist_sortsupport_2d | internal | void - postgis | public | geometry_gist_union_2d | bytea, internal | internal - postgis | public | geometry_gist_union_nd | bytea, internal | internal - postgis | public | geometry_gt | geom1 geometry, geom2 geometry | boolean - 
postgis | public | geometry_hash | geometry | integer - postgis | public | geometry_in | cstring | geometry - postgis | public | geometry_le | geom1 geometry, geom2 geometry | boolean - postgis | public | geometry_left | geom1 geometry, geom2 geometry | boolean - postgis | public | geometry_lt | geom1 geometry, geom2 geometry | boolean - postgis | public | geometry_out | geometry | cstring - postgis | public | geometry_overabove | geom1 geometry, geom2 geometry | boolean - postgis | public | geometry_overbelow | geom1 geometry, geom2 geometry | boolean - postgis | public | geometry_overlaps | geom1 geometry, geom2 geometry | boolean - postgis | public | geometry_overlaps_3d | geom1 geometry, geom2 geometry | boolean - postgis | public | geometry_overlaps_nd | geometry, geometry | boolean - postgis | public | geometry_overleft | geom1 geometry, geom2 geometry | boolean - postgis | public | geometry_overright | geom1 geometry, geom2 geometry | boolean - postgis | public | geometry_recv | internal | geometry - postgis | public | geometry_right | geom1 geometry, geom2 geometry | boolean - postgis | public | geometry_same | geom1 geometry, geom2 geometry | boolean - postgis | public | geometry_same_3d | geom1 geometry, geom2 geometry | boolean - postgis | public | geometry_same_nd | geometry, geometry | boolean - postgis | public | geometry_send | geometry | bytea - postgis | public | geometry_sortsupport | internal | void - postgis | public | geometry_spgist_choose_2d | internal, internal | void - postgis | public | geometry_spgist_choose_3d | internal, internal | void - postgis | public | geometry_spgist_choose_nd | internal, internal | void - postgis | public | geometry_spgist_compress_2d | internal | internal - postgis | public | geometry_spgist_compress_3d | internal | internal - postgis | public | geometry_spgist_compress_nd | internal | internal - postgis | public | geometry_spgist_config_2d | internal, internal | void - postgis | public | 
geometry_spgist_config_3d | internal, internal | void - postgis | public | geometry_spgist_config_nd | internal, internal | void - postgis | public | geometry_spgist_inner_consistent_2d | internal, internal | void - postgis | public | geometry_spgist_inner_consistent_3d | internal, internal | void - postgis | public | geometry_spgist_inner_consistent_nd | internal, internal | void - postgis | public | geometry_spgist_leaf_consistent_2d | internal, internal | boolean - postgis | public | geometry_spgist_leaf_consistent_3d | internal, internal | boolean - postgis | public | geometry_spgist_leaf_consistent_nd | internal, internal | boolean - postgis | public | geometry_spgist_picksplit_2d | internal, internal | void - postgis | public | geometry_spgist_picksplit_3d | internal, internal | void - postgis | public | geometry_spgist_picksplit_nd | internal, internal | void - postgis | public | geometry_typmod_in | cstring[] | integer - postgis | public | geometry_typmod_out | integer | cstring - postgis | public | geometry_within | geom1 geometry, geom2 geometry | boolean - postgis | public | geometry_within_nd | geometry, geometry | boolean - postgis | public | geometrytype | geography | text - postgis | public | geometrytype | geometry | text - postgis | public | geomfromewkb | bytea | geometry - postgis | public | geomfromewkt | text | geometry - postgis | public | get_proj4_from_srid | integer | text - postgis | public | gettransactionid | | xid - postgis | public | gidx_in | cstring | gidx - postgis | public | gidx_out | gidx | cstring - postgis | public | gserialized_gist_joinsel_2d | internal, oid, internal, smallint | double precision - postgis | public | gserialized_gist_joinsel_nd | internal, oid, internal, smallint | double precision - postgis | public | gserialized_gist_sel_2d | internal, oid, internal, integer | double precision - postgis | public | gserialized_gist_sel_nd | internal, oid, internal, integer | double precision - postgis | public | 
is_contained_2d | box2df, box2df | boolean - postgis | public | is_contained_2d | box2df, geometry | boolean - postgis | public | is_contained_2d | geometry, box2df | boolean - postgis | public | json | geometry | json - postgis | public | jsonb | geometry | jsonb - postgis | public | lockrow | text, text, text | integer - postgis | public | lockrow | text, text, text, text | integer - postgis | public | lockrow | text, text, text, text, timestamp without time zone | integer - postgis | public | lockrow | text, text, text, timestamp without time zone | integer - postgis | public | longtransactionsenabled | | boolean - postgis | public | overlaps_2d | box2df, box2df | boolean - postgis | public | overlaps_2d | box2df, geometry | boolean - postgis | public | overlaps_2d | geometry, box2df | boolean - postgis | public | overlaps_geog | geography, gidx | boolean - postgis | public | overlaps_geog | gidx, geography | boolean - postgis | public | overlaps_geog | gidx, gidx | boolean - postgis | public | overlaps_nd | geometry, gidx | boolean - postgis | public | overlaps_nd | gidx, geometry | boolean - postgis | public | overlaps_nd | gidx, gidx | boolean - postgis | public | path | geometry | path - postgis | public | pgis_asflatgeobuf_finalfn | internal | bytea - postgis | public | pgis_asflatgeobuf_transfn | internal, anyelement | internal - postgis | public | pgis_asflatgeobuf_transfn | internal, anyelement, boolean | internal - postgis | public | pgis_asflatgeobuf_transfn | internal, anyelement, boolean, text | internal - postgis | public | pgis_asgeobuf_finalfn | internal | bytea - postgis | public | pgis_asgeobuf_transfn | internal, anyelement | internal - postgis | public | pgis_asgeobuf_transfn | internal, anyelement, text | internal - postgis | public | pgis_asmvt_combinefn | internal, internal | internal - postgis | public | pgis_asmvt_deserialfn | bytea, internal | internal - postgis | public | pgis_asmvt_finalfn | internal | bytea - postgis | public | 
pgis_asmvt_serialfn | internal | bytea - postgis | public | pgis_asmvt_transfn | internal, anyelement | internal - postgis | public | pgis_asmvt_transfn | internal, anyelement, text | internal - postgis | public | pgis_asmvt_transfn | internal, anyelement, text, integer | internal - postgis | public | pgis_asmvt_transfn | internal, anyelement, text, integer, text | internal - postgis | public | pgis_asmvt_transfn | internal, anyelement, text, integer, text, text | internal - postgis | public | pgis_geometry_accum_transfn | internal, geometry | internal - postgis | public | pgis_geometry_accum_transfn | internal, geometry, double precision | internal - postgis | public | pgis_geometry_accum_transfn | internal, geometry, double precision, integer | internal - postgis | public | pgis_geometry_clusterintersecting_finalfn | internal | geometry[] - postgis | public | pgis_geometry_clusterwithin_finalfn | internal | geometry[] - postgis | public | pgis_geometry_collect_finalfn | internal | geometry - postgis | public | pgis_geometry_makeline_finalfn | internal | geometry - postgis | public | pgis_geometry_polygonize_finalfn | internal | geometry - postgis | public | pgis_geometry_union_parallel_combinefn | internal, internal | internal - postgis | public | pgis_geometry_union_parallel_deserialfn | bytea, internal | internal - postgis | public | pgis_geometry_union_parallel_finalfn | internal | geometry - postgis | public | pgis_geometry_union_parallel_serialfn | internal | bytea - postgis | public | pgis_geometry_union_parallel_transfn | internal, geometry | internal - postgis | public | pgis_geometry_union_parallel_transfn | internal, geometry, double precision | internal - postgis | public | point | geometry | point - postgis | public | polygon | geometry | polygon - postgis | public | populate_geometry_columns | tbl_oid oid, use_typmod boolean | integer - postgis | public | populate_geometry_columns | use_typmod boolean | text - postgis | public | postgis_addbbox | 
geometry | geometry - postgis | public | postgis_cache_bbox | | trigger - postgis | public | postgis_constraint_dims | geomschema text, geomtable text, geomcolumn text | integer - postgis | public | postgis_constraint_srid | geomschema text, geomtable text, geomcolumn text | integer - postgis | public | postgis_constraint_type | geomschema text, geomtable text, geomcolumn text | character varying - postgis | public | postgis_dropbbox | geometry | geometry - postgis | public | postgis_extensions_upgrade | | text - postgis | public | postgis_full_version | | text - postgis | public | postgis_geos_noop | geometry | geometry - postgis | public | postgis_geos_version | | text - postgis | public | postgis_getbbox | geometry | box2d - postgis | public | postgis_hasbbox | geometry | boolean - postgis | public | postgis_index_supportfn | internal | internal - postgis | public | postgis_lib_build_date | | text - postgis | public | postgis_lib_revision | | text - postgis | public | postgis_lib_version | | text - postgis | public | postgis_libjson_version | | text - postgis | public | postgis_liblwgeom_version | | text - postgis | public | postgis_libprotobuf_version | | text - postgis | public | postgis_libxml_version | | text - postgis | public | postgis_noop | geometry | geometry - postgis | public | postgis_proj_version | | text - postgis | public | postgis_scripts_build_date | | text - postgis | public | postgis_scripts_installed | | text - postgis | public | postgis_scripts_released | | text - postgis | public | postgis_svn_version | | text - postgis | public | postgis_transform_geometry | geom geometry, text, text, integer | geometry - postgis | public | postgis_type_name | geomname character varying, coord_dimension integer, use_new_name boolean | character varying - postgis | public | postgis_typmod_dims | integer | integer - postgis | public | postgis_typmod_srid | integer | integer - postgis | public | postgis_typmod_type | integer | text - postgis | public | 
postgis_version | | text - postgis | public | postgis_wagyu_version | | text - postgis | public | spheroid_in | cstring | spheroid - postgis | public | spheroid_out | spheroid | cstring - postgis | public | st_3dclosestpoint | geom1 geometry, geom2 geometry | geometry - postgis | public | st_3ddfullywithin | geom1 geometry, geom2 geometry, double precision | boolean - postgis | public | st_3ddistance | geom1 geometry, geom2 geometry | double precision - postgis | public | st_3ddwithin | geom1 geometry, geom2 geometry, double precision | boolean - postgis | public | st_3dextent | geometry | box3d - postgis | public | st_3dintersects | geom1 geometry, geom2 geometry | boolean - postgis | public | st_3dlength | geometry | double precision - postgis | public | st_3dlineinterpolatepoint | geometry, double precision | geometry - postgis | public | st_3dlongestline | geom1 geometry, geom2 geometry | geometry - postgis | public | st_3dmakebox | geom1 geometry, geom2 geometry | box3d - postgis | public | st_3dmaxdistance | geom1 geometry, geom2 geometry | double precision - postgis | public | st_3dperimeter | geometry | double precision - postgis | public | st_3dshortestline | geom1 geometry, geom2 geometry | geometry - postgis | public | st_addmeasure | geometry, double precision, double precision | geometry - postgis | public | st_addpoint | geom1 geometry, geom2 geometry | geometry - postgis | public | st_addpoint | geom1 geometry, geom2 geometry, integer | geometry - postgis | public | st_affine | geometry, double precision, double precision, double precision, double precision, double precision, double precision | geometry - postgis | public | st_affine | geometry, double precision, double precision, double precision, double precision, double precision, double precision, double precision, double precision, double precision, double precision, double precision, double precision | geometry - postgis | public | st_angle | line1 geometry, line2 geometry | double precision - 
postgis | public | st_angle | pt1 geometry, pt2 geometry, pt3 geometry, pt4 geometry | double precision - postgis | public | st_area | geog geography, use_spheroid boolean | double precision - postgis | public | st_area | geometry | double precision - postgis | public | st_area | text | double precision - postgis | public | st_area2d | geometry | double precision - postgis | public | st_asbinary | geography | bytea - postgis | public | st_asbinary | geography, text | bytea - postgis | public | st_asbinary | geometry | bytea - postgis | public | st_asbinary | geometry, text | bytea - postgis | public | st_asencodedpolyline | geom geometry, nprecision integer | text - postgis | public | st_asewkb | geometry | bytea - postgis | public | st_asewkb | geometry, text | bytea - postgis | public | st_asewkt | geography | text - postgis | public | st_asewkt | geography, integer | text - postgis | public | st_asewkt | geometry | text - postgis | public | st_asewkt | geometry, integer | text - postgis | public | st_asewkt | text | text - postgis | public | st_asflatgeobuf | anyelement | bytea - postgis | public | st_asflatgeobuf | anyelement, boolean | bytea - postgis | public | st_asflatgeobuf | anyelement, boolean, text | bytea - postgis | public | st_asgeobuf | anyelement | bytea - postgis | public | st_asgeobuf | anyelement, text | bytea - postgis | public | st_asgeojson | geog geography, maxdecimaldigits integer, options integer | text - postgis | public | st_asgeojson | geom geometry, maxdecimaldigits integer, options integer | text - postgis | public | st_asgeojson | r record, geom_column text, maxdecimaldigits integer, pretty_bool boolean | text - postgis | public | st_asgeojson | text | text - postgis | public | st_asgml | geog geography, maxdecimaldigits integer, options integer, nprefix text, id text | text - postgis | public | st_asgml | geom geometry, maxdecimaldigits integer, options integer | text - postgis | public | st_asgml | text | text - postgis | public | 
st_asgml | version integer, geog geography, maxdecimaldigits integer, options integer, nprefix text, id text | text - postgis | public | st_asgml | version integer, geom geometry, maxdecimaldigits integer, options integer, nprefix text, id text | text - postgis | public | st_ashexewkb | geometry | text - postgis | public | st_ashexewkb | geometry, text | text - postgis | public | st_askml | geog geography, maxdecimaldigits integer, nprefix text | text - postgis | public | st_askml | geom geometry, maxdecimaldigits integer, nprefix text | text - postgis | public | st_askml | text | text - postgis | public | st_aslatlontext | geom geometry, tmpl text | text - postgis | public | st_asmarc21 | geom geometry, format text | text - postgis | public | st_asmvt | anyelement | bytea - postgis | public | st_asmvt | anyelement, text | bytea - postgis | public | st_asmvt | anyelement, text, integer | bytea - postgis | public | st_asmvt | anyelement, text, integer, text | bytea - postgis | public | st_asmvt | anyelement, text, integer, text, text | bytea - postgis | public | st_asmvtgeom | geom geometry, bounds box2d, extent integer, buffer integer, clip_geom boolean | geometry - postgis | public | st_assvg | geog geography, rel integer, maxdecimaldigits integer | text - postgis | public | st_assvg | geom geometry, rel integer, maxdecimaldigits integer | text - postgis | public | st_assvg | text | text - postgis | public | st_astext | geography | text - postgis | public | st_astext | geography, integer | text - postgis | public | st_astext | geometry | text - postgis | public | st_astext | geometry, integer | text - postgis | public | st_astext | text | text - postgis | public | st_astwkb | geom geometry, prec integer, prec_z integer, prec_m integer, with_sizes boolean, with_boxes boolean | bytea - postgis | public | st_astwkb | geom geometry[], ids bigint[], prec integer, prec_z integer, prec_m integer, with_sizes boolean, with_boxes boolean | bytea - postgis | public | 
st_asx3d | geom geometry, maxdecimaldigits integer, options integer | text - postgis | public | st_azimuth | geog1 geography, geog2 geography | double precision - postgis | public | st_azimuth | geom1 geometry, geom2 geometry | double precision - postgis | public | st_bdmpolyfromtext | text, integer | geometry - postgis | public | st_bdpolyfromtext | text, integer | geometry - postgis | public | st_boundary | geometry | geometry - postgis | public | st_boundingdiagonal | geom geometry, fits boolean | geometry - postgis | public | st_box2dfromgeohash | text, integer | box2d - postgis | public | st_buffer | geography, double precision | geography - postgis | public | st_buffer | geography, double precision, integer | geography - postgis | public | st_buffer | geography, double precision, text | geography - postgis | public | st_buffer | geom geometry, radius double precision, options text | geometry - postgis | public | st_buffer | geom geometry, radius double precision, quadsegs integer | geometry - postgis | public | st_buffer | text, double precision | geometry - postgis | public | st_buffer | text, double precision, integer | geometry - postgis | public | st_buffer | text, double precision, text | geometry - postgis | public | st_buildarea | geometry | geometry - postgis | public | st_centroid | geography, use_spheroid boolean | geography - postgis | public | st_centroid | geometry | geometry - postgis | public | st_centroid | text | geometry - postgis | public | st_chaikinsmoothing | geometry, integer, boolean | geometry - postgis | public | st_cleangeometry | geometry | geometry - postgis | public | st_clipbybox2d | geom geometry, box box2d | geometry - postgis | public | st_closestpoint | geom1 geometry, geom2 geometry | geometry - postgis | public | st_closestpointofapproach | geometry, geometry | double precision - postgis | public | st_clusterdbscan | geometry, eps double precision, minpoints integer | integer - postgis | public | st_clusterintersecting | 
geometry | geometry[] - postgis | public | st_clusterintersecting | geometry[] | geometry[] - postgis | public | st_clusterkmeans | geom geometry, k integer, max_radius double precision | integer - postgis | public | st_clusterwithin | geometry, double precision | geometry[] - postgis | public | st_clusterwithin | geometry[], double precision | geometry[] - postgis | public | st_collect | geom1 geometry, geom2 geometry | geometry - postgis | public | st_collect | geometry | geometry - postgis | public | st_collect | geometry[] | geometry - postgis | public | st_collectionextract | geometry | geometry - postgis | public | st_collectionextract | geometry, integer | geometry - postgis | public | st_collectionhomogenize | geometry | geometry - postgis | public | st_combinebbox | box2d, geometry | box2d - postgis | public | st_combinebbox | box3d, box3d | box3d - postgis | public | st_combinebbox | box3d, geometry | box3d - postgis | public | st_concavehull | param_geom geometry, param_pctconvex double precision, param_allow_holes boolean | geometry - postgis | public | st_contains | geom1 geometry, geom2 geometry | boolean - postgis | public | st_containsproperly | geom1 geometry, geom2 geometry | boolean - postgis | public | st_convexhull | geometry | geometry - postgis | public | st_coorddim | geometry geometry | smallint - postgis | public | st_coveredby | geog1 geography, geog2 geography | boolean - postgis | public | st_coveredby | geom1 geometry, geom2 geometry | boolean - postgis | public | st_coveredby | text, text | boolean - postgis | public | st_covers | geog1 geography, geog2 geography | boolean - postgis | public | st_covers | geom1 geometry, geom2 geometry | boolean - postgis | public | st_covers | text, text | boolean - postgis | public | st_cpawithin | geometry, geometry, double precision | boolean - postgis | public | st_crosses | geom1 geometry, geom2 geometry | boolean - postgis | public | st_curvetoline | geom geometry, tol double precision, toltype 
integer, flags integer | geometry - postgis | public | st_delaunaytriangles | g1 geometry, tolerance double precision, flags integer | geometry - postgis | public | st_dfullywithin | geom1 geometry, geom2 geometry, double precision | boolean - postgis | public | st_difference | geom1 geometry, geom2 geometry, gridsize double precision | geometry - postgis | public | st_dimension | geometry | integer - postgis | public | st_disjoint | geom1 geometry, geom2 geometry | boolean - postgis | public | st_distance | geog1 geography, geog2 geography, use_spheroid boolean | double precision - postgis | public | st_distance | geom1 geometry, geom2 geometry | double precision - postgis | public | st_distance | text, text | double precision - postgis | public | st_distancecpa | geometry, geometry | double precision - postgis | public | st_distancesphere | geom1 geometry, geom2 geometry | double precision - postgis | public | st_distancesphere | geom1 geometry, geom2 geometry, radius double precision | double precision - postgis | public | st_distancespheroid | geom1 geometry, geom2 geometry | double precision - postgis | public | st_distancespheroid | geom1 geometry, geom2 geometry, spheroid | double precision - postgis | public | st_dump | geometry | SETOF geometry_dump - postgis | public | st_dumppoints | geometry | SETOF geometry_dump - postgis | public | st_dumprings | geometry | SETOF geometry_dump - postgis | public | st_dumpsegments | geometry | SETOF geometry_dump - postgis | public | st_dwithin | geog1 geography, geog2 geography, tolerance double precision, use_spheroid boolean | boolean - postgis | public | st_dwithin | geom1 geometry, geom2 geometry, double precision | boolean - postgis | public | st_dwithin | text, text, double precision | boolean - postgis | public | st_endpoint | geometry | geometry - postgis | public | st_envelope | geometry | geometry - postgis | public | st_equals | geom1 geometry, geom2 geometry | boolean - postgis | public | 
st_estimatedextent | text, text | box2d - postgis | public | st_estimatedextent | text, text, text | box2d - postgis | public | st_estimatedextent | text, text, text, boolean | box2d - postgis | public | st_expand | box box2d, dx double precision, dy double precision | box2d - postgis | public | st_expand | box box3d, dx double precision, dy double precision, dz double precision | box3d - postgis | public | st_expand | box2d, double precision | box2d - postgis | public | st_expand | box3d, double precision | box3d - postgis | public | st_expand | geom geometry, dx double precision, dy double precision, dz double precision, dm double precision | geometry - postgis | public | st_expand | geometry, double precision | geometry - postgis | public | st_extent | geometry | box2d - postgis | public | st_exteriorring | geometry | geometry - postgis | public | st_filterbym | geometry, double precision, double precision, boolean | geometry - postgis | public | st_findextent | text, text | box2d - postgis | public | st_findextent | text, text, text | box2d - postgis | public | st_flipcoordinates | geometry | geometry - postgis | public | st_force2d | geometry | geometry - postgis | public | st_force3d | geom geometry, zvalue double precision | geometry - postgis | public | st_force3dm | geom geometry, mvalue double precision | geometry - postgis | public | st_force3dz | geom geometry, zvalue double precision | geometry - postgis | public | st_force4d | geom geometry, zvalue double precision, mvalue double precision | geometry - postgis | public | st_forcecollection | geometry | geometry - postgis | public | st_forcecurve | geometry | geometry - postgis | public | st_forcepolygonccw | geometry | geometry - postgis | public | st_forcepolygoncw | geometry | geometry - postgis | public | st_forcerhr | geometry | geometry - postgis | public | st_forcesfs | geometry | geometry - postgis | public | st_forcesfs | geometry, version text | geometry - postgis | public | 
st_frechetdistance | geom1 geometry, geom2 geometry, double precision | double precision - postgis | public | st_fromflatgeobuf | anyelement, bytea | SETOF anyelement - postgis | public | st_fromflatgeobuftotable | text, text, bytea | void - postgis | public | st_generatepoints | area geometry, npoints integer | geometry - postgis | public | st_generatepoints | area geometry, npoints integer, seed integer | geometry - postgis | public | st_geogfromtext | text | geography - postgis | public | st_geogfromwkb | bytea | geography - postgis | public | st_geographyfromtext | text | geography - postgis | public | st_geohash | geog geography, maxchars integer | text - postgis | public | st_geohash | geom geometry, maxchars integer | text - postgis | public | st_geomcollfromtext | text | geometry - postgis | public | st_geomcollfromtext | text, integer | geometry - postgis | public | st_geomcollfromwkb | bytea | geometry - postgis | public | st_geomcollfromwkb | bytea, integer | geometry - postgis | public | st_geometricmedian | g geometry, tolerance double precision, max_iter integer, fail_if_not_converged boolean | geometry - postgis | public | st_geometryfromtext | text | geometry - postgis | public | st_geometryfromtext | text, integer | geometry - postgis | public | st_geometryn | geometry, integer | geometry - postgis | public | st_geometrytype | geometry | text - postgis | public | st_geomfromewkb | bytea | geometry - postgis | public | st_geomfromewkt | text | geometry - postgis | public | st_geomfromgeohash | text, integer | geometry - postgis | public | st_geomfromgeojson | json | geometry - postgis | public | st_geomfromgeojson | jsonb | geometry - postgis | public | st_geomfromgeojson | text | geometry - postgis | public | st_geomfromgml | text | geometry - postgis | public | st_geomfromgml | text, integer | geometry - postgis | public | st_geomfromkml | text | geometry - postgis | public | st_geomfrommarc21 | marc21xml text | geometry - postgis | public | 
st_geomfromtext | text | geometry - postgis | public | st_geomfromtext | text, integer | geometry - postgis | public | st_geomfromtwkb | bytea | geometry - postgis | public | st_geomfromwkb | bytea | geometry - postgis | public | st_geomfromwkb | bytea, integer | geometry - postgis | public | st_gmltosql | text | geometry - postgis | public | st_gmltosql | text, integer | geometry - postgis | public | st_hasarc | geometry geometry | boolean - postgis | public | st_hausdorffdistance | geom1 geometry, geom2 geometry | double precision - postgis | public | st_hausdorffdistance | geom1 geometry, geom2 geometry, double precision | double precision - postgis | public | st_hexagon | size double precision, cell_i integer, cell_j integer, origin geometry | geometry - postgis | public | st_hexagongrid | size double precision, bounds geometry, OUT geom geometry, OUT i integer, OUT j integer | SETOF record - postgis | public | st_interiorringn | geometry, integer | geometry - postgis | public | st_interpolatepoint | line geometry, point geometry | double precision - postgis | public | st_intersection | geography, geography | geography - postgis | public | st_intersection | geom1 geometry, geom2 geometry, gridsize double precision | geometry - postgis | public | st_intersection | text, text | geometry - postgis | public | st_intersects | geog1 geography, geog2 geography | boolean - postgis | public | st_intersects | geom1 geometry, geom2 geometry | boolean - postgis | public | st_intersects | text, text | boolean - postgis | public | st_isclosed | geometry | boolean - postgis | public | st_iscollection | geometry | boolean - postgis | public | st_isempty | geometry | boolean - postgis | public | st_ispolygonccw | geometry | boolean - postgis | public | st_ispolygoncw | geometry | boolean - postgis | public | st_isring | geometry | boolean - postgis | public | st_issimple | geometry | boolean - postgis | public | st_isvalid | geometry | boolean - postgis | public | st_isvalid | 
geometry, integer | boolean - postgis | public | st_isvaliddetail | geom geometry, flags integer | valid_detail - postgis | public | st_isvalidreason | geometry | text - postgis | public | st_isvalidreason | geometry, integer | text - postgis | public | st_isvalidtrajectory | geometry | boolean - postgis | public | st_length | geog geography, use_spheroid boolean | double precision - postgis | public | st_length | geometry | double precision - postgis | public | st_length | text | double precision - postgis | public | st_length2d | geometry | double precision - postgis | public | st_length2dspheroid | geometry, spheroid | double precision - postgis | public | st_lengthspheroid | geometry, spheroid | double precision - postgis | public | st_letters | letters text, font json | geometry - postgis | public | st_linecrossingdirection | line1 geometry, line2 geometry | integer - postgis | public | st_linefromencodedpolyline | txtin text, nprecision integer | geometry - postgis | public | st_linefrommultipoint | geometry | geometry - postgis | public | st_linefromtext | text | geometry - postgis | public | st_linefromtext | text, integer | geometry - postgis | public | st_linefromwkb | bytea | geometry - postgis | public | st_linefromwkb | bytea, integer | geometry - postgis | public | st_lineinterpolatepoint | geometry, double precision | geometry - postgis | public | st_lineinterpolatepoints | geometry, double precision, repeat boolean | geometry - postgis | public | st_linelocatepoint | geom1 geometry, geom2 geometry | double precision - postgis | public | st_linemerge | geometry | geometry - postgis | public | st_linemerge | geometry, boolean | geometry - postgis | public | st_linestringfromwkb | bytea | geometry - postgis | public | st_linestringfromwkb | bytea, integer | geometry - postgis | public | st_linesubstring | geometry, double precision, double precision | geometry - postgis | public | st_linetocurve | geometry geometry | geometry - postgis | public | 
st_locatealong | geometry geometry, measure double precision, leftrightoffset double precision | geometry - postgis | public | st_locatebetween | geometry geometry, frommeasure double precision, tomeasure double precision, leftrightoffset double precision | geometry - postgis | public | st_locatebetweenelevations | geometry geometry, fromelevation double precision, toelevation double precision | geometry - postgis | public | st_longestline | geom1 geometry, geom2 geometry | geometry - postgis | public | st_m | geometry | double precision - postgis | public | st_makebox2d | geom1 geometry, geom2 geometry | box2d - postgis | public | st_makeenvelope | double precision, double precision, double precision, double precision, integer | geometry - postgis | public | st_makeline | geom1 geometry, geom2 geometry | geometry - postgis | public | st_makeline | geometry | geometry - postgis | public | st_makeline | geometry[] | geometry - postgis | public | st_makepoint | double precision, double precision | geometry - postgis | public | st_makepoint | double precision, double precision, double precision | geometry - postgis | public | st_makepoint | double precision, double precision, double precision, double precision | geometry - postgis | public | st_makepointm | double precision, double precision, double precision | geometry - postgis | public | st_makepolygon | geometry | geometry - postgis | public | st_makepolygon | geometry, geometry[] | geometry - postgis | public | st_makevalid | geom geometry, params text | geometry - postgis | public | st_makevalid | geometry | geometry - postgis | public | st_maxdistance | geom1 geometry, geom2 geometry | double precision - postgis | public | st_maximuminscribedcircle | geometry, OUT center geometry, OUT nearest geometry, OUT radius double precision | record - postgis | public | st_memcollect | geometry | geometry - postgis | public | st_memsize | geometry | integer - postgis | public | st_memunion | geometry | geometry - postgis 
| public | st_minimumboundingcircle | inputgeom geometry, segs_per_quarter integer | geometry - postgis | public | st_minimumboundingradius | geometry, OUT center geometry, OUT radius double precision | record - postgis | public | st_minimumclearance | geometry | double precision - postgis | public | st_minimumclearanceline | geometry | geometry - postgis | public | st_mlinefromtext | text | geometry - postgis | public | st_mlinefromtext | text, integer | geometry - postgis | public | st_mlinefromwkb | bytea | geometry - postgis | public | st_mlinefromwkb | bytea, integer | geometry - postgis | public | st_mpointfromtext | text | geometry - postgis | public | st_mpointfromtext | text, integer | geometry - postgis | public | st_mpointfromwkb | bytea | geometry - postgis | public | st_mpointfromwkb | bytea, integer | geometry - postgis | public | st_mpolyfromtext | text | geometry - postgis | public | st_mpolyfromtext | text, integer | geometry - postgis | public | st_mpolyfromwkb | bytea | geometry - postgis | public | st_mpolyfromwkb | bytea, integer | geometry - postgis | public | st_multi | geometry | geometry - postgis | public | st_multilinefromwkb | bytea | geometry - postgis | public | st_multilinestringfromtext | text | geometry - postgis | public | st_multilinestringfromtext | text, integer | geometry - postgis | public | st_multipointfromtext | text | geometry - postgis | public | st_multipointfromwkb | bytea | geometry - postgis | public | st_multipointfromwkb | bytea, integer | geometry - postgis | public | st_multipolyfromwkb | bytea | geometry - postgis | public | st_multipolyfromwkb | bytea, integer | geometry - postgis | public | st_multipolygonfromtext | text | geometry - postgis | public | st_multipolygonfromtext | text, integer | geometry - postgis | public | st_ndims | geometry | smallint - postgis | public | st_node | g geometry | geometry - postgis | public | st_normalize | geom geometry | geometry - postgis | public | st_npoints | geometry | 
integer - postgis | public | st_nrings | geometry | integer - postgis | public | st_numgeometries | geometry | integer - postgis | public | st_numinteriorring | geometry | integer - postgis | public | st_numinteriorrings | geometry | integer - postgis | public | st_numpatches | geometry | integer - postgis | public | st_numpoints | geometry | integer - postgis | public | st_offsetcurve | line geometry, distance double precision, params text | geometry - postgis | public | st_orderingequals | geom1 geometry, geom2 geometry | boolean - postgis | public | st_orientedenvelope | geometry | geometry - postgis | public | st_overlaps | geom1 geometry, geom2 geometry | boolean - postgis | public | st_patchn | geometry, integer | geometry - postgis | public | st_perimeter | geog geography, use_spheroid boolean | double precision - postgis | public | st_perimeter | geometry | double precision - postgis | public | st_perimeter2d | geometry | double precision - postgis | public | st_point | double precision, double precision | geometry - postgis | public | st_point | double precision, double precision, srid integer | geometry - postgis | public | st_pointfromgeohash | text, integer | geometry - postgis | public | st_pointfromtext | text | geometry - postgis | public | st_pointfromtext | text, integer | geometry - postgis | public | st_pointfromwkb | bytea | geometry - postgis | public | st_pointfromwkb | bytea, integer | geometry - postgis | public | st_pointinsidecircle | geometry, double precision, double precision, double precision | boolean - postgis | public | st_pointm | xcoordinate double precision, ycoordinate double precision, mcoordinate double precision, srid integer | geometry - postgis | public | st_pointn | geometry, integer | geometry - postgis | public | st_pointonsurface | geometry | geometry - postgis | public | st_points | geometry | geometry - postgis | public | st_pointz | xcoordinate double precision, ycoordinate double precision, zcoordinate double 
precision, srid integer | geometry - postgis | public | st_pointzm | xcoordinate double precision, ycoordinate double precision, zcoordinate double precision, mcoordinate double precision, srid integer | geometry - postgis | public | st_polyfromtext | text | geometry - postgis | public | st_polyfromtext | text, integer | geometry - postgis | public | st_polyfromwkb | bytea | geometry - postgis | public | st_polyfromwkb | bytea, integer | geometry - postgis | public | st_polygon | geometry, integer | geometry - postgis | public | st_polygonfromtext | text | geometry - postgis | public | st_polygonfromtext | text, integer | geometry - postgis | public | st_polygonfromwkb | bytea | geometry - postgis | public | st_polygonfromwkb | bytea, integer | geometry - postgis | public | st_polygonize | geometry | geometry - postgis | public | st_polygonize | geometry[] | geometry - postgis | public | st_project | geog geography, distance double precision, azimuth double precision | geography - postgis | public | st_quantizecoordinates | g geometry, prec_x integer, prec_y integer, prec_z integer, prec_m integer | geometry - postgis | public | st_reduceprecision | geom geometry, gridsize double precision | geometry - postgis | public | st_relate | geom1 geometry, geom2 geometry | text - postgis | public | st_relate | geom1 geometry, geom2 geometry, integer | text - postgis | public | st_relate | geom1 geometry, geom2 geometry, text | boolean - postgis | public | st_relatematch | text, text | boolean - postgis | public | st_removepoint | geometry, integer | geometry - postgis | public | st_removerepeatedpoints | geom geometry, tolerance double precision | geometry - postgis | public | st_reverse | geometry | geometry - postgis | public | st_rotate | geometry, double precision | geometry - postgis | public | st_rotate | geometry, double precision, double precision, double precision | geometry - postgis | public | st_rotate | geometry, double precision, geometry | geometry - postgis 
| public | st_rotatex | geometry, double precision | geometry - postgis | public | st_rotatey | geometry, double precision | geometry - postgis | public | st_rotatez | geometry, double precision | geometry - postgis | public | st_scale | geometry, double precision, double precision | geometry - postgis | public | st_scale | geometry, double precision, double precision, double precision | geometry - postgis | public | st_scale | geometry, geometry | geometry - postgis | public | st_scale | geometry, geometry, origin geometry | geometry - postgis | public | st_scroll | geometry, geometry | geometry - postgis | public | st_segmentize | geog geography, max_segment_length double precision | geography - postgis | public | st_segmentize | geometry, double precision | geometry - postgis | public | st_seteffectivearea | geometry, double precision, integer | geometry - postgis | public | st_setpoint | geometry, integer, geometry | geometry - postgis | public | st_setsrid | geog geography, srid integer | geography - postgis | public | st_setsrid | geom geometry, srid integer | geometry - postgis | public | st_sharedpaths | geom1 geometry, geom2 geometry | geometry - postgis | public | st_shiftlongitude | geometry | geometry - postgis | public | st_shortestline | geom1 geometry, geom2 geometry | geometry - postgis | public | st_simplify | geometry, double precision | geometry - postgis | public | st_simplify | geometry, double precision, boolean | geometry - postgis | public | st_simplifypolygonhull | geom geometry, vertex_fraction double precision, is_outer boolean | geometry - postgis | public | st_simplifypreservetopology | geometry, double precision | geometry - postgis | public | st_simplifyvw | geometry, double precision | geometry - postgis | public | st_snap | geom1 geometry, geom2 geometry, double precision | geometry - postgis | public | st_snaptogrid | geom1 geometry, geom2 geometry, double precision, double precision, double precision, double precision | geometry - 
postgis | public | st_snaptogrid | geometry, double precision | geometry - postgis | public | st_snaptogrid | geometry, double precision, double precision | geometry - postgis | public | st_snaptogrid | geometry, double precision, double precision, double precision, double precision | geometry - postgis | public | st_split | geom1 geometry, geom2 geometry | geometry - postgis | public | st_square | size double precision, cell_i integer, cell_j integer, origin geometry | geometry - postgis | public | st_squaregrid | size double precision, bounds geometry, OUT geom geometry, OUT i integer, OUT j integer | SETOF record - postgis | public | st_srid | geog geography | integer - postgis | public | st_srid | geom geometry | integer - postgis | public | st_startpoint | geometry | geometry - postgis | public | st_subdivide | geom geometry, maxvertices integer, gridsize double precision | SETOF geometry - postgis | public | st_summary | geography | text - postgis | public | st_summary | geometry | text - postgis | public | st_swapordinates | geom geometry, ords cstring | geometry - postgis | public | st_symdifference | geom1 geometry, geom2 geometry, gridsize double precision | geometry - postgis | public | st_symmetricdifference | geom1 geometry, geom2 geometry | geometry - postgis | public | st_tileenvelope | zoom integer, x integer, y integer, bounds geometry, margin double precision | geometry - postgis | public | st_touches | geom1 geometry, geom2 geometry | boolean - postgis | public | st_transform | geom geometry, from_proj text, to_proj text | geometry - postgis | public | st_transform | geom geometry, from_proj text, to_srid integer | geometry - postgis | public | st_transform | geom geometry, to_proj text | geometry - postgis | public | st_transform | geometry, integer | geometry - postgis | public | st_translate | geometry, double precision, double precision | geometry - postgis | public | st_translate | geometry, double precision, double precision, double 
precision | geometry - postgis | public | st_transscale | geometry, double precision, double precision, double precision, double precision | geometry - postgis | public | st_triangulatepolygon | g1 geometry | geometry - postgis | public | st_unaryunion | geometry, gridsize double precision | geometry - postgis | public | st_union | geom1 geometry, geom2 geometry | geometry - postgis | public | st_union | geom1 geometry, geom2 geometry, gridsize double precision | geometry - postgis | public | st_union | geometry | geometry - postgis | public | st_union | geometry, gridsize double precision | geometry - postgis | public | st_union | geometry[] | geometry - postgis | public | st_voronoilines | g1 geometry, tolerance double precision, extend_to geometry | geometry - postgis | public | st_voronoipolygons | g1 geometry, tolerance double precision, extend_to geometry | geometry - postgis | public | st_within | geom1 geometry, geom2 geometry | boolean - postgis | public | st_wkbtosql | wkb bytea | geometry - postgis | public | st_wkttosql | text | geometry - postgis | public | st_wrapx | geom geometry, wrap double precision, move double precision | geometry - postgis | public | st_x | geometry | double precision - postgis | public | st_xmax | box3d | double precision - postgis | public | st_xmin | box3d | double precision - postgis | public | st_y | geometry | double precision - postgis | public | st_ymax | box3d | double precision - postgis | public | st_ymin | box3d | double precision - postgis | public | st_z | geometry | double precision - postgis | public | st_zmax | box3d | double precision - postgis | public | st_zmflag | geometry | smallint - postgis | public | st_zmin | box3d | double precision - postgis | public | text | geometry | text - postgis | public | unlockrows | text | integer - postgis | public | updategeometrysrid | catalogn_name character varying, schema_name character varying, table_name character varying, column_name character varying, new_srid_in 
integer | text - postgis | public | updategeometrysrid | character varying, character varying, character varying, integer | text - postgis | public | updategeometrysrid | character varying, character varying, integer | text - postgis_raster | public | __st_countagg_transfn | agg agg_count, rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | agg_count - postgis_raster | public | _add_overview_constraint | ovschema name, ovtable name, ovcolumn name, refschema name, reftable name, refcolumn name, factor integer | boolean - postgis_raster | public | _add_raster_constraint | cn name, sql text | boolean - postgis_raster | public | _add_raster_constraint_alignment | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _add_raster_constraint_blocksize | rastschema name, rasttable name, rastcolumn name, axis text | boolean - postgis_raster | public | _add_raster_constraint_coverage_tile | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _add_raster_constraint_extent | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _add_raster_constraint_nodata_values | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _add_raster_constraint_num_bands | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _add_raster_constraint_out_db | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _add_raster_constraint_pixel_types | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _add_raster_constraint_scale | rastschema name, rasttable name, rastcolumn name, axis character | boolean - postgis_raster | public | _add_raster_constraint_spatially_unique | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _add_raster_constraint_srid | rastschema name, rasttable name, 
rastcolumn name | boolean - postgis_raster | public | _drop_overview_constraint | ovschema name, ovtable name, ovcolumn name | boolean - postgis_raster | public | _drop_raster_constraint | rastschema name, rasttable name, cn name | boolean - postgis_raster | public | _drop_raster_constraint_alignment | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _drop_raster_constraint_blocksize | rastschema name, rasttable name, rastcolumn name, axis text | boolean - postgis_raster | public | _drop_raster_constraint_coverage_tile | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _drop_raster_constraint_extent | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _drop_raster_constraint_nodata_values | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _drop_raster_constraint_num_bands | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _drop_raster_constraint_out_db | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _drop_raster_constraint_pixel_types | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _drop_raster_constraint_regular_blocking | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _drop_raster_constraint_scale | rastschema name, rasttable name, rastcolumn name, axis character | boolean - postgis_raster | public | _drop_raster_constraint_spatially_unique | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _drop_raster_constraint_srid | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _overview_constraint | ov raster, factor integer, refschema name, reftable name, refcolumn name | boolean - postgis_raster | public | _overview_constraint_info | ovschema name, ovtable name, ovcolumn name, OUT refschema 
name, OUT reftable name, OUT refcolumn name, OUT factor integer | record - postgis_raster | public | _raster_constraint_info_alignment | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _raster_constraint_info_blocksize | rastschema name, rasttable name, rastcolumn name, axis text | integer - postgis_raster | public | _raster_constraint_info_coverage_tile | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _raster_constraint_info_extent | rastschema name, rasttable name, rastcolumn name | geometry - postgis_raster | public | _raster_constraint_info_index | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _raster_constraint_info_nodata_values | rastschema name, rasttable name, rastcolumn name | double precision[] - postgis_raster | public | _raster_constraint_info_num_bands | rastschema name, rasttable name, rastcolumn name | integer - postgis_raster | public | _raster_constraint_info_out_db | rastschema name, rasttable name, rastcolumn name | boolean[] - postgis_raster | public | _raster_constraint_info_pixel_types | rastschema name, rasttable name, rastcolumn name | text[] - postgis_raster | public | _raster_constraint_info_regular_blocking | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _raster_constraint_info_scale | rastschema name, rasttable name, rastcolumn name, axis character | double precision - postgis_raster | public | _raster_constraint_info_spatially_unique | rastschema name, rasttable name, rastcolumn name | boolean - postgis_raster | public | _raster_constraint_info_srid | rastschema name, rasttable name, rastcolumn name | integer - postgis_raster | public | _raster_constraint_nodata_values | rast raster | numeric[] - postgis_raster | public | _raster_constraint_out_db | rast raster | boolean[] - postgis_raster | public | _raster_constraint_pixel_types | rast raster | text[] - postgis_raster | 
public | _st_aspect4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision - postgis_raster | public | _st_asraster | geom geometry, scalex double precision, scaley double precision, width integer, height integer, pixeltype text[], value double precision[], nodataval double precision[], upperleftx double precision, upperlefty double precision, gridx double precision, gridy double precision, skewx double precision, skewy double precision, touched boolean | raster - postgis_raster | public | _st_clip | rast raster, nband integer[], geom geometry, nodataval double precision[], crop boolean | raster - postgis_raster | public | _st_colormap | rast raster, nband integer, colormap text, method text | raster - postgis_raster | public | _st_contains | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean - postgis_raster | public | _st_containsproperly | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean - postgis_raster | public | _st_convertarray4ma | value double precision[] | double precision[] - postgis_raster | public | _st_count | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | bigint - postgis_raster | public | _st_countagg_finalfn | agg agg_count | bigint - postgis_raster | public | _st_countagg_transfn | agg agg_count, rast raster, exclude_nodata_value boolean | agg_count - postgis_raster | public | _st_countagg_transfn | agg agg_count, rast raster, nband integer, exclude_nodata_value boolean | agg_count - postgis_raster | public | _st_countagg_transfn | agg agg_count, rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | agg_count - postgis_raster | public | _st_coveredby | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean - postgis_raster | public | _st_covers | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean - postgis_raster | public | _st_dfullywithin | rast1 raster, nband1 
integer, rast2 raster, nband2 integer, distance double precision | boolean - postgis_raster | public | _st_dwithin | rast1 raster, nband1 integer, rast2 raster, nband2 integer, distance double precision | boolean - postgis_raster | public | _st_gdalwarp | rast raster, algorithm text, maxerr double precision, srid integer, scalex double precision, scaley double precision, gridx double precision, gridy double precision, skewx double precision, skewy double precision, width integer, height integer | raster - postgis_raster | public | _st_grayscale4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision - postgis_raster | public | _st_hillshade4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision - postgis_raster | public | _st_histogram | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, bins integer, width double precision[], "right" boolean, min double precision, max double precision, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record - postgis_raster | public | _st_intersects | geom geometry, rast raster, nband integer | boolean - postgis_raster | public | _st_intersects | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean - postgis_raster | public | _st_mapalgebra | rastbandargset rastbandarg[], callbackfunc regprocedure, pixeltype text, distancex integer, distancey integer, extenttype text, customextent raster, mask double precision[], weighted boolean, VARIADIC userargs text[] | raster - postgis_raster | public | _st_mapalgebra | rastbandargset rastbandarg[], expression text, pixeltype text, extenttype text, nodata1expr text, nodata2expr text, nodatanodataval double precision | raster - postgis_raster | public | _st_neighborhood | rast raster, band integer, columnx integer, rowy integer, distancex integer, distancey integer, exclude_nodata_value boolean | double 
precision[] - postgis_raster | public | _st_overlaps | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean - postgis_raster | public | _st_pixelascentroids | rast raster, band integer, columnx integer, rowy integer, exclude_nodata_value boolean | TABLE(geom geometry, val double precision, x integer, y integer) - postgis_raster | public | _st_pixelaspolygons | rast raster, band integer, columnx integer, rowy integer, exclude_nodata_value boolean | TABLE(geom geometry, val double precision, x integer, y integer) - postgis_raster | public | _st_quantile | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record - postgis_raster | public | _st_rastertoworldcoord | rast raster, columnx integer, rowy integer, OUT longitude double precision, OUT latitude double precision | record - postgis_raster | public | _st_reclass | rast raster, VARIADIC reclassargset reclassarg[] | raster - postgis_raster | public | _st_roughness4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision - postgis_raster | public | _st_samealignment_finalfn | agg agg_samealignment | boolean - postgis_raster | public | _st_samealignment_transfn | agg agg_samealignment, rast raster | agg_samealignment - postgis_raster | public | _st_setvalues | rast raster, nband integer, x integer, y integer, newvalueset double precision[], noset boolean[], hasnosetvalue boolean, nosetvalue double precision, keepnodata boolean | raster - postgis_raster | public | _st_slope4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision - postgis_raster | public | _st_summarystats | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | summarystats - postgis_raster | public | _st_summarystats_finalfn | internal | summarystats - postgis_raster | public | 
_st_summarystats_transfn | internal, raster, boolean, double precision | internal - postgis_raster | public | _st_summarystats_transfn | internal, raster, integer, boolean | internal - postgis_raster | public | _st_summarystats_transfn | internal, raster, integer, boolean, double precision | internal - postgis_raster | public | _st_tile | rast raster, width integer, height integer, nband integer[], padwithnodata boolean, nodataval double precision | SETOF raster - postgis_raster | public | _st_touches | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean - postgis_raster | public | _st_tpi4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision - postgis_raster | public | _st_tri4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision - postgis_raster | public | _st_union_finalfn | internal | raster - postgis_raster | public | _st_union_transfn | internal, raster | internal - postgis_raster | public | _st_union_transfn | internal, raster, integer | internal - postgis_raster | public | _st_union_transfn | internal, raster, integer, text | internal - postgis_raster | public | _st_union_transfn | internal, raster, text | internal - postgis_raster | public | _st_union_transfn | internal, raster, unionarg[] | internal - postgis_raster | public | _st_valuecount | rast raster, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer, OUT percent double precision | SETOF record - postgis_raster | public | _st_valuecount | rastertable text, rastercolumn text, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer, OUT percent double precision | SETOF record - postgis_raster | public | _st_within | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean - postgis_raster | public | 
_st_worldtorastercoord | rast raster, longitude double precision, latitude double precision, OUT columnx integer, OUT rowy integer | record - postgis_raster | public | _updaterastersrid | schema_name name, table_name name, column_name name, new_srid integer | boolean - postgis_raster | public | addoverviewconstraints | ovschema name, ovtable name, ovcolumn name, refschema name, reftable name, refcolumn name, ovfactor integer | boolean - postgis_raster | public | addoverviewconstraints | ovtable name, ovcolumn name, reftable name, refcolumn name, ovfactor integer | boolean - postgis_raster | public | addrasterconstraints | rastschema name, rasttable name, rastcolumn name, VARIADIC constraints text[] | boolean - postgis_raster | public | addrasterconstraints | rastschema name, rasttable name, rastcolumn name, srid boolean, scale_x boolean, scale_y boolean, blocksize_x boolean, blocksize_y boolean, same_alignment boolean, regular_blocking boolean, num_bands boolean, pixel_types boolean, nodata_values boolean, out_db boolean, extent boolean | boolean - postgis_raster | public | addrasterconstraints | rasttable name, rastcolumn name, VARIADIC constraints text[] | boolean - postgis_raster | public | addrasterconstraints | rasttable name, rastcolumn name, srid boolean, scale_x boolean, scale_y boolean, blocksize_x boolean, blocksize_y boolean, same_alignment boolean, regular_blocking boolean, num_bands boolean, pixel_types boolean, nodata_values boolean, out_db boolean, extent boolean | boolean - postgis_raster | public | box3d | raster | box3d - postgis_raster | public | bytea | raster | bytea - postgis_raster | public | dropoverviewconstraints | ovschema name, ovtable name, ovcolumn name | boolean - postgis_raster | public | dropoverviewconstraints | ovtable name, ovcolumn name | boolean - postgis_raster | public | droprasterconstraints | rastschema name, rasttable name, rastcolumn name, VARIADIC constraints text[] | boolean - postgis_raster | public | 
droprasterconstraints | rastschema name, rasttable name, rastcolumn name, srid boolean, scale_x boolean, scale_y boolean, blocksize_x boolean, blocksize_y boolean, same_alignment boolean, regular_blocking boolean, num_bands boolean, pixel_types boolean, nodata_values boolean, out_db boolean, extent boolean | boolean - postgis_raster | public | droprasterconstraints | rasttable name, rastcolumn name, VARIADIC constraints text[] | boolean - postgis_raster | public | droprasterconstraints | rasttable name, rastcolumn name, srid boolean, scale_x boolean, scale_y boolean, blocksize_x boolean, blocksize_y boolean, same_alignment boolean, regular_blocking boolean, num_bands boolean, pixel_types boolean, nodata_values boolean, out_db boolean, extent boolean | boolean - postgis_raster | public | geometry_contained_by_raster | geometry, raster | boolean - postgis_raster | public | geometry_raster_contain | geometry, raster | boolean - postgis_raster | public | geometry_raster_overlap | geometry, raster | boolean - postgis_raster | public | postgis_gdal_version | | text - postgis_raster | public | postgis_noop | raster | geometry - postgis_raster | public | postgis_raster_lib_build_date | | text - postgis_raster | public | postgis_raster_lib_version | | text - postgis_raster | public | postgis_raster_scripts_installed | | text - postgis_raster | public | raster_above | raster, raster | boolean - postgis_raster | public | raster_below | raster, raster | boolean - postgis_raster | public | raster_contain | raster, raster | boolean - postgis_raster | public | raster_contained | raster, raster | boolean - postgis_raster | public | raster_contained_by_geometry | raster, geometry | boolean - postgis_raster | public | raster_eq | raster, raster | boolean - postgis_raster | public | raster_geometry_contain | raster, geometry | boolean - postgis_raster | public | raster_geometry_overlap | raster, geometry | boolean - postgis_raster | public | raster_hash | raster | integer - 
postgis_raster | public | raster_in | cstring | raster - postgis_raster | public | raster_left | raster, raster | boolean - postgis_raster | public | raster_out | raster | cstring - postgis_raster | public | raster_overabove | raster, raster | boolean - postgis_raster | public | raster_overbelow | raster, raster | boolean - postgis_raster | public | raster_overlap | raster, raster | boolean - postgis_raster | public | raster_overleft | raster, raster | boolean - postgis_raster | public | raster_overright | raster, raster | boolean - postgis_raster | public | raster_right | raster, raster | boolean - postgis_raster | public | raster_same | raster, raster | boolean - postgis_raster | public | st_addband | rast raster, addbandargset addbandarg[] | raster - postgis_raster | public | st_addband | rast raster, index integer, outdbfile text, outdbindex integer[], nodataval double precision | raster - postgis_raster | public | st_addband | rast raster, index integer, pixeltype text, initialvalue double precision, nodataval double precision | raster - postgis_raster | public | st_addband | rast raster, outdbfile text, outdbindex integer[], index integer, nodataval double precision | raster - postgis_raster | public | st_addband | rast raster, pixeltype text, initialvalue double precision, nodataval double precision | raster - postgis_raster | public | st_addband | torast raster, fromrast raster, fromband integer, torastindex integer | raster - postgis_raster | public | st_addband | torast raster, fromrasts raster[], fromband integer, torastindex integer | raster - postgis_raster | public | st_approxcount | rast raster, exclude_nodata_value boolean, sample_percent double precision | bigint - postgis_raster | public | st_approxcount | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | bigint - postgis_raster | public | st_approxcount | rast raster, nband integer, sample_percent double precision | bigint - postgis_raster | public | 
st_approxcount | rast raster, sample_percent double precision | bigint - postgis_raster | public | st_approxhistogram | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, bins integer, "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record - postgis_raster | public | st_approxhistogram | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, bins integer, width double precision[], "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record - postgis_raster | public | st_approxhistogram | rast raster, nband integer, sample_percent double precision, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record - postgis_raster | public | st_approxhistogram | rast raster, nband integer, sample_percent double precision, bins integer, "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record - postgis_raster | public | st_approxhistogram | rast raster, nband integer, sample_percent double precision, bins integer, width double precision[], "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record - postgis_raster | public | st_approxhistogram | rast raster, sample_percent double precision, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record - postgis_raster | public | st_approxquantile | rast raster, exclude_nodata_value boolean, quantile double precision | double precision - postgis_raster | public | st_approxquantile | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, quantile double precision | double precision - postgis_raster | public | 
st_approxquantile | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record - postgis_raster | public | st_approxquantile | rast raster, nband integer, sample_percent double precision, quantile double precision | double precision - postgis_raster | public | st_approxquantile | rast raster, nband integer, sample_percent double precision, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record - postgis_raster | public | st_approxquantile | rast raster, quantile double precision | double precision - postgis_raster | public | st_approxquantile | rast raster, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record - postgis_raster | public | st_approxquantile | rast raster, sample_percent double precision, quantile double precision | double precision - postgis_raster | public | st_approxquantile | rast raster, sample_percent double precision, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record - postgis_raster | public | st_approxsummarystats | rast raster, exclude_nodata_value boolean, sample_percent double precision | summarystats - postgis_raster | public | st_approxsummarystats | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | summarystats - postgis_raster | public | st_approxsummarystats | rast raster, nband integer, sample_percent double precision | summarystats - postgis_raster | public | st_approxsummarystats | rast raster, sample_percent double precision | summarystats - postgis_raster | public | st_asbinary | raster, outasin boolean | bytea - postgis_raster | public | st_asgdalraster | rast raster, format text, options text[], srid integer | bytea - postgis_raster | public | st_ashexwkb | raster, outasin boolean | text - postgis_raster | 
public | st_asjpeg | rast raster, nband integer, options text[] | bytea - postgis_raster | public | st_asjpeg | rast raster, nband integer, quality integer | bytea - postgis_raster | public | st_asjpeg | rast raster, nbands integer[], options text[] | bytea - postgis_raster | public | st_asjpeg | rast raster, nbands integer[], quality integer | bytea - postgis_raster | public | st_asjpeg | rast raster, options text[] | bytea - postgis_raster | public | st_aspect | rast raster, nband integer, customextent raster, pixeltype text, units text, interpolate_nodata boolean | raster - postgis_raster | public | st_aspect | rast raster, nband integer, pixeltype text, units text, interpolate_nodata boolean | raster - postgis_raster | public | st_aspng | rast raster, nband integer, compression integer | bytea - postgis_raster | public | st_aspng | rast raster, nband integer, options text[] | bytea - postgis_raster | public | st_aspng | rast raster, nbands integer[], compression integer | bytea - postgis_raster | public | st_aspng | rast raster, nbands integer[], options text[] | bytea - postgis_raster | public | st_aspng | rast raster, options text[] | bytea - postgis_raster | public | st_asraster | geom geometry, ref raster, pixeltype text, value double precision, nodataval double precision, touched boolean | raster - postgis_raster | public | st_asraster | geom geometry, ref raster, pixeltype text[], value double precision[], nodataval double precision[], touched boolean | raster - postgis_raster | public | st_asraster | geom geometry, scalex double precision, scaley double precision, gridx double precision, gridy double precision, pixeltype text, value double precision, nodataval double precision, skewx double precision, skewy double precision, touched boolean | raster - postgis_raster | public | st_asraster | geom geometry, scalex double precision, scaley double precision, gridx double precision, gridy double precision, pixeltype text[], value double precision[], nodataval 
double precision[], skewx double precision, skewy double precision, touched boolean | raster - postgis_raster | public | st_asraster | geom geometry, scalex double precision, scaley double precision, pixeltype text, value double precision, nodataval double precision, upperleftx double precision, upperlefty double precision, skewx double precision, skewy double precision, touched boolean | raster - postgis_raster | public | st_asraster | geom geometry, scalex double precision, scaley double precision, pixeltype text[], value double precision[], nodataval double precision[], upperleftx double precision, upperlefty double precision, skewx double precision, skewy double precision, touched boolean | raster - postgis_raster | public | st_asraster | geom geometry, width integer, height integer, gridx double precision, gridy double precision, pixeltype text, value double precision, nodataval double precision, skewx double precision, skewy double precision, touched boolean | raster - postgis_raster | public | st_asraster | geom geometry, width integer, height integer, gridx double precision, gridy double precision, pixeltype text[], value double precision[], nodataval double precision[], skewx double precision, skewy double precision, touched boolean | raster - postgis_raster | public | st_asraster | geom geometry, width integer, height integer, pixeltype text, value double precision, nodataval double precision, upperleftx double precision, upperlefty double precision, skewx double precision, skewy double precision, touched boolean | raster - postgis_raster | public | st_asraster | geom geometry, width integer, height integer, pixeltype text[], value double precision[], nodataval double precision[], upperleftx double precision, upperlefty double precision, skewx double precision, skewy double precision, touched boolean | raster - postgis_raster | public | st_astiff | rast raster, compression text, srid integer | bytea - postgis_raster | public | st_astiff | rast raster, 
nbands integer[], compression text, srid integer | bytea - postgis_raster | public | st_astiff | rast raster, nbands integer[], options text[], srid integer | bytea - postgis_raster | public | st_astiff | rast raster, options text[], srid integer | bytea - postgis_raster | public | st_aswkb | raster, outasin boolean | bytea - postgis_raster | public | st_band | rast raster, nband integer | raster - postgis_raster | public | st_band | rast raster, nbands integer[] | raster - postgis_raster | public | st_band | rast raster, nbands text, delimiter character | raster - postgis_raster | public | st_bandfilesize | rast raster, band integer | bigint - postgis_raster | public | st_bandfiletimestamp | rast raster, band integer | bigint - postgis_raster | public | st_bandisnodata | rast raster, band integer, forcechecking boolean | boolean - postgis_raster | public | st_bandisnodata | rast raster, forcechecking boolean | boolean - postgis_raster | public | st_bandmetadata | rast raster, band integer | TABLE(pixeltype text, nodatavalue double precision, isoutdb boolean, path text, outdbbandnum integer, filesize bigint, filetimestamp bigint) - postgis_raster | public | st_bandmetadata | rast raster, band integer[] | TABLE(bandnum integer, pixeltype text, nodatavalue double precision, isoutdb boolean, path text, outdbbandnum integer, filesize bigint, filetimestamp bigint) - postgis_raster | public | st_bandnodatavalue | rast raster, band integer | double precision - postgis_raster | public | st_bandpath | rast raster, band integer | text - postgis_raster | public | st_bandpixeltype | rast raster, band integer | text - postgis_raster | public | st_clip | rast raster, geom geometry, crop boolean | raster - postgis_raster | public | st_clip | rast raster, geom geometry, nodataval double precision, crop boolean | raster - postgis_raster | public | st_clip | rast raster, geom geometry, nodataval double precision[], crop boolean | raster - postgis_raster | public | st_clip | rast 
raster, nband integer, geom geometry, crop boolean | raster - postgis_raster | public | st_clip | rast raster, nband integer, geom geometry, nodataval double precision, crop boolean | raster - postgis_raster | public | st_clip | rast raster, nband integer[], geom geometry, nodataval double precision[], crop boolean | raster - postgis_raster | public | st_colormap | rast raster, colormap text, method text | raster - postgis_raster | public | st_colormap | rast raster, nband integer, colormap text, method text | raster - postgis_raster | public | st_contains | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean - postgis_raster | public | st_contains | rast1 raster, rast2 raster | boolean - postgis_raster | public | st_containsproperly | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean - postgis_raster | public | st_containsproperly | rast1 raster, rast2 raster | boolean - postgis_raster | public | st_contour | rast raster, bandnumber integer, level_interval double precision, level_base double precision, fixed_levels double precision[], polygonize boolean | TABLE(geom geometry, id integer, value double precision) - postgis_raster | public | st_convexhull | raster | geometry - postgis_raster | public | st_count | rast raster, exclude_nodata_value boolean | bigint - postgis_raster | public | st_count | rast raster, nband integer, exclude_nodata_value boolean | bigint - postgis_raster | public | st_countagg | raster, boolean | bigint - postgis_raster | public | st_countagg | raster, integer, boolean | bigint - postgis_raster | public | st_countagg | raster, integer, boolean, double precision | bigint - postgis_raster | public | st_coveredby | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean - postgis_raster | public | st_coveredby | rast1 raster, rast2 raster | boolean - postgis_raster | public | st_covers | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean - postgis_raster | public | st_covers 
| rast1 raster, rast2 raster | boolean - postgis_raster | public | st_createoverview | tab regclass, col name, factor integer, algo text | regclass - postgis_raster | public | st_dfullywithin | rast1 raster, nband1 integer, rast2 raster, nband2 integer, distance double precision | boolean - postgis_raster | public | st_dfullywithin | rast1 raster, rast2 raster, distance double precision | boolean - postgis_raster | public | st_disjoint | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean - postgis_raster | public | st_disjoint | rast1 raster, rast2 raster | boolean - postgis_raster | public | st_distinct4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision - postgis_raster | public | st_distinct4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision - postgis_raster | public | st_dumpaspolygons | rast raster, band integer, exclude_nodata_value boolean | SETOF geomval - postgis_raster | public | st_dumpvalues | rast raster, nband integer, exclude_nodata_value boolean | double precision[] - postgis_raster | public | st_dumpvalues | rast raster, nband integer[], exclude_nodata_value boolean | TABLE(nband integer, valarray double precision[]) - postgis_raster | public | st_dwithin | rast1 raster, nband1 integer, rast2 raster, nband2 integer, distance double precision | boolean - postgis_raster | public | st_dwithin | rast1 raster, rast2 raster, distance double precision | boolean - postgis_raster | public | st_envelope | raster | geometry - postgis_raster | public | st_fromgdalraster | gdaldata bytea, srid integer | raster - postgis_raster | public | st_gdaldrivers | OUT idx integer, OUT short_name text, OUT long_name text, OUT can_read boolean, OUT can_write boolean, OUT create_options text | SETOF record - postgis_raster | public | st_georeference | rast raster, format text | text - postgis_raster | public | st_geotransform | raster, OUT imag double precision, OUT jmag double 
precision, OUT theta_i double precision, OUT theta_ij double precision, OUT xoffset double precision, OUT yoffset double precision | record - postgis_raster | public | st_grayscale | rast raster, redband integer, greenband integer, blueband integer, extenttype text | raster - postgis_raster | public | st_grayscale | rastbandargset rastbandarg[], extenttype text | raster - postgis_raster | public | st_hasnoband | rast raster, nband integer | boolean - postgis_raster | public | st_height | raster | integer - postgis_raster | public | st_hillshade | rast raster, nband integer, customextent raster, pixeltype text, azimuth double precision, altitude double precision, max_bright double precision, scale double precision, interpolate_nodata boolean | raster - postgis_raster | public | st_hillshade | rast raster, nband integer, pixeltype text, azimuth double precision, altitude double precision, max_bright double precision, scale double precision, interpolate_nodata boolean | raster - postgis_raster | public | st_histogram | rast raster, nband integer, bins integer, "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record - postgis_raster | public | st_histogram | rast raster, nband integer, bins integer, width double precision[], "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record - postgis_raster | public | st_histogram | rast raster, nband integer, exclude_nodata_value boolean, bins integer, "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record - postgis_raster | public | st_histogram | rast raster, nband integer, exclude_nodata_value boolean, bins integer, width double precision[], "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record - postgis_raster | public | 
st_interpolateraster | geom geometry, options text, rast raster, bandnumber integer | raster - postgis_raster | public | st_intersection | geomin geometry, rast raster, band integer | SETOF geomval - postgis_raster | public | st_intersection | rast raster, band integer, geomin geometry | SETOF geomval - postgis_raster | public | st_intersection | rast raster, geomin geometry | SETOF geomval - postgis_raster | public | st_intersection | rast1 raster, band1 integer, rast2 raster, band2 integer, nodataval double precision | raster - postgis_raster | public | st_intersection | rast1 raster, band1 integer, rast2 raster, band2 integer, nodataval double precision[] | raster - postgis_raster | public | st_intersection | rast1 raster, band1 integer, rast2 raster, band2 integer, returnband text, nodataval double precision | raster - postgis_raster | public | st_intersection | rast1 raster, band1 integer, rast2 raster, band2 integer, returnband text, nodataval double precision[] | raster - postgis_raster | public | st_intersection | rast1 raster, rast2 raster, nodataval double precision | raster - postgis_raster | public | st_intersection | rast1 raster, rast2 raster, nodataval double precision[] | raster - postgis_raster | public | st_intersection | rast1 raster, rast2 raster, returnband text, nodataval double precision | raster - postgis_raster | public | st_intersection | rast1 raster, rast2 raster, returnband text, nodataval double precision[] | raster - postgis_raster | public | st_intersects | geom geometry, rast raster, nband integer | boolean - postgis_raster | public | st_intersects | rast raster, geom geometry, nband integer | boolean - postgis_raster | public | st_intersects | rast raster, nband integer, geom geometry | boolean - postgis_raster | public | st_intersects | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean - postgis_raster | public | st_intersects | rast1 raster, rast2 raster | boolean - postgis_raster | public | st_invdistweight4ma 
| value double precision[], pos integer[], VARIADIC userargs text[] | double precision - postgis_raster | public | st_iscoveragetile | rast raster, coverage raster, tilewidth integer, tileheight integer | boolean - postgis_raster | public | st_isempty | rast raster | boolean - postgis_raster | public | st_makeemptycoverage | tilewidth integer, tileheight integer, width integer, height integer, upperleftx double precision, upperlefty double precision, scalex double precision, scaley double precision, skewx double precision, skewy double precision, srid integer | SETOF raster - postgis_raster | public | st_makeemptyraster | rast raster | raster - postgis_raster | public | st_makeemptyraster | width integer, height integer, upperleftx double precision, upperlefty double precision, pixelsize double precision | raster - postgis_raster | public | st_makeemptyraster | width integer, height integer, upperleftx double precision, upperlefty double precision, scalex double precision, scaley double precision, skewx double precision, skewy double precision, srid integer | raster - postgis_raster | public | st_mapalgebra | rast raster, nband integer, callbackfunc regprocedure, mask double precision[], weighted boolean, pixeltype text, extenttype text, customextent raster, VARIADIC userargs text[] | raster - postgis_raster | public | st_mapalgebra | rast raster, nband integer, callbackfunc regprocedure, pixeltype text, extenttype text, customextent raster, distancex integer, distancey integer, VARIADIC userargs text[] | raster - postgis_raster | public | st_mapalgebra | rast raster, nband integer, pixeltype text, expression text, nodataval double precision | raster - postgis_raster | public | st_mapalgebra | rast raster, nband integer[], callbackfunc regprocedure, pixeltype text, extenttype text, customextent raster, distancex integer, distancey integer, VARIADIC userargs text[] | raster - postgis_raster | public | st_mapalgebra | rast raster, pixeltype text, expression text, 
nodataval double precision | raster - postgis_raster | public | st_mapalgebra | rast1 raster, band1 integer, rast2 raster, band2 integer, expression text, pixeltype text, extenttype text, nodata1expr text, nodata2expr text, nodatanodataval double precision | raster - postgis_raster | public | st_mapalgebra | rast1 raster, nband1 integer, rast2 raster, nband2 integer, callbackfunc regprocedure, pixeltype text, extenttype text, customextent raster, distancex integer, distancey integer, VARIADIC userargs text[] | raster - postgis_raster | public | st_mapalgebra | rast1 raster, rast2 raster, expression text, pixeltype text, extenttype text, nodata1expr text, nodata2expr text, nodatanodataval double precision | raster - postgis_raster | public | st_mapalgebra | rastbandargset rastbandarg[], callbackfunc regprocedure, pixeltype text, extenttype text, customextent raster, distancex integer, distancey integer, VARIADIC userargs text[] | raster - postgis_raster | public | st_mapalgebraexpr | rast raster, band integer, pixeltype text, expression text, nodataval double precision | raster - postgis_raster | public | st_mapalgebraexpr | rast raster, pixeltype text, expression text, nodataval double precision | raster - postgis_raster | public | st_mapalgebraexpr | rast1 raster, band1 integer, rast2 raster, band2 integer, expression text, pixeltype text, extenttype text, nodata1expr text, nodata2expr text, nodatanodataval double precision | raster - postgis_raster | public | st_mapalgebraexpr | rast1 raster, rast2 raster, expression text, pixeltype text, extenttype text, nodata1expr text, nodata2expr text, nodatanodataval double precision | raster - postgis_raster | public | st_mapalgebrafct | rast raster, band integer, onerastuserfunc regprocedure | raster - postgis_raster | public | st_mapalgebrafct | rast raster, band integer, onerastuserfunc regprocedure, VARIADIC args text[] | raster - postgis_raster | public | st_mapalgebrafct | rast raster, band integer, pixeltype text, 
onerastuserfunc regprocedure | raster - postgis_raster | public | st_mapalgebrafct | rast raster, band integer, pixeltype text, onerastuserfunc regprocedure, VARIADIC args text[] | raster - postgis_raster | public | st_mapalgebrafct | rast raster, onerastuserfunc regprocedure | raster - postgis_raster | public | st_mapalgebrafct | rast raster, onerastuserfunc regprocedure, VARIADIC args text[] | raster - postgis_raster | public | st_mapalgebrafct | rast raster, pixeltype text, onerastuserfunc regprocedure | raster - postgis_raster | public | st_mapalgebrafct | rast raster, pixeltype text, onerastuserfunc regprocedure, VARIADIC args text[] | raster - postgis_raster | public | st_mapalgebrafct | rast1 raster, band1 integer, rast2 raster, band2 integer, tworastuserfunc regprocedure, pixeltype text, extenttype text, VARIADIC userargs text[] | raster - postgis_raster | public | st_mapalgebrafct | rast1 raster, rast2 raster, tworastuserfunc regprocedure, pixeltype text, extenttype text, VARIADIC userargs text[] | raster - postgis_raster | public | st_mapalgebrafctngb | rast raster, band integer, pixeltype text, ngbwidth integer, ngbheight integer, onerastngbuserfunc regprocedure, nodatamode text, VARIADIC args text[] | raster - postgis_raster | public | st_max4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision - postgis_raster | public | st_max4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision - postgis_raster | public | st_mean4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision - postgis_raster | public | st_mean4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision - postgis_raster | public | st_memsize | raster | integer - postgis_raster | public | st_metadata | rast raster, OUT upperleftx double precision, OUT upperlefty double precision, OUT width integer, OUT height integer, OUT scalex double precision, OUT scaley 
double precision, OUT skewx double precision, OUT skewy double precision, OUT srid integer, OUT numbands integer | record - postgis_raster | public | st_min4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision - postgis_raster | public | st_min4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision - postgis_raster | public | st_minconvexhull | rast raster, nband integer | geometry - postgis_raster | public | st_mindist4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision - postgis_raster | public | st_minpossiblevalue | pixeltype text | double precision - postgis_raster | public | st_nearestvalue | rast raster, band integer, columnx integer, rowy integer, exclude_nodata_value boolean | double precision - postgis_raster | public | st_nearestvalue | rast raster, band integer, pt geometry, exclude_nodata_value boolean | double precision - postgis_raster | public | st_nearestvalue | rast raster, columnx integer, rowy integer, exclude_nodata_value boolean | double precision - postgis_raster | public | st_nearestvalue | rast raster, pt geometry, exclude_nodata_value boolean | double precision - postgis_raster | public | st_neighborhood | rast raster, band integer, columnx integer, rowy integer, distancex integer, distancey integer, exclude_nodata_value boolean | double precision[] - postgis_raster | public | st_neighborhood | rast raster, band integer, pt geometry, distancex integer, distancey integer, exclude_nodata_value boolean | double precision[] - postgis_raster | public | st_neighborhood | rast raster, columnx integer, rowy integer, distancex integer, distancey integer, exclude_nodata_value boolean | double precision[] - postgis_raster | public | st_neighborhood | rast raster, pt geometry, distancex integer, distancey integer, exclude_nodata_value boolean | double precision[] - postgis_raster | public | st_notsamealignmentreason | rast1 raster, rast2 raster 
| text - postgis_raster | public | st_numbands | raster | integer - postgis_raster | public | st_overlaps | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean - postgis_raster | public | st_overlaps | rast1 raster, rast2 raster | boolean - postgis_raster | public | st_pixelascentroid | rast raster, x integer, y integer | geometry - postgis_raster | public | st_pixelascentroids | rast raster, band integer, exclude_nodata_value boolean | TABLE(geom geometry, val double precision, x integer, y integer) - postgis_raster | public | st_pixelaspoint | rast raster, x integer, y integer | geometry - postgis_raster | public | st_pixelaspoints | rast raster, band integer, exclude_nodata_value boolean | TABLE(geom geometry, val double precision, x integer, y integer) - postgis_raster | public | st_pixelaspolygon | rast raster, x integer, y integer | geometry - postgis_raster | public | st_pixelaspolygons | rast raster, band integer, exclude_nodata_value boolean | TABLE(geom geometry, val double precision, x integer, y integer) - postgis_raster | public | st_pixelheight | raster | double precision - postgis_raster | public | st_pixelofvalue | rast raster, nband integer, search double precision, exclude_nodata_value boolean | TABLE(x integer, y integer) - postgis_raster | public | st_pixelofvalue | rast raster, nband integer, search double precision[], exclude_nodata_value boolean | TABLE(val double precision, x integer, y integer) - postgis_raster | public | st_pixelofvalue | rast raster, search double precision, exclude_nodata_value boolean | TABLE(x integer, y integer) - postgis_raster | public | st_pixelofvalue | rast raster, search double precision[], exclude_nodata_value boolean | TABLE(val double precision, x integer, y integer) - postgis_raster | public | st_pixelwidth | raster | double precision - postgis_raster | public | st_polygon | rast raster, band integer | geometry - postgis_raster | public | st_quantile | rast raster, exclude_nodata_value 
boolean, quantile double precision | double precision - postgis_raster | public | st_quantile | rast raster, nband integer, exclude_nodata_value boolean, quantile double precision | double precision - postgis_raster | public | st_quantile | rast raster, nband integer, exclude_nodata_value boolean, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record - postgis_raster | public | st_quantile | rast raster, nband integer, quantile double precision | double precision - postgis_raster | public | st_quantile | rast raster, nband integer, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record - postgis_raster | public | st_quantile | rast raster, quantile double precision | double precision - postgis_raster | public | st_quantile | rast raster, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record - postgis_raster | public | st_range4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision - postgis_raster | public | st_range4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision - postgis_raster | public | st_rastertoworldcoord | rast raster, columnx integer, rowy integer, OUT longitude double precision, OUT latitude double precision | record - postgis_raster | public | st_rastertoworldcoordx | rast raster, xr integer | double precision - postgis_raster | public | st_rastertoworldcoordx | rast raster, xr integer, yr integer | double precision - postgis_raster | public | st_rastertoworldcoordy | rast raster, xr integer, yr integer | double precision - postgis_raster | public | st_rastertoworldcoordy | rast raster, yr integer | double precision - postgis_raster | public | st_rastfromhexwkb | text | raster - postgis_raster | public | st_rastfromwkb | bytea | raster - postgis_raster | public | st_reclass | rast raster, VARIADIC reclassargset reclassarg[] | raster - 
postgis_raster | public | st_reclass | rast raster, nband integer, reclassexpr text, pixeltype text, nodataval double precision | raster - postgis_raster | public | st_reclass | rast raster, reclassexpr text, pixeltype text | raster - postgis_raster | public | st_resample | rast raster, ref raster, algorithm text, maxerr double precision, usescale boolean | raster - postgis_raster | public | st_resample | rast raster, ref raster, usescale boolean, algorithm text, maxerr double precision | raster - postgis_raster | public | st_resample | rast raster, scalex double precision, scaley double precision, gridx double precision, gridy double precision, skewx double precision, skewy double precision, algorithm text, maxerr double precision | raster - postgis_raster | public | st_resample | rast raster, width integer, height integer, gridx double precision, gridy double precision, skewx double precision, skewy double precision, algorithm text, maxerr double precision | raster - postgis_raster | public | st_rescale | rast raster, scalex double precision, scaley double precision, algorithm text, maxerr double precision | raster - postgis_raster | public | st_rescale | rast raster, scalexy double precision, algorithm text, maxerr double precision | raster - postgis_raster | public | st_resize | rast raster, percentwidth double precision, percentheight double precision, algorithm text, maxerr double precision | raster - postgis_raster | public | st_resize | rast raster, width integer, height integer, algorithm text, maxerr double precision | raster - postgis_raster | public | st_resize | rast raster, width text, height text, algorithm text, maxerr double precision | raster - postgis_raster | public | st_reskew | rast raster, skewx double precision, skewy double precision, algorithm text, maxerr double precision | raster - postgis_raster | public | st_reskew | rast raster, skewxy double precision, algorithm text, maxerr double precision | raster - postgis_raster | public | 
st_retile | tab regclass, col name, ext geometry, sfx double precision, sfy double precision, tw integer, th integer, algo text | SETOF raster - postgis_raster | public | st_rotation | raster | double precision - postgis_raster | public | st_roughness | rast raster, nband integer, customextent raster, pixeltype text, interpolate_nodata boolean | raster - postgis_raster | public | st_roughness | rast raster, nband integer, pixeltype text, interpolate_nodata boolean | raster - postgis_raster | public | st_samealignment | rast1 raster, rast2 raster | boolean - postgis_raster | public | st_samealignment | raster | boolean - postgis_raster | public | st_samealignment | ulx1 double precision, uly1 double precision, scalex1 double precision, scaley1 double precision, skewx1 double precision, skewy1 double precision, ulx2 double precision, uly2 double precision, scalex2 double precision, scaley2 double precision, skewx2 double precision, skewy2 double precision | boolean - postgis_raster | public | st_scalex | raster | double precision - postgis_raster | public | st_scaley | raster | double precision - postgis_raster | public | st_setbandindex | rast raster, band integer, outdbindex integer, force boolean | raster - postgis_raster | public | st_setbandisnodata | rast raster, band integer | raster - postgis_raster | public | st_setbandnodatavalue | rast raster, band integer, nodatavalue double precision, forcechecking boolean | raster - postgis_raster | public | st_setbandnodatavalue | rast raster, nodatavalue double precision | raster - postgis_raster | public | st_setbandpath | rast raster, band integer, outdbpath text, outdbindex integer, force boolean | raster - postgis_raster | public | st_setgeoreference | rast raster, georef text, format text | raster - postgis_raster | public | st_setgeoreference | rast raster, upperleftx double precision, upperlefty double precision, scalex double precision, scaley double precision, skewx double precision, skewy double precision | 
raster - postgis_raster | public | st_setgeotransform | rast raster, imag double precision, jmag double precision, theta_i double precision, theta_ij double precision, xoffset double precision, yoffset double precision | raster - postgis_raster | public | st_setm | rast raster, geom geometry, resample text, band integer | geometry - postgis_raster | public | st_setrotation | rast raster, rotation double precision | raster - postgis_raster | public | st_setscale | rast raster, scale double precision | raster - postgis_raster | public | st_setscale | rast raster, scalex double precision, scaley double precision | raster - postgis_raster | public | st_setskew | rast raster, skew double precision | raster - postgis_raster | public | st_setskew | rast raster, skewx double precision, skewy double precision | raster - postgis_raster | public | st_setsrid | rast raster, srid integer | raster - postgis_raster | public | st_setupperleft | rast raster, upperleftx double precision, upperlefty double precision | raster - postgis_raster | public | st_setvalue | rast raster, band integer, x integer, y integer, newvalue double precision | raster - postgis_raster | public | st_setvalue | rast raster, geom geometry, newvalue double precision | raster - postgis_raster | public | st_setvalue | rast raster, nband integer, geom geometry, newvalue double precision | raster - postgis_raster | public | st_setvalue | rast raster, x integer, y integer, newvalue double precision | raster - postgis_raster | public | st_setvalues | rast raster, nband integer, geomvalset geomval[], keepnodata boolean | raster - postgis_raster | public | st_setvalues | rast raster, nband integer, x integer, y integer, newvalueset double precision[], noset boolean[], keepnodata boolean | raster - postgis_raster | public | st_setvalues | rast raster, nband integer, x integer, y integer, newvalueset double precision[], nosetvalue double precision, keepnodata boolean | raster - postgis_raster | public | st_setvalues 
| rast raster, nband integer, x integer, y integer, width integer, height integer, newvalue double precision, keepnodata boolean | raster - postgis_raster | public | st_setvalues | rast raster, x integer, y integer, width integer, height integer, newvalue double precision, keepnodata boolean | raster - postgis_raster | public | st_setz | rast raster, geom geometry, resample text, band integer | geometry - postgis_raster | public | st_skewx | raster | double precision - postgis_raster | public | st_skewy | raster | double precision - postgis_raster | public | st_slope | rast raster, nband integer, customextent raster, pixeltype text, units text, scale double precision, interpolate_nodata boolean | raster - postgis_raster | public | st_slope | rast raster, nband integer, pixeltype text, units text, scale double precision, interpolate_nodata boolean | raster - postgis_raster | public | st_snaptogrid | rast raster, gridx double precision, gridy double precision, algorithm text, maxerr double precision, scalex double precision, scaley double precision | raster - postgis_raster | public | st_snaptogrid | rast raster, gridx double precision, gridy double precision, scalex double precision, scaley double precision, algorithm text, maxerr double precision | raster - postgis_raster | public | st_snaptogrid | rast raster, gridx double precision, gridy double precision, scalexy double precision, algorithm text, maxerr double precision | raster - postgis_raster | public | st_srid | raster | integer - postgis_raster | public | st_stddev4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision - postgis_raster | public | st_stddev4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision - postgis_raster | public | st_sum4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision - postgis_raster | public | st_sum4ma | value double precision[], pos integer[], VARIADIC userargs text[] | 
double precision - postgis_raster | public | st_summary | rast raster | text - postgis_raster | public | st_summarystats | rast raster, exclude_nodata_value boolean | summarystats - postgis_raster | public | st_summarystats | rast raster, nband integer, exclude_nodata_value boolean | summarystats - postgis_raster | public | st_summarystatsagg | raster, boolean, double precision | summarystats - postgis_raster | public | st_summarystatsagg | raster, integer, boolean | summarystats - postgis_raster | public | st_summarystatsagg | raster, integer, boolean, double precision | summarystats - postgis_raster | public | st_tile | rast raster, nband integer, width integer, height integer, padwithnodata boolean, nodataval double precision | SETOF raster - postgis_raster | public | st_tile | rast raster, nband integer[], width integer, height integer, padwithnodata boolean, nodataval double precision | SETOF raster - postgis_raster | public | st_tile | rast raster, width integer, height integer, padwithnodata boolean, nodataval double precision | SETOF raster - postgis_raster | public | st_touches | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean - postgis_raster | public | st_touches | rast1 raster, rast2 raster | boolean - postgis_raster | public | st_tpi | rast raster, nband integer, customextent raster, pixeltype text, interpolate_nodata boolean | raster - postgis_raster | public | st_tpi | rast raster, nband integer, pixeltype text, interpolate_nodata boolean | raster - postgis_raster | public | st_transform | rast raster, alignto raster, algorithm text, maxerr double precision | raster - postgis_raster | public | st_transform | rast raster, srid integer, algorithm text, maxerr double precision, scalex double precision, scaley double precision | raster - postgis_raster | public | st_transform | rast raster, srid integer, scalex double precision, scaley double precision, algorithm text, maxerr double precision | raster - postgis_raster | public | 
st_transform | rast raster, srid integer, scalexy double precision, algorithm text, maxerr double precision | raster - postgis_raster | public | st_tri | rast raster, nband integer, customextent raster, pixeltype text, interpolate_nodata boolean | raster - postgis_raster | public | st_tri | rast raster, nband integer, pixeltype text, interpolate_nodata boolean | raster - postgis_raster | public | st_union | raster | raster - postgis_raster | public | st_union | raster, integer | raster - postgis_raster | public | st_union | raster, integer, text | raster - postgis_raster | public | st_union | raster, text | raster - postgis_raster | public | st_union | raster, unionarg[] | raster - postgis_raster | public | st_upperleftx | raster | double precision - postgis_raster | public | st_upperlefty | raster | double precision - postgis_raster | public | st_value | rast raster, band integer, pt geometry, exclude_nodata_value boolean, resample text | double precision - postgis_raster | public | st_value | rast raster, band integer, x integer, y integer, exclude_nodata_value boolean | double precision - postgis_raster | public | st_value | rast raster, pt geometry, exclude_nodata_value boolean | double precision - postgis_raster | public | st_value | rast raster, x integer, y integer, exclude_nodata_value boolean | double precision - postgis_raster | public | st_valuecount | rast raster, nband integer, exclude_nodata_value boolean, searchvalue double precision, roundto double precision | integer - postgis_raster | public | st_valuecount | rast raster, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer | SETOF record - postgis_raster | public | st_valuecount | rast raster, nband integer, searchvalue double precision, roundto double precision | integer - postgis_raster | public | st_valuecount | rast raster, nband integer, searchvalues double precision[], roundto double precision, 
OUT value double precision, OUT count integer | SETOF record - postgis_raster | public | st_valuecount | rast raster, searchvalue double precision, roundto double precision | integer - postgis_raster | public | st_valuecount | rast raster, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer | SETOF record - postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, nband integer, exclude_nodata_value boolean, searchvalue double precision, roundto double precision | integer - postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer | SETOF record - postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, nband integer, searchvalue double precision, roundto double precision | integer - postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, nband integer, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer | SETOF record - postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, searchvalue double precision, roundto double precision | integer - postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer | SETOF record - postgis_raster | public | st_valuepercent | rast raster, nband integer, exclude_nodata_value boolean, searchvalue double precision, roundto double precision | double precision - postgis_raster | public | st_valuepercent | rast raster, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record - postgis_raster | public | st_valuepercent | rast raster, nband 
integer, searchvalue double precision, roundto double precision | double precision - postgis_raster | public | st_valuepercent | rast raster, nband integer, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record - postgis_raster | public | st_valuepercent | rast raster, searchvalue double precision, roundto double precision | double precision - postgis_raster | public | st_valuepercent | rast raster, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record - postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, nband integer, exclude_nodata_value boolean, searchvalue double precision, roundto double precision | double precision - postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record - postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, nband integer, searchvalue double precision, roundto double precision | double precision - postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, nband integer, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record - postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, searchvalue double precision, roundto double precision | double precision - postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record - postgis_raster | public | st_width | raster | integer - postgis_raster | public | st_within | rast1 raster, nband1 integer, rast2 raster, nband2 integer 
| boolean - postgis_raster | public | st_within | rast1 raster, rast2 raster | boolean - postgis_raster | public | st_worldtorastercoord | rast raster, longitude double precision, latitude double precision, OUT columnx integer, OUT rowy integer | record - postgis_raster | public | st_worldtorastercoord | rast raster, pt geometry, OUT columnx integer, OUT rowy integer | record - postgis_raster | public | st_worldtorastercoordx | rast raster, pt geometry | integer - postgis_raster | public | st_worldtorastercoordx | rast raster, xw double precision | integer - postgis_raster | public | st_worldtorastercoordx | rast raster, xw double precision, yw double precision | integer - postgis_raster | public | st_worldtorastercoordy | rast raster, pt geometry | integer - postgis_raster | public | st_worldtorastercoordy | rast raster, xw double precision, yw double precision | integer - postgis_raster | public | st_worldtorastercoordy | rast raster, yw double precision | integer - postgis_raster | public | updaterastersrid | schema_name name, table_name name, column_name name, new_srid integer | boolean - postgis_raster | public | updaterastersrid | table_name name, column_name name, new_srid integer | boolean - postgis_sfcgal | public | postgis_sfcgal_full_version | | text - postgis_sfcgal | public | postgis_sfcgal_noop | geometry | geometry - postgis_sfcgal | public | postgis_sfcgal_scripts_installed | | text - postgis_sfcgal | public | postgis_sfcgal_version | | text - postgis_sfcgal | public | st_3darea | geometry | double precision - postgis_sfcgal | public | st_3dconvexhull | geometry | geometry - postgis_sfcgal | public | st_3ddifference | geom1 geometry, geom2 geometry | geometry - postgis_sfcgal | public | st_3dintersection | geom1 geometry, geom2 geometry | geometry - postgis_sfcgal | public | st_3dunion | geom1 geometry, geom2 geometry | geometry - postgis_sfcgal | public | st_3dunion | geometry | geometry - postgis_sfcgal | public | st_alphashape | g1 geometry, 
alpha double precision, allow_holes boolean | geometry - postgis_sfcgal | public | st_approximatemedialaxis | geometry | geometry - postgis_sfcgal | public | st_constraineddelaunaytriangles | geometry | geometry - postgis_sfcgal | public | st_extrude | geometry, double precision, double precision, double precision | geometry - postgis_sfcgal | public | st_forcelhr | geometry | geometry - postgis_sfcgal | public | st_isplanar | geometry | boolean - postgis_sfcgal | public | st_issolid | geometry | boolean - postgis_sfcgal | public | st_makesolid | geometry | geometry - postgis_sfcgal | public | st_minkowskisum | geometry, geometry | geometry - postgis_sfcgal | public | st_optimalalphashape | g1 geometry, allow_holes boolean, nb_components integer | geometry - postgis_sfcgal | public | st_orientation | geometry | integer - postgis_sfcgal | public | st_straightskeleton | geometry | geometry - postgis_sfcgal | public | st_tesselate | geometry | geometry - postgis_sfcgal | public | st_volume | geometry | double precision - postgis_tiger_geocoder | tiger | count_words | character varying | integer - postgis_tiger_geocoder | tiger | create_census_base_tables | | text - postgis_tiger_geocoder | tiger | cull_null | character varying | character varying - postgis_tiger_geocoder | tiger | diff_zip | zip1 character varying, zip2 character varying | integer - postgis_tiger_geocoder | tiger | drop_dupe_featnames_generate_script | | text - postgis_tiger_geocoder | tiger | drop_indexes_generate_script | tiger_data_schema text | text - postgis_tiger_geocoder | tiger | drop_nation_tables_generate_script | param_schema text | text - postgis_tiger_geocoder | tiger | drop_state_tables_generate_script | param_state text, param_schema text | text - postgis_tiger_geocoder | tiger | end_soundex | character varying | character varying - postgis_tiger_geocoder | tiger | geocode | in_addy tiger.norm_addy, max_results integer, restrict_geom geometry, OUT addy tiger.norm_addy, OUT geomout 
geometry, OUT rating integer | SETOF record - postgis_tiger_geocoder | tiger | geocode | input character varying, max_results integer, restrict_geom geometry, OUT addy tiger.norm_addy, OUT geomout geometry, OUT rating integer | SETOF record - postgis_tiger_geocoder | tiger | geocode_address | parsed tiger.norm_addy, max_results integer, restrict_geom geometry, OUT addy tiger.norm_addy, OUT geomout geometry, OUT rating integer | SETOF record - postgis_tiger_geocoder | tiger | geocode_intersection | roadway1 text, roadway2 text, in_state text, in_city text, in_zip text, num_results integer, OUT addy tiger.norm_addy, OUT geomout geometry, OUT rating integer | SETOF record - postgis_tiger_geocoder | tiger | geocode_location | parsed tiger.norm_addy, restrict_geom geometry, OUT addy tiger.norm_addy, OUT geomout geometry, OUT rating integer | SETOF record - postgis_tiger_geocoder | tiger | get_geocode_setting | setting_name text | text - postgis_tiger_geocoder | tiger | get_last_words | inputstring character varying, count integer | character varying - postgis_tiger_geocoder | tiger | get_tract | loc_geom geometry, output_field text | text - postgis_tiger_geocoder | tiger | greatest_hn | fromhn character varying, tohn character varying | integer - postgis_tiger_geocoder | tiger | includes_address | given_address integer, addr1 integer, addr2 integer, addr3 integer, addr4 integer | boolean - postgis_tiger_geocoder | tiger | install_geocode_settings | | void - postgis_tiger_geocoder | tiger | install_missing_indexes | | boolean - postgis_tiger_geocoder | tiger | install_pagc_tables | | void - postgis_tiger_geocoder | tiger | interpolate_from_address | given_address integer, in_addr1 character varying, in_addr2 character varying, in_road geometry, in_side character varying, in_offset_m double precision | geometry - postgis_tiger_geocoder | tiger | is_pretype | text | boolean - postgis_tiger_geocoder | tiger | least_hn | fromhn character varying, tohn character varying | 
integer - postgis_tiger_geocoder | tiger | levenshtein_ignore_case | character varying, character varying | integer - postgis_tiger_geocoder | tiger | loader_generate_census_script | param_states text[], os text | SETOF text - postgis_tiger_geocoder | tiger | loader_generate_nation_script | os text | SETOF text - postgis_tiger_geocoder | tiger | loader_generate_script | param_states text[], os text | SETOF text - postgis_tiger_geocoder | tiger | loader_load_staged_data | param_staging_table text, param_target_table text | integer - postgis_tiger_geocoder | tiger | loader_load_staged_data | param_staging_table text, param_target_table text, param_columns_exclude text[] | integer - postgis_tiger_geocoder | tiger | loader_macro_replace | param_input text, param_keys text[], param_values text[] | text - postgis_tiger_geocoder | tiger | location_extract | fullstreet character varying, stateabbrev character varying | character varying - postgis_tiger_geocoder | tiger | location_extract_countysub_exact | fullstreet character varying, stateabbrev character varying | character varying - postgis_tiger_geocoder | tiger | location_extract_countysub_fuzzy | fullstreet character varying, stateabbrev character varying | character varying - postgis_tiger_geocoder | tiger | location_extract_place_exact | fullstreet character varying, stateabbrev character varying | character varying - postgis_tiger_geocoder | tiger | location_extract_place_fuzzy | fullstreet character varying, stateabbrev character varying | character varying - postgis_tiger_geocoder | tiger | missing_indexes_generate_script | | text - postgis_tiger_geocoder | tiger | normalize_address | in_rawinput character varying | tiger.norm_addy - postgis_tiger_geocoder | tiger | nullable_levenshtein | character varying, character varying | integer - postgis_tiger_geocoder | tiger | numeric_streets_equal | input_street character varying, output_street character varying | boolean - postgis_tiger_geocoder | tiger | 
pagc_normalize_address | in_rawinput character varying | tiger.norm_addy - postgis_tiger_geocoder | tiger | pprint_addy | input tiger.norm_addy | character varying - postgis_tiger_geocoder | tiger | rate_attributes | dirpa character varying, dirpb character varying, streetnamea character varying, streetnameb character varying, streettypea character varying, streettypeb character varying, dirsa character varying, dirsb character varying, locationa character varying, locationb character varying, prequalabr character varying | integer - postgis_tiger_geocoder | tiger | rate_attributes | dirpa character varying, dirpb character varying, streetnamea character varying, streetnameb character varying, streettypea character varying, streettypeb character varying, dirsa character varying, dirsb character varying, prequalabr character varying | integer - postgis_tiger_geocoder | tiger | reverse_geocode | pt geometry, include_strnum_range boolean, OUT intpt geometry[], OUT addy tiger.norm_addy[], OUT street character varying[] | record - postgis_tiger_geocoder | tiger | set_geocode_setting | setting_name text, setting_value text | text - postgis_tiger_geocoder | tiger | setsearchpathforinstall | a_schema_name text | text - postgis_tiger_geocoder | tiger | state_extract | rawinput character varying | character varying - postgis_tiger_geocoder | tiger | topology_load_tiger | toponame character varying, region_type character varying, region_id character varying | text - postgis_tiger_geocoder | tiger | utmzone | geometry | integer - postgis_tiger_geocoder | tiger | zip_range | zip text, range_start integer, range_end integer | character varying[] - postgis_topology | topology | _asgmledge | edge_id integer, start_node integer, end_node integer, line geometry, visitedtable regclass, nsprefix_in text, prec integer, options integer, idprefix text, gmlver integer | text - postgis_topology | topology | _asgmlface | toponame text, face_id integer, visitedtable regclass, nsprefix_in 
text, prec integer, options integer, idprefix text, gmlver integer | text - postgis_topology | topology | _asgmlnode | id integer, point geometry, nsprefix_in text, prec integer, options integer, idprefix text, gmlver integer | text - postgis_topology | topology | _checkedgelinking | curedge_edge_id integer, prevedge_edge_id integer, prevedge_next_left_edge integer, prevedge_next_right_edge integer | validatetopology_returntype - postgis_topology | topology | _st_adjacentedges | atopology character varying, anode integer, anedge integer | integer[] - postgis_topology | topology | _st_mintolerance | ageom geometry | double precision - postgis_topology | topology | _st_mintolerance | atopology character varying, ageom geometry | double precision - postgis_topology | topology | _validatetopologyedgelinking | bbox geometry | SETOF validatetopology_returntype - postgis_topology | topology | _validatetopologygetfaceshellmaximaledgering | atopology character varying, aface integer | geometry - postgis_topology | topology | _validatetopologygetringedges | starting_edge integer | integer[] - postgis_topology | topology | _validatetopologyrings | bbox geometry | SETOF validatetopology_returntype - postgis_topology | topology | addedge | atopology character varying, aline geometry | integer - postgis_topology | topology | addface | atopology character varying, apoly geometry, force_new boolean | integer - postgis_topology | topology | addnode | atopology character varying, apoint geometry, allowedgesplitting boolean, setcontainingface boolean | integer - postgis_topology | topology | addtopogeometrycolumn | character varying, character varying, character varying, character varying, character varying | integer - postgis_topology | topology | addtopogeometrycolumn | toponame character varying, schema character varying, tbl character varying, col character varying, ltype character varying, child integer | integer - postgis_topology | topology | addtosearchpath | a_schema_name 
character varying | text - postgis_topology | topology | asgml | tg topogeometry | text - postgis_topology | topology | asgml | tg topogeometry, nsprefix text | text - postgis_topology | topology | asgml | tg topogeometry, nsprefix text, prec integer, options integer, vis regclass | text - postgis_topology | topology | asgml | tg topogeometry, nsprefix text, prec integer, options integer, visitedtable regclass, idprefix text | text - postgis_topology | topology | asgml | tg topogeometry, nsprefix text, prec integer, opts integer | text - postgis_topology | topology | asgml | tg topogeometry, nsprefix_in text, precision_in integer, options_in integer, visitedtable regclass, idprefix text, gmlver integer | text - postgis_topology | topology | asgml | tg topogeometry, visitedtable regclass | text - postgis_topology | topology | asgml | tg topogeometry, visitedtable regclass, nsprefix text | text - postgis_topology | topology | astopojson | tg topogeometry, edgemaptable regclass | text - postgis_topology | topology | cleartopogeom | tg topogeometry | topogeometry - postgis_topology | topology | copytopology | atopology character varying, newtopo character varying | integer - postgis_topology | topology | createtopogeom | toponame character varying, tg_type integer, layer_id integer | topogeometry - postgis_topology | topology | createtopogeom | toponame character varying, tg_type integer, layer_id integer, tg_objs topoelementarray | topogeometry - postgis_topology | topology | createtopology | atopology character varying, srid integer, prec double precision, hasz boolean | integer - postgis_topology | topology | createtopology | character varying | integer - postgis_topology | topology | createtopology | character varying, integer | integer - postgis_topology | topology | createtopology | toponame character varying, srid integer, prec double precision | integer - postgis_topology | topology | droptopogeometrycolumn | schema character varying, tbl character varying, col 
character varying | text - postgis_topology | topology | droptopology | atopology character varying | text - postgis_topology | topology | equals | tg1 topogeometry, tg2 topogeometry | boolean - postgis_topology | topology | findlayer | layer_table regclass, feature_column name | layer - postgis_topology | topology | findlayer | schema_name name, table_name name, feature_column name | layer - postgis_topology | topology | findlayer | tg topogeometry | layer - postgis_topology | topology | findlayer | topology_id integer, layer_id integer | layer - postgis_topology | topology | findtopology | integer | topology - postgis_topology | topology | findtopology | name, name, name | topology - postgis_topology | topology | findtopology | regclass, name | topology - postgis_topology | topology | findtopology | text | topology - postgis_topology | topology | findtopology | topogeometry | topology - postgis_topology | topology | geometry | topogeom topogeometry | geometry - postgis_topology | topology | geometrytype | tg topogeometry | text - postgis_topology | topology | getedgebypoint | atopology character varying, apoint geometry, tol1 double precision | integer - postgis_topology | topology | getfacebypoint | atopology character varying, apoint geometry, tol1 double precision | integer - postgis_topology | topology | getfacecontainingpoint | atopology text, apoint geometry | integer - postgis_topology | topology | getnodebypoint | atopology character varying, apoint geometry, tol1 double precision | integer - postgis_topology | topology | getnodeedges | atopology character varying, anode integer | SETOF getfaceedges_returntype - postgis_topology | topology | getringedges | atopology character varying, anedge integer, maxedges integer | SETOF getfaceedges_returntype - postgis_topology | topology | gettopogeomelementarray | tg topogeometry | topoelementarray - postgis_topology | topology | gettopogeomelementarray | toponame character varying, layer_id integer, tgid integer 
| topoelementarray - postgis_topology | topology | gettopogeomelements | tg topogeometry | SETOF topoelement - postgis_topology | topology | gettopogeomelements | toponame character varying, layerid integer, tgid integer | SETOF topoelement - postgis_topology | topology | gettopologyid | toponame character varying | integer - postgis_topology | topology | gettopologyname | topoid integer | character varying - postgis_topology | topology | gettopologysrid | toponame character varying | integer - postgis_topology | topology | intersects | tg1 topogeometry, tg2 topogeometry | boolean - postgis_topology | topology | layertrigger | | trigger - postgis_topology | topology | polygonize | toponame character varying | text - postgis_topology | topology | populate_topology_layer | | TABLE(schema_name text, table_name text, feature_column text) - postgis_topology | topology | postgis_topology_scripts_installed | | text - postgis_topology | topology | relationtrigger | | trigger - postgis_topology | topology | removeunusedprimitives | atopology text, bbox geometry | integer - postgis_topology | topology | st_addedgemodface | atopology character varying, anode integer, anothernode integer, acurve geometry | integer - postgis_topology | topology | st_addedgenewfaces | atopology character varying, anode integer, anothernode integer, acurve geometry | integer - postgis_topology | topology | st_addisoedge | atopology character varying, anode integer, anothernode integer, acurve geometry | integer - postgis_topology | topology | st_addisonode | atopology character varying, aface integer, apoint geometry | integer - postgis_topology | topology | st_changeedgegeom | atopology character varying, anedge integer, acurve geometry | text - postgis_topology | topology | st_createtopogeo | atopology character varying, acollection geometry | text - postgis_topology | topology | st_geometrytype | tg topogeometry | text - postgis_topology | topology | st_getfaceedges | toponame character 
varying, face_id integer | SETOF getfaceedges_returntype - postgis_topology | topology | st_getfacegeometry | toponame character varying, aface integer | geometry - postgis_topology | topology | st_inittopogeo | atopology character varying | text - postgis_topology | topology | st_modedgeheal | toponame character varying, e1id integer, e2id integer | integer - postgis_topology | topology | st_modedgesplit | atopology character varying, anedge integer, apoint geometry | integer - postgis_topology | topology | st_moveisonode | atopology character varying, anode integer, apoint geometry | text - postgis_topology | topology | st_newedgeheal | toponame character varying, e1id integer, e2id integer | integer - postgis_topology | topology | st_newedgessplit | atopology character varying, anedge integer, apoint geometry | integer - postgis_topology | topology | st_remedgemodface | toponame character varying, e1id integer | integer - postgis_topology | topology | st_remedgenewface | toponame character varying, e1id integer | integer - postgis_topology | topology | st_remisonode | character varying, integer | text - postgis_topology | topology | st_removeisoedge | atopology character varying, anedge integer | text - postgis_topology | topology | st_removeisonode | atopology character varying, anode integer | text - postgis_topology | topology | st_simplify | tg topogeometry, tolerance double precision | geometry - postgis_topology | topology | st_srid | tg topogeometry | integer - postgis_topology | topology | topoelementarray_agg | topoelement | topoelementarray - postgis_topology | topology | topoelementarray_append | topoelementarray, topoelement | topoelementarray - postgis_topology | topology | topogeo_addgeometry | atopology character varying, ageom geometry, tolerance double precision | void - postgis_topology | topology | topogeo_addlinestring | atopology character varying, aline geometry, tolerance double precision | SETOF integer - postgis_topology | topology | 
topogeo_addpoint | atopology character varying, apoint geometry, tolerance double precision | integer - postgis_topology | topology | topogeo_addpolygon | atopology character varying, apoly geometry, tolerance double precision | SETOF integer - postgis_topology | topology | topogeom_addelement | tg topogeometry, el topoelement | topogeometry - postgis_topology | topology | topogeom_addtopogeom | tgt topogeometry, src topogeometry | topogeometry - postgis_topology | topology | topogeom_remelement | tg topogeometry, el topoelement | topogeometry - postgis_topology | topology | topologysummary | atopology character varying | text - postgis_topology | topology | totopogeom | ageom geometry, atopology character varying, alayer integer, atolerance double precision | topogeometry - postgis_topology | topology | totopogeom | ageom geometry, tg topogeometry, atolerance double precision | topogeometry - postgis_topology | topology | validatetopology | toponame character varying, bbox geometry | SETOF validatetopology_returntype - postgis_topology | topology | validatetopologyrelation | toponame character varying | TABLE(error text, layer_id integer, topogeo_id integer, element_id integer) - postgres_fdw | public | postgres_fdw_disconnect | text | boolean - postgres_fdw | public | postgres_fdw_disconnect_all | | boolean - postgres_fdw | public | postgres_fdw_get_connections | OUT server_name text, OUT valid boolean | SETOF record - postgres_fdw | public | postgres_fdw_handler | | fdw_handler - postgres_fdw | public | postgres_fdw_validator | text[], oid | void - refint | public | check_foreign_key | | trigger - refint | public | check_primary_key | | trigger - rum | public | rum_anyarray_config | internal | void - rum | public | rum_anyarray_consistent | internal, smallint, anyarray, integer, internal, internal, internal, internal | boolean - rum | public | rum_anyarray_distance | anyarray, anyarray | double precision - rum | public | rum_anyarray_ordering | internal, 
smallint, anyarray, integer, internal, internal, internal, internal, internal | double precision - rum | public | rum_anyarray_similar | anyarray, anyarray | boolean - rum | public | rum_bit_compare_prefix | bit, bit, smallint, internal | integer - rum | public | rum_bit_extract_query | bit, internal, smallint, internal, internal | internal - rum | public | rum_bit_extract_value | bit, internal | internal - rum | public | rum_btree_consistent | internal, smallint, internal, integer, internal, internal, internal, internal | boolean - rum | public | rum_bytea_compare_prefix | bytea, bytea, smallint, internal | integer - rum | public | rum_bytea_extract_query | bytea, internal, smallint, internal, internal | internal - rum | public | rum_bytea_extract_value | bytea, internal | internal - rum | public | rum_char_compare_prefix | "char", "char", smallint, internal | integer - rum | public | rum_char_extract_query | "char", internal, smallint, internal, internal | internal - rum | public | rum_char_extract_value | "char", internal | internal - rum | public | rum_cidr_compare_prefix | cidr, cidr, smallint, internal | integer - rum | public | rum_cidr_extract_query | cidr, internal, smallint, internal, internal | internal - rum | public | rum_cidr_extract_value | cidr, internal | internal - rum | public | rum_date_compare_prefix | date, date, smallint, internal | integer - rum | public | rum_date_extract_query | date, internal, smallint, internal, internal | internal - rum | public | rum_date_extract_value | date, internal | internal - rum | public | rum_extract_anyarray | anyarray, internal, internal, internal, internal | internal - rum | public | rum_extract_anyarray_query | anyarray, internal, smallint, internal, internal, internal, internal | internal - rum | public | rum_extract_tsquery | tsquery, internal, smallint, internal, internal, internal, internal | internal - rum | public | rum_extract_tsquery_hash | tsquery, internal, smallint, internal, internal, internal, 
internal | internal - rum | public | rum_extract_tsvector | tsvector, internal, internal, internal, internal | internal - rum | public | rum_extract_tsvector_hash | tsvector, internal, internal, internal, internal | internal - rum | public | rum_float4_compare_prefix | real, real, smallint, internal | integer - rum | public | rum_float4_config | internal | void - rum | public | rum_float4_distance | real, real | double precision - rum | public | rum_float4_extract_query | real, internal, smallint, internal, internal | internal - rum | public | rum_float4_extract_value | real, internal | internal - rum | public | rum_float4_key_distance | real, real, smallint | double precision - rum | public | rum_float4_left_distance | real, real | double precision - rum | public | rum_float4_outer_distance | real, real, smallint | double precision - rum | public | rum_float4_right_distance | real, real | double precision - rum | public | rum_float8_compare_prefix | double precision, double precision, smallint, internal | integer - rum | public | rum_float8_config | internal | void - rum | public | rum_float8_distance | double precision, double precision | double precision - rum | public | rum_float8_extract_query | double precision, internal, smallint, internal, internal | internal - rum | public | rum_float8_extract_value | double precision, internal | internal - rum | public | rum_float8_key_distance | double precision, double precision, smallint | double precision - rum | public | rum_float8_left_distance | double precision, double precision | double precision - rum | public | rum_float8_outer_distance | double precision, double precision, smallint | double precision - rum | public | rum_float8_right_distance | double precision, double precision | double precision - rum | public | rum_inet_compare_prefix | inet, inet, smallint, internal | integer - rum | public | rum_inet_extract_query | inet, internal, smallint, internal, internal | internal - rum | public | 
rum_inet_extract_value | inet, internal | internal - rum | public | rum_int2_compare_prefix | smallint, smallint, smallint, internal | integer - rum | public | rum_int2_config | internal | void - rum | public | rum_int2_distance | smallint, smallint | double precision - rum | public | rum_int2_extract_query | smallint, internal, smallint, internal, internal | internal - rum | public | rum_int2_extract_value | smallint, internal | internal - rum | public | rum_int2_key_distance | smallint, smallint, smallint | double precision - rum | public | rum_int2_left_distance | smallint, smallint | double precision - rum | public | rum_int2_outer_distance | smallint, smallint, smallint | double precision - rum | public | rum_int2_right_distance | smallint, smallint | double precision - rum | public | rum_int4_compare_prefix | integer, integer, smallint, internal | integer - rum | public | rum_int4_config | internal | void - rum | public | rum_int4_distance | integer, integer | double precision - rum | public | rum_int4_extract_query | integer, internal, smallint, internal, internal | internal - rum | public | rum_int4_extract_value | integer, internal | internal - rum | public | rum_int4_key_distance | integer, integer, smallint | double precision - rum | public | rum_int4_left_distance | integer, integer | double precision - rum | public | rum_int4_outer_distance | integer, integer, smallint | double precision - rum | public | rum_int4_right_distance | integer, integer | double precision - rum | public | rum_int8_compare_prefix | bigint, bigint, smallint, internal | integer - rum | public | rum_int8_config | internal | void - rum | public | rum_int8_distance | bigint, bigint | double precision - rum | public | rum_int8_extract_query | bigint, internal, smallint, internal, internal | internal - rum | public | rum_int8_extract_value | bigint, internal | internal - rum | public | rum_int8_key_distance | bigint, bigint, smallint | double precision - rum | public | 
rum_int8_left_distance | bigint, bigint | double precision - rum | public | rum_int8_outer_distance | bigint, bigint, smallint | double precision - rum | public | rum_int8_right_distance | bigint, bigint | double precision - rum | public | rum_interval_compare_prefix | interval, interval, smallint, internal | integer - rum | public | rum_interval_extract_query | interval, internal, smallint, internal, internal | internal - rum | public | rum_interval_extract_value | interval, internal | internal - rum | public | rum_macaddr_compare_prefix | macaddr, macaddr, smallint, internal | integer - rum | public | rum_macaddr_extract_query | macaddr, internal, smallint, internal, internal | internal - rum | public | rum_macaddr_extract_value | macaddr, internal | internal - rum | public | rum_money_compare_prefix | money, money, smallint, internal | integer - rum | public | rum_money_config | internal | void - rum | public | rum_money_distance | money, money | double precision - rum | public | rum_money_extract_query | money, internal, smallint, internal, internal | internal - rum | public | rum_money_extract_value | money, internal | internal - rum | public | rum_money_key_distance | money, money, smallint | double precision - rum | public | rum_money_left_distance | money, money | double precision - rum | public | rum_money_outer_distance | money, money, smallint | double precision - rum | public | rum_money_right_distance | money, money | double precision - rum | public | rum_numeric_cmp | numeric, numeric | integer - rum | public | rum_numeric_compare_prefix | numeric, numeric, smallint, internal | integer - rum | public | rum_numeric_extract_query | numeric, internal, smallint, internal, internal | internal - rum | public | rum_numeric_extract_value | numeric, internal | internal - rum | public | rum_oid_compare_prefix | oid, oid, smallint, internal | integer - rum | public | rum_oid_config | internal | void - rum | public | rum_oid_distance | oid, oid | double precision 
- rum | public | rum_oid_extract_query | oid, internal, smallint, internal, internal | internal - rum | public | rum_oid_extract_value | oid, internal | internal - rum | public | rum_oid_key_distance | oid, oid, smallint | double precision - rum | public | rum_oid_left_distance | oid, oid | double precision - rum | public | rum_oid_outer_distance | oid, oid, smallint | double precision - rum | public | rum_oid_right_distance | oid, oid | double precision - rum | public | rum_text_compare_prefix | text, text, smallint, internal | integer - rum | public | rum_text_extract_query | text, internal, smallint, internal, internal | internal - rum | public | rum_text_extract_value | text, internal | internal - rum | public | rum_time_compare_prefix | time without time zone, time without time zone, smallint, internal | integer - rum | public | rum_time_extract_query | time without time zone, internal, smallint, internal, internal | internal - rum | public | rum_time_extract_value | time without time zone, internal | internal - rum | public | rum_timestamp_compare_prefix | timestamp without time zone, timestamp without time zone, smallint, internal | integer - rum | public | rum_timestamp_config | internal | void - rum | public | rum_timestamp_consistent | internal, smallint, timestamp without time zone, integer, internal, internal, internal, internal | boolean - rum | public | rum_timestamp_distance | timestamp without time zone, timestamp without time zone | double precision - rum | public | rum_timestamp_extract_query | timestamp without time zone, internal, smallint, internal, internal, internal, internal | internal - rum | public | rum_timestamp_extract_value | timestamp without time zone, internal, internal, internal, internal | internal - rum | public | rum_timestamp_key_distance | timestamp without time zone, timestamp without time zone, smallint | double precision - rum | public | rum_timestamp_left_distance | timestamp without time zone, timestamp without time zone 
| double precision - rum | public | rum_timestamp_outer_distance | timestamp without time zone, timestamp without time zone, smallint | double precision - rum | public | rum_timestamp_right_distance | timestamp without time zone, timestamp without time zone | double precision - rum | public | rum_timestamptz_distance | timestamp with time zone, timestamp with time zone | double precision - rum | public | rum_timestamptz_key_distance | timestamp with time zone, timestamp with time zone, smallint | double precision - rum | public | rum_timestamptz_left_distance | timestamp with time zone, timestamp with time zone | double precision - rum | public | rum_timestamptz_right_distance | timestamp with time zone, timestamp with time zone | double precision - rum | public | rum_timetz_compare_prefix | time with time zone, time with time zone, smallint, internal | integer - rum | public | rum_timetz_extract_query | time with time zone, internal, smallint, internal, internal | internal - rum | public | rum_timetz_extract_value | time with time zone, internal | internal - rum | public | rum_ts_distance | tsvector, rum_distance_query | real - rum | public | rum_ts_distance | tsvector, tsquery | real - rum | public | rum_ts_distance | tsvector, tsquery, integer | real - rum | public | rum_ts_join_pos | internal, internal | bytea - rum | public | rum_ts_score | tsvector, rum_distance_query | real - rum | public | rum_ts_score | tsvector, tsquery | real - rum | public | rum_ts_score | tsvector, tsquery, integer | real - rum | public | rum_tsquery_addon_consistent | internal, smallint, tsvector, integer, internal, internal, internal, internal | boolean - rum | public | rum_tsquery_consistent | internal, smallint, tsvector, integer, internal, internal, internal, internal | boolean - rum | public | rum_tsquery_distance | internal, smallint, tsvector, integer, internal, internal, internal, internal, internal | double precision - rum | public | rum_tsquery_pre_consistent | internal, 
smallint, tsvector, integer, internal, internal, internal, internal | boolean - rum | public | rum_tsvector_config | internal | void - rum | public | rum_varbit_compare_prefix | bit varying, bit varying, smallint, internal | integer - rum | public | rum_varbit_extract_query | bit varying, internal, smallint, internal, internal | internal - rum | public | rum_varbit_extract_value | bit varying, internal | internal - rum | public | rumhandler | internal | index_am_handler - rum | public | ruminv_extract_tsquery | tsquery, internal, internal, internal, internal | internal - rum | public | ruminv_extract_tsvector | tsvector, internal, smallint, internal, internal, internal, internal | internal - rum | public | ruminv_tsquery_config | internal | void - rum | public | ruminv_tsvector_consistent | internal, smallint, tsvector, integer, internal, internal, internal, internal | boolean - rum | public | tsquery_to_distance_query | tsquery | rum_distance_query - seg | public | gseg_consistent | internal, seg, smallint, oid, internal | boolean - seg | public | gseg_penalty | internal, internal, internal | internal - seg | public | gseg_picksplit | internal, internal | internal - seg | public | gseg_same | seg, seg, internal | internal - seg | public | gseg_union | internal, internal | seg - seg | public | seg_center | seg | real - seg | public | seg_cmp | seg, seg | integer - seg | public | seg_contained | seg, seg | boolean - seg | public | seg_contains | seg, seg | boolean - seg | public | seg_different | seg, seg | boolean - seg | public | seg_ge | seg, seg | boolean - seg | public | seg_gt | seg, seg | boolean - seg | public | seg_in | cstring | seg - seg | public | seg_inter | seg, seg | seg - seg | public | seg_le | seg, seg | boolean - seg | public | seg_left | seg, seg | boolean - seg | public | seg_lower | seg | real - seg | public | seg_lt | seg, seg | boolean - seg | public | seg_out | seg | cstring - seg | public | seg_over_left | seg, seg | boolean - seg | public 
| seg_over_right | seg, seg | boolean - seg | public | seg_overlap | seg, seg | boolean - seg | public | seg_right | seg, seg | boolean - seg | public | seg_same | seg, seg | boolean - seg | public | seg_size | seg | real - seg | public | seg_union | seg, seg | seg - seg | public | seg_upper | seg | real - sslinfo | public | ssl_cipher | | text - sslinfo | public | ssl_client_cert_present | | boolean - sslinfo | public | ssl_client_dn | | text - sslinfo | public | ssl_client_dn_field | text | text - sslinfo | public | ssl_client_serial | | numeric - sslinfo | public | ssl_extension_info | OUT name text, OUT value text, OUT critical boolean | SETOF record - sslinfo | public | ssl_is_used | | boolean - sslinfo | public | ssl_issuer_dn | | text - sslinfo | public | ssl_issuer_field | text | text - sslinfo | public | ssl_version | | text - tealbase_vault | vault | create_secret | new_secret text, new_name text, new_description text, new_key_id uuid | uuid - tealbase_vault | vault | update_secret | secret_id uuid, new_secret text, new_name text, new_description text, new_key_id uuid | void - tablefunc | public | connectby | text, text, text, text, integer | SETOF record - tablefunc | public | connectby | text, text, text, text, integer, text | SETOF record - tablefunc | public | connectby | text, text, text, text, text, integer | SETOF record - tablefunc | public | connectby | text, text, text, text, text, integer, text | SETOF record - tablefunc | public | crosstab | text | SETOF record - tablefunc | public | crosstab | text, integer | SETOF record - tablefunc | public | crosstab | text, text | SETOF record - tablefunc | public | crosstab2 | text | SETOF tablefunc_crosstab_2 - tablefunc | public | crosstab3 | text | SETOF tablefunc_crosstab_3 - tablefunc | public | crosstab4 | text | SETOF tablefunc_crosstab_4 - tablefunc | public | normal_rand | integer, double precision, double precision | SETOF double precision - tcn | public | triggered_change_notification | | 
trigger - timescaledb | _timescaledb_debug | extension_state | | text - timescaledb | _timescaledb_functions | alter_job_set_hypertable_id | job_id integer, hypertable regclass | integer - timescaledb | _timescaledb_functions | attach_osm_table_chunk | hypertable regclass, chunk regclass | boolean - timescaledb | _timescaledb_functions | bookend_deserializefunc | bytea, internal | internal - timescaledb | _timescaledb_functions | bookend_finalfunc | internal, anyelement, "any" | anyelement - timescaledb | _timescaledb_functions | bookend_serializefunc | internal | bytea - timescaledb | _timescaledb_functions | cagg_get_bucket_function_info | mat_hypertable_id integer, OUT bucket_func regprocedure, OUT bucket_width text, OUT bucket_origin text, OUT bucket_offset text, OUT bucket_timezone text, OUT bucket_fixed_width boolean | record - timescaledb | _timescaledb_functions | cagg_migrate_create_plan | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _cagg_name_new text, IN _override boolean, IN _drop_old boolean | - timescaledb | _timescaledb_functions | cagg_migrate_execute_copy_data | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | - timescaledb | _timescaledb_functions | cagg_migrate_execute_copy_policies | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | - timescaledb | _timescaledb_functions | cagg_migrate_execute_create_new_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | - timescaledb | _timescaledb_functions | cagg_migrate_execute_disable_policies | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | - timescaledb | _timescaledb_functions | cagg_migrate_execute_drop_old_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step 
_timescaledb_catalog.continuous_agg_migrate_plan_step | - timescaledb | _timescaledb_functions | cagg_migrate_execute_enable_policies | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | - timescaledb | _timescaledb_functions | cagg_migrate_execute_override_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | - timescaledb | _timescaledb_functions | cagg_migrate_execute_plan | IN _cagg_data _timescaledb_catalog.continuous_agg | - timescaledb | _timescaledb_functions | cagg_migrate_execute_refresh_new_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | - timescaledb | _timescaledb_functions | cagg_migrate_plan_exists | _hypertable_id integer | boolean - timescaledb | _timescaledb_functions | cagg_migrate_pre_validation | _cagg_schema text, _cagg_name text, _cagg_name_new text | _timescaledb_catalog.continuous_agg - timescaledb | _timescaledb_functions | cagg_migrate_to_time_bucket | IN cagg regclass | - timescaledb | _timescaledb_functions | cagg_validate_query | query text, OUT is_valid boolean, OUT error_level text, OUT error_code text, OUT error_message text, OUT error_detail text, OUT error_hint text | record - timescaledb | _timescaledb_functions | cagg_watermark | hypertable_id integer | bigint - timescaledb | _timescaledb_functions | cagg_watermark_materialized | hypertable_id integer | bigint - timescaledb | _timescaledb_functions | calculate_chunk_interval | dimension_id integer, dimension_coord bigint, chunk_target_size bigint | bigint - timescaledb | _timescaledb_functions | chunk_constraint_add_table_constraint | chunk_constraint_row _timescaledb_catalog.chunk_constraint | void - timescaledb | _timescaledb_functions | chunk_id_from_relid | relid oid | integer - timescaledb | _timescaledb_functions | chunk_index_clone | chunk_index_oid 
oid | oid - timescaledb | _timescaledb_functions | chunk_index_replace | chunk_index_oid_old oid, chunk_index_oid_new oid | void - timescaledb | _timescaledb_functions | chunk_status | regclass | integer - timescaledb | _timescaledb_functions | chunks_local_size | schema_name_in name, table_name_in name | TABLE(chunk_id integer, chunk_schema name, chunk_name name, table_bytes bigint, index_bytes bigint, toast_bytes bigint, total_bytes bigint) - timescaledb | _timescaledb_functions | compressed_chunk_local_stats | schema_name_in name, table_name_in name | TABLE(chunk_schema name, chunk_name name, compression_status text, before_compression_table_bytes bigint, before_compression_index_bytes bigint, before_compression_toast_bytes bigint, before_compression_total_bytes bigint, after_compression_table_bytes bigint, after_compression_index_bytes bigint, after_compression_toast_bytes bigint, after_compression_total_bytes bigint) - timescaledb | _timescaledb_functions | compressed_data_in | cstring | _timescaledb_internal.compressed_data - timescaledb | _timescaledb_functions | compressed_data_out | _timescaledb_internal.compressed_data | cstring - timescaledb | _timescaledb_functions | compressed_data_recv | internal | _timescaledb_internal.compressed_data - timescaledb | _timescaledb_functions | compressed_data_send | _timescaledb_internal.compressed_data | bytea - timescaledb | _timescaledb_functions | constraint_clone | constraint_oid oid, target_oid regclass | void - timescaledb | _timescaledb_functions | continuous_agg_invalidation_trigger | | trigger - timescaledb | _timescaledb_functions | create_chunk | hypertable regclass, slices jsonb, schema_name name, table_name name, chunk_table regclass | TABLE(chunk_id integer, hypertable_id integer, schema_name name, table_name name, relkind "char", slices jsonb, created boolean) - timescaledb | _timescaledb_functions | create_chunk_table | hypertable regclass, slices jsonb, schema_name name, table_name name | boolean - 
timescaledb | _timescaledb_functions | create_compressed_chunk | chunk regclass, chunk_table regclass, uncompressed_heap_size bigint, uncompressed_toast_size bigint, uncompressed_index_size bigint, compressed_heap_size bigint, compressed_toast_size bigint, compressed_index_size bigint, numrows_pre_compression bigint, numrows_post_compression bigint | regclass - timescaledb | _timescaledb_functions | dimension_info_in | cstring | _timescaledb_internal.dimension_info - timescaledb | _timescaledb_functions | dimension_info_out | _timescaledb_internal.dimension_info | cstring - timescaledb | _timescaledb_functions | drop_chunk | chunk regclass | boolean - timescaledb | _timescaledb_functions | finalize_agg | agg_name text, inner_agg_collation_schema name, inner_agg_collation_name name, inner_agg_input_types name[], inner_agg_serialized_state bytea, return_type_dummy_val anyelement | anyelement - timescaledb | _timescaledb_functions | finalize_agg_ffunc | tstate internal, aggfn text, inner_agg_collation_schema name, inner_agg_collation_name name, inner_agg_input_types name[], inner_agg_serialized_state bytea, return_type_dummy_val anyelement | anyelement - timescaledb | _timescaledb_functions | finalize_agg_sfunc | tstate internal, aggfn text, inner_agg_collation_schema name, inner_agg_collation_name name, inner_agg_input_types name[], inner_agg_serialized_state bytea, return_type_dummy_val anyelement | internal - timescaledb | _timescaledb_functions | first_combinefunc | internal, internal | internal - timescaledb | _timescaledb_functions | first_sfunc | internal, anyelement, "any" | internal - timescaledb | _timescaledb_functions | freeze_chunk | chunk regclass | boolean - timescaledb | _timescaledb_functions | generate_uuid | | uuid - timescaledb | _timescaledb_functions | get_approx_row_count | relation regclass | bigint - timescaledb | _timescaledb_functions | get_compressed_chunk_index_for_recompression | uncompressed_chunk regclass | regclass - timescaledb | 
_timescaledb_functions | get_create_command | table_name name | text - timescaledb | _timescaledb_functions | get_git_commit | | TABLE(commit_tag text, commit_hash text, commit_time timestamp with time zone) - timescaledb | _timescaledb_functions | get_orderby_defaults | relation regclass, segment_by_cols text[] | jsonb - timescaledb | _timescaledb_functions | get_os_info | | TABLE(sysname text, version text, release text, version_pretty text) - timescaledb | _timescaledb_functions | get_partition_for_key | val anyelement | integer - timescaledb | _timescaledb_functions | get_partition_hash | val anyelement | integer - timescaledb | _timescaledb_functions | get_segmentby_defaults | relation regclass | jsonb - timescaledb | _timescaledb_functions | hist_combinefunc | state1 internal, state2 internal | internal - timescaledb | _timescaledb_functions | hist_deserializefunc | bytea, internal | internal - timescaledb | _timescaledb_functions | hist_finalfunc | state internal, val double precision, min double precision, max double precision, nbuckets integer | integer[] - timescaledb | _timescaledb_functions | hist_serializefunc | internal | bytea - timescaledb | _timescaledb_functions | hist_sfunc | state internal, val double precision, min double precision, max double precision, nbuckets integer | internal - timescaledb | _timescaledb_functions | hypertable_local_size | schema_name_in name, table_name_in name | TABLE(table_bytes bigint, index_bytes bigint, toast_bytes bigint, total_bytes bigint) - timescaledb | _timescaledb_functions | hypertable_osm_range_update | hypertable regclass, range_start anyelement, range_end anyelement, empty boolean | boolean - timescaledb | _timescaledb_functions | indexes_local_size | schema_name_in name, index_name_in name | TABLE(hypertable_id integer, total_bytes bigint) - timescaledb | _timescaledb_functions | insert_blocker | | trigger - timescaledb | _timescaledb_functions | interval_to_usec | chunk_interval interval | bigint - 
timescaledb | _timescaledb_functions | last_combinefunc | internal, internal | internal - timescaledb | _timescaledb_functions | last_sfunc | internal, anyelement, "any" | internal - timescaledb | _timescaledb_functions | makeaclitem | regrole, regrole, text, boolean | aclitem - timescaledb | _timescaledb_functions | metadata_insert_trigger | | trigger - timescaledb | _timescaledb_functions | partialize_agg | arg anyelement | bytea - timescaledb | _timescaledb_functions | policy_compression | IN job_id integer, IN config jsonb | - timescaledb | _timescaledb_functions | policy_compression_check | config jsonb | void - timescaledb | _timescaledb_functions | policy_compression_execute | IN job_id integer, IN htid integer, IN lag anyelement, IN maxchunks integer, IN verbose_log boolean, IN recompress_enabled boolean, IN use_creation_time boolean | - timescaledb | _timescaledb_functions | policy_job_stat_history_retention | job_id integer, config jsonb | integer - timescaledb | _timescaledb_functions | policy_job_stat_history_retention_check | config jsonb | void - timescaledb | _timescaledb_functions | policy_recompression | IN job_id integer, IN config jsonb | - timescaledb | _timescaledb_functions | policy_refresh_continuous_aggregate | IN job_id integer, IN config jsonb | - timescaledb | _timescaledb_functions | policy_refresh_continuous_aggregate_check | config jsonb | void - timescaledb | _timescaledb_functions | policy_reorder | IN job_id integer, IN config jsonb | - timescaledb | _timescaledb_functions | policy_reorder_check | config jsonb | void - timescaledb | _timescaledb_functions | policy_retention | IN job_id integer, IN config jsonb | - timescaledb | _timescaledb_functions | policy_retention_check | config jsonb | void - timescaledb | _timescaledb_functions | process_ddl_event | | event_trigger - timescaledb | _timescaledb_functions | range_value_to_pretty | time_value bigint, column_type regtype | text - timescaledb | _timescaledb_functions | 
recompress_chunk_segmentwise | uncompressed_chunk regclass, if_compressed boolean | regclass - timescaledb | _timescaledb_functions | relation_approximate_size | relation regclass | TABLE(total_size bigint, heap_size bigint, index_size bigint, toast_size bigint) - timescaledb | _timescaledb_functions | relation_size | relation regclass | TABLE(total_size bigint, heap_size bigint, index_size bigint, toast_size bigint) - timescaledb | _timescaledb_functions | remove_dropped_chunk_metadata | _hypertable_id integer | integer - timescaledb | _timescaledb_functions | repair_relation_acls | | - timescaledb | _timescaledb_functions | restart_background_workers | | boolean - timescaledb | _timescaledb_functions | show_chunk | chunk regclass | TABLE(chunk_id integer, hypertable_id integer, schema_name name, table_name name, relkind "char", slices jsonb) - timescaledb | _timescaledb_functions | start_background_workers | | boolean - timescaledb | _timescaledb_functions | stop_background_workers | | boolean - timescaledb | _timescaledb_functions | subtract_integer_from_now | hypertable_relid regclass, lag bigint | bigint - timescaledb | _timescaledb_functions | time_to_internal | time_val anyelement | bigint - timescaledb | _timescaledb_functions | to_date | unixtime_us bigint | date - timescaledb | _timescaledb_functions | to_interval | unixtime_us bigint | interval - timescaledb | _timescaledb_functions | to_timestamp | unixtime_us bigint | timestamp with time zone - timescaledb | _timescaledb_functions | to_timestamp_without_timezone | unixtime_us bigint | timestamp without time zone - timescaledb | _timescaledb_functions | to_unix_microseconds | ts timestamp with time zone | bigint - timescaledb | _timescaledb_functions | tsl_loaded | | boolean - timescaledb | _timescaledb_functions | unfreeze_chunk | chunk regclass | boolean - timescaledb | _timescaledb_internal | alter_job_set_hypertable_id | job_id integer, hypertable regclass | integer - timescaledb | 
_timescaledb_internal | attach_osm_table_chunk | hypertable regclass, chunk regclass | boolean - timescaledb | _timescaledb_internal | cagg_migrate_create_plan | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _cagg_name_new text, IN _override boolean, IN _drop_old boolean | - timescaledb | _timescaledb_internal | cagg_migrate_execute_copy_data | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | - timescaledb | _timescaledb_internal | cagg_migrate_execute_copy_policies | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | - timescaledb | _timescaledb_internal | cagg_migrate_execute_create_new_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | - timescaledb | _timescaledb_internal | cagg_migrate_execute_disable_policies | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | - timescaledb | _timescaledb_internal | cagg_migrate_execute_drop_old_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | - timescaledb | _timescaledb_internal | cagg_migrate_execute_enable_policies | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | - timescaledb | _timescaledb_internal | cagg_migrate_execute_override_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | - timescaledb | _timescaledb_internal | cagg_migrate_execute_plan | IN _cagg_data _timescaledb_catalog.continuous_agg | - timescaledb | _timescaledb_internal | cagg_migrate_execute_refresh_new_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step | - 
timescaledb | _timescaledb_internal | cagg_migrate_plan_exists | _hypertable_id integer | boolean - timescaledb | _timescaledb_internal | cagg_migrate_pre_validation | _cagg_schema text, _cagg_name text, _cagg_name_new text | _timescaledb_catalog.continuous_agg - timescaledb | _timescaledb_internal | cagg_watermark | hypertable_id integer | bigint - timescaledb | _timescaledb_internal | cagg_watermark_materialized | hypertable_id integer | bigint - timescaledb | _timescaledb_internal | calculate_chunk_interval | dimension_id integer, dimension_coord bigint, chunk_target_size bigint | bigint - timescaledb | _timescaledb_internal | chunk_constraint_add_table_constraint | chunk_constraint_row _timescaledb_catalog.chunk_constraint | void - timescaledb | _timescaledb_internal | chunk_id_from_relid | relid oid | integer - timescaledb | _timescaledb_internal | chunk_index_clone | chunk_index_oid oid | oid - timescaledb | _timescaledb_internal | chunk_index_replace | chunk_index_oid_old oid, chunk_index_oid_new oid | void - timescaledb | _timescaledb_internal | chunk_status | regclass | integer - timescaledb | _timescaledb_internal | chunks_local_size | schema_name_in name, table_name_in name | TABLE(chunk_id integer, chunk_schema name, chunk_name name, table_bytes bigint, index_bytes bigint, toast_bytes bigint, total_bytes bigint) - timescaledb | _timescaledb_internal | compressed_chunk_local_stats | schema_name_in name, table_name_in name | TABLE(chunk_schema name, chunk_name name, compression_status text, before_compression_table_bytes bigint, before_compression_index_bytes bigint, before_compression_toast_bytes bigint, before_compression_total_bytes bigint, after_compression_table_bytes bigint, after_compression_index_bytes bigint, after_compression_toast_bytes bigint, after_compression_total_bytes bigint) - timescaledb | _timescaledb_internal | compressed_chunk_remote_stats | schema_name_in name, table_name_in name | TABLE(chunk_schema name, chunk_name name, 
compression_status text, before_compression_table_bytes bigint, before_compression_index_bytes bigint, before_compression_toast_bytes bigint, before_compression_total_bytes bigint, after_compression_table_bytes bigint, after_compression_index_bytes bigint, after_compression_toast_bytes bigint, after_compression_total_bytes bigint, node_name name) - timescaledb | _timescaledb_internal | continuous_agg_invalidation_trigger | | trigger - timescaledb | _timescaledb_internal | create_chunk | hypertable regclass, _slices jsonb, _schema_name name, _table_name name, chunk_table regclass | TABLE(chunk_id integer, hypertable_id integer, schema_name name, table_name name, relkind "char", slices jsonb, created boolean) - timescaledb | _timescaledb_internal | create_chunk_table | hypertable regclass, slices jsonb, schema_name name, table_name name | boolean - timescaledb | _timescaledb_internal | create_compressed_chunk | chunk regclass, chunk_table regclass, uncompressed_heap_size bigint, uncompressed_toast_size bigint, uncompressed_index_size bigint, compressed_heap_size bigint, compressed_toast_size bigint, compressed_index_size bigint, numrows_pre_compression bigint, numrows_post_compression bigint | regclass - timescaledb | _timescaledb_internal | drop_chunk | chunk regclass | boolean - timescaledb | _timescaledb_internal | finalize_agg | agg_name text, inner_agg_collation_schema name, inner_agg_collation_name name, inner_agg_input_types name[], inner_agg_serialized_state bytea, return_type_dummy_val anyelement | anyelement - timescaledb | _timescaledb_internal | finalize_agg_ffunc | tstate internal, aggfn text, inner_agg_collation_schema name, inner_agg_collation_name name, inner_agg_input_types name[], inner_agg_serialized_state bytea, return_type_dummy_val anyelement | anyelement - timescaledb | _timescaledb_internal | finalize_agg_sfunc | tstate internal, aggfn text, inner_agg_collation_schema name, inner_agg_collation_name name, inner_agg_input_types name[], 
inner_agg_serialized_state bytea, return_type_dummy_val anyelement | internal - timescaledb | _timescaledb_internal | freeze_chunk | chunk regclass | boolean - timescaledb | _timescaledb_internal | generate_uuid | | uuid - timescaledb | _timescaledb_internal | get_approx_row_count | relation regclass | bigint - timescaledb | _timescaledb_internal | get_compressed_chunk_index_for_recompression | uncompressed_chunk regclass | regclass - timescaledb | _timescaledb_internal | get_create_command | table_name name | text - timescaledb | _timescaledb_internal | get_git_commit | | TABLE(commit_tag text, commit_hash text, commit_time timestamp with time zone) - timescaledb | _timescaledb_internal | get_os_info | | TABLE(sysname text, version text, release text, version_pretty text) - timescaledb | _timescaledb_internal | get_partition_for_key | val anyelement | integer - timescaledb | _timescaledb_internal | get_partition_hash | val anyelement | integer - timescaledb | _timescaledb_internal | hypertable_local_size | schema_name_in name, table_name_in name | TABLE(table_bytes bigint, index_bytes bigint, toast_bytes bigint, total_bytes bigint) - timescaledb | _timescaledb_internal | indexes_local_size | schema_name_in name, table_name_in name | TABLE(hypertable_id integer, total_bytes bigint) - timescaledb | _timescaledb_internal | insert_blocker | | trigger - timescaledb | _timescaledb_internal | interval_to_usec | chunk_interval interval | bigint - timescaledb | _timescaledb_internal | partialize_agg | arg anyelement | bytea - timescaledb | _timescaledb_internal | policy_compression | IN job_id integer, IN config jsonb | - timescaledb | _timescaledb_internal | policy_compression_check | config jsonb | void - timescaledb | _timescaledb_internal | policy_compression_execute | IN job_id integer, IN htid integer, IN lag anyelement, IN maxchunks integer, IN verbose_log boolean, IN recompress_enabled boolean, IN use_creation_time boolean | - timescaledb | _timescaledb_internal | 
policy_job_stat_history_retention | job_id integer, config jsonb | integer - timescaledb | _timescaledb_internal | policy_job_stat_history_retention_check | config jsonb | void - timescaledb | _timescaledb_internal | policy_recompression | IN job_id integer, IN config jsonb | - timescaledb | _timescaledb_internal | policy_refresh_continuous_aggregate | IN job_id integer, IN config jsonb | - timescaledb | _timescaledb_internal | policy_refresh_continuous_aggregate_check | config jsonb | void - timescaledb | _timescaledb_internal | policy_reorder | IN job_id integer, IN config jsonb | - timescaledb | _timescaledb_internal | policy_reorder_check | config jsonb | void - timescaledb | _timescaledb_internal | policy_retention | IN job_id integer, IN config jsonb | - timescaledb | _timescaledb_internal | policy_retention_check | config jsonb | void - timescaledb | _timescaledb_internal | process_ddl_event | | event_trigger - timescaledb | _timescaledb_internal | range_value_to_pretty | time_value bigint, column_type regtype | text - timescaledb | _timescaledb_internal | recompress_chunk_segmentwise | uncompressed_chunk regclass, if_compressed boolean | regclass - timescaledb | _timescaledb_internal | relation_size | relation regclass | TABLE(total_size bigint, heap_size bigint, index_size bigint, toast_size bigint) - timescaledb | _timescaledb_internal | restart_background_workers | | boolean - timescaledb | _timescaledb_internal | show_chunk | chunk regclass | TABLE(chunk_id integer, hypertable_id integer, schema_name name, table_name name, relkind "char", slices jsonb) - timescaledb | _timescaledb_internal | start_background_workers | | boolean - timescaledb | _timescaledb_internal | stop_background_workers | | boolean - timescaledb | _timescaledb_internal | subtract_integer_from_now | hypertable_relid regclass, lag bigint | bigint - timescaledb | _timescaledb_internal | time_to_internal | time_val anyelement | bigint - timescaledb | _timescaledb_internal | to_date | 
unixtime_us bigint | date - timescaledb | _timescaledb_internal | to_interval | unixtime_us bigint | interval - timescaledb | _timescaledb_internal | to_timestamp | unixtime_us bigint | timestamp with time zone - timescaledb | _timescaledb_internal | to_timestamp_without_timezone | unixtime_us bigint | timestamp without time zone - timescaledb | _timescaledb_internal | to_unix_microseconds | ts timestamp with time zone | bigint - timescaledb | _timescaledb_internal | tsl_loaded | | boolean - timescaledb | _timescaledb_internal | unfreeze_chunk | chunk regclass | boolean - timescaledb | public | add_compression_policy | hypertable regclass, compress_after "any", if_not_exists boolean, schedule_interval interval, initial_start timestamp with time zone, timezone text, compress_created_before interval | integer - timescaledb | public | add_continuous_aggregate_policy | continuous_aggregate regclass, start_offset "any", end_offset "any", schedule_interval interval, if_not_exists boolean, initial_start timestamp with time zone, timezone text | integer - timescaledb | public | add_dimension | hypertable regclass, column_name name, number_partitions integer, chunk_time_interval anyelement, partitioning_func regproc, if_not_exists boolean | TABLE(dimension_id integer, schema_name name, table_name name, column_name name, created boolean) - timescaledb | public | add_dimension | hypertable regclass, dimension _timescaledb_internal.dimension_info, if_not_exists boolean | TABLE(dimension_id integer, created boolean) - timescaledb | public | add_job | proc regproc, schedule_interval interval, config jsonb, initial_start timestamp with time zone, scheduled boolean, check_config regproc, fixed_schedule boolean, timezone text | integer - timescaledb | public | add_reorder_policy | hypertable regclass, index_name name, if_not_exists boolean, initial_start timestamp with time zone, timezone text | integer - timescaledb | public | add_retention_policy | relation regclass, drop_after 
"any", if_not_exists boolean, schedule_interval interval, initial_start timestamp with time zone, timezone text, drop_created_before interval | integer - timescaledb | public | alter_job | job_id integer, schedule_interval interval, max_runtime interval, max_retries integer, retry_period interval, scheduled boolean, config jsonb, next_start timestamp with time zone, if_exists boolean, check_config regproc, fixed_schedule boolean, initial_start timestamp with time zone, timezone text | TABLE(job_id integer, schedule_interval interval, max_runtime interval, max_retries integer, retry_period interval, scheduled boolean, config jsonb, next_start timestamp with time zone, check_config text, fixed_schedule boolean, initial_start timestamp with time zone, timezone text) - timescaledb | public | approximate_row_count | relation regclass | bigint - timescaledb | public | attach_tablespace | tablespace name, hypertable regclass, if_not_attached boolean | void - timescaledb | public | by_hash | column_name name, number_partitions integer, partition_func regproc | _timescaledb_internal.dimension_info - timescaledb | public | by_range | column_name name, partition_interval anyelement, partition_func regproc | _timescaledb_internal.dimension_info - timescaledb | public | cagg_migrate | IN cagg regclass, IN override boolean, IN drop_old boolean | - timescaledb | public | chunk_compression_stats | hypertable regclass | TABLE(chunk_schema name, chunk_name name, compression_status text, before_compression_table_bytes bigint, before_compression_index_bytes bigint, before_compression_toast_bytes bigint, before_compression_total_bytes bigint, after_compression_table_bytes bigint, after_compression_index_bytes bigint, after_compression_toast_bytes bigint, after_compression_total_bytes bigint, node_name name) - timescaledb | public | chunks_detailed_size | hypertable regclass | TABLE(chunk_schema name, chunk_name name, table_bytes bigint, index_bytes bigint, toast_bytes bigint, 
total_bytes bigint, node_name name) - timescaledb | public | compress_chunk | uncompressed_chunk regclass, if_not_compressed boolean, recompress boolean | regclass - timescaledb | public | create_hypertable | relation regclass, dimension _timescaledb_internal.dimension_info, create_default_indexes boolean, if_not_exists boolean, migrate_data boolean | TABLE(hypertable_id integer, created boolean) - timescaledb | public | create_hypertable | relation regclass, time_column_name name, partitioning_column name, number_partitions integer, associated_schema_name name, associated_table_prefix name, chunk_time_interval anyelement, create_default_indexes boolean, if_not_exists boolean, partitioning_func regproc, migrate_data boolean, chunk_target_size text, chunk_sizing_func regproc, time_partitioning_func regproc | TABLE(hypertable_id integer, schema_name name, table_name name, created boolean) - timescaledb | public | decompress_chunk | uncompressed_chunk regclass, if_compressed boolean | regclass - timescaledb | public | delete_job | job_id integer | void - timescaledb | public | detach_tablespace | tablespace name, hypertable regclass, if_attached boolean | integer - timescaledb | public | detach_tablespaces | hypertable regclass | integer - timescaledb | public | disable_chunk_skipping | hypertable regclass, column_name name, if_not_exists boolean | TABLE(hypertable_id integer, column_name name, disabled boolean) - timescaledb | public | drop_chunks | relation regclass, older_than "any", newer_than "any", "verbose" boolean, created_before "any", created_after "any" | SETOF text - timescaledb | public | enable_chunk_skipping | hypertable regclass, column_name name, if_not_exists boolean | TABLE(column_stats_id integer, enabled boolean) - timescaledb | public | first | anyelement, "any" | anyelement - timescaledb | public | get_telemetry_report | | jsonb - timescaledb | public | histogram | double precision, double precision, double precision, integer | integer[] - 
timescaledb | public | hypertable_approximate_detailed_size | relation regclass | TABLE(table_bytes bigint, index_bytes bigint, toast_bytes bigint, total_bytes bigint) - timescaledb | public | hypertable_approximate_size | hypertable regclass | bigint - timescaledb | public | hypertable_compression_stats | hypertable regclass | TABLE(total_chunks bigint, number_compressed_chunks bigint, before_compression_table_bytes bigint, before_compression_index_bytes bigint, before_compression_toast_bytes bigint, before_compression_total_bytes bigint, after_compression_table_bytes bigint, after_compression_index_bytes bigint, after_compression_toast_bytes bigint, after_compression_total_bytes bigint, node_name name) - timescaledb | public | hypertable_detailed_size | hypertable regclass | TABLE(table_bytes bigint, index_bytes bigint, toast_bytes bigint, total_bytes bigint, node_name name) - timescaledb | public | hypertable_index_size | index_name regclass | bigint - timescaledb | public | hypertable_size | hypertable regclass | bigint - timescaledb | public | interpolate | value bigint, prev record, next record | bigint - timescaledb | public | interpolate | value double precision, prev record, next record | double precision - timescaledb | public | interpolate | value integer, prev record, next record | integer - timescaledb | public | interpolate | value real, prev record, next record | real - timescaledb | public | interpolate | value smallint, prev record, next record | smallint - timescaledb | public | last | anyelement, "any" | anyelement - timescaledb | public | locf | value anyelement, prev anyelement, treat_null_as_missing boolean | anyelement - timescaledb | public | move_chunk | chunk regclass, destination_tablespace name, index_destination_tablespace name, reorder_index regclass, "verbose" boolean | void - timescaledb | public | recompress_chunk | IN chunk regclass, IN if_not_compressed boolean | - timescaledb | public | refresh_continuous_aggregate | IN 
continuous_aggregate regclass, IN window_start "any", IN window_end "any" | - timescaledb | public | remove_compression_policy | hypertable regclass, if_exists boolean | boolean - timescaledb | public | remove_continuous_aggregate_policy | continuous_aggregate regclass, if_not_exists boolean, if_exists boolean | void - timescaledb | public | remove_reorder_policy | hypertable regclass, if_exists boolean | void - timescaledb | public | remove_retention_policy | relation regclass, if_exists boolean | void - timescaledb | public | reorder_chunk | chunk regclass, index regclass, "verbose" boolean | void - timescaledb | public | run_job | IN job_id integer | - timescaledb | public | set_adaptive_chunking | hypertable regclass, chunk_target_size text, INOUT chunk_sizing_func regproc, OUT chunk_target_size bigint | record - timescaledb | public | set_chunk_time_interval | hypertable regclass, chunk_time_interval anyelement, dimension_name name | void - timescaledb | public | set_integer_now_func | hypertable regclass, integer_now_func regproc, replace_if_exists boolean | void - timescaledb | public | set_number_partitions | hypertable regclass, number_partitions integer, dimension_name name | void - timescaledb | public | set_partitioning_interval | hypertable regclass, partition_interval anyelement, dimension_name name | void - timescaledb | public | show_chunks | relation regclass, older_than "any", newer_than "any", created_before "any", created_after "any" | SETOF regclass - timescaledb | public | show_tablespaces | hypertable regclass | SETOF name - timescaledb | public | time_bucket | bucket_width bigint, ts bigint | bigint - timescaledb | public | time_bucket | bucket_width bigint, ts bigint, "offset" bigint | bigint - timescaledb | public | time_bucket | bucket_width integer, ts integer | integer - timescaledb | public | time_bucket | bucket_width integer, ts integer, "offset" integer | integer - timescaledb | public | time_bucket | bucket_width interval, ts date 
| date - timescaledb | public | time_bucket | bucket_width interval, ts date, "offset" interval | date - timescaledb | public | time_bucket | bucket_width interval, ts date, origin date | date - timescaledb | public | time_bucket | bucket_width interval, ts timestamp with time zone | timestamp with time zone - timescaledb | public | time_bucket | bucket_width interval, ts timestamp with time zone, "offset" interval | timestamp with time zone - timescaledb | public | time_bucket | bucket_width interval, ts timestamp with time zone, origin timestamp with time zone | timestamp with time zone - timescaledb | public | time_bucket | bucket_width interval, ts timestamp with time zone, timezone text, origin timestamp with time zone, "offset" interval | timestamp with time zone - timescaledb | public | time_bucket | bucket_width interval, ts timestamp without time zone | timestamp without time zone - timescaledb | public | time_bucket | bucket_width interval, ts timestamp without time zone, "offset" interval | timestamp without time zone - timescaledb | public | time_bucket | bucket_width interval, ts timestamp without time zone, origin timestamp without time zone | timestamp without time zone - timescaledb | public | time_bucket | bucket_width smallint, ts smallint | smallint - timescaledb | public | time_bucket | bucket_width smallint, ts smallint, "offset" smallint | smallint - timescaledb | public | time_bucket_gapfill | bucket_width bigint, ts bigint, start bigint, finish bigint | bigint - timescaledb | public | time_bucket_gapfill | bucket_width integer, ts integer, start integer, finish integer | integer - timescaledb | public | time_bucket_gapfill | bucket_width interval, ts date, start date, finish date | date - timescaledb | public | time_bucket_gapfill | bucket_width interval, ts timestamp with time zone, start timestamp with time zone, finish timestamp with time zone | timestamp with time zone - timescaledb | public | time_bucket_gapfill | bucket_width interval, 
ts timestamp with time zone, timezone text, start timestamp with time zone, finish timestamp with time zone | timestamp with time zone - timescaledb | public | time_bucket_gapfill | bucket_width interval, ts timestamp without time zone, start timestamp without time zone, finish timestamp without time zone | timestamp without time zone - timescaledb | public | time_bucket_gapfill | bucket_width smallint, ts smallint, start smallint, finish smallint | smallint - timescaledb | public | timescaledb_post_restore | | boolean - timescaledb | public | timescaledb_pre_restore | | boolean - timescaledb | timescaledb_experimental | add_policies | relation regclass, if_not_exists boolean, refresh_start_offset "any", refresh_end_offset "any", compress_after "any", drop_after "any" | boolean - timescaledb | timescaledb_experimental | alter_policies | relation regclass, if_exists boolean, refresh_start_offset "any", refresh_end_offset "any", compress_after "any", drop_after "any" | boolean - timescaledb | timescaledb_experimental | remove_all_policies | relation regclass, if_exists boolean | boolean - timescaledb | timescaledb_experimental | remove_policies | relation regclass, if_exists boolean, VARIADIC policy_names text[] | boolean - timescaledb | timescaledb_experimental | show_policies | relation regclass | SETOF jsonb - timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts date | date - timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts date, origin date | date - timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts timestamp with time zone | timestamp with time zone - timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts timestamp with time zone, origin timestamp with time zone | timestamp with time zone - timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts timestamp with time zone, origin timestamp with time zone, 
timezone text | timestamp with time zone - timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts timestamp with time zone, timezone text | timestamp with time zone - timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts timestamp without time zone | timestamp without time zone - timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts timestamp without time zone, origin timestamp without time zone | timestamp without time zone - tsm_system_rows | public | system_rows | internal | tsm_handler - tsm_system_time | public | system_time | internal | tsm_handler - unaccent | public | unaccent | regdictionary, text | text - unaccent | public | unaccent | text | text - unaccent | public | unaccent_init | internal | internal - unaccent | public | unaccent_lexize | internal, internal, internal, internal | internal - uuid-ossp | public | uuid_generate_v1 | | uuid - uuid-ossp | public | uuid_generate_v1mc | | uuid - uuid-ossp | public | uuid_generate_v3 | namespace uuid, name text | uuid - uuid-ossp | public | uuid_generate_v4 | | uuid - uuid-ossp | public | uuid_generate_v5 | namespace uuid, name text | uuid - uuid-ossp | public | uuid_nil | | uuid - uuid-ossp | public | uuid_ns_dns | | uuid - uuid-ossp | public | uuid_ns_oid | | uuid - uuid-ossp | public | uuid_ns_url | | uuid - uuid-ossp | public | uuid_ns_x500 | | uuid - vector | public | array_to_halfvec | double precision[], integer, boolean | halfvec - vector | public | array_to_halfvec | integer[], integer, boolean | halfvec - vector | public | array_to_halfvec | numeric[], integer, boolean | halfvec - vector | public | array_to_halfvec | real[], integer, boolean | halfvec - vector | public | array_to_sparsevec | double precision[], integer, boolean | sparsevec - vector | public | array_to_sparsevec | integer[], integer, boolean | sparsevec - vector | public | array_to_sparsevec | numeric[], integer, boolean | sparsevec - vector | 
public | array_to_sparsevec | real[], integer, boolean | sparsevec - vector | public | array_to_vector | double precision[], integer, boolean | vector - vector | public | array_to_vector | integer[], integer, boolean | vector - vector | public | array_to_vector | numeric[], integer, boolean | vector - vector | public | array_to_vector | real[], integer, boolean | vector - vector | public | avg | halfvec | halfvec - vector | public | avg | vector | vector - vector | public | binary_quantize | halfvec | bit - vector | public | binary_quantize | vector | bit - vector | public | cosine_distance | halfvec, halfvec | double precision - vector | public | cosine_distance | sparsevec, sparsevec | double precision - vector | public | cosine_distance | vector, vector | double precision - vector | public | halfvec | halfvec, integer, boolean | halfvec - vector | public | halfvec_accum | double precision[], halfvec | double precision[] - vector | public | halfvec_add | halfvec, halfvec | halfvec - vector | public | halfvec_avg | double precision[] | halfvec - vector | public | halfvec_cmp | halfvec, halfvec | integer - vector | public | halfvec_combine | double precision[], double precision[] | double precision[] - vector | public | halfvec_concat | halfvec, halfvec | halfvec - vector | public | halfvec_eq | halfvec, halfvec | boolean - vector | public | halfvec_ge | halfvec, halfvec | boolean - vector | public | halfvec_gt | halfvec, halfvec | boolean - vector | public | halfvec_in | cstring, oid, integer | halfvec - vector | public | halfvec_l2_squared_distance | halfvec, halfvec | double precision - vector | public | halfvec_le | halfvec, halfvec | boolean - vector | public | halfvec_lt | halfvec, halfvec | boolean - vector | public | halfvec_mul | halfvec, halfvec | halfvec - vector | public | halfvec_ne | halfvec, halfvec | boolean - vector | public | halfvec_negative_inner_product | halfvec, halfvec | double precision - vector | public | halfvec_out | halfvec | cstring - 
vector | public | halfvec_recv | internal, oid, integer | halfvec - vector | public | halfvec_send | halfvec | bytea - vector | public | halfvec_spherical_distance | halfvec, halfvec | double precision - vector | public | halfvec_sub | halfvec, halfvec | halfvec - vector | public | halfvec_to_float4 | halfvec, integer, boolean | real[] - vector | public | halfvec_to_sparsevec | halfvec, integer, boolean | sparsevec - vector | public | halfvec_to_vector | halfvec, integer, boolean | vector - vector | public | halfvec_typmod_in | cstring[] | integer - vector | public | hamming_distance | bit, bit | double precision - vector | public | hnsw_bit_support | internal | internal - vector | public | hnsw_halfvec_support | internal | internal - vector | public | hnsw_sparsevec_support | internal | internal - vector | public | hnswhandler | internal | index_am_handler - vector | public | inner_product | halfvec, halfvec | double precision - vector | public | inner_product | sparsevec, sparsevec | double precision - vector | public | inner_product | vector, vector | double precision - vector | public | ivfflat_bit_support | internal | internal - vector | public | ivfflat_halfvec_support | internal | internal - vector | public | ivfflathandler | internal | index_am_handler - vector | public | jaccard_distance | bit, bit | double precision - vector | public | l1_distance | halfvec, halfvec | double precision - vector | public | l1_distance | sparsevec, sparsevec | double precision - vector | public | l1_distance | vector, vector | double precision - vector | public | l2_distance | halfvec, halfvec | double precision - vector | public | l2_distance | sparsevec, sparsevec | double precision - vector | public | l2_distance | vector, vector | double precision - vector | public | l2_norm | halfvec | double precision - vector | public | l2_norm | sparsevec | double precision - vector | public | l2_normalize | halfvec | halfvec - vector | public | l2_normalize | sparsevec | sparsevec - 
vector | public | l2_normalize | vector | vector - vector | public | sparsevec | sparsevec, integer, boolean | sparsevec - vector | public | sparsevec_cmp | sparsevec, sparsevec | integer - vector | public | sparsevec_eq | sparsevec, sparsevec | boolean - vector | public | sparsevec_ge | sparsevec, sparsevec | boolean - vector | public | sparsevec_gt | sparsevec, sparsevec | boolean - vector | public | sparsevec_in | cstring, oid, integer | sparsevec - vector | public | sparsevec_l2_squared_distance | sparsevec, sparsevec | double precision - vector | public | sparsevec_le | sparsevec, sparsevec | boolean - vector | public | sparsevec_lt | sparsevec, sparsevec | boolean - vector | public | sparsevec_ne | sparsevec, sparsevec | boolean - vector | public | sparsevec_negative_inner_product | sparsevec, sparsevec | double precision - vector | public | sparsevec_out | sparsevec | cstring - vector | public | sparsevec_recv | internal, oid, integer | sparsevec - vector | public | sparsevec_send | sparsevec | bytea - vector | public | sparsevec_to_halfvec | sparsevec, integer, boolean | halfvec - vector | public | sparsevec_to_vector | sparsevec, integer, boolean | vector - vector | public | sparsevec_typmod_in | cstring[] | integer - vector | public | subvector | halfvec, integer, integer | halfvec - vector | public | subvector | vector, integer, integer | vector - vector | public | sum | halfvec | halfvec - vector | public | sum | vector | vector - vector | public | vector | vector, integer, boolean | vector - vector | public | vector_accum | double precision[], vector | double precision[] - vector | public | vector_add | vector, vector | vector - vector | public | vector_avg | double precision[] | vector - vector | public | vector_cmp | vector, vector | integer - vector | public | vector_combine | double precision[], double precision[] | double precision[] - vector | public | vector_concat | vector, vector | vector - vector | public | vector_dims | halfvec | integer - 
vector | public | vector_dims | vector | integer - vector | public | vector_eq | vector, vector | boolean - vector | public | vector_ge | vector, vector | boolean - vector | public | vector_gt | vector, vector | boolean - vector | public | vector_in | cstring, oid, integer | vector - vector | public | vector_l2_squared_distance | vector, vector | double precision - vector | public | vector_le | vector, vector | boolean - vector | public | vector_lt | vector, vector | boolean - vector | public | vector_mul | vector, vector | vector - vector | public | vector_ne | vector, vector | boolean - vector | public | vector_negative_inner_product | vector, vector | double precision - vector | public | vector_norm | vector | double precision - vector | public | vector_out | vector | cstring - vector | public | vector_recv | internal, oid, integer | vector - vector | public | vector_send | vector | bytea - vector | public | vector_spherical_distance | vector, vector | double precision - vector | public | vector_sub | vector, vector | vector - vector | public | vector_to_float4 | vector, integer, boolean | real[] - vector | public | vector_to_halfvec | vector, integer, boolean | halfvec - vector | public | vector_to_sparsevec | vector, integer, boolean | sparsevec - vector | public | vector_typmod_in | cstring[] | integer - wrappers | public | airtable_fdw_handler | | fdw_handler - wrappers | public | airtable_fdw_meta | | TABLE(name text, version text, author text, website text) - wrappers | public | airtable_fdw_validator | options text[], catalog oid | void - wrappers | public | auth0_fdw_handler | | fdw_handler - wrappers | public | auth0_fdw_meta | | TABLE(name text, version text, author text, website text) - wrappers | public | auth0_fdw_validator | options text[], catalog oid | void - wrappers | public | big_query_fdw_handler | | fdw_handler - wrappers | public | big_query_fdw_meta | | TABLE(name text, version text, author text, website text) - wrappers | public | 
big_query_fdw_validator | options text[], catalog oid | void - wrappers | public | click_house_fdw_handler | | fdw_handler - wrappers | public | click_house_fdw_meta | | TABLE(name text, version text, author text, website text) - wrappers | public | click_house_fdw_validator | options text[], catalog oid | void - wrappers | public | cognito_fdw_handler | | fdw_handler - wrappers | public | cognito_fdw_meta | | TABLE(name text, version text, author text, website text) - wrappers | public | cognito_fdw_validator | options text[], catalog oid | void - wrappers | public | firebase_fdw_handler | | fdw_handler - wrappers | public | firebase_fdw_meta | | TABLE(name text, version text, author text, website text) - wrappers | public | firebase_fdw_validator | options text[], catalog oid | void - wrappers | public | hello_world_fdw_handler | | fdw_handler - wrappers | public | hello_world_fdw_meta | | TABLE(name text, version text, author text, website text) - wrappers | public | hello_world_fdw_validator | options text[], catalog oid | void - wrappers | public | logflare_fdw_handler | | fdw_handler - wrappers | public | logflare_fdw_meta | | TABLE(name text, version text, author text, website text) - wrappers | public | logflare_fdw_validator | options text[], catalog oid | void - wrappers | public | mssql_fdw_handler | | fdw_handler - wrappers | public | mssql_fdw_meta | | TABLE(name text, version text, author text, website text) - wrappers | public | mssql_fdw_validator | options text[], catalog oid | void - wrappers | public | redis_fdw_handler | | fdw_handler - wrappers | public | redis_fdw_meta | | TABLE(name text, version text, author text, website text) - wrappers | public | redis_fdw_validator | options text[], catalog oid | void - wrappers | public | s3_fdw_handler | | fdw_handler - wrappers | public | s3_fdw_meta | | TABLE(name text, version text, author text, website text) - wrappers | public | s3_fdw_validator | options text[], catalog oid | void - wrappers | 
public | stripe_fdw_handler | | fdw_handler - wrappers | public | stripe_fdw_meta | | TABLE(name text, version text, author text, website text) - wrappers | public | stripe_fdw_validator | options text[], catalog oid | void - wrappers | public | wasm_fdw_handler | | fdw_handler - wrappers | public | wasm_fdw_meta | | TABLE(name text, version text, author text, website text) - wrappers | public | wasm_fdw_validator | options text[], catalog oid | void - xml2 | public | xml_encode_special_chars | text | text - xml2 | public | xml_valid | text | boolean - xml2 | public | xpath_bool | text, text | boolean - xml2 | public | xpath_list | text, text | text - xml2 | public | xpath_list | text, text, text | text - xml2 | public | xpath_nodeset | text, text | text - xml2 | public | xpath_nodeset | text, text, text | text - xml2 | public | xpath_nodeset | text, text, text, text | text - xml2 | public | xpath_number | text, text | real - xml2 | public | xpath_string | text, text | text - xml2 | public | xpath_table | text, text, text, text, text | SETOF record - xml2 | public | xslt_process | text, text | text - xml2 | public | xslt_process | text, text, text | text -(5037 rows) - -/* - -Monitor extension public table/view/matview/index interface - -*/ -select - e.extname as extension_name, - n.nspname as schema_name, - pc.relname as entity_name, - pa.attname -from - pg_catalog.pg_class pc - join pg_catalog.pg_namespace n - on n.oid = pc.relnamespace - join pg_catalog.pg_depend d - on d.objid = pc.oid - join pg_catalog.pg_extension e - on e.oid = d.refobjid - left join pg_catalog.pg_attribute pa - on pa.attrelid = pc.oid - and pa.attnum > 0 - and not pa.attisdropped -where - d.deptype = 'e' - and pc.relkind in ('r', 'v', 'm', 'i') -order by - e.extname, - pc.relname, - pa.attname; - extension_name | schema_name | entity_name | attname 
-------------------------------+--------------------------+--------------------------------------------------+----------------------------------- - address_standardizer_data_us | public | us_gaz | id - address_standardizer_data_us | public | us_gaz | is_custom - address_standardizer_data_us | public | us_gaz | seq - address_standardizer_data_us | public | us_gaz | stdword - address_standardizer_data_us | public | us_gaz | token - address_standardizer_data_us | public | us_gaz | word - address_standardizer_data_us | public | us_lex | id - address_standardizer_data_us | public | us_lex | is_custom - address_standardizer_data_us | public | us_lex | seq - address_standardizer_data_us | public | us_lex | stdword - address_standardizer_data_us | public | us_lex | token - address_standardizer_data_us | public | us_lex | word - address_standardizer_data_us | public | us_rules | id - address_standardizer_data_us | public | us_rules | is_custom - address_standardizer_data_us | public | us_rules | rule - hypopg | public | hypopg_hidden_indexes | am_name - hypopg | public | hypopg_hidden_indexes | index_name - hypopg | public | hypopg_hidden_indexes | indexrelid - hypopg | public | hypopg_hidden_indexes | is_hypo - hypopg | public | hypopg_hidden_indexes | schema_name - hypopg | public | hypopg_hidden_indexes | table_name - hypopg | public | hypopg_list_indexes | am_name - hypopg | public | hypopg_list_indexes | index_name - hypopg | public | hypopg_list_indexes | indexrelid - hypopg | public | hypopg_list_indexes | schema_name - hypopg | public | hypopg_list_indexes | table_name - pg_buffercache | public | pg_buffercache | bufferid - pg_buffercache | public | pg_buffercache | isdirty - pg_buffercache | public | pg_buffercache | pinning_backends - pg_buffercache | public | pg_buffercache | relblocknumber - pg_buffercache | public | pg_buffercache | reldatabase - pg_buffercache | public | pg_buffercache | relfilenode - pg_buffercache | public | pg_buffercache | relforknumber - 
pg_buffercache | public | pg_buffercache | reltablespace - pg_buffercache | public | pg_buffercache | usagecount - pg_net | net | _http_response | content - pg_net | net | _http_response | content_type - pg_net | net | _http_response | created - pg_net | net | _http_response | error_msg - pg_net | net | _http_response | headers - pg_net | net | _http_response | id - pg_net | net | _http_response | status_code - pg_net | net | _http_response | timed_out - pg_net | net | http_request_queue | body - pg_net | net | http_request_queue | headers - pg_net | net | http_request_queue | id - pg_net | net | http_request_queue | method - pg_net | net | http_request_queue | timeout_milliseconds - pg_net | net | http_request_queue | url - pg_repack | repack | primary_keys | indexrelid - pg_repack | repack | primary_keys | indrelid - pg_repack | repack | tables | alter_col_storage - pg_repack | repack | tables | ckey - pg_repack | repack | tables | ckid - pg_repack | repack | tables | copy_data - pg_repack | repack | tables | create_log - pg_repack | repack | tables | create_pktype - pg_repack | repack | tables | create_table - pg_repack | repack | tables | create_trigger - pg_repack | repack | tables | delete_log - pg_repack | repack | tables | drop_columns - pg_repack | repack | tables | enable_trigger - pg_repack | repack | tables | lock_table - pg_repack | repack | tables | pkid - pg_repack | repack | tables | relid - pg_repack | repack | tables | relname - pg_repack | repack | tables | reltoastidxid - pg_repack | repack | tables | reltoastrelid - pg_repack | repack | tables | schemaname - pg_repack | repack | tables | sql_delete - pg_repack | repack | tables | sql_insert - pg_repack | repack | tables | sql_peek - pg_repack | repack | tables | sql_pop - pg_repack | repack | tables | sql_update - pg_repack | repack | tables | tablespace_orig - pg_stat_monitor | public | pg_stat_monitor | application_name - pg_stat_monitor | public | pg_stat_monitor | blk_read_time - 
pg_stat_monitor | public | pg_stat_monitor | blk_write_time - pg_stat_monitor | public | pg_stat_monitor | bucket - pg_stat_monitor | public | pg_stat_monitor | bucket_done - pg_stat_monitor | public | pg_stat_monitor | bucket_start_time - pg_stat_monitor | public | pg_stat_monitor | calls - pg_stat_monitor | public | pg_stat_monitor | client_ip - pg_stat_monitor | public | pg_stat_monitor | cmd_type - pg_stat_monitor | public | pg_stat_monitor | cmd_type_text - pg_stat_monitor | public | pg_stat_monitor | comments - pg_stat_monitor | public | pg_stat_monitor | cpu_sys_time - pg_stat_monitor | public | pg_stat_monitor | cpu_user_time - pg_stat_monitor | public | pg_stat_monitor | datname - pg_stat_monitor | public | pg_stat_monitor | dbid - pg_stat_monitor | public | pg_stat_monitor | elevel - pg_stat_monitor | public | pg_stat_monitor | jit_emission_count - pg_stat_monitor | public | pg_stat_monitor | jit_emission_time - pg_stat_monitor | public | pg_stat_monitor | jit_functions - pg_stat_monitor | public | pg_stat_monitor | jit_generation_time - pg_stat_monitor | public | pg_stat_monitor | jit_inlining_count - pg_stat_monitor | public | pg_stat_monitor | jit_inlining_time - pg_stat_monitor | public | pg_stat_monitor | jit_optimization_count - pg_stat_monitor | public | pg_stat_monitor | jit_optimization_time - pg_stat_monitor | public | pg_stat_monitor | local_blks_dirtied - pg_stat_monitor | public | pg_stat_monitor | local_blks_hit - pg_stat_monitor | public | pg_stat_monitor | local_blks_read - pg_stat_monitor | public | pg_stat_monitor | local_blks_written - pg_stat_monitor | public | pg_stat_monitor | max_exec_time - pg_stat_monitor | public | pg_stat_monitor | max_plan_time - pg_stat_monitor | public | pg_stat_monitor | mean_exec_time - pg_stat_monitor | public | pg_stat_monitor | mean_plan_time - pg_stat_monitor | public | pg_stat_monitor | message - pg_stat_monitor | public | pg_stat_monitor | min_exec_time - pg_stat_monitor | public | pg_stat_monitor | 
min_plan_time - pg_stat_monitor | public | pg_stat_monitor | pgsm_query_id - pg_stat_monitor | public | pg_stat_monitor | planid - pg_stat_monitor | public | pg_stat_monitor | plans - pg_stat_monitor | public | pg_stat_monitor | query - pg_stat_monitor | public | pg_stat_monitor | query_plan - pg_stat_monitor | public | pg_stat_monitor | queryid - pg_stat_monitor | public | pg_stat_monitor | relations - pg_stat_monitor | public | pg_stat_monitor | resp_calls - pg_stat_monitor | public | pg_stat_monitor | rows - pg_stat_monitor | public | pg_stat_monitor | shared_blks_dirtied - pg_stat_monitor | public | pg_stat_monitor | shared_blks_hit - pg_stat_monitor | public | pg_stat_monitor | shared_blks_read - pg_stat_monitor | public | pg_stat_monitor | shared_blks_written - pg_stat_monitor | public | pg_stat_monitor | sqlcode - pg_stat_monitor | public | pg_stat_monitor | stddev_exec_time - pg_stat_monitor | public | pg_stat_monitor | stddev_plan_time - pg_stat_monitor | public | pg_stat_monitor | temp_blk_read_time - pg_stat_monitor | public | pg_stat_monitor | temp_blk_write_time - pg_stat_monitor | public | pg_stat_monitor | temp_blks_read - pg_stat_monitor | public | pg_stat_monitor | temp_blks_written - pg_stat_monitor | public | pg_stat_monitor | top_query - pg_stat_monitor | public | pg_stat_monitor | top_queryid - pg_stat_monitor | public | pg_stat_monitor | toplevel - pg_stat_monitor | public | pg_stat_monitor | total_exec_time - pg_stat_monitor | public | pg_stat_monitor | total_plan_time - pg_stat_monitor | public | pg_stat_monitor | userid - pg_stat_monitor | public | pg_stat_monitor | username - pg_stat_monitor | public | pg_stat_monitor | wal_bytes - pg_stat_monitor | public | pg_stat_monitor | wal_fpi - pg_stat_monitor | public | pg_stat_monitor | wal_records - pg_stat_statements | public | pg_stat_statements | blk_read_time - pg_stat_statements | public | pg_stat_statements | blk_write_time - pg_stat_statements | public | pg_stat_statements | calls - 
pg_stat_statements | public | pg_stat_statements | dbid - pg_stat_statements | public | pg_stat_statements | jit_emission_count - pg_stat_statements | public | pg_stat_statements | jit_emission_time - pg_stat_statements | public | pg_stat_statements | jit_functions - pg_stat_statements | public | pg_stat_statements | jit_generation_time - pg_stat_statements | public | pg_stat_statements | jit_inlining_count - pg_stat_statements | public | pg_stat_statements | jit_inlining_time - pg_stat_statements | public | pg_stat_statements | jit_optimization_count - pg_stat_statements | public | pg_stat_statements | jit_optimization_time - pg_stat_statements | public | pg_stat_statements | local_blks_dirtied - pg_stat_statements | public | pg_stat_statements | local_blks_hit - pg_stat_statements | public | pg_stat_statements | local_blks_read - pg_stat_statements | public | pg_stat_statements | local_blks_written - pg_stat_statements | public | pg_stat_statements | max_exec_time - pg_stat_statements | public | pg_stat_statements | max_plan_time - pg_stat_statements | public | pg_stat_statements | mean_exec_time - pg_stat_statements | public | pg_stat_statements | mean_plan_time - pg_stat_statements | public | pg_stat_statements | min_exec_time - pg_stat_statements | public | pg_stat_statements | min_plan_time - pg_stat_statements | public | pg_stat_statements | plans - pg_stat_statements | public | pg_stat_statements | query - pg_stat_statements | public | pg_stat_statements | queryid - pg_stat_statements | public | pg_stat_statements | rows - pg_stat_statements | public | pg_stat_statements | shared_blks_dirtied - pg_stat_statements | public | pg_stat_statements | shared_blks_hit - pg_stat_statements | public | pg_stat_statements | shared_blks_read - pg_stat_statements | public | pg_stat_statements | shared_blks_written - pg_stat_statements | public | pg_stat_statements | stddev_exec_time - pg_stat_statements | public | pg_stat_statements | stddev_plan_time - 
pg_stat_statements | public | pg_stat_statements | temp_blk_read_time - pg_stat_statements | public | pg_stat_statements | temp_blk_write_time - pg_stat_statements | public | pg_stat_statements | temp_blks_read - pg_stat_statements | public | pg_stat_statements | temp_blks_written - pg_stat_statements | public | pg_stat_statements | toplevel - pg_stat_statements | public | pg_stat_statements | total_exec_time - pg_stat_statements | public | pg_stat_statements | total_plan_time - pg_stat_statements | public | pg_stat_statements | userid - pg_stat_statements | public | pg_stat_statements | wal_bytes - pg_stat_statements | public | pg_stat_statements | wal_fpi - pg_stat_statements | public | pg_stat_statements | wal_records - pg_stat_statements | public | pg_stat_statements_info | dealloc - pg_stat_statements | public | pg_stat_statements_info | stats_reset - pg_tle | pgtle | feature_info | feature - pg_tle | pgtle | feature_info | obj_identity - pg_tle | pgtle | feature_info | proname - pg_tle | pgtle | feature_info | schema_name - pgmq | pgmq | meta | created_at - pgmq | pgmq | meta | is_partitioned - pgmq | pgmq | meta | is_unlogged - pgmq | pgmq | meta | queue_name - pgsodium | pgsodium | decrypted_key | associated_data - pgsodium | pgsodium | decrypted_key | comment - pgsodium | pgsodium | decrypted_key | created - pgsodium | pgsodium | decrypted_key | decrypted_raw_key - pgsodium | pgsodium | decrypted_key | expires - pgsodium | pgsodium | decrypted_key | id - pgsodium | pgsodium | decrypted_key | key_context - pgsodium | pgsodium | decrypted_key | key_id - pgsodium | pgsodium | decrypted_key | key_type - pgsodium | pgsodium | decrypted_key | name - pgsodium | pgsodium | decrypted_key | parent_key - pgsodium | pgsodium | decrypted_key | raw_key - pgsodium | pgsodium | decrypted_key | raw_key_nonce - pgsodium | pgsodium | decrypted_key | status - pgsodium | pgsodium | key | associated_data - pgsodium | pgsodium | key | comment - pgsodium | pgsodium | key | 
created - pgsodium | pgsodium | key | expires - pgsodium | pgsodium | key | id - pgsodium | pgsodium | key | key_context - pgsodium | pgsodium | key | key_id - pgsodium | pgsodium | key | key_type - pgsodium | pgsodium | key | name - pgsodium | pgsodium | key | parent_key - pgsodium | pgsodium | key | raw_key - pgsodium | pgsodium | key | raw_key_nonce - pgsodium | pgsodium | key | status - pgsodium | pgsodium | key | user_data - pgsodium | pgsodium | mask_columns | associated_columns - pgsodium | pgsodium | mask_columns | attname - pgsodium | pgsodium | mask_columns | attrelid - pgsodium | pgsodium | mask_columns | format_type - pgsodium | pgsodium | mask_columns | key_id - pgsodium | pgsodium | mask_columns | key_id_column - pgsodium | pgsodium | mask_columns | nonce_column - pgsodium | pgsodium | masking_rule | associated_columns - pgsodium | pgsodium | masking_rule | attname - pgsodium | pgsodium | masking_rule | attnum - pgsodium | pgsodium | masking_rule | attrelid - pgsodium | pgsodium | masking_rule | col_description - pgsodium | pgsodium | masking_rule | format_type - pgsodium | pgsodium | masking_rule | key_id - pgsodium | pgsodium | masking_rule | key_id_column - pgsodium | pgsodium | masking_rule | nonce_column - pgsodium | pgsodium | masking_rule | priority - pgsodium | pgsodium | masking_rule | relname - pgsodium | pgsodium | masking_rule | relnamespace - pgsodium | pgsodium | masking_rule | security_invoker - pgsodium | pgsodium | masking_rule | view_name - pgsodium | pgsodium | valid_key | associated_data - pgsodium | pgsodium | valid_key | created - pgsodium | pgsodium | valid_key | expires - pgsodium | pgsodium | valid_key | id - pgsodium | pgsodium | valid_key | key_context - pgsodium | pgsodium | valid_key | key_id - pgsodium | pgsodium | valid_key | key_type - pgsodium | pgsodium | valid_key | name - pgsodium | pgsodium | valid_key | status - pgtap | public | pg_all_foreign_keys | fk_columns - pgtap | public | pg_all_foreign_keys | 
fk_constraint_name - pgtap | public | pg_all_foreign_keys | fk_schema_name - pgtap | public | pg_all_foreign_keys | fk_table_name - pgtap | public | pg_all_foreign_keys | fk_table_oid - pgtap | public | pg_all_foreign_keys | is_deferrable - pgtap | public | pg_all_foreign_keys | is_deferred - pgtap | public | pg_all_foreign_keys | match_type - pgtap | public | pg_all_foreign_keys | on_delete - pgtap | public | pg_all_foreign_keys | on_update - pgtap | public | pg_all_foreign_keys | pk_columns - pgtap | public | pg_all_foreign_keys | pk_constraint_name - pgtap | public | pg_all_foreign_keys | pk_index_name - pgtap | public | pg_all_foreign_keys | pk_schema_name - pgtap | public | pg_all_foreign_keys | pk_table_name - pgtap | public | pg_all_foreign_keys | pk_table_oid - pgtap | public | tap_funky | args - pgtap | public | tap_funky | is_definer - pgtap | public | tap_funky | is_strict - pgtap | public | tap_funky | is_visible - pgtap | public | tap_funky | kind - pgtap | public | tap_funky | langoid - pgtap | public | tap_funky | name - pgtap | public | tap_funky | oid - pgtap | public | tap_funky | owner - pgtap | public | tap_funky | returns - pgtap | public | tap_funky | returns_set - pgtap | public | tap_funky | schema - pgtap | public | tap_funky | volatility - postgis | public | geography_columns | coord_dimension - postgis | public | geography_columns | f_geography_column - postgis | public | geography_columns | f_table_catalog - postgis | public | geography_columns | f_table_name - postgis | public | geography_columns | f_table_schema - postgis | public | geography_columns | srid - postgis | public | geography_columns | type - postgis | public | geometry_columns | coord_dimension - postgis | public | geometry_columns | f_geometry_column - postgis | public | geometry_columns | f_table_catalog - postgis | public | geometry_columns | f_table_name - postgis | public | geometry_columns | f_table_schema - postgis | public | geometry_columns | srid - postgis | 
public | geometry_columns | type - postgis | public | spatial_ref_sys | auth_name - postgis | public | spatial_ref_sys | auth_srid - postgis | public | spatial_ref_sys | proj4text - postgis | public | spatial_ref_sys | srid - postgis | public | spatial_ref_sys | srtext - postgis_raster | public | raster_columns | blocksize_x - postgis_raster | public | raster_columns | blocksize_y - postgis_raster | public | raster_columns | extent - postgis_raster | public | raster_columns | nodata_values - postgis_raster | public | raster_columns | num_bands - postgis_raster | public | raster_columns | out_db - postgis_raster | public | raster_columns | pixel_types - postgis_raster | public | raster_columns | r_raster_column - postgis_raster | public | raster_columns | r_table_catalog - postgis_raster | public | raster_columns | r_table_name - postgis_raster | public | raster_columns | r_table_schema - postgis_raster | public | raster_columns | regular_blocking - postgis_raster | public | raster_columns | same_alignment - postgis_raster | public | raster_columns | scale_x - postgis_raster | public | raster_columns | scale_y - postgis_raster | public | raster_columns | spatial_index - postgis_raster | public | raster_columns | srid - postgis_raster | public | raster_overviews | o_raster_column - postgis_raster | public | raster_overviews | o_table_catalog - postgis_raster | public | raster_overviews | o_table_name - postgis_raster | public | raster_overviews | o_table_schema - postgis_raster | public | raster_overviews | overview_factor - postgis_raster | public | raster_overviews | r_raster_column - postgis_raster | public | raster_overviews | r_table_catalog - postgis_raster | public | raster_overviews | r_table_name - postgis_raster | public | raster_overviews | r_table_schema - postgis_tiger_geocoder | tiger | addr | arid - postgis_tiger_geocoder | tiger | addr | fromarmid - postgis_tiger_geocoder | tiger | addr | fromhn - postgis_tiger_geocoder | tiger | addr | fromtyp - 
postgis_tiger_geocoder | tiger | addr | gid - postgis_tiger_geocoder | tiger | addr | mtfcc - postgis_tiger_geocoder | tiger | addr | plus4 - postgis_tiger_geocoder | tiger | addr | side - postgis_tiger_geocoder | tiger | addr | statefp - postgis_tiger_geocoder | tiger | addr | tlid - postgis_tiger_geocoder | tiger | addr | toarmid - postgis_tiger_geocoder | tiger | addr | tohn - postgis_tiger_geocoder | tiger | addr | totyp - postgis_tiger_geocoder | tiger | addr | zip - postgis_tiger_geocoder | tiger | addrfeat | aridl - postgis_tiger_geocoder | tiger | addrfeat | aridr - postgis_tiger_geocoder | tiger | addrfeat | edge_mtfcc - postgis_tiger_geocoder | tiger | addrfeat | fullname - postgis_tiger_geocoder | tiger | addrfeat | gid - postgis_tiger_geocoder | tiger | addrfeat | lfromhn - postgis_tiger_geocoder | tiger | addrfeat | lfromtyp - postgis_tiger_geocoder | tiger | addrfeat | linearid - postgis_tiger_geocoder | tiger | addrfeat | ltohn - postgis_tiger_geocoder | tiger | addrfeat | ltotyp - postgis_tiger_geocoder | tiger | addrfeat | offsetl - postgis_tiger_geocoder | tiger | addrfeat | offsetr - postgis_tiger_geocoder | tiger | addrfeat | parityl - postgis_tiger_geocoder | tiger | addrfeat | parityr - postgis_tiger_geocoder | tiger | addrfeat | plus4l - postgis_tiger_geocoder | tiger | addrfeat | plus4r - postgis_tiger_geocoder | tiger | addrfeat | rfromhn - postgis_tiger_geocoder | tiger | addrfeat | rfromtyp - postgis_tiger_geocoder | tiger | addrfeat | rtohn - postgis_tiger_geocoder | tiger | addrfeat | rtotyp - postgis_tiger_geocoder | tiger | addrfeat | statefp - postgis_tiger_geocoder | tiger | addrfeat | the_geom - postgis_tiger_geocoder | tiger | addrfeat | tlid - postgis_tiger_geocoder | tiger | addrfeat | zipl - postgis_tiger_geocoder | tiger | addrfeat | zipr - postgis_tiger_geocoder | tiger | bg | aland - postgis_tiger_geocoder | tiger | bg | awater - postgis_tiger_geocoder | tiger | bg | bg_id - postgis_tiger_geocoder | tiger | bg | blkgrpce - 
postgis_tiger_geocoder | tiger | bg | countyfp - postgis_tiger_geocoder | tiger | bg | funcstat - postgis_tiger_geocoder | tiger | bg | gid - postgis_tiger_geocoder | tiger | bg | intptlat - postgis_tiger_geocoder | tiger | bg | intptlon - postgis_tiger_geocoder | tiger | bg | mtfcc - postgis_tiger_geocoder | tiger | bg | namelsad - postgis_tiger_geocoder | tiger | bg | statefp - postgis_tiger_geocoder | tiger | bg | the_geom - postgis_tiger_geocoder | tiger | bg | tractce - postgis_tiger_geocoder | tiger | county | aland - postgis_tiger_geocoder | tiger | county | awater - postgis_tiger_geocoder | tiger | county | cbsafp - postgis_tiger_geocoder | tiger | county | classfp - postgis_tiger_geocoder | tiger | county | cntyidfp - postgis_tiger_geocoder | tiger | county | countyfp - postgis_tiger_geocoder | tiger | county | countyns - postgis_tiger_geocoder | tiger | county | csafp - postgis_tiger_geocoder | tiger | county | funcstat - postgis_tiger_geocoder | tiger | county | gid - postgis_tiger_geocoder | tiger | county | intptlat - postgis_tiger_geocoder | tiger | county | intptlon - postgis_tiger_geocoder | tiger | county | lsad - postgis_tiger_geocoder | tiger | county | metdivfp - postgis_tiger_geocoder | tiger | county | mtfcc - postgis_tiger_geocoder | tiger | county | name - postgis_tiger_geocoder | tiger | county | namelsad - postgis_tiger_geocoder | tiger | county | statefp - postgis_tiger_geocoder | tiger | county | the_geom - postgis_tiger_geocoder | tiger | county_lookup | co_code - postgis_tiger_geocoder | tiger | county_lookup | name - postgis_tiger_geocoder | tiger | county_lookup | st_code - postgis_tiger_geocoder | tiger | county_lookup | state - postgis_tiger_geocoder | tiger | countysub_lookup | co_code - postgis_tiger_geocoder | tiger | countysub_lookup | county - postgis_tiger_geocoder | tiger | countysub_lookup | cs_code - postgis_tiger_geocoder | tiger | countysub_lookup | name - postgis_tiger_geocoder | tiger | countysub_lookup | st_code - 
postgis_tiger_geocoder | tiger | countysub_lookup | state - postgis_tiger_geocoder | tiger | cousub | aland - postgis_tiger_geocoder | tiger | cousub | awater - postgis_tiger_geocoder | tiger | cousub | classfp - postgis_tiger_geocoder | tiger | cousub | cnectafp - postgis_tiger_geocoder | tiger | cousub | cosbidfp - postgis_tiger_geocoder | tiger | cousub | countyfp - postgis_tiger_geocoder | tiger | cousub | cousubfp - postgis_tiger_geocoder | tiger | cousub | cousubns - postgis_tiger_geocoder | tiger | cousub | funcstat - postgis_tiger_geocoder | tiger | cousub | gid - postgis_tiger_geocoder | tiger | cousub | intptlat - postgis_tiger_geocoder | tiger | cousub | intptlon - postgis_tiger_geocoder | tiger | cousub | lsad - postgis_tiger_geocoder | tiger | cousub | mtfcc - postgis_tiger_geocoder | tiger | cousub | name - postgis_tiger_geocoder | tiger | cousub | namelsad - postgis_tiger_geocoder | tiger | cousub | nctadvfp - postgis_tiger_geocoder | tiger | cousub | nectafp - postgis_tiger_geocoder | tiger | cousub | statefp - postgis_tiger_geocoder | tiger | cousub | the_geom - postgis_tiger_geocoder | tiger | direction_lookup | abbrev - postgis_tiger_geocoder | tiger | direction_lookup | name - postgis_tiger_geocoder | tiger | edges | artpath - postgis_tiger_geocoder | tiger | edges | countyfp - postgis_tiger_geocoder | tiger | edges | deckedroad - postgis_tiger_geocoder | tiger | edges | divroad - postgis_tiger_geocoder | tiger | edges | exttyp - postgis_tiger_geocoder | tiger | edges | featcat - postgis_tiger_geocoder | tiger | edges | fullname - postgis_tiger_geocoder | tiger | edges | gcseflg - postgis_tiger_geocoder | tiger | edges | gid - postgis_tiger_geocoder | tiger | edges | hydroflg - postgis_tiger_geocoder | tiger | edges | lfromadd - postgis_tiger_geocoder | tiger | edges | ltoadd - postgis_tiger_geocoder | tiger | edges | mtfcc - postgis_tiger_geocoder | tiger | edges | offsetl - postgis_tiger_geocoder | tiger | edges | offsetr - 
postgis_tiger_geocoder | tiger | edges | olfflg - postgis_tiger_geocoder | tiger | edges | passflg - postgis_tiger_geocoder | tiger | edges | persist - postgis_tiger_geocoder | tiger | edges | railflg - postgis_tiger_geocoder | tiger | edges | rfromadd - postgis_tiger_geocoder | tiger | edges | roadflg - postgis_tiger_geocoder | tiger | edges | rtoadd - postgis_tiger_geocoder | tiger | edges | smid - postgis_tiger_geocoder | tiger | edges | statefp - postgis_tiger_geocoder | tiger | edges | tfidl - postgis_tiger_geocoder | tiger | edges | tfidr - postgis_tiger_geocoder | tiger | edges | the_geom - postgis_tiger_geocoder | tiger | edges | tlid - postgis_tiger_geocoder | tiger | edges | tnidf - postgis_tiger_geocoder | tiger | edges | tnidt - postgis_tiger_geocoder | tiger | edges | ttyp - postgis_tiger_geocoder | tiger | edges | zipl - postgis_tiger_geocoder | tiger | edges | zipr - postgis_tiger_geocoder | tiger | faces | aiannhce - postgis_tiger_geocoder | tiger | faces | aiannhce00 - postgis_tiger_geocoder | tiger | faces | aiannhfp - postgis_tiger_geocoder | tiger | faces | aiannhfp00 - postgis_tiger_geocoder | tiger | faces | anrcfp - postgis_tiger_geocoder | tiger | faces | anrcfp00 - postgis_tiger_geocoder | tiger | faces | atotal - postgis_tiger_geocoder | tiger | faces | blkgrpce - postgis_tiger_geocoder | tiger | faces | blkgrpce00 - postgis_tiger_geocoder | tiger | faces | blkgrpce20 - postgis_tiger_geocoder | tiger | faces | blockce - postgis_tiger_geocoder | tiger | faces | blockce00 - postgis_tiger_geocoder | tiger | faces | blockce20 - postgis_tiger_geocoder | tiger | faces | cbsafp - postgis_tiger_geocoder | tiger | faces | cd108fp - postgis_tiger_geocoder | tiger | faces | cd111fp - postgis_tiger_geocoder | tiger | faces | cnectafp - postgis_tiger_geocoder | tiger | faces | comptyp - postgis_tiger_geocoder | tiger | faces | comptyp00 - postgis_tiger_geocoder | tiger | faces | conctyfp - postgis_tiger_geocoder | tiger | faces | conctyfp00 - 
postgis_tiger_geocoder | tiger | faces | countyfp - postgis_tiger_geocoder | tiger | faces | countyfp00 - postgis_tiger_geocoder | tiger | faces | countyfp20 - postgis_tiger_geocoder | tiger | faces | cousubfp - postgis_tiger_geocoder | tiger | faces | cousubfp00 - postgis_tiger_geocoder | tiger | faces | csafp - postgis_tiger_geocoder | tiger | faces | elsdlea - postgis_tiger_geocoder | tiger | faces | elsdlea00 - postgis_tiger_geocoder | tiger | faces | gid - postgis_tiger_geocoder | tiger | faces | intptlat - postgis_tiger_geocoder | tiger | faces | intptlon - postgis_tiger_geocoder | tiger | faces | lwflag - postgis_tiger_geocoder | tiger | faces | metdivfp - postgis_tiger_geocoder | tiger | faces | nctadvfp - postgis_tiger_geocoder | tiger | faces | nectafp - postgis_tiger_geocoder | tiger | faces | offset - postgis_tiger_geocoder | tiger | faces | placefp - postgis_tiger_geocoder | tiger | faces | placefp00 - postgis_tiger_geocoder | tiger | faces | puma5ce - postgis_tiger_geocoder | tiger | faces | puma5ce00 - postgis_tiger_geocoder | tiger | faces | scsdlea - postgis_tiger_geocoder | tiger | faces | scsdlea00 - postgis_tiger_geocoder | tiger | faces | sldlst - postgis_tiger_geocoder | tiger | faces | sldlst00 - postgis_tiger_geocoder | tiger | faces | sldust - postgis_tiger_geocoder | tiger | faces | sldust00 - postgis_tiger_geocoder | tiger | faces | statefp - postgis_tiger_geocoder | tiger | faces | statefp00 - postgis_tiger_geocoder | tiger | faces | statefp20 - postgis_tiger_geocoder | tiger | faces | submcdfp - postgis_tiger_geocoder | tiger | faces | submcdfp00 - postgis_tiger_geocoder | tiger | faces | tazce - postgis_tiger_geocoder | tiger | faces | tazce00 - postgis_tiger_geocoder | tiger | faces | tblkgpce - postgis_tiger_geocoder | tiger | faces | tfid - postgis_tiger_geocoder | tiger | faces | the_geom - postgis_tiger_geocoder | tiger | faces | tractce - postgis_tiger_geocoder | tiger | faces | tractce00 - postgis_tiger_geocoder | tiger | faces 
| tractce20 - postgis_tiger_geocoder | tiger | faces | trsubce - postgis_tiger_geocoder | tiger | faces | trsubce00 - postgis_tiger_geocoder | tiger | faces | trsubfp - postgis_tiger_geocoder | tiger | faces | trsubfp00 - postgis_tiger_geocoder | tiger | faces | ttractce - postgis_tiger_geocoder | tiger | faces | uace - postgis_tiger_geocoder | tiger | faces | uace00 - postgis_tiger_geocoder | tiger | faces | ugace - postgis_tiger_geocoder | tiger | faces | ugace00 - postgis_tiger_geocoder | tiger | faces | unsdlea - postgis_tiger_geocoder | tiger | faces | unsdlea00 - postgis_tiger_geocoder | tiger | faces | vtdst - postgis_tiger_geocoder | tiger | faces | vtdst00 - postgis_tiger_geocoder | tiger | faces | zcta5ce - postgis_tiger_geocoder | tiger | faces | zcta5ce00 - postgis_tiger_geocoder | tiger | featnames | fullname - postgis_tiger_geocoder | tiger | featnames | gid - postgis_tiger_geocoder | tiger | featnames | linearid - postgis_tiger_geocoder | tiger | featnames | mtfcc - postgis_tiger_geocoder | tiger | featnames | name - postgis_tiger_geocoder | tiger | featnames | paflag - postgis_tiger_geocoder | tiger | featnames | predir - postgis_tiger_geocoder | tiger | featnames | predirabrv - postgis_tiger_geocoder | tiger | featnames | prequal - postgis_tiger_geocoder | tiger | featnames | prequalabr - postgis_tiger_geocoder | tiger | featnames | pretyp - postgis_tiger_geocoder | tiger | featnames | pretypabrv - postgis_tiger_geocoder | tiger | featnames | statefp - postgis_tiger_geocoder | tiger | featnames | sufdir - postgis_tiger_geocoder | tiger | featnames | sufdirabrv - postgis_tiger_geocoder | tiger | featnames | sufqual - postgis_tiger_geocoder | tiger | featnames | sufqualabr - postgis_tiger_geocoder | tiger | featnames | suftyp - postgis_tiger_geocoder | tiger | featnames | suftypabrv - postgis_tiger_geocoder | tiger | featnames | tlid - postgis_tiger_geocoder | tiger | geocode_settings | category - postgis_tiger_geocoder | tiger | geocode_settings | 
name - postgis_tiger_geocoder | tiger | geocode_settings | setting - postgis_tiger_geocoder | tiger | geocode_settings | short_desc - postgis_tiger_geocoder | tiger | geocode_settings | unit - postgis_tiger_geocoder | tiger | geocode_settings_default | category - postgis_tiger_geocoder | tiger | geocode_settings_default | name - postgis_tiger_geocoder | tiger | geocode_settings_default | setting - postgis_tiger_geocoder | tiger | geocode_settings_default | short_desc - postgis_tiger_geocoder | tiger | geocode_settings_default | unit - postgis_tiger_geocoder | tiger | loader_lookuptables | columns_exclude - postgis_tiger_geocoder | tiger | loader_lookuptables | insert_mode - postgis_tiger_geocoder | tiger | loader_lookuptables | level_county - postgis_tiger_geocoder | tiger | loader_lookuptables | level_nation - postgis_tiger_geocoder | tiger | loader_lookuptables | level_state - postgis_tiger_geocoder | tiger | loader_lookuptables | load - postgis_tiger_geocoder | tiger | loader_lookuptables | lookup_name - postgis_tiger_geocoder | tiger | loader_lookuptables | post_load_process - postgis_tiger_geocoder | tiger | loader_lookuptables | pre_load_process - postgis_tiger_geocoder | tiger | loader_lookuptables | process_order - postgis_tiger_geocoder | tiger | loader_lookuptables | single_geom_mode - postgis_tiger_geocoder | tiger | loader_lookuptables | single_mode - postgis_tiger_geocoder | tiger | loader_lookuptables | table_name - postgis_tiger_geocoder | tiger | loader_lookuptables | website_root_override - postgis_tiger_geocoder | tiger | loader_platform | county_process_command - postgis_tiger_geocoder | tiger | loader_platform | declare_sect - postgis_tiger_geocoder | tiger | loader_platform | environ_set_command - postgis_tiger_geocoder | tiger | loader_platform | loader - postgis_tiger_geocoder | tiger | loader_platform | os - postgis_tiger_geocoder | tiger | loader_platform | path_sep - postgis_tiger_geocoder | tiger | loader_platform | pgbin - 
postgis_tiger_geocoder | tiger | loader_platform | psql - postgis_tiger_geocoder | tiger | loader_platform | unzip_command - postgis_tiger_geocoder | tiger | loader_platform | wget - postgis_tiger_geocoder | tiger | loader_variables | data_schema - postgis_tiger_geocoder | tiger | loader_variables | staging_fold - postgis_tiger_geocoder | tiger | loader_variables | staging_schema - postgis_tiger_geocoder | tiger | loader_variables | tiger_year - postgis_tiger_geocoder | tiger | loader_variables | website_root - postgis_tiger_geocoder | tiger | pagc_gaz | id - postgis_tiger_geocoder | tiger | pagc_gaz | is_custom - postgis_tiger_geocoder | tiger | pagc_gaz | seq - postgis_tiger_geocoder | tiger | pagc_gaz | stdword - postgis_tiger_geocoder | tiger | pagc_gaz | token - postgis_tiger_geocoder | tiger | pagc_gaz | word - postgis_tiger_geocoder | tiger | pagc_lex | id - postgis_tiger_geocoder | tiger | pagc_lex | is_custom - postgis_tiger_geocoder | tiger | pagc_lex | seq - postgis_tiger_geocoder | tiger | pagc_lex | stdword - postgis_tiger_geocoder | tiger | pagc_lex | token - postgis_tiger_geocoder | tiger | pagc_lex | word - postgis_tiger_geocoder | tiger | pagc_rules | id - postgis_tiger_geocoder | tiger | pagc_rules | is_custom - postgis_tiger_geocoder | tiger | pagc_rules | rule - postgis_tiger_geocoder | tiger | place | aland - postgis_tiger_geocoder | tiger | place | awater - postgis_tiger_geocoder | tiger | place | classfp - postgis_tiger_geocoder | tiger | place | cpi - postgis_tiger_geocoder | tiger | place | funcstat - postgis_tiger_geocoder | tiger | place | gid - postgis_tiger_geocoder | tiger | place | intptlat - postgis_tiger_geocoder | tiger | place | intptlon - postgis_tiger_geocoder | tiger | place | lsad - postgis_tiger_geocoder | tiger | place | mtfcc - postgis_tiger_geocoder | tiger | place | name - postgis_tiger_geocoder | tiger | place | namelsad - postgis_tiger_geocoder | tiger | place | pcicbsa - postgis_tiger_geocoder | tiger | place | 
pcinecta - postgis_tiger_geocoder | tiger | place | placefp - postgis_tiger_geocoder | tiger | place | placens - postgis_tiger_geocoder | tiger | place | plcidfp - postgis_tiger_geocoder | tiger | place | statefp - postgis_tiger_geocoder | tiger | place | the_geom - postgis_tiger_geocoder | tiger | place_lookup | name - postgis_tiger_geocoder | tiger | place_lookup | pl_code - postgis_tiger_geocoder | tiger | place_lookup | st_code - postgis_tiger_geocoder | tiger | place_lookup | state - postgis_tiger_geocoder | tiger | secondary_unit_lookup | abbrev - postgis_tiger_geocoder | tiger | secondary_unit_lookup | name - postgis_tiger_geocoder | tiger | state | aland - postgis_tiger_geocoder | tiger | state | awater - postgis_tiger_geocoder | tiger | state | division - postgis_tiger_geocoder | tiger | state | funcstat - postgis_tiger_geocoder | tiger | state | gid - postgis_tiger_geocoder | tiger | state | intptlat - postgis_tiger_geocoder | tiger | state | intptlon - postgis_tiger_geocoder | tiger | state | lsad - postgis_tiger_geocoder | tiger | state | mtfcc - postgis_tiger_geocoder | tiger | state | name - postgis_tiger_geocoder | tiger | state | region - postgis_tiger_geocoder | tiger | state | statefp - postgis_tiger_geocoder | tiger | state | statens - postgis_tiger_geocoder | tiger | state | stusps - postgis_tiger_geocoder | tiger | state | the_geom - postgis_tiger_geocoder | tiger | state_lookup | abbrev - postgis_tiger_geocoder | tiger | state_lookup | name - postgis_tiger_geocoder | tiger | state_lookup | st_code - postgis_tiger_geocoder | tiger | state_lookup | statefp - postgis_tiger_geocoder | tiger | street_type_lookup | abbrev - postgis_tiger_geocoder | tiger | street_type_lookup | is_hw - postgis_tiger_geocoder | tiger | street_type_lookup | name - postgis_tiger_geocoder | tiger | tabblock | aland - postgis_tiger_geocoder | tiger | tabblock | awater - postgis_tiger_geocoder | tiger | tabblock | blockce - postgis_tiger_geocoder | tiger | tabblock | 
countyfp - postgis_tiger_geocoder | tiger | tabblock | funcstat - postgis_tiger_geocoder | tiger | tabblock | gid - postgis_tiger_geocoder | tiger | tabblock | intptlat - postgis_tiger_geocoder | tiger | tabblock | intptlon - postgis_tiger_geocoder | tiger | tabblock | mtfcc - postgis_tiger_geocoder | tiger | tabblock | name - postgis_tiger_geocoder | tiger | tabblock | statefp - postgis_tiger_geocoder | tiger | tabblock | tabblock_id - postgis_tiger_geocoder | tiger | tabblock | the_geom - postgis_tiger_geocoder | tiger | tabblock | tractce - postgis_tiger_geocoder | tiger | tabblock | uace - postgis_tiger_geocoder | tiger | tabblock | ur - postgis_tiger_geocoder | tiger | tabblock20 | aland - postgis_tiger_geocoder | tiger | tabblock20 | awater - postgis_tiger_geocoder | tiger | tabblock20 | blockce - postgis_tiger_geocoder | tiger | tabblock20 | countyfp - postgis_tiger_geocoder | tiger | tabblock20 | funcstat - postgis_tiger_geocoder | tiger | tabblock20 | geoid - postgis_tiger_geocoder | tiger | tabblock20 | housing - postgis_tiger_geocoder | tiger | tabblock20 | intptlat - postgis_tiger_geocoder | tiger | tabblock20 | intptlon - postgis_tiger_geocoder | tiger | tabblock20 | mtfcc - postgis_tiger_geocoder | tiger | tabblock20 | name - postgis_tiger_geocoder | tiger | tabblock20 | pop - postgis_tiger_geocoder | tiger | tabblock20 | statefp - postgis_tiger_geocoder | tiger | tabblock20 | the_geom - postgis_tiger_geocoder | tiger | tabblock20 | tractce - postgis_tiger_geocoder | tiger | tabblock20 | uace - postgis_tiger_geocoder | tiger | tabblock20 | uatype - postgis_tiger_geocoder | tiger | tabblock20 | ur - postgis_tiger_geocoder | tiger | tract | aland - postgis_tiger_geocoder | tiger | tract | awater - postgis_tiger_geocoder | tiger | tract | countyfp - postgis_tiger_geocoder | tiger | tract | funcstat - postgis_tiger_geocoder | tiger | tract | gid - postgis_tiger_geocoder | tiger | tract | intptlat - postgis_tiger_geocoder | tiger | tract | intptlon - 
postgis_tiger_geocoder | tiger | tract | mtfcc - postgis_tiger_geocoder | tiger | tract | name - postgis_tiger_geocoder | tiger | tract | namelsad - postgis_tiger_geocoder | tiger | tract | statefp - postgis_tiger_geocoder | tiger | tract | the_geom - postgis_tiger_geocoder | tiger | tract | tract_id - postgis_tiger_geocoder | tiger | tract | tractce - postgis_tiger_geocoder | tiger | zcta5 | aland - postgis_tiger_geocoder | tiger | zcta5 | awater - postgis_tiger_geocoder | tiger | zcta5 | classfp - postgis_tiger_geocoder | tiger | zcta5 | funcstat - postgis_tiger_geocoder | tiger | zcta5 | gid - postgis_tiger_geocoder | tiger | zcta5 | intptlat - postgis_tiger_geocoder | tiger | zcta5 | intptlon - postgis_tiger_geocoder | tiger | zcta5 | mtfcc - postgis_tiger_geocoder | tiger | zcta5 | partflg - postgis_tiger_geocoder | tiger | zcta5 | statefp - postgis_tiger_geocoder | tiger | zcta5 | the_geom - postgis_tiger_geocoder | tiger | zcta5 | zcta5ce - postgis_tiger_geocoder | tiger | zip_lookup | cnt - postgis_tiger_geocoder | tiger | zip_lookup | co_code - postgis_tiger_geocoder | tiger | zip_lookup | county - postgis_tiger_geocoder | tiger | zip_lookup | cousub - postgis_tiger_geocoder | tiger | zip_lookup | cs_code - postgis_tiger_geocoder | tiger | zip_lookup | pl_code - postgis_tiger_geocoder | tiger | zip_lookup | place - postgis_tiger_geocoder | tiger | zip_lookup | st_code - postgis_tiger_geocoder | tiger | zip_lookup | state - postgis_tiger_geocoder | tiger | zip_lookup | zip - postgis_tiger_geocoder | tiger | zip_lookup_all | cnt - postgis_tiger_geocoder | tiger | zip_lookup_all | co_code - postgis_tiger_geocoder | tiger | zip_lookup_all | county - postgis_tiger_geocoder | tiger | zip_lookup_all | cousub - postgis_tiger_geocoder | tiger | zip_lookup_all | cs_code - postgis_tiger_geocoder | tiger | zip_lookup_all | pl_code - postgis_tiger_geocoder | tiger | zip_lookup_all | place - postgis_tiger_geocoder | tiger | zip_lookup_all | st_code - 
postgis_tiger_geocoder | tiger | zip_lookup_all | state - postgis_tiger_geocoder | tiger | zip_lookup_all | zip - postgis_tiger_geocoder | tiger | zip_lookup_base | city - postgis_tiger_geocoder | tiger | zip_lookup_base | county - postgis_tiger_geocoder | tiger | zip_lookup_base | state - postgis_tiger_geocoder | tiger | zip_lookup_base | statefp - postgis_tiger_geocoder | tiger | zip_lookup_base | zip - postgis_tiger_geocoder | tiger | zip_state | statefp - postgis_tiger_geocoder | tiger | zip_state | stusps - postgis_tiger_geocoder | tiger | zip_state | zip - postgis_tiger_geocoder | tiger | zip_state_loc | place - postgis_tiger_geocoder | tiger | zip_state_loc | statefp - postgis_tiger_geocoder | tiger | zip_state_loc | stusps - postgis_tiger_geocoder | tiger | zip_state_loc | zip - postgis_topology | topology | layer | child_id - postgis_topology | topology | layer | feature_column - postgis_topology | topology | layer | feature_type - postgis_topology | topology | layer | layer_id - postgis_topology | topology | layer | level - postgis_topology | topology | layer | schema_name - postgis_topology | topology | layer | table_name - postgis_topology | topology | layer | topology_id - postgis_topology | topology | topology | hasz - postgis_topology | topology | topology | id - postgis_topology | topology | topology | name - postgis_topology | topology | topology | precision - postgis_topology | topology | topology | srid - tealbase_vault | vault | secrets | created_at - tealbase_vault | vault | secrets | description - tealbase_vault | vault | secrets | id - tealbase_vault | vault | secrets | key_id - tealbase_vault | vault | secrets | name - tealbase_vault | vault | secrets | nonce - tealbase_vault | vault | secrets | secret - tealbase_vault | vault | secrets | updated_at - timescaledb | _timescaledb_config | bgw_job | application_name - timescaledb | _timescaledb_config | bgw_job | check_name - timescaledb | _timescaledb_config | bgw_job | check_schema - 
timescaledb | _timescaledb_config | bgw_job | config - timescaledb | _timescaledb_config | bgw_job | fixed_schedule - timescaledb | _timescaledb_config | bgw_job | hypertable_id - timescaledb | _timescaledb_config | bgw_job | id - timescaledb | _timescaledb_config | bgw_job | initial_start - timescaledb | _timescaledb_config | bgw_job | max_retries - timescaledb | _timescaledb_config | bgw_job | max_runtime - timescaledb | _timescaledb_config | bgw_job | owner - timescaledb | _timescaledb_config | bgw_job | proc_name - timescaledb | _timescaledb_config | bgw_job | proc_schema - timescaledb | _timescaledb_config | bgw_job | retry_period - timescaledb | _timescaledb_config | bgw_job | schedule_interval - timescaledb | _timescaledb_config | bgw_job | scheduled - timescaledb | _timescaledb_config | bgw_job | timezone - timescaledb | _timescaledb_internal | bgw_job_stat | consecutive_crashes - timescaledb | _timescaledb_internal | bgw_job_stat | consecutive_failures - timescaledb | _timescaledb_internal | bgw_job_stat | flags - timescaledb | _timescaledb_internal | bgw_job_stat | job_id - timescaledb | _timescaledb_internal | bgw_job_stat | last_finish - timescaledb | _timescaledb_internal | bgw_job_stat | last_run_success - timescaledb | _timescaledb_internal | bgw_job_stat | last_start - timescaledb | _timescaledb_internal | bgw_job_stat | last_successful_finish - timescaledb | _timescaledb_internal | bgw_job_stat | next_start - timescaledb | _timescaledb_internal | bgw_job_stat | total_crashes - timescaledb | _timescaledb_internal | bgw_job_stat | total_duration - timescaledb | _timescaledb_internal | bgw_job_stat | total_duration_failures - timescaledb | _timescaledb_internal | bgw_job_stat | total_failures - timescaledb | _timescaledb_internal | bgw_job_stat | total_runs - timescaledb | _timescaledb_internal | bgw_job_stat | total_successes - timescaledb | _timescaledb_internal | bgw_job_stat_history | data - timescaledb | _timescaledb_internal | 
bgw_job_stat_history | execution_finish - timescaledb | _timescaledb_internal | bgw_job_stat_history | execution_start - timescaledb | _timescaledb_internal | bgw_job_stat_history | id - timescaledb | _timescaledb_internal | bgw_job_stat_history | job_id - timescaledb | _timescaledb_internal | bgw_job_stat_history | pid - timescaledb | _timescaledb_internal | bgw_job_stat_history | succeeded - timescaledb | _timescaledb_internal | bgw_policy_chunk_stats | chunk_id - timescaledb | _timescaledb_internal | bgw_policy_chunk_stats | job_id - timescaledb | _timescaledb_internal | bgw_policy_chunk_stats | last_time_job_run - timescaledb | _timescaledb_internal | bgw_policy_chunk_stats | num_times_job_run - timescaledb | _timescaledb_cache | cache_inval_bgw_job | - timescaledb | _timescaledb_cache | cache_inval_extension | - timescaledb | _timescaledb_cache | cache_inval_hypertable | - timescaledb | _timescaledb_catalog | chunk | compressed_chunk_id - timescaledb | _timescaledb_catalog | chunk | creation_time - timescaledb | _timescaledb_catalog | chunk | dropped - timescaledb | _timescaledb_catalog | chunk | hypertable_id - timescaledb | _timescaledb_catalog | chunk | id - timescaledb | _timescaledb_catalog | chunk | osm_chunk - timescaledb | _timescaledb_catalog | chunk | schema_name - timescaledb | _timescaledb_catalog | chunk | status - timescaledb | _timescaledb_catalog | chunk | table_name - timescaledb | _timescaledb_catalog | chunk_column_stats | chunk_id - timescaledb | _timescaledb_catalog | chunk_column_stats | column_name - timescaledb | _timescaledb_catalog | chunk_column_stats | hypertable_id - timescaledb | _timescaledb_catalog | chunk_column_stats | id - timescaledb | _timescaledb_catalog | chunk_column_stats | range_end - timescaledb | _timescaledb_catalog | chunk_column_stats | range_start - timescaledb | _timescaledb_catalog | chunk_column_stats | valid - timescaledb | timescaledb_information | chunk_compression_settings | chunk - timescaledb | 
timescaledb_information | chunk_compression_settings | hypertable - timescaledb | timescaledb_information | chunk_compression_settings | orderby - timescaledb | timescaledb_information | chunk_compression_settings | segmentby - timescaledb | _timescaledb_catalog | chunk_constraint | chunk_id - timescaledb | _timescaledb_catalog | chunk_constraint | constraint_name - timescaledb | _timescaledb_catalog | chunk_constraint | dimension_slice_id - timescaledb | _timescaledb_catalog | chunk_constraint | hypertable_constraint_name - timescaledb | _timescaledb_catalog | chunk_index | chunk_id - timescaledb | _timescaledb_catalog | chunk_index | hypertable_id - timescaledb | _timescaledb_catalog | chunk_index | hypertable_index_name - timescaledb | _timescaledb_catalog | chunk_index | index_name - timescaledb | timescaledb_information | chunks | chunk_creation_time - timescaledb | timescaledb_information | chunks | chunk_name - timescaledb | timescaledb_information | chunks | chunk_schema - timescaledb | timescaledb_information | chunks | chunk_tablespace - timescaledb | timescaledb_information | chunks | hypertable_name - timescaledb | timescaledb_information | chunks | hypertable_schema - timescaledb | timescaledb_information | chunks | is_compressed - timescaledb | timescaledb_information | chunks | primary_dimension - timescaledb | timescaledb_information | chunks | primary_dimension_type - timescaledb | timescaledb_information | chunks | range_end - timescaledb | timescaledb_information | chunks | range_end_integer - timescaledb | timescaledb_information | chunks | range_start - timescaledb | timescaledb_information | chunks | range_start_integer - timescaledb | _timescaledb_internal | compressed_chunk_stats | chunk_name - timescaledb | _timescaledb_internal | compressed_chunk_stats | chunk_schema - timescaledb | _timescaledb_internal | compressed_chunk_stats | compressed_heap_size - timescaledb | _timescaledb_internal | compressed_chunk_stats | compressed_index_size - 
timescaledb | _timescaledb_internal | compressed_chunk_stats | compressed_toast_size - timescaledb | _timescaledb_internal | compressed_chunk_stats | compressed_total_size - timescaledb | _timescaledb_internal | compressed_chunk_stats | compression_status - timescaledb | _timescaledb_internal | compressed_chunk_stats | hypertable_name - timescaledb | _timescaledb_internal | compressed_chunk_stats | hypertable_schema - timescaledb | _timescaledb_internal | compressed_chunk_stats | uncompressed_heap_size - timescaledb | _timescaledb_internal | compressed_chunk_stats | uncompressed_index_size - timescaledb | _timescaledb_internal | compressed_chunk_stats | uncompressed_toast_size - timescaledb | _timescaledb_internal | compressed_chunk_stats | uncompressed_total_size - timescaledb | _timescaledb_catalog | compression_algorithm | description - timescaledb | _timescaledb_catalog | compression_algorithm | id - timescaledb | _timescaledb_catalog | compression_algorithm | name - timescaledb | _timescaledb_catalog | compression_algorithm | version - timescaledb | _timescaledb_catalog | compression_chunk_size | chunk_id - timescaledb | _timescaledb_catalog | compression_chunk_size | compressed_chunk_id - timescaledb | _timescaledb_catalog | compression_chunk_size | compressed_heap_size - timescaledb | _timescaledb_catalog | compression_chunk_size | compressed_index_size - timescaledb | _timescaledb_catalog | compression_chunk_size | compressed_toast_size - timescaledb | _timescaledb_catalog | compression_chunk_size | numrows_frozen_immediately - timescaledb | _timescaledb_catalog | compression_chunk_size | numrows_post_compression - timescaledb | _timescaledb_catalog | compression_chunk_size | numrows_pre_compression - timescaledb | _timescaledb_catalog | compression_chunk_size | uncompressed_heap_size - timescaledb | _timescaledb_catalog | compression_chunk_size | uncompressed_index_size - timescaledb | _timescaledb_catalog | compression_chunk_size | uncompressed_toast_size 
- timescaledb | timescaledb_information | compression_settings | attname - timescaledb | timescaledb_information | compression_settings | hypertable_name - timescaledb | timescaledb_information | compression_settings | hypertable_schema - timescaledb | _timescaledb_catalog | compression_settings | orderby - timescaledb | timescaledb_information | compression_settings | orderby_asc - timescaledb | timescaledb_information | compression_settings | orderby_column_index - timescaledb | _timescaledb_catalog | compression_settings | orderby_desc - timescaledb | timescaledb_information | compression_settings | orderby_nullsfirst - timescaledb | _timescaledb_catalog | compression_settings | orderby_nullsfirst - timescaledb | _timescaledb_catalog | compression_settings | relid - timescaledb | _timescaledb_catalog | compression_settings | segmentby - timescaledb | timescaledb_information | compression_settings | segmentby_column_index - timescaledb | _timescaledb_catalog | continuous_agg | direct_view_name - timescaledb | _timescaledb_catalog | continuous_agg | direct_view_schema - timescaledb | _timescaledb_catalog | continuous_agg | finalized - timescaledb | _timescaledb_catalog | continuous_agg | mat_hypertable_id - timescaledb | _timescaledb_catalog | continuous_agg | materialized_only - timescaledb | _timescaledb_catalog | continuous_agg | parent_mat_hypertable_id - timescaledb | _timescaledb_catalog | continuous_agg | partial_view_name - timescaledb | _timescaledb_catalog | continuous_agg | partial_view_schema - timescaledb | _timescaledb_catalog | continuous_agg | raw_hypertable_id - timescaledb | _timescaledb_catalog | continuous_agg | user_view_name - timescaledb | _timescaledb_catalog | continuous_agg | user_view_schema - timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan | end_ts - timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan | mat_hypertable_id - timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan | start_ts - 
timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan | user_view_definition - timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | config - timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | end_ts - timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | mat_hypertable_id - timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | start_ts - timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | status - timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | step_id - timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | type - timescaledb | timescaledb_information | continuous_aggregates | compression_enabled - timescaledb | timescaledb_information | continuous_aggregates | finalized - timescaledb | timescaledb_information | continuous_aggregates | hypertable_name - timescaledb | timescaledb_information | continuous_aggregates | hypertable_schema - timescaledb | timescaledb_information | continuous_aggregates | materialization_hypertable_name - timescaledb | timescaledb_information | continuous_aggregates | materialization_hypertable_schema - timescaledb | timescaledb_information | continuous_aggregates | materialized_only - timescaledb | timescaledb_information | continuous_aggregates | view_definition - timescaledb | timescaledb_information | continuous_aggregates | view_name - timescaledb | timescaledb_information | continuous_aggregates | view_owner - timescaledb | timescaledb_information | continuous_aggregates | view_schema - timescaledb | _timescaledb_catalog | continuous_aggs_bucket_function | bucket_fixed_width - timescaledb | _timescaledb_catalog | continuous_aggs_bucket_function | bucket_func - timescaledb | _timescaledb_catalog | continuous_aggs_bucket_function | bucket_offset - timescaledb | _timescaledb_catalog | continuous_aggs_bucket_function | bucket_origin - timescaledb | _timescaledb_catalog | 
continuous_aggs_bucket_function | bucket_timezone - timescaledb | _timescaledb_catalog | continuous_aggs_bucket_function | bucket_width - timescaledb | _timescaledb_catalog | continuous_aggs_bucket_function | mat_hypertable_id - timescaledb | _timescaledb_catalog | continuous_aggs_hypertable_invalidation_log | greatest_modified_value - timescaledb | _timescaledb_catalog | continuous_aggs_hypertable_invalidation_log | hypertable_id - timescaledb | _timescaledb_catalog | continuous_aggs_hypertable_invalidation_log | lowest_modified_value - timescaledb | _timescaledb_catalog | continuous_aggs_invalidation_threshold | hypertable_id - timescaledb | _timescaledb_catalog | continuous_aggs_invalidation_threshold | watermark - timescaledb | _timescaledb_catalog | continuous_aggs_materialization_invalidation_log | greatest_modified_value - timescaledb | _timescaledb_catalog | continuous_aggs_materialization_invalidation_log | lowest_modified_value - timescaledb | _timescaledb_catalog | continuous_aggs_materialization_invalidation_log | materialization_id - timescaledb | _timescaledb_catalog | continuous_aggs_watermark | mat_hypertable_id - timescaledb | _timescaledb_catalog | continuous_aggs_watermark | watermark - timescaledb | _timescaledb_catalog | dimension | aligned - timescaledb | _timescaledb_catalog | dimension | column_name - timescaledb | _timescaledb_catalog | dimension | column_type - timescaledb | _timescaledb_catalog | dimension | compress_interval_length - timescaledb | _timescaledb_catalog | dimension | hypertable_id - timescaledb | _timescaledb_catalog | dimension | id - timescaledb | _timescaledb_catalog | dimension | integer_now_func - timescaledb | _timescaledb_catalog | dimension | integer_now_func_schema - timescaledb | _timescaledb_catalog | dimension | interval_length - timescaledb | _timescaledb_catalog | dimension | num_slices - timescaledb | _timescaledb_catalog | dimension | partitioning_func - timescaledb | _timescaledb_catalog | dimension | 
partitioning_func_schema - timescaledb | _timescaledb_catalog | dimension_slice | dimension_id - timescaledb | _timescaledb_catalog | dimension_slice | id - timescaledb | _timescaledb_catalog | dimension_slice | range_end - timescaledb | _timescaledb_catalog | dimension_slice | range_start - timescaledb | timescaledb_information | dimensions | column_name - timescaledb | timescaledb_information | dimensions | column_type - timescaledb | timescaledb_information | dimensions | dimension_number - timescaledb | timescaledb_information | dimensions | dimension_type - timescaledb | timescaledb_information | dimensions | hypertable_name - timescaledb | timescaledb_information | dimensions | hypertable_schema - timescaledb | timescaledb_information | dimensions | integer_interval - timescaledb | timescaledb_information | dimensions | integer_now_func - timescaledb | timescaledb_information | dimensions | num_partitions - timescaledb | timescaledb_information | dimensions | time_interval - timescaledb | _timescaledb_catalog | hypertable | associated_schema_name - timescaledb | _timescaledb_catalog | hypertable | associated_table_prefix - timescaledb | _timescaledb_catalog | hypertable | chunk_sizing_func_name - timescaledb | _timescaledb_catalog | hypertable | chunk_sizing_func_schema - timescaledb | _timescaledb_catalog | hypertable | chunk_target_size - timescaledb | _timescaledb_catalog | hypertable | compressed_hypertable_id - timescaledb | _timescaledb_catalog | hypertable | compression_state - timescaledb | _timescaledb_catalog | hypertable | id - timescaledb | _timescaledb_catalog | hypertable | num_dimensions - timescaledb | _timescaledb_catalog | hypertable | schema_name - timescaledb | _timescaledb_catalog | hypertable | status - timescaledb | _timescaledb_catalog | hypertable | table_name - timescaledb | _timescaledb_internal | hypertable_chunk_local_size | chunk_id - timescaledb | _timescaledb_internal | hypertable_chunk_local_size | chunk_name - timescaledb | 
_timescaledb_internal | hypertable_chunk_local_size | chunk_schema - timescaledb | _timescaledb_internal | hypertable_chunk_local_size | compressed_heap_size - timescaledb | _timescaledb_internal | hypertable_chunk_local_size | compressed_index_size - timescaledb | _timescaledb_internal | hypertable_chunk_local_size | compressed_toast_size - timescaledb | _timescaledb_internal | hypertable_chunk_local_size | compressed_total_size - timescaledb | _timescaledb_internal | hypertable_chunk_local_size | heap_bytes - timescaledb | _timescaledb_internal | hypertable_chunk_local_size | hypertable_id - timescaledb | _timescaledb_internal | hypertable_chunk_local_size | hypertable_name - timescaledb | _timescaledb_internal | hypertable_chunk_local_size | hypertable_schema - timescaledb | _timescaledb_internal | hypertable_chunk_local_size | index_bytes - timescaledb | _timescaledb_internal | hypertable_chunk_local_size | toast_bytes - timescaledb | _timescaledb_internal | hypertable_chunk_local_size | total_bytes - timescaledb | timescaledb_information | hypertable_compression_settings | compress_interval_length - timescaledb | timescaledb_information | hypertable_compression_settings | hypertable - timescaledb | timescaledb_information | hypertable_compression_settings | orderby - timescaledb | timescaledb_information | hypertable_compression_settings | segmentby - timescaledb | timescaledb_information | hypertables | compression_enabled - timescaledb | timescaledb_information | hypertables | hypertable_name - timescaledb | timescaledb_information | hypertables | hypertable_schema - timescaledb | timescaledb_information | hypertables | num_chunks - timescaledb | timescaledb_information | hypertables | num_dimensions - timescaledb | timescaledb_information | hypertables | owner - timescaledb | timescaledb_information | hypertables | tablespaces - timescaledb | timescaledb_information | job_errors | err_message - timescaledb | timescaledb_information | job_errors | 
finish_time - timescaledb | timescaledb_information | job_errors | job_id - timescaledb | timescaledb_information | job_errors | pid - timescaledb | timescaledb_information | job_errors | proc_name - timescaledb | timescaledb_information | job_errors | proc_schema - timescaledb | timescaledb_information | job_errors | sqlerrcode - timescaledb | timescaledb_information | job_errors | start_time - timescaledb | timescaledb_information | job_history | config - timescaledb | timescaledb_information | job_history | err_message - timescaledb | timescaledb_information | job_history | finish_time - timescaledb | timescaledb_information | job_history | id - timescaledb | timescaledb_information | job_history | job_id - timescaledb | timescaledb_information | job_history | pid - timescaledb | timescaledb_information | job_history | proc_name - timescaledb | timescaledb_information | job_history | proc_schema - timescaledb | timescaledb_information | job_history | sqlerrcode - timescaledb | timescaledb_information | job_history | start_time - timescaledb | timescaledb_information | job_history | succeeded - timescaledb | timescaledb_information | job_stats | hypertable_name - timescaledb | timescaledb_information | job_stats | hypertable_schema - timescaledb | timescaledb_information | job_stats | job_id - timescaledb | timescaledb_information | job_stats | job_status - timescaledb | timescaledb_information | job_stats | last_run_duration - timescaledb | timescaledb_information | job_stats | last_run_started_at - timescaledb | timescaledb_information | job_stats | last_run_status - timescaledb | timescaledb_information | job_stats | last_successful_finish - timescaledb | timescaledb_information | job_stats | next_start - timescaledb | timescaledb_information | job_stats | total_failures - timescaledb | timescaledb_information | job_stats | total_runs - timescaledb | timescaledb_information | job_stats | total_successes - timescaledb | timescaledb_information | jobs | 
application_name - timescaledb | timescaledb_information | jobs | check_name - timescaledb | timescaledb_information | jobs | check_schema - timescaledb | timescaledb_information | jobs | config - timescaledb | timescaledb_information | jobs | fixed_schedule - timescaledb | timescaledb_information | jobs | hypertable_name - timescaledb | timescaledb_information | jobs | hypertable_schema - timescaledb | timescaledb_information | jobs | initial_start - timescaledb | timescaledb_information | jobs | job_id - timescaledb | timescaledb_information | jobs | max_retries - timescaledb | timescaledb_information | jobs | max_runtime - timescaledb | timescaledb_information | jobs | next_start - timescaledb | timescaledb_information | jobs | owner - timescaledb | timescaledb_information | jobs | proc_name - timescaledb | timescaledb_information | jobs | proc_schema - timescaledb | timescaledb_information | jobs | retry_period - timescaledb | timescaledb_information | jobs | schedule_interval - timescaledb | timescaledb_information | jobs | scheduled - timescaledb | _timescaledb_catalog | metadata | include_in_telemetry - timescaledb | _timescaledb_catalog | metadata | key - timescaledb | _timescaledb_catalog | metadata | value - timescaledb | timescaledb_experimental | policies | config - timescaledb | timescaledb_experimental | policies | hypertable_name - timescaledb | timescaledb_experimental | policies | hypertable_schema - timescaledb | timescaledb_experimental | policies | proc_name - timescaledb | timescaledb_experimental | policies | proc_schema - timescaledb | timescaledb_experimental | policies | relation_name - timescaledb | timescaledb_experimental | policies | relation_schema - timescaledb | timescaledb_experimental | policies | schedule_interval - timescaledb | _timescaledb_catalog | tablespace | hypertable_id - timescaledb | _timescaledb_catalog | tablespace | id - timescaledb | _timescaledb_catalog | tablespace | tablespace_name - timescaledb | 
_timescaledb_catalog | telemetry_event | body - timescaledb | _timescaledb_catalog | telemetry_event | created - timescaledb | _timescaledb_catalog | telemetry_event | tag - wrappers | public | wrappers_fdw_stats | bytes_in - wrappers | public | wrappers_fdw_stats | bytes_out - wrappers | public | wrappers_fdw_stats | create_times - wrappers | public | wrappers_fdw_stats | created_at - wrappers | public | wrappers_fdw_stats | fdw_name - wrappers | public | wrappers_fdw_stats | metadata - wrappers | public | wrappers_fdw_stats | rows_in - wrappers | public | wrappers_fdw_stats | rows_out - wrappers | public | wrappers_fdw_stats | updated_at -(1086 rows) - diff --git a/nix/tests/expected/hypopg.out b/nix/tests/expected/hypopg.out deleted file mode 100644 index 35c8a5b..0000000 --- a/nix/tests/expected/hypopg.out +++ /dev/null @@ -1,14 +0,0 @@ -create schema v; -create table v.samp( - id int -); -select 1 from hypopg_create_index($$ - create index on v.samp(id) -$$); - ?column? ----------- - 1 -(1 row) - -drop schema v cascade; -NOTICE: drop cascades to table v.samp diff --git a/nix/tests/expected/index_advisor.out b/nix/tests/expected/index_advisor.out deleted file mode 100644 index 5a269ba..0000000 --- a/nix/tests/expected/index_advisor.out +++ /dev/null @@ -1,16 +0,0 @@ -create schema v; -create table v.book( - id int primary key, - title text not null -); -select - index_statements, errors -from - index_advisor('select id from v.book where title = $1'); - index_statements | errors -------------------------------------------------+-------- - {"CREATE INDEX ON v.book USING btree (title)"} | {} -(1 row) - -drop schema v cascade; -NOTICE: drop cascades to table v.book diff --git a/nix/tests/expected/pg-safeupdate.out b/nix/tests/expected/pg-safeupdate.out deleted file mode 100644 index f910011..0000000 --- a/nix/tests/expected/pg-safeupdate.out +++ /dev/null @@ -1,12 +0,0 @@ -load 'safeupdate'; -set safeupdate.enabled=1; -create schema v; -create table v.foo( - id 
int, - val text -); -update v.foo - set val = 'bar'; -ERROR: UPDATE requires a WHERE clause -drop schema v cascade; -NOTICE: drop cascades to table v.foo diff --git a/nix/tests/expected/pg_graphql.out b/nix/tests/expected/pg_graphql.out deleted file mode 100644 index 63a3520..0000000 --- a/nix/tests/expected/pg_graphql.out +++ /dev/null @@ -1,259 +0,0 @@ -begin; - comment on schema public is '@graphql({"inflect_names": true})'; - create table account( - id serial primary key, - email varchar(255) not null, - priority int, - status text default 'active' - ); - create table blog( - id serial primary key, - owner_id integer not null references account(id) - ); - comment on table blog is e'@graphql({"totalCount": {"enabled": true}})'; - -- Make sure functions still work - create function _echo_email(account) - returns text - language sql - as $$ select $1.email $$; - /* - Literals - */ - select graphql.resolve($$ - mutation { - insertIntoAccountCollection(objects: [ - { email: "foo@barsley.com", priority: 1 }, - { email: "bar@foosworth.com" } - ]) { - affectedCount - records { - id - status - echoEmail - blogCollection { - totalCount - } - } - } - } - $$); - resolve ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - {"data": {"insertIntoAccountCollection": {"records": [{"id": 1, "status": "active", "echoEmail": "foo@barsley.com", "blogCollection": {"totalCount": 0}}, {"id": 2, "status": "active", "echoEmail": "bar@foosworth.com", "blogCollection": {"totalCount": 0}}], "affectedCount": 2}}} -(1 row) - - select graphql.resolve($$ - mutation { - insertIntoBlogCollection(objects: [{ - ownerId: 1 - }]) { - records { - id - owner { - id - } - } - } - } - $$); - resolve 
--------------------------------------------------------------------------------------- - {"data": {"insertIntoBlogCollection": {"records": [{"id": 1, "owner": {"id": 1}}]}}} -(1 row) - - -- Override a default on status with null - select graphql.resolve($$ - mutation { - insertIntoAccountCollection(objects: [ - { email: "baz@baz.com", status: null }, - ]) { - affectedCount - records { - email - status - } - } - } - $$); - resolve ------------------------------------------------------------------------------------------------------------------------- - {"data": {"insertIntoAccountCollection": {"records": [{"email": "baz@baz.com", "status": null}], "affectedCount": 1}}} -(1 row) - - /* - Variables - */ - select graphql.resolve($$ - mutation newAccount($emailAddress: String) { - xyz: insertIntoAccountCollection(objects: [ - { email: $emailAddress }, - { email: "other@email.com" } - ]) { - affectedCount - records { - id - email - } - } - } - $$, - variables := '{"emailAddress": "foo@bar.com"}'::jsonb - ); - resolve --------------------------------------------------------------------------------------------------------------------------------- - {"data": {"xyz": {"records": [{"id": 4, "email": "foo@bar.com"}, {"id": 5, "email": "other@email.com"}], "affectedCount": 2}}} -(1 row) - - -- Variable override of default with null results in null - select graphql.resolve($$ - mutation newAccount($status: String) { - xyz: insertIntoAccountCollection(objects: [ - { email: "1@email.com", status: $status} - ]) { - affectedCount - records { - email - status - } - } - } - $$, - variables := '{"status": null}'::jsonb - ); - resolve ------------------------------------------------------------------------------------------------- - {"data": {"xyz": {"records": [{"email": "1@email.com", "status": null}], "affectedCount": 1}}} -(1 row) - - -- Skipping variable override of default results in default - select graphql.resolve($$ - mutation newAccount($status: String) { - xyz: 
insertIntoAccountCollection(objects: [ - { email: "x@y.com", status: $status}, - ]) { - affectedCount - records { - email - status - } - } - } - $$, - variables := '{}'::jsonb - ); - resolve ------------------------------------------------------------------------------------------------- - {"data": {"xyz": {"records": [{"email": "x@y.com", "status": "active"}], "affectedCount": 1}}} -(1 row) - - select graphql.resolve($$ - mutation newAccount($acc: AccountInsertInput!) { - insertIntoAccountCollection(objects: [$acc]) { - affectedCount - records { - id - email - } - } - } - $$, - variables := '{"acc": {"email": "bar@foo.com"}}'::jsonb - ); - resolve ------------------------------------------------------------------------------------------------------------------ - {"data": {"insertIntoAccountCollection": {"records": [{"id": 8, "email": "bar@foo.com"}], "affectedCount": 1}}} -(1 row) - - select graphql.resolve($$ - mutation newAccounts($acc: [AccountInsertInput!]!) { - insertIntoAccountCollection(objects: $accs) { - affectedCount - records { - id - email - } - } - } - $$, - variables := '{"accs": [{"email": "bar@foo.com"}]}'::jsonb - ); - resolve ------------------------------------------------------------------------------------------------------------------ - {"data": {"insertIntoAccountCollection": {"records": [{"id": 9, "email": "bar@foo.com"}], "affectedCount": 1}}} -(1 row) - - -- Single object coerces to a list - select graphql.resolve($$ - mutation { - insertIntoBlogCollection(objects: {ownerId: 1}) { - affectedCount - } - } - $$); - resolve --------------------------------------------------------------- - {"data": {"insertIntoBlogCollection": {"affectedCount": 1}}} -(1 row) - - /* - Errors - */ - -- Field does not exist - select graphql.resolve($$ - mutation createAccount($acc: AccountInsertInput) { - insertIntoAccountCollection(objects: [$acc]) { - affectedCount - records { - id - email - } - } - } - $$, - variables := '{"acc": {"doesNotExist": 
"other"}}'::jsonb - ); - resolve ---------------------------------------------------------------------------------------------------------------------- - {"data": null, "errors": [{"message": "Input for type AccountInsertInput contains extra keys [\"doesNotExist\"]"}]} -(1 row) - - -- Wrong input type (list of string, not list of object) - select graphql.resolve($$ - mutation { - insertIntoBlogCollection(objects: ["not an object"]) { - affectedCount - } - } - $$); - resolve ------------------------------------------------------------------------------------ - {"data": null, "errors": [{"message": "Invalid input for BlogInsertInput type"}]} -(1 row) - - -- objects argument is missing - select graphql.resolve($$ - mutation { - insertIntoBlogCollection { - affectedCount - } - } - $$); - resolve ---------------------------------------------------------------------------- - {"data": null, "errors": [{"message": "Invalid input for NonNull type"}]} -(1 row) - - -- Empty call - select graphql.resolve($$ - mutation { - insertIntoBlogCollection(objects: []) { - affectedCount - } - } - $$); - resolve --------------------------------------------------------------------------------------------- - {"data": null, "errors": [{"message": "At least one record must be provided to objects"}]} -(1 row) - -rollback; diff --git a/nix/tests/expected/pg_hashids.out b/nix/tests/expected/pg_hashids.out deleted file mode 100644 index 393218e..0000000 --- a/nix/tests/expected/pg_hashids.out +++ /dev/null @@ -1,36 +0,0 @@ -select id_encode(1001); -- Result: jNl - id_encode ------------ - jNl -(1 row) - -select id_encode(1234567, 'This is my salt'); -- Result: Pdzxp - id_encode ------------ - Pdzxp -(1 row) - -select id_encode(1234567, 'This is my salt', 10); -- Result: PlRPdzxpR7 - id_encode ------------- - PlRPdzxpR7 -(1 row) - -select id_encode(1234567, 'This is my salt', 10, 'abcdefghijABCDxFGHIJ1234567890'); -- Result: 3GJ956J9B9 - id_encode ------------- - 3GJ956J9B9 -(1 row) - -select 
id_decode('PlRPdzxpR7', 'This is my salt', 10); -- Result: 1234567 - id_decode ------------ - {1234567} -(1 row) - -select id_decode('3GJ956J9B9', 'This is my salt', 10, 'abcdefghijABCDxFGHIJ1234567890'); -- Result: 1234567 - id_decode ------------ - {1234567} -(1 row) - diff --git a/nix/tests/expected/pg_jsonschema.out b/nix/tests/expected/pg_jsonschema.out deleted file mode 100644 index c291141..0000000 --- a/nix/tests/expected/pg_jsonschema.out +++ /dev/null @@ -1,73 +0,0 @@ -begin; --- Test json_matches_schema -create table customer( - id serial primary key, - metadata json, - check ( - json_matches_schema( - '{ - "type": "object", - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string", - "maxLength": 16 - } - } - } - }', - metadata - ) - ) -); -insert into customer(metadata) -values ('{"tags": ["vip", "darkmode-ui"]}'); --- Test jsonb_matches_schema -select - jsonb_matches_schema( - '{ - "type": "object", - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string", - "maxLength": 16 - } - } - } - }', - '{"tags": ["vip", "darkmode-ui"]}'::jsonb -); - jsonb_matches_schema ----------------------- - t -(1 row) - --- Test jsonschema_is_valid -select - jsonschema_is_valid( - '{ - "type": "object", - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string", - "maxLength": 16 - } - } - } - }'); - jsonschema_is_valid ---------------------- - t -(1 row) - --- Test invalid payload -insert into customer(metadata) -values ('{"tags": [1, 3]}'); -ERROR: new row for relation "customer" violates check constraint "customer_metadata_check" -DETAIL: Failing row contains (2, {"tags": [1, 3]}). 
-rollback; diff --git a/nix/tests/expected/pg_net.out b/nix/tests/expected/pg_net.out deleted file mode 100644 index 6b3ca5b..0000000 --- a/nix/tests/expected/pg_net.out +++ /dev/null @@ -1,11 +0,0 @@ --- This is a very basic test because you can't get the value returned --- by a pg_net request in the same transaction that created it; -select - net.http_get ( - 'https://postman-echo.com/get?foo1=bar1&foo2=bar2' - ) as request_id; - request_id ------------- - 1 -(1 row) - diff --git a/nix/tests/expected/pg_plan_filter.out b/nix/tests/expected/pg_plan_filter.out deleted file mode 100644 index 4bdcd65..0000000 --- a/nix/tests/expected/pg_plan_filter.out +++ /dev/null @@ -1,16 +0,0 @@ -begin; - load 'plan_filter'; - create schema v; - -- create a sample table - create table v.test_table ( - id serial primary key, - data text - ); - -- insert some test data - insert into v.test_table (data) - values ('sample1'), ('sample2'), ('sample3'); - set local plan_filter.statement_cost_limit = 0.001; - select * from v.test_table; -ERROR: plan cost limit exceeded -HINT: The plan for your query shows that it would probably have an excessive run time. This may be due to a logic error in the SQL, or it maybe just a very costly query. Rewrite your query or increase the configuration parameter "plan_filter.statement_cost_limit". 
-rollback; diff --git a/nix/tests/expected/pg_stat_monitor.out b/nix/tests/expected/pg_stat_monitor.out deleted file mode 100644 index f4d9069..0000000 --- a/nix/tests/expected/pg_stat_monitor.out +++ /dev/null @@ -1,10 +0,0 @@ -select - * -from - pg_stat_monitor -where - false; - bucket | bucket_start_time | userid | username | dbid | datname | client_ip | pgsm_query_id | queryid | toplevel | top_queryid | query | comments | planid | query_plan | top_query | application_name | relations | cmd_type | cmd_type_text | elevel | sqlcode | message | calls | total_exec_time | min_exec_time | max_exec_time | mean_exec_time | stddev_exec_time | rows | shared_blks_hit | shared_blks_read | shared_blks_dirtied | shared_blks_written | local_blks_hit | local_blks_read | local_blks_dirtied | local_blks_written | temp_blks_read | temp_blks_written | blk_read_time | blk_write_time | temp_blk_read_time | temp_blk_write_time | resp_calls | cpu_user_time | cpu_sys_time | wal_records | wal_fpi | wal_bytes | bucket_done | plans | total_plan_time | min_plan_time | max_plan_time | mean_plan_time | stddev_plan_time | jit_functions | jit_generation_time | jit_inlining_count | jit_inlining_time | jit_optimization_count | jit_optimization_time | jit_emission_count | jit_emission_time 
---------+-------------------+--------+----------+------+---------+-----------+---------------+---------+----------+-------------+-------+----------+--------+------------+-----------+------------------+-----------+----------+---------------+--------+---------+---------+-------+-----------------+---------------+---------------+----------------+------------------+------+-----------------+------------------+---------------------+---------------------+----------------+-----------------+--------------------+--------------------+----------------+-------------------+---------------+----------------+--------------------+---------------------+------------+---------------+--------------+-------------+---------+-----------+-------------+-------+-----------------+---------------+---------------+----------------+------------------+---------------+---------------------+--------------------+-------------------+------------------------+-----------------------+--------------------+------------------- -(0 rows) - diff --git a/nix/tests/expected/pg_tle.out b/nix/tests/expected/pg_tle.out deleted file mode 100644 index cffce1d..0000000 --- a/nix/tests/expected/pg_tle.out +++ /dev/null @@ -1,91 +0,0 @@ -select - pgtle.install_extension( - 'pg_distance', - '0.1', - 'Distance functions for two points', - $_pg_tle_$ - CREATE FUNCTION dist(x1 float8, y1 float8, x2 float8, y2 float8, norm int) - RETURNS float8 - AS $$ - SELECT (abs(x2 - x1) ^ norm + abs(y2 - y1) ^ norm) ^ (1::float8 / norm); - $$ LANGUAGE SQL; - - CREATE FUNCTION manhattan_dist(x1 float8, y1 float8, x2 float8, y2 float8) - RETURNS float8 - AS $$ - SELECT dist(x1, y1, x2, y2, 1); - $$ LANGUAGE SQL; - - CREATE FUNCTION euclidean_dist(x1 float8, y1 float8, x2 float8, y2 float8) - RETURNS float8 - AS $$ - SELECT dist(x1, y1, x2, y2, 2); - $$ LANGUAGE SQL; - $_pg_tle_$ - ); - install_extension -------------------- - t -(1 row) - -create extension pg_distance; -select manhattan_dist(1, 1, 5, 5); - manhattan_dist ----------------- 
- 8 -(1 row) - -select euclidean_dist(1, 1, 5, 5); - euclidean_dist -------------------- - 5.656854249492381 -(1 row) - -SELECT pgtle.install_update_path( - 'pg_distance', - '0.1', - '0.2', - $_pg_tle_$ - CREATE OR REPLACE FUNCTION dist(x1 float8, y1 float8, x2 float8, y2 float8, norm int) - RETURNS float8 - AS $$ - SELECT (abs(x2 - x1) ^ norm + abs(y2 - y1) ^ norm) ^ (1::float8 / norm); - $$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE; - - CREATE OR REPLACE FUNCTION manhattan_dist(x1 float8, y1 float8, x2 float8, y2 float8) - RETURNS float8 - AS $$ - SELECT dist(x1, y1, x2, y2, 1); - $$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE; - - CREATE OR REPLACE FUNCTION euclidean_dist(x1 float8, y1 float8, x2 float8, y2 float8) - RETURNS float8 - AS $$ - SELECT dist(x1, y1, x2, y2, 2); - $$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE; - $_pg_tle_$ - ); - install_update_path ---------------------- - t -(1 row) - -select - pgtle.set_default_version('pg_distance', '0.2'); - set_default_version ---------------------- - t -(1 row) - -alter extension pg_distance update; -drop extension pg_distance; -select - pgtle.uninstall_extension('pg_distance'); - uninstall_extension ---------------------- - t -(1 row) - --- Restore original state if any of the above fails -drop extension pg_tle cascade; -create extension pg_tle; diff --git a/nix/tests/expected/pgaudit.out b/nix/tests/expected/pgaudit.out deleted file mode 100644 index 1937be6..0000000 --- a/nix/tests/expected/pgaudit.out +++ /dev/null @@ -1,24 +0,0 @@ --- Note: there is no test that the logs were correctly output. 
Only checking for exceptions -set pgaudit.log = 'write, ddl'; -set pgaudit.log_relation = on; -set pgaudit.log_level = notice; -create schema v; -create table v.account( - id int, - name text, - password text, - description text -); -insert into v.account (id, name, password, description) -values (1, 'user1', 'HASH1', 'blah, blah'); -select - * -from - v.account; - id | name | password | description -----+-------+----------+------------- - 1 | user1 | HASH1 | blah, blah -(1 row) - -drop schema v cascade; -NOTICE: drop cascades to table v.account diff --git a/nix/tests/expected/pgjwt.out b/nix/tests/expected/pgjwt.out deleted file mode 100644 index 4e4500f..0000000 --- a/nix/tests/expected/pgjwt.out +++ /dev/null @@ -1,22 +0,0 @@ -select - sign( - payload := '{"sub":"1234567890","name":"John Doe","iat":1516239022}', - secret := 'secret', - algorithm := 'HS256' - ); - sign -------------------------------------------------------------------------------------------------------------------------------------------------------------- - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.XbPfbIHMI6arZ3Y922BhjWgQzWXcXNrz0ogtVhfEd2o -(1 row) - -select - verify( - token := 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoiRm9vIn0.Q8hKjuadCEhnCPuqIj9bfLhTh_9QSxshTRsA5Aq4IuM', - secret := 'secret', - algorithm := 'HS256' - ); - verify ----------------------------------------------------------------- - ("{""alg"":""HS256"",""typ"":""JWT""}","{""name"":""Foo""}",t) -(1 row) - diff --git a/nix/tests/expected/pgmq.out b/nix/tests/expected/pgmq.out deleted file mode 100644 index 9fb1819..0000000 --- a/nix/tests/expected/pgmq.out +++ /dev/null @@ -1,141 +0,0 @@ --- Test the standard flow -select - pgmq.create('Foo'); - create --------- - -(1 row) - -select - * -from - pgmq.send( - queue_name:='Foo', - msg:='{"foo": "bar1"}' - ); - send ------- - 1 -(1 row) - --- Test queue is not case sensitive -select - * -from - pgmq.send( 
- queue_name:='foo', -- note: lowercase useage - msg:='{"foo": "bar2"}', - delay:=5 - ); - send ------- - 2 -(1 row) - -select - msg_id, - read_ct, - message -from - pgmq.read( - queue_name:='Foo', - vt:=30, - qty:=2 - ); - msg_id | read_ct | message ---------+---------+----------------- - 1 | 1 | {"foo": "bar1"} -(1 row) - -select - msg_id, - read_ct, - message -from - pgmq.pop('Foo'); - msg_id | read_ct | message ---------+---------+--------- -(0 rows) - --- Archive message with msg_id=2. -select - pgmq.archive( - queue_name:='Foo', - msg_id:=2 - ); - archive ---------- - t -(1 row) - -select - pgmq.create('my_queue'); - create --------- - -(1 row) - -select - pgmq.send_batch( - queue_name:='my_queue', - msgs:=array['{"foo": "bar3"}','{"foo": "bar4"}','{"foo": "bar5"}']::jsonb[] -); - send_batch ------------- - 1 - 2 - 3 -(3 rows) - -select - pgmq.archive( - queue_name:='my_queue', - msg_ids:=array[3, 4, 5] - ); - archive ---------- - 3 -(1 row) - -select - pgmq.delete('my_queue', 6); - delete --------- - f -(1 row) - -select - pgmq.drop_queue('my_queue'); - drop_queue ------------- - t -(1 row) - -/* --- Disabled until pg_partman goes back into the image -select - pgmq.create_partitioned( - 'my_partitioned_queue', - '5 seconds', - '10 seconds' -); -*/ --- Make sure SQLI enabling characters are blocked -select pgmq.create('F--oo'); -ERROR: queue name contains invalid characters: $, ;, --, or \' -CONTEXT: PL/pgSQL function pgmq.format_table_name(text,text) line 5 at RAISE -PL/pgSQL function pgmq.create_non_partitioned(text) line 3 during statement block local variable initialization -SQL statement "SELECT pgmq.create_non_partitioned(queue_name)" -PL/pgSQL function pgmq."create"(text) line 3 at PERFORM -select pgmq.create('F$oo'); -ERROR: queue name contains invalid characters: $, ;, --, or \' -CONTEXT: PL/pgSQL function pgmq.format_table_name(text,text) line 5 at RAISE -PL/pgSQL function pgmq.create_non_partitioned(text) line 3 during statement block local 
variable initialization -SQL statement "SELECT pgmq.create_non_partitioned(queue_name)" -PL/pgSQL function pgmq."create"(text) line 3 at PERFORM -select pgmq.create($$F'oo$$); -ERROR: queue name contains invalid characters: $, ;, --, or \' -CONTEXT: PL/pgSQL function pgmq.format_table_name(text,text) line 5 at RAISE -PL/pgSQL function pgmq.create_non_partitioned(text) line 3 during statement block local variable initialization -SQL statement "SELECT pgmq.create_non_partitioned(queue_name)" -PL/pgSQL function pgmq."create"(text) line 3 at PERFORM diff --git a/nix/tests/expected/pgroonga.out b/nix/tests/expected/pgroonga.out deleted file mode 100644 index 5ceeed2..0000000 --- a/nix/tests/expected/pgroonga.out +++ /dev/null @@ -1,76 +0,0 @@ -create schema v; -create table v.roon( - id serial primary key, - content text -); -with tokenizers as ( - select - x - from - jsonb_array_elements( - (select pgroonga_command('tokenizer_list'))::jsonb - ) x(val) - limit - 1 - offset - 1 -- first record is unrelated and not stable -) -select - t.x::jsonb ->> 'name' -from - jsonb_array_elements((select * from tokenizers)) t(x) -order by - t.x::jsonb ->> 'name'; - ?column? 
---------------------------------------------- - TokenBigram - TokenBigramIgnoreBlank - TokenBigramIgnoreBlankSplitSymbol - TokenBigramIgnoreBlankSplitSymbolAlpha - TokenBigramIgnoreBlankSplitSymbolAlphaDigit - TokenBigramSplitSymbol - TokenBigramSplitSymbolAlpha - TokenBigramSplitSymbolAlphaDigit - TokenDelimit - TokenDelimitNull - TokenDocumentVectorBM25 - TokenDocumentVectorTFIDF - TokenMecab - TokenNgram - TokenPattern - TokenRegexp - TokenTable - TokenTrigram - TokenUnigram -(19 rows) - -insert into v.roon (content) -values - ('Hello World'), - ('PostgreSQL with PGroonga is a thing'), - ('This is a full-text search test'), - ('PGroonga supports various languages'); --- Create default index -create index pgroonga_index on v.roon using pgroonga (content); --- Create mecab tokenizer index since we had a bug with this one once -create index pgroonga_index_mecab on v.roon using pgroonga (content) with (tokenizer='TokenMecab'); --- Run some queries to test the index -select * from v.roon where content &@~ 'Hello'; - id | content -----+------------- - 1 | Hello World -(1 row) - -select * from v.roon where content &@~ 'powerful'; - id | content -----+--------- -(0 rows) - -select * from v.roon where content &@~ 'supports'; - id | content -----+------------------------------------- - 4 | PGroonga supports various languages -(1 row) - -drop schema v cascade; -NOTICE: drop cascades to table v.roon diff --git a/nix/tests/expected/pgrouting.out b/nix/tests/expected/pgrouting.out deleted file mode 100644 index 2362a72..0000000 --- a/nix/tests/expected/pgrouting.out +++ /dev/null @@ -1,31 +0,0 @@ -create schema v; --- create the roads table -create table v.roads ( - id serial primary key, - source integer, - target integer, - cost double precision -); --- insert sample data into roads table -insert into v.roads (source, target, cost) values -(1, 2, 1.0), -(2, 3, 1.0), -(3, 4, 1.0), -(1, 3, 2.5), -(3, 5, 2.0); --- create a function to use pgRouting to find the shortest path 
-select * from pgr_dijkstra( - 'select id, source, target, cost from v.roads', - 1, -- start node - 4 -- end node -); - seq | path_seq | node | edge | cost | agg_cost ------+----------+------+------+------+---------- - 1 | 1 | 1 | 1 | 1 | 0 - 2 | 2 | 2 | 2 | 1 | 1 - 3 | 3 | 3 | 3 | 1 | 2 - 4 | 4 | 4 | -1 | 0 | 3 -(4 rows) - -drop schema v cascade; -NOTICE: drop cascades to table v.roads diff --git a/nix/tests/expected/pgsodium.out b/nix/tests/expected/pgsodium.out deleted file mode 100644 index 418bf2d..0000000 --- a/nix/tests/expected/pgsodium.out +++ /dev/null @@ -1,9 +0,0 @@ -select - status -from - pgsodium.create_key(); - status --------- - valid -(1 row) - diff --git a/nix/tests/expected/pgtap.out b/nix/tests/expected/pgtap.out deleted file mode 100644 index 272d838..0000000 --- a/nix/tests/expected/pgtap.out +++ /dev/null @@ -1,21 +0,0 @@ -begin; -select plan(1); - plan ------- - 1..1 -(1 row) - --- Run the tests. -select pass( 'My test passed, w00t!' ); - pass ------------------------------- - ok 1 - My test passed, w00t! -(1 row) - --- Finish the tests and clean up. -select * from finish(); - finish --------- -(0 rows) - -rollback; diff --git a/nix/tests/expected/pgvector.out b/nix/tests/expected/pgvector.out deleted file mode 100644 index 6564be5..0000000 --- a/nix/tests/expected/pgvector.out +++ /dev/null @@ -1,90 +0,0 @@ -create schema v; -create table v.items( - id serial primary key, - embedding vector(3), - half_embedding halfvec(3), - bit_embedding bit(3), - sparse_embedding sparsevec(3) -); --- vector ops -create index on v.items using hnsw (embedding vector_l2_ops); -create index on v.items using hnsw (embedding vector_cosine_ops); -create index on v.items using hnsw (embedding vector_l1_ops); -create index on v.items using ivfflat (embedding vector_l2_ops); -NOTICE: ivfflat index created with little data -DETAIL: This will cause low recall. -HINT: Drop the index until the table has more data. 
-create index on v.items using ivfflat (embedding vector_cosine_ops); -NOTICE: ivfflat index created with little data -DETAIL: This will cause low recall. -HINT: Drop the index until the table has more data. --- halfvec ops -create index on v.items using hnsw (half_embedding halfvec_l2_ops); -create index on v.items using hnsw (half_embedding halfvec_cosine_ops); -create index on v.items using hnsw (half_embedding halfvec_l1_ops); -create index on v.items using ivfflat (half_embedding halfvec_l2_ops); -NOTICE: ivfflat index created with little data -DETAIL: This will cause low recall. -HINT: Drop the index until the table has more data. -create index on v.items using ivfflat (half_embedding halfvec_cosine_ops); -NOTICE: ivfflat index created with little data -DETAIL: This will cause low recall. -HINT: Drop the index until the table has more data. --- sparsevec -create index on v.items using hnsw (sparse_embedding sparsevec_l2_ops); -create index on v.items using hnsw (sparse_embedding sparsevec_cosine_ops); -create index on v.items using hnsw (sparse_embedding sparsevec_l1_ops); --- bit ops -create index on v.items using hnsw (bit_embedding bit_hamming_ops); -create index on v.items using ivfflat (bit_embedding bit_hamming_ops); -NOTICE: ivfflat index created with little data -DETAIL: This will cause low recall. -HINT: Drop the index until the table has more data. 
--- Populate some records -insert into v.items( - embedding, - half_embedding, - bit_embedding, - sparse_embedding -) -values - ('[1,2,3]', '[1,2,3]', '101', '{1:4}/3'), - ('[2,3,4]', '[2,3,4]', '010', '{1:7,3:0}/3'); --- Test op types -select - * -from - v.items -order by - embedding <-> '[2,3,5]', - embedding <=> '[2,3,5]', - embedding <+> '[2,3,5]', - embedding <#> '[2,3,5]', - half_embedding <-> '[2,3,5]', - half_embedding <=> '[2,3,5]', - half_embedding <+> '[2,3,5]', - half_embedding <#> '[2,3,5]', - sparse_embedding <-> '{2:4,3:1}/3', - sparse_embedding <=> '{2:4,3:1}/3', - sparse_embedding <+> '{2:4,3:1}/3', - sparse_embedding <#> '{2:4,3:1}/3', - bit_embedding <~> '011'; - id | embedding | half_embedding | bit_embedding | sparse_embedding -----+-----------+----------------+---------------+------------------ - 2 | [2,3,4] | [2,3,4] | 010 | {1:7}/3 - 1 | [1,2,3] | [1,2,3] | 101 | {1:4}/3 -(2 rows) - -select - avg(embedding), - avg(half_embedding) -from - v.items; - avg | avg ----------------+--------------- - [1.5,2.5,3.5] | [1.5,2.5,3.5] -(1 row) - --- Cleanup -drop schema v cascade; -NOTICE: drop cascades to table v.items diff --git a/nix/tests/expected/plpgsql-check.out b/nix/tests/expected/plpgsql-check.out deleted file mode 100644 index 2b5bf82..0000000 --- a/nix/tests/expected/plpgsql-check.out +++ /dev/null @@ -1,35 +0,0 @@ -create schema v; -create table v.t1( - a int, - b int -); -create or replace function v.f1() - returns void - language plpgsql -as $$ -declare r record; -begin - for r in select * from v.t1 - loop - raise notice '%', r.c; -- there is bug - table t1 missing "c" column - end loop; -end; -$$; -select * from v.f1(); - f1 ----- - -(1 row) - --- use plpgsql_check_function to check the function for errors -select * from plpgsql_check_function('v.f1()'); - plpgsql_check_function -------------------------------------------------- - error:42703:6:RAISE:record "r" has no field "c" - Context: SQL expression "r.c" -(2 rows) - -drop schema v 
cascade; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table v.t1 -drop cascades to function v.f1() diff --git a/nix/tests/expected/plv8.out b/nix/tests/expected/plv8.out deleted file mode 100644 index f24c858..0000000 --- a/nix/tests/expected/plv8.out +++ /dev/null @@ -1,17 +0,0 @@ -create schema v; --- create a function to perform some JavaScript operations -create function v.multiply_numbers(a integer, b integer) - returns integer - language plv8 -as $$ - return a * b; -$$; -select - v.multiply_numbers(3, 4); - multiply_numbers ------------------- - 12 -(1 row) - -drop schema v cascade; -NOTICE: drop cascades to function v.multiply_numbers(integer,integer) diff --git a/nix/tests/expected/postgis.out b/nix/tests/expected/postgis.out deleted file mode 100644 index 53194d8..0000000 --- a/nix/tests/expected/postgis.out +++ /dev/null @@ -1,59 +0,0 @@ -create schema v; --- create a table to store geographic points -create table v.places ( - id serial primary key, - name text, - geom geometry(point, 4326) -- using WGS 84 coordinate system -); --- insert some sample geographic points into the places table -insert into v.places (name, geom) -values - ('place_a', st_setsrid(st_makepoint(-73.9857, 40.7484), 4326)), -- latitude and longitude for a location - ('place_b', st_setsrid(st_makepoint(-74.0060, 40.7128), 4326)), -- another location - ('place_c', st_setsrid(st_makepoint(-73.9687, 40.7851), 4326)); -- yet another location --- calculate the distance between two points (in meters) -select - a.name as place_a, - b.name as place_b, - st_distance(a.geom::geography, b.geom::geography) as distance_meters -from - v.places a, - v.places b -where - a.name = 'place_a' - and b.name = 'place_b'; - place_a | place_b | distance_meters ----------+---------+----------------- - place_a | place_b | 4309.25283351 -(1 row) - --- find all places within a 5km radius of 'place_a' -select - name, - st_distance( - geom::geography, - ( - select - geom - from - v.places - 
where - name = 'place_a' - )::geography) as distance_meters -from - v.places -where - st_dwithin( - geom::geography, - (select geom from v.places where name = 'place_a')::geography, - 5000 - ) - and name != 'place_a'; - name | distance_meters ----------+----------------- - place_b | 4309.25283351 - place_c | 4320.8765634 -(2 rows) - -drop schema v cascade; -NOTICE: drop cascades to table v.places diff --git a/nix/tests/expected/rum.out b/nix/tests/expected/rum.out deleted file mode 100644 index ba8a402..0000000 --- a/nix/tests/expected/rum.out +++ /dev/null @@ -1,38 +0,0 @@ -create schema v; -create table v.test_rum( - t text, - a tsvector -); -create trigger tsvectorupdate - before update or insert on v.test_rum - for each row - execute procedure - tsvector_update_trigger( - 'a', - 'pg_catalog.english', - 't' - ); -insert into v.test_rum(t) -values - ('the situation is most beautiful'), - ('it is a beautiful'), - ('it looks like a beautiful place'); -create index rumidx on v.test_rum using rum (a rum_tsvector_ops); -select - t, - a <=> to_tsquery('english', 'beautiful | place') as rank -from - v.test_rum -where - a @@ to_tsquery('english', 'beautiful | place') -order by - a <=> to_tsquery('english', 'beautiful | place'); - t | rank ----------------------------------+---------- - it looks like a beautiful place | 8.22467 - the situation is most beautiful | 16.44934 - it is a beautiful | 16.44934 -(3 rows) - -drop schema v cascade; -NOTICE: drop cascades to table v.test_rum diff --git a/nix/tests/expected/timescale.out b/nix/tests/expected/timescale.out deleted file mode 100644 index 0812954..0000000 --- a/nix/tests/expected/timescale.out +++ /dev/null @@ -1,47 +0,0 @@ --- Confirm we're running the apache version -show timescaledb.license; - timescaledb.license ---------------------- - apache -(1 row) - --- Create schema v -create schema v; --- Create a table in the v schema -create table v.sensor_data ( - time timestamptz not null, - sensor_id int not null, - 
temperature double precision not null, - humidity double precision not null -); --- Convert the table to a hypertable -select create_hypertable('v.sensor_data', 'time'); - create_hypertable ---------------------- - (1,v,sensor_data,t) -(1 row) - --- Insert some data into the hypertable -insert into v.sensor_data (time, sensor_id, temperature, humidity) -values - ('2024-08-09', 1, 22.5, 60.2), - ('2024-08-08', 1, 23.0, 59.1), - ('2024-08-07', 2, 21.7, 63.3); --- Select data from the hypertable -select - * -from - v.sensor_data; - time | sensor_id | temperature | humidity -------------------------------+-----------+-------------+---------- - Fri Aug 09 00:00:00 2024 PDT | 1 | 22.5 | 60.2 - Thu Aug 08 00:00:00 2024 PDT | 1 | 23 | 59.1 - Wed Aug 07 00:00:00 2024 PDT | 2 | 21.7 | 63.3 -(3 rows) - --- Drop schema v and all its entities -drop schema v cascade; -NOTICE: drop cascades to 3 other objects -DETAIL: drop cascades to table v.sensor_data -drop cascades to table _timescaledb_internal._hyper_1_1_chunk -drop cascades to table _timescaledb_internal._hyper_1_2_chunk diff --git a/nix/tests/expected/vault.out b/nix/tests/expected/vault.out deleted file mode 100644 index e4eaff2..0000000 --- a/nix/tests/expected/vault.out +++ /dev/null @@ -1,42 +0,0 @@ -select - 1 -from - vault.create_secret('my_s3kre3t'); - ?column? ----------- - 1 -(1 row) - -select - 1 -from - vault.create_secret( - 'another_s3kre3t', - 'unique_name', - 'This is the description' - ); - ?column? 
----------- - 1 -(1 row) - -insert into vault.secrets (secret) -values - ('s3kre3t_k3y'); -select - name, - description -from - vault.decrypted_secrets -order by - created_at desc -limit - 3; - name | description --------------+------------------------- - | - unique_name | This is the description - | -(3 rows) - - diff --git a/nix/tests/expected/wal2json.out b/nix/tests/expected/wal2json.out deleted file mode 100644 index 6edc359..0000000 --- a/nix/tests/expected/wal2json.out +++ /dev/null @@ -1,42 +0,0 @@ -create schema v; -create table v.foo( - id int primary key -); -select - 1 -from - pg_create_logical_replication_slot('reg_test', 'wal2json', false); - ?column? ----------- - 1 -(1 row) - -insert into v.foo(id) values (1); -select - data -from - pg_logical_slot_get_changes( - 'reg_test', - null, - null, - 'include-pk', '1', - 'include-transaction', 'false', - 'include-timestamp', 'false', - 'include-type-oids', 'false', - 'format-version', '2', - 'actions', 'insert,update,delete' - ) x; - data --------------------------------------------------------------------------------------------------------------------------------------- - {"action":"I","schema":"v","table":"foo","columns":[{"name":"id","type":"integer","value":1}],"pk":[{"name":"id","type":"integer"}]} -(1 row) - -select - pg_drop_replication_slot('reg_test'); - pg_drop_replication_slot --------------------------- - -(1 row) - -drop schema v cascade; -NOTICE: drop cascades to table v.foo diff --git a/nix/tests/migrations/data.sql b/nix/tests/migrations/data.sql deleted file mode 100644 index 36396e6..0000000 --- a/nix/tests/migrations/data.sql +++ /dev/null @@ -1,21 +0,0 @@ -create table account( - id int primary key, - is_verified bool, - name text, - phone text -); - -insert into public.account(id, is_verified, name, phone) -values - (1, true, 'foo', '1111111111'), - (2, true, 'bar', null), - (3, false, 'baz', '33333333333'); - -select id as test_new_key_id from pgsodium.create_key(name:='test_new_key') 
\gset - -select vault.create_secret ( - 's3kr3t_k3y', 'a_name', 'this is the foo secret key') test_secret_id \gset - -select vault.create_secret ( - 's3kr3t_k3y_2', 'another_name', 'this is another foo key', - (select id from pgsodium.key where name = 'test_new_key')) test_secret_id_2 \gset diff --git a/nix/tests/postgresql.conf.in b/nix/tests/postgresql.conf.in deleted file mode 100644 index ef860af..0000000 --- a/nix/tests/postgresql.conf.in +++ /dev/null @@ -1,800 +0,0 @@ -# ----------------------------- -# PostgreSQL configuration file - -# ----------------------------- -# -# This file consists of lines of the form: -# -# name = value -# -# (The "=" is optional.) Whitespace may be used. Comments are introduced with -# "#" anywhere on a line. The complete list of parameter names and allowed -# values can be found in the PostgreSQL documentation. -# -# The commented-out settings shown in this file represent the default values. -# Re-commenting a setting is NOT sufficient to revert it to the default value; -# you need to reload the server. -# -# This file is read on server startup and when the server receives a SIGHUP -# signal. If you edit the file on a running system, you have to SIGHUP the -# server for the changes to take effect, run "pg_ctl reload", or execute -# "SELECT pg_reload_conf()". Some parameters, which are marked below, -# require a server shutdown and restart to take effect. -# -# Any parameter can also be given as a command-line option to the server, e.g., -# "postgres -c log_connections=on". Some parameters can be changed at run time -# with the "SET" SQL command. 
-# -# Memory units: B = bytes Time units: us = microseconds -# kB = kilobytes ms = milliseconds -# MB = megabytes s = seconds -# GB = gigabytes min = minutes -# TB = terabytes h = hours -# d = days - - -#------------------------------------------------------------------------------ -# FILE LOCATIONS -#------------------------------------------------------------------------------ - -# The default values of these variables are driven from the -D command-line -# option or PGDATA environment variable, represented here as ConfigDir. - -#data_directory = 'ConfigDir' # use data in another directory - # (change requires restart) -#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file - # (change requires restart) -#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file - # (change requires restart) - -# If external_pid_file is not explicitly set, no extra PID file is written. -#external_pid_file = '' # write an extra PID file - # (change requires restart) - - -#------------------------------------------------------------------------------ -# CONNECTIONS AND AUTHENTICATION -#------------------------------------------------------------------------------ - -# - Connection Settings - - -listen_addresses = '*' # what IP address(es) to listen on; -#port = @PGSQL_DEFAULT_PORT@ # (change requires restart) -max_connections = 100 # (change requires restart) -#superuser_reserved_connections = 3 # (change requires restart) -unix_socket_directories = '/tmp' # comma-separated list of directories - # (change requires restart) -#unix_socket_group = '' # (change requires restart) -#unix_socket_permissions = 0777 # begin with 0 to use octal notation - # (change requires restart) -#bonjour = off # advertise server via Bonjour - # (change requires restart) -#bonjour_name = '' # defaults to the computer name - # (change requires restart) - -# - TCP settings - -# see "man tcp" for details - -#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; - # 0 selects the system 
default -#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; - # 0 selects the system default -#tcp_keepalives_count = 0 # TCP_KEEPCNT; - # 0 selects the system default -#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds; - # 0 selects the system default - -#client_connection_check_interval = 0 # time between checks for client - # disconnection while running queries; - # 0 for never - -# - Authentication - - -#authentication_timeout = 1min # 1s-600s -#password_encryption = scram-sha-256 # scram-sha-256 or md5 -#db_user_namespace = off - -# GSSAPI using Kerberos -#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab' -#krb_caseins_users = off - -# - SSL - - -#ssl = off -#ssl_ca_file = '' -#ssl_cert_file = 'server.crt' -#ssl_crl_file = '' -#ssl_crl_dir = '' -#ssl_key_file = 'server.key' -#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers -#ssl_prefer_server_ciphers = on -#ssl_ecdh_curve = 'prime256v1' -#ssl_min_protocol_version = 'TLSv1.2' -#ssl_max_protocol_version = '' -#ssl_dh_params_file = '' -#ssl_passphrase_command = '' -#ssl_passphrase_command_supports_reload = off - - -#------------------------------------------------------------------------------ -# RESOURCE USAGE (except WAL) -#------------------------------------------------------------------------------ - -# - Memory - - -shared_buffers = 128MB # min 128kB - # (change requires restart) -#huge_pages = try # on, off, or try - # (change requires restart) -#huge_page_size = 0 # zero for system default - # (change requires restart) -#temp_buffers = 8MB # min 800kB -#max_prepared_transactions = 0 # zero disables the feature - # (change requires restart) -# Caution: it is not advisable to set max_prepared_transactions nonzero unless -# you actively intend to use prepared transactions. 
-#work_mem = 4MB # min 64kB -#hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem -#maintenance_work_mem = 64MB # min 1MB -#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem -#logical_decoding_work_mem = 64MB # min 64kB -#max_stack_depth = 2MB # min 100kB -#shared_memory_type = mmap # the default is the first option - # supported by the operating system: - # mmap - # sysv - # windows - # (change requires restart) -dynamic_shared_memory_type = posix # the default is the first option - # supported by the operating system: - # posix - # sysv - # windows - # mmap - # (change requires restart) -#min_dynamic_shared_memory = 0MB # (change requires restart) - -# - Disk - - -#temp_file_limit = -1 # limits per-process temp file space - # in kilobytes, or -1 for no limit - -# - Kernel Resources - - -#max_files_per_process = 1000 # min 64 - # (change requires restart) - -# - Cost-Based Vacuum Delay - - -#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables) -#vacuum_cost_page_hit = 1 # 0-10000 credits -#vacuum_cost_page_miss = 2 # 0-10000 credits -#vacuum_cost_page_dirty = 20 # 0-10000 credits -#vacuum_cost_limit = 200 # 1-10000 credits - -# - Background Writer - - -#bgwriter_delay = 200ms # 10-10000ms between rounds -#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables -#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round -#bgwriter_flush_after = 512kB # measured in pages, 0 disables - -# - Asynchronous Behavior - - -#backend_flush_after = 0 # measured in pages, 0 disables -#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching -#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching -#max_worker_processes = 8 # (change requires restart) -#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers -#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers -#max_parallel_workers = 8 # maximum number of max_worker_processes that - # can be used in parallel 
operations -#parallel_leader_participation = on -#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate - # (change requires restart) - - -#------------------------------------------------------------------------------ -# WRITE-AHEAD LOG -#------------------------------------------------------------------------------ - -# - Settings - - -wal_level = logical # minimal, replica, or logical - # (change requires restart) -#fsync = on # flush data to disk for crash safety - # (turning this off can cause - # unrecoverable data corruption) -#synchronous_commit = on # synchronization level; - # off, local, remote_write, remote_apply, or on -#wal_sync_method = fsync # the default is the first option - # supported by the operating system: - # open_datasync - # fdatasync (default on Linux and FreeBSD) - # fsync - # fsync_writethrough - # open_sync -#full_page_writes = on # recover from partial page writes -wal_log_hints = on # also do full page writes of non-critical updates - # (change requires restart) -#wal_compression = off # enable compression of full-page writes -#wal_init_zero = on # zero-fill new WAL files -#wal_recycle = on # recycle WAL files -#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers - # (change requires restart) -#wal_writer_delay = 200ms # 1-10000 milliseconds -#wal_writer_flush_after = 1MB # measured in pages, 0 disables -#wal_skip_threshold = 2MB - -#commit_delay = 0 # range 0-100000, in microseconds -#commit_siblings = 5 # range 1-1000 - -# - Checkpoints - - -#checkpoint_timeout = 5min # range 30s-1d -#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0 -#checkpoint_flush_after = 256kB # measured in pages, 0 disables -#checkpoint_warning = 30s # 0 disables -max_wal_size = 1GB -min_wal_size = 80MB - -# - Archiving - - -#archive_mode = off # enables archiving; off, on, or always - # (change requires restart) -#archive_command = '' # command to use to archive a logfile segment - # placeholders: %p = path of 
file to archive - # %f = file name only - # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' -#archive_timeout = 0 # force a logfile segment switch after this - # number of seconds; 0 disables - -# - Archive Recovery - - -# These are only used in recovery mode. - -#restore_command = '' # command to use to restore an archived logfile segment - # placeholders: %p = path of file to restore - # %f = file name only - # e.g. 'cp /mnt/server/archivedir/%f %p' -#archive_cleanup_command = '' # command to execute at every restartpoint -#recovery_end_command = '' # command to execute at completion of recovery - -# - Recovery Target - - -# Set these only when performing a targeted recovery. - -#recovery_target = '' # 'immediate' to end recovery as soon as a - # consistent state is reached - # (change requires restart) -#recovery_target_name = '' # the named restore point to which recovery will proceed - # (change requires restart) -#recovery_target_time = '' # the time stamp up to which recovery will proceed - # (change requires restart) -#recovery_target_xid = '' # the transaction ID up to which recovery will proceed - # (change requires restart) -#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed - # (change requires restart) -#recovery_target_inclusive = on # Specifies whether to stop: - # just after the specified recovery target (on) - # just before the recovery target (off) - # (change requires restart) -#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID - # (change requires restart) -#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown' - # (change requires restart) - - -#------------------------------------------------------------------------------ -# REPLICATION -#------------------------------------------------------------------------------ - -# - Sending Servers - - -# Set these on the primary and on any standby that will send replication data. 
- -#max_wal_senders = 10 # max number of walsender processes - # (change requires restart) -#max_replication_slots = 10 # max number of replication slots - # (change requires restart) -#wal_keep_size = 0 # in megabytes; 0 disables -#max_slot_wal_keep_size = -1 # in megabytes; -1 disables -#wal_sender_timeout = 60s # in milliseconds; 0 disables -#track_commit_timestamp = off # collect timestamp of transaction commit - # (change requires restart) - -# - Primary Server - - -# These settings are ignored on a standby server. - -#synchronous_standby_names = '' # standby servers that provide sync rep - # method to choose sync standbys, number of sync standbys, - # and comma-separated list of application_name - # from standby(s); '*' = all -#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed - -# - Standby Servers - - -# These settings are ignored on a primary server. - -#primary_conninfo = '' # connection string to sending server -#primary_slot_name = '' # replication slot on sending server -#promote_trigger_file = '' # file name whose presence ends recovery -#hot_standby = on # "off" disallows queries during recovery - # (change requires restart) -#max_standby_archive_delay = 30s # max delay before canceling queries - # when reading WAL from archive; - # -1 allows indefinite delay -#max_standby_streaming_delay = 30s # max delay before canceling queries - # when reading streaming WAL; - # -1 allows indefinite delay -#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name - # is not set -#wal_receiver_status_interval = 10s # send replies at least this often - # 0 disables -#hot_standby_feedback = off # send info from standby to prevent - # query conflicts -#wal_receiver_timeout = 60s # time that receiver waits for - # communication from primary - # in milliseconds; 0 disables -#wal_retrieve_retry_interval = 5s # time to wait before retrying to - # retrieve WAL after a failed attempt -#recovery_min_apply_delay = 0 # minimum delay 
for applying changes during recovery - -# - Subscribers - - -# These settings are ignored on a publisher. - -#max_logical_replication_workers = 4 # taken from max_worker_processes - # (change requires restart) -#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers - - -#------------------------------------------------------------------------------ -# QUERY TUNING -#------------------------------------------------------------------------------ - -# - Planner Method Configuration - - -#enable_async_append = on -#enable_bitmapscan = on -#enable_gathermerge = on -#enable_hashagg = on -#enable_hashjoin = on -#enable_incremental_sort = on -#enable_indexscan = on -#enable_indexonlyscan = on -#enable_material = on -#enable_memoize = on -#enable_mergejoin = on -#enable_nestloop = on -#enable_parallel_append = on -#enable_parallel_hash = on -#enable_partition_pruning = on -#enable_partitionwise_join = off -#enable_partitionwise_aggregate = off -#enable_seqscan = on -#enable_sort = on -#enable_tidscan = on - -# - Planner Cost Constants - - -#seq_page_cost = 1.0 # measured on an arbitrary scale -#random_page_cost = 4.0 # same scale as above -#cpu_tuple_cost = 0.01 # same scale as above -#cpu_index_tuple_cost = 0.005 # same scale as above -#cpu_operator_cost = 0.0025 # same scale as above -#parallel_setup_cost = 1000.0 # same scale as above -#parallel_tuple_cost = 0.1 # same scale as above -#min_parallel_table_scan_size = 8MB -#min_parallel_index_scan_size = 512kB -#effective_cache_size = 4GB - -#jit_above_cost = 100000 # perform JIT compilation if available - # and query more expensive than this; - # -1 disables -#jit_inline_above_cost = 500000 # inline small functions if query is - # more expensive than this; -1 disables -#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if - # query is more expensive than this; - # -1 disables - -# - Genetic Query Optimizer - - -#geqo = on -#geqo_threshold = 12 -#geqo_effort = 5 # range 1-10 
-#geqo_pool_size = 0 # selects default based on effort -#geqo_generations = 0 # selects default based on effort -#geqo_selection_bias = 2.0 # range 1.5-2.0 -#geqo_seed = 0.0 # range 0.0-1.0 - -# - Other Planner Options - - -#default_statistics_target = 100 # range 1-10000 -#constraint_exclusion = partition # on, off, or partition -#cursor_tuple_fraction = 0.1 # range 0.0-1.0 -#from_collapse_limit = 8 -#jit = on # allow JIT compilation -#join_collapse_limit = 8 # 1 disables collapsing of explicit - # JOIN clauses -#plan_cache_mode = auto # auto, force_generic_plan or - # force_custom_plan - - -#------------------------------------------------------------------------------ -# REPORTING AND LOGGING -#------------------------------------------------------------------------------ - -# - Where to Log - - -#log_destination = 'stderr' # Valid values are combinations of - # stderr, csvlog, syslog, and eventlog, - # depending on platform. csvlog - # requires logging_collector to be on. - -# This is used when logging to stderr: -#logging_collector = off # Enable capturing of stderr and csvlog - # into log files. Required to be on for - # csvlogs. - # (change requires restart) - -# These are only used if logging_collector is on: -#log_directory = 'log' # directory where log files are written, - # can be absolute or relative to PGDATA -#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern, - # can include strftime() escapes -#log_file_mode = 0600 # creation mode for log files, - # begin with 0 to use octal notation -#log_rotation_age = 1d # Automatic rotation of logfiles will - # happen after that time. 0 disables. -#log_rotation_size = 10MB # Automatic rotation of logfiles will - # happen after that much log output. - # 0 disables. -#log_truncate_on_rotation = off # If on, an existing log file with the - # same name as the new log file will be - # truncated rather than appended to. 
- # But such truncation only occurs on - # time-driven rotation, not on restarts - # or size-driven rotation. Default is - # off, meaning append to existing files - # in all cases. - -# These are relevant when logging to syslog: -#syslog_facility = 'LOCAL0' -#syslog_ident = 'postgres' -#syslog_sequence_numbers = on -#syslog_split_messages = on - -# This is only relevant when logging to eventlog (Windows): -# (change requires restart) -#event_source = 'PostgreSQL' - -# - When to Log - - -#log_min_messages = warning # values in order of decreasing detail: - # debug5 - # debug4 - # debug3 - # debug2 - # debug1 - # info - # notice - # warning - # error - # log - # fatal - # panic - -#log_min_error_statement = error # values in order of decreasing detail: - # debug5 - # debug4 - # debug3 - # debug2 - # debug1 - # info - # notice - # warning - # error - # log - # fatal - # panic (effectively off) - -#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements - # and their durations, > 0 logs only - # statements running at least this number - # of milliseconds - -#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements - # and their durations, > 0 logs only a sample of - # statements running at least this number - # of milliseconds; - # sample fraction is determined by log_statement_sample_rate - -#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding - # log_min_duration_sample to be logged; - # 1.0 logs all such statements, 0.0 never logs - - -#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements - # are logged regardless of their duration; 1.0 logs all - # statements from all transactions, 0.0 never logs - -# - What to Log - - -#debug_print_parse = off -#debug_print_rewritten = off -#debug_print_plan = off -#debug_pretty_print = on -#log_autovacuum_min_duration = -1 # log autovacuum activity; - # -1 disables, 0 logs all actions and - # their durations, > 0 logs only - # actions running at 
least this number - # of milliseconds. -#log_checkpoints = off -#log_connections = off -#log_disconnections = off -#log_duration = off -#log_error_verbosity = default # terse, default, or verbose messages -#log_hostname = off -#log_line_prefix = '%m [%p] ' # special values: - # %a = application name - # %u = user name - # %d = database name - # %r = remote host and port - # %h = remote host - # %b = backend type - # %p = process ID - # %P = process ID of parallel group leader - # %t = timestamp without milliseconds - # %m = timestamp with milliseconds - # %n = timestamp with milliseconds (as a Unix epoch) - # %Q = query ID (0 if none or not computed) - # %i = command tag - # %e = SQL state - # %c = session ID - # %l = session line number - # %s = session start timestamp - # %v = virtual transaction ID - # %x = transaction ID (0 if none) - # %q = stop here in non-session - # processes - # %% = '%' - # e.g. '<%u%%%d> ' -#log_lock_waits = off # log lock waits >= deadlock_timeout -#log_recovery_conflict_waits = off # log standby recovery conflict waits - # >= deadlock_timeout -#log_parameter_max_length = -1 # when logging statements, limit logged - # bind-parameter values to N bytes; - # -1 means print in full, 0 disables -#log_parameter_max_length_on_error = 0 # when logging an error, limit logged - # bind-parameter values to N bytes; - # -1 means print in full, 0 disables -#log_statement = 'none' # none, ddl, mod, all -#log_replication_commands = off -#log_temp_files = -1 # log temporary files equal or larger - # than the specified size in kilobytes; - # -1 disables, 0 logs all temp files -log_timezone = 'America/Chicago' - - -#------------------------------------------------------------------------------ -# PROCESS TITLE -#------------------------------------------------------------------------------ - -#cluster_name = '' # added to process titles if nonempty - # (change requires restart) -#update_process_title = on - - 
-#------------------------------------------------------------------------------ -# STATISTICS -#------------------------------------------------------------------------------ - -# - Query and Index Statistics Collector - - -#track_activities = on -#track_activity_query_size = 1024 # (change requires restart) -#track_counts = on -#track_io_timing = off -#track_wal_io_timing = off -#track_functions = none # none, pl, all -#stats_temp_directory = 'pg_stat_tmp' - - -# - Monitoring - - -#compute_query_id = auto -#log_statement_stats = off -#log_parser_stats = off -#log_planner_stats = off -#log_executor_stats = off - - -#------------------------------------------------------------------------------ -# AUTOVACUUM -#------------------------------------------------------------------------------ - -#autovacuum = on # Enable autovacuum subprocess? 'on' - # requires track_counts to also be on. -#autovacuum_max_workers = 3 # max number of autovacuum subprocesses - # (change requires restart) -#autovacuum_naptime = 1min # time between autovacuum runs -#autovacuum_vacuum_threshold = 50 # min number of row updates before - # vacuum -#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts - # before vacuum; -1 disables insert - # vacuums -#autovacuum_analyze_threshold = 50 # min number of row updates before - # analyze -#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum -#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table - # size before insert vacuum -#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze -#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum - # (change requires restart) -#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age - # before forced vacuum - # (change requires restart) -#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for - # autovacuum, in milliseconds; - # -1 means use vacuum_cost_delay 
-#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for - # autovacuum, -1 means use - # vacuum_cost_limit - - -#------------------------------------------------------------------------------ -# CLIENT CONNECTION DEFAULTS -#------------------------------------------------------------------------------ - -# - Statement Behavior - - -#client_min_messages = notice # values in order of decreasing detail: - # debug5 - # debug4 - # debug3 - # debug2 - # debug1 - # log - # notice - # warning - # error -#search_path = '"$user", public' # schema names -#row_security = on -#default_table_access_method = 'heap' -#default_tablespace = '' # a tablespace name, '' uses the default -#default_toast_compression = 'pglz' # 'pglz' or 'lz4' -#temp_tablespaces = '' # a list of tablespace names, '' uses - # only default tablespace -#check_function_bodies = on -#default_transaction_isolation = 'read committed' -#default_transaction_read_only = off -#default_transaction_deferrable = off -#session_replication_role = 'origin' -#statement_timeout = 0 # in milliseconds, 0 is disabled -#lock_timeout = 0 # in milliseconds, 0 is disabled -#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled -#idle_session_timeout = 0 # in milliseconds, 0 is disabled -#vacuum_freeze_table_age = 150000000 -#vacuum_freeze_min_age = 50000000 -#vacuum_failsafe_age = 1600000000 -#vacuum_multixact_freeze_table_age = 150000000 -#vacuum_multixact_freeze_min_age = 5000000 -#vacuum_multixact_failsafe_age = 1600000000 -#bytea_output = 'hex' # hex, escape -#xmlbinary = 'base64' -#xmloption = 'content' -#gin_pending_list_limit = 4MB - -# - Locale and Formatting - - -datestyle = 'iso, mdy' -#intervalstyle = 'postgres' -timezone = 'America/Chicago' -#timezone_abbreviations = 'Default' # Select the set of available time zone - # abbreviations. Currently, there are - # Default - # Australia (historical usage) - # India - # You can create your own file in - # share/timezonesets/. 
-#extra_float_digits = 1 # min -15, max 3; any value >0 actually - # selects precise output mode -#client_encoding = sql_ascii # actually, defaults to database - # encoding - -# These settings are initialized by initdb, but they can be changed. -lc_messages = 'C' # locale for system error message - # strings -lc_monetary = 'C' # locale for monetary formatting -lc_numeric = 'C' # locale for number formatting -lc_time = 'C' # locale for time formatting - -# default configuration for text search -default_text_search_config = 'pg_catalog.english' - -# - Shared Library Preloading - - -#local_preload_libraries = '' -#session_preload_libraries = '' -shared_preload_libraries = 'pg_stat_statements, pgaudit, plpgsql, plpgsql_check, pg_cron, pg_net, pgsodium, timescaledb, auto_explain, pg_tle, plan_filter, pg_backtrace' # (change requires restart) -jit_provider = 'llvmjit' # JIT library to use - - -# - Other Defaults - - -#dynamic_library_path = '$libdir' -#gin_fuzzy_search_limit = 0 - - -#------------------------------------------------------------------------------ -# LOCK MANAGEMENT -#------------------------------------------------------------------------------ - -#deadlock_timeout = 1s -#max_locks_per_transaction = 64 # min 10 - # (change requires restart) -#max_pred_locks_per_transaction = 64 # min 10 - # (change requires restart) -#max_pred_locks_per_relation = -2 # negative values mean - # (max_pred_locks_per_transaction - # / -max_pred_locks_per_relation) - 1 -#max_pred_locks_per_page = 2 # min 0 - - -#------------------------------------------------------------------------------ -# VERSION AND PLATFORM COMPATIBILITY -#------------------------------------------------------------------------------ - -# - Previous PostgreSQL Versions - - -#array_nulls = on -#backslash_quote = safe_encoding # on, off, or safe_encoding -#escape_string_warning = on -#lo_compat_privileges = off -#quote_all_identifiers = off -#standard_conforming_strings = on -#synchronize_seqscans = on - 
-# - Other Platforms and Clients - - -#transform_null_equals = off - - -#------------------------------------------------------------------------------ -# ERROR HANDLING -#------------------------------------------------------------------------------ - -#exit_on_error = off # terminate session on any error? -#restart_after_crash = on # reinitialize after backend crash? -#data_sync_retry = off # retry or panic on failure to fsync - # data? - # (change requires restart) -#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+) - - -#------------------------------------------------------------------------------ -# CONFIG FILE INCLUDES -#------------------------------------------------------------------------------ - -# These options allow settings to be loaded from files other than the -# default postgresql.conf. Note that these are directives, not variable -# assignments, so they can usefully be given more than once. - -#include_dir = '...' # include files ending in '.conf' from - # a directory, e.g., 'conf.d' -#include_if_exists = '...' # include file only if it exists -#include = '...' 
# include file - - -#------------------------------------------------------------------------------ -# CUSTOMIZED OPTIONS -#------------------------------------------------------------------------------ - -# Add settings for extensions here - -pgsodium.getkey_script = '@PGSODIUM_GETKEY_SCRIPT@' - -auto_explain.log_min_duration = 10s -cron.database_name = 'postgres' diff --git a/nix/tests/prime.sql b/nix/tests/prime.sql deleted file mode 100644 index dbcd69c..0000000 --- a/nix/tests/prime.sql +++ /dev/null @@ -1,98 +0,0 @@ -create role postgres; -create extension address_standardizer; -create extension address_standardizer_data_us; -create extension adminpack; -create extension amcheck; -create extension autoinc; -create extension bloom; -create extension btree_gin; -create extension btree_gist; -create extension citext; -create extension cube; -create extension dblink; -create extension dict_int; -create extension dict_xsyn; -create extension earthdistance; -create extension file_fdw; -create extension fuzzystrmatch; -create extension http; -create extension hstore; -create extension hypopg; -create extension index_advisor; -create extension insert_username; -create extension intagg; -create extension intarray; -create extension isn; -create extension lo; -create extension ltree; -create extension moddatetime; -create extension old_snapshot; -create extension pageinspect; -create extension pg_buffercache; - -/* -TODO: Does not enable locally mode -requires a change to postgresql.conf to set -cron.database_name = 'testing' -*/ --- create extension pg_cron; - -create extension pg_net; -create extension pg_graphql; -create extension pg_freespacemap; -create extension pg_hashids; -create extension pg_prewarm; -create extension pgmq; -create extension pg_jsonschema; -create extension pg_repack; -create extension pg_stat_monitor; -create extension pg_stat_statements; -create extension pg_surgery; -create extension pg_tle; -create extension pg_trgm; -create extension 
pg_visibility; -create extension pg_walinspect; -create extension pgaudit; -create extension pgcrypto; -create extension pgtap; -create extension pgjwt; -create extension pgroonga; -create extension pgroonga_database; -create extension pgsodium; -create extension pgrowlocks; -create extension pgstattuple; -create extension plpgsql_check; - -create extension plv8; -create extension plcoffee; -create extension plls; - -create extension postgis; -create extension postgis_raster; -create extension postgis_sfcgal; -create extension postgis_tiger_geocoder; -create extension postgis_topology; -create extension pgrouting; -- requires postgis -create extension postgres_fdw; -create extension rum; -create extension refint; -create extension seg; -create extension sslinfo; -create extension tealbase_vault; -create extension tablefunc; -create extension tcn; -create extension timescaledb; -create extension tsm_system_rows; -create extension tsm_system_time; -create extension unaccent; -create extension "uuid-ossp"; -create extension vector; -create extension wrappers; -create extension xml2; - - - - - - -CREATE EXTENSION IF NOT EXISTS pg_backtrace; diff --git a/nix/tests/smoke/0000-hello-world.sql b/nix/tests/smoke/0000-hello-world.sql deleted file mode 100644 index d6f002d..0000000 --- a/nix/tests/smoke/0000-hello-world.sql +++ /dev/null @@ -1,10 +0,0 @@ --- Start transaction and plan the tests. -BEGIN; -SELECT plan(1); - --- Run the tests. -SELECT pass( 'My test passed, w00t!' ); - --- Finish the tests and clean up. -SELECT * FROM finish(); -ROLLBACK; diff --git a/nix/tests/smoke/0001-pg_graphql.sql b/nix/tests/smoke/0001-pg_graphql.sql deleted file mode 100644 index 80e3cb2..0000000 --- a/nix/tests/smoke/0001-pg_graphql.sql +++ /dev/null @@ -1,59 +0,0 @@ --- Start transaction and plan the tests. 
-begin; - select plan(1); - - create extension if not exists pg_graphql; - - create table account( - id int primary key, - is_verified bool, - name text, - phone text - ); - - insert into public.account(id, is_verified, name, phone) - values - (1, true, 'foo', '1111111111'), - (2, true, 'bar', null), - (3, false, 'baz', '33333333333'); - - select is( - graphql.resolve($$ - { - accountCollection { - edges { - node { - id - } - } - } - } - $$), - '{ - "data": { - "accountCollection": { - "edges": [ - { - "node": { - "id": 1 - } - }, - { - "node": { - "id": 2 - } - }, - { - "node": { - "id": 3 - } - } - ] - } - } - }'::jsonb - ); - - - select * from finish(); -rollback; diff --git a/nix/tests/smoke/0002-supautils.sql b/nix/tests/smoke/0002-supautils.sql deleted file mode 100644 index 7a21606..0000000 --- a/nix/tests/smoke/0002-supautils.sql +++ /dev/null @@ -1,17 +0,0 @@ -BEGIN; -SELECT plan(2); - --- the setting doesn't exist when supautils is not loaded -SELECT throws_ok($$ - select current_setting('supautils.privileged_extensions', false) -$$); - -LOAD 'supautils'; - --- now it does -SELECT ok( - current_setting('supautils.privileged_extensions', false) = '' -); - -SELECT * FROM finish(); -ROLLBACK; diff --git a/nix/tests/smoke/0003-pgsodium-vault.sql b/nix/tests/smoke/0003-pgsodium-vault.sql deleted file mode 100644 index 1c9cedf..0000000 --- a/nix/tests/smoke/0003-pgsodium-vault.sql +++ /dev/null @@ -1,40 +0,0 @@ -BEGIN; - -select plan(3); - -select id as test_new_key_id from pgsodium.create_key(name:='test_new_key') \gset - -select vault.create_secret ( - 's3kr3t_k3y', 'a_name', 'this is the foo secret key') test_secret_id \gset - -select vault.create_secret ( - 's3kr3t_k3y_2', 'another_name', 'this is another foo key', - (select id from pgsodium.key where name = 'test_new_key')) test_secret_id_2 \gset - -SELECT results_eq( - $$ - SELECT decrypted_secret = 's3kr3t_k3y', description = 'this is the foo secret key' - FROM vault.decrypted_secrets WHERE name = 
'a_name'; - $$, - $$VALUES (true, true)$$, - 'can select from masking view with custom key'); - -SELECT results_eq( - $$ - SELECT decrypted_secret = 's3kr3t_k3y_2', description = 'this is another foo key' - FROM vault.decrypted_secrets WHERE key_id = (select id from pgsodium.key where name = 'test_new_key'); - $$, - $$VALUES (true, true)$$, - 'can select from masking view'); - -SELECT lives_ok( - format($test$ - select vault.update_secret( - %L::uuid, new_name:='a_new_name', - new_secret:='new_s3kr3t_k3y', new_description:='this is the bar key') - $test$, :'test_secret_id'), - 'can update name, secret and description' - ); - -SELECT * FROM finish(); -ROLLBACK; diff --git a/nix/tests/smoke/0004-index_advisor.sql b/nix/tests/smoke/0004-index_advisor.sql deleted file mode 100644 index 53170f6..0000000 --- a/nix/tests/smoke/0004-index_advisor.sql +++ /dev/null @@ -1,19 +0,0 @@ --- Start transaction and plan the tests. -begin; - select plan(1); - - create extension if not exists index_advisor; - - create table account( - id int primary key, - is_verified bool - ); - - select is( - (select count(1) from index_advisor('select id from public.account where is_verified;'))::int, - 1, - 'index_advisor returns 1 row' - ); - - select * from finish(); -rollback; diff --git a/nix/tests/smoke/0005-test_pgroonga_mecab.sql b/nix/tests/smoke/0005-test_pgroonga_mecab.sql deleted file mode 100644 index 7341d5f..0000000 --- a/nix/tests/smoke/0005-test_pgroonga_mecab.sql +++ /dev/null @@ -1,36 +0,0 @@ --- File: 0005-test_pgroonga_revised.sql - -begin; - -- Plan for 3 tests: extension, table, and index - select plan(3); - - -- Create the PGroonga extension - create extension if not exists pgroonga; - - -- -- Test 1: Check if PGroonga extension exists - select has_extension('pgroonga', 'The pgroonga extension should exist.'); - - -- Create the table - create table notes( - id integer primary key, - content text - ); - - -- Test 2: Check if the table was created - SELECT has_table('public', 
'notes', 'The notes table should exist.'); - -- Create the PGroonga index - CREATE INDEX pgroonga_content_index - ON notes - USING pgroonga (content) - WITH (tokenizer='TokenMecab'); - - -- -- Test 3: Check if the index was created - SELECT has_index('public', 'notes', 'pgroonga_content_index', 'The pgroonga_content_index should exist.'); - - -- -- Cleanup (this won't affect the test results as they've already been checked) - DROP INDEX IF EXISTS pgroonga_content_index; - DROP TABLE IF EXISTS notes; - - -- Finish the test plan - select * from finish(); -rollback; \ No newline at end of file diff --git a/nix/tests/sql/extensions_sql_interface.sql b/nix/tests/sql/extensions_sql_interface.sql deleted file mode 100644 index ce75802..0000000 --- a/nix/tests/sql/extensions_sql_interface.sql +++ /dev/null @@ -1,101 +0,0 @@ -/* - -The purpose of this test is to monitor the SQL interface exposed -by Postgres extensions so we have to manually review/approve any difference -that emerge as versions change. 
- -*/ - - -/* - -List all extensions that are not enabled -If a new entry shows up in this list, that means a new extension has been -added and you should `create extension ...` to enable it in ./nix/tests/prime - -*/ - -select - name -from - pg_available_extensions -where - installed_version is null -order by - name asc; - - -/* - -Monitor relocatability and config of each extension -- lesson learned from pg_cron - -*/ - -select - extname as extension_name, - extrelocatable as is_relocatable -from - pg_extension -order by - extname asc; - - -/* - -Monitor extension public function interface - -*/ - -select - e.extname as extension_name, - n.nspname as schema_name, - p.proname as function_name, - pg_catalog.pg_get_function_identity_arguments(p.oid) as argument_types, - pg_catalog.pg_get_function_result(p.oid) as return_type -from - pg_catalog.pg_proc p - join pg_catalog.pg_namespace n - on n.oid = p.pronamespace - join pg_catalog.pg_depend d - on d.objid = p.oid - join pg_catalog.pg_extension e - on e.oid = d.refobjid -where - d.deptype = 'e' -order by - e.extname, - n.nspname, - p.proname, - pg_catalog.pg_get_function_identity_arguments(p.oid); - -/* - -Monitor extension public table/view/matview/index interface - -*/ - -select - e.extname as extension_name, - n.nspname as schema_name, - pc.relname as entity_name, - pa.attname -from - pg_catalog.pg_class pc - join pg_catalog.pg_namespace n - on n.oid = pc.relnamespace - join pg_catalog.pg_depend d - on d.objid = pc.oid - join pg_catalog.pg_extension e - on e.oid = d.refobjid - left join pg_catalog.pg_attribute pa - on pa.attrelid = pc.oid - and pa.attnum > 0 - and not pa.attisdropped -where - d.deptype = 'e' - and pc.relkind in ('r', 'v', 'm', 'i') -order by - e.extname, - pc.relname, - pa.attname; diff --git a/nix/tests/sql/hypopg.sql b/nix/tests/sql/hypopg.sql deleted file mode 100644 index 6aabb69..0000000 --- a/nix/tests/sql/hypopg.sql +++ /dev/null @@ -1,13 +0,0 @@ -create schema v; - -create table v.samp( - 
id int -); - -select 1 from hypopg_create_index($$ - create index on v.samp(id) -$$); - -drop schema v cascade; - - diff --git a/nix/tests/sql/index_advisor.sql b/nix/tests/sql/index_advisor.sql deleted file mode 100644 index 3911d6e..0000000 --- a/nix/tests/sql/index_advisor.sql +++ /dev/null @@ -1,13 +0,0 @@ -create schema v; - -create table v.book( - id int primary key, - title text not null -); - -select - index_statements, errors -from - index_advisor('select id from v.book where title = $1'); - -drop schema v cascade; diff --git a/nix/tests/sql/pg-safeupdate.sql b/nix/tests/sql/pg-safeupdate.sql deleted file mode 100644 index 790ec79..0000000 --- a/nix/tests/sql/pg-safeupdate.sql +++ /dev/null @@ -1,15 +0,0 @@ -load 'safeupdate'; - -set safeupdate.enabled=1; - -create schema v; - -create table v.foo( - id int, - val text -); - -update v.foo - set val = 'bar'; - -drop schema v cascade; diff --git a/nix/tests/sql/pg_graphql.sql b/nix/tests/sql/pg_graphql.sql deleted file mode 100644 index 03f844d..0000000 --- a/nix/tests/sql/pg_graphql.sql +++ /dev/null @@ -1,219 +0,0 @@ -begin; - comment on schema public is '@graphql({"inflect_names": true})'; - - create table account( - id serial primary key, - email varchar(255) not null, - priority int, - status text default 'active' - ); - - create table blog( - id serial primary key, - owner_id integer not null references account(id) - ); - comment on table blog is e'@graphql({"totalCount": {"enabled": true}})'; - - -- Make sure functions still work - create function _echo_email(account) - returns text - language sql - as $$ select $1.email $$; - - /* - Literals - */ - - select graphql.resolve($$ - mutation { - insertIntoAccountCollection(objects: [ - { email: "foo@barsley.com", priority: 1 }, - { email: "bar@foosworth.com" } - ]) { - affectedCount - records { - id - status - echoEmail - blogCollection { - totalCount - } - } - } - } - $$); - - select graphql.resolve($$ - mutation { - insertIntoBlogCollection(objects: [{ - 
ownerId: 1 - }]) { - records { - id - owner { - id - } - } - } - } - $$); - - - -- Override a default on status with null - select graphql.resolve($$ - mutation { - insertIntoAccountCollection(objects: [ - { email: "baz@baz.com", status: null }, - ]) { - affectedCount - records { - email - status - } - } - } - $$); - - - /* - Variables - */ - - select graphql.resolve($$ - mutation newAccount($emailAddress: String) { - xyz: insertIntoAccountCollection(objects: [ - { email: $emailAddress }, - { email: "other@email.com" } - ]) { - affectedCount - records { - id - email - } - } - } - $$, - variables := '{"emailAddress": "foo@bar.com"}'::jsonb - ); - - - -- Variable override of default with null results in null - select graphql.resolve($$ - mutation newAccount($status: String) { - xyz: insertIntoAccountCollection(objects: [ - { email: "1@email.com", status: $status} - ]) { - affectedCount - records { - email - status - } - } - } - $$, - variables := '{"status": null}'::jsonb - ); - - -- Skipping variable override of default results in default - select graphql.resolve($$ - mutation newAccount($status: String) { - xyz: insertIntoAccountCollection(objects: [ - { email: "x@y.com", status: $status}, - ]) { - affectedCount - records { - email - status - } - } - } - $$, - variables := '{}'::jsonb - ); - - - select graphql.resolve($$ - mutation newAccount($acc: AccountInsertInput!) { - insertIntoAccountCollection(objects: [$acc]) { - affectedCount - records { - id - email - } - } - } - $$, - variables := '{"acc": {"email": "bar@foo.com"}}'::jsonb - ); - - select graphql.resolve($$ - mutation newAccounts($acc: [AccountInsertInput!]!) 
{ - insertIntoAccountCollection(objects: $accs) { - affectedCount - records { - id - email - } - } - } - $$, - variables := '{"accs": [{"email": "bar@foo.com"}]}'::jsonb - ); - - -- Single object coerces to a list - select graphql.resolve($$ - mutation { - insertIntoBlogCollection(objects: {ownerId: 1}) { - affectedCount - } - } - $$); - - - /* - Errors - */ - - -- Field does not exist - select graphql.resolve($$ - mutation createAccount($acc: AccountInsertInput) { - insertIntoAccountCollection(objects: [$acc]) { - affectedCount - records { - id - email - } - } - } - $$, - variables := '{"acc": {"doesNotExist": "other"}}'::jsonb - ); - - -- Wrong input type (list of string, not list of object) - select graphql.resolve($$ - mutation { - insertIntoBlogCollection(objects: ["not an object"]) { - affectedCount - } - } - $$); - - -- objects argument is missing - select graphql.resolve($$ - mutation { - insertIntoBlogCollection { - affectedCount - } - } - $$); - - -- Empty call - select graphql.resolve($$ - mutation { - insertIntoBlogCollection(objects: []) { - affectedCount - } - } - $$); - -rollback; diff --git a/nix/tests/sql/pg_hashids.sql b/nix/tests/sql/pg_hashids.sql deleted file mode 100644 index 1b82eee..0000000 --- a/nix/tests/sql/pg_hashids.sql +++ /dev/null @@ -1,6 +0,0 @@ -select id_encode(1001); -- Result: jNl -select id_encode(1234567, 'This is my salt'); -- Result: Pdzxp -select id_encode(1234567, 'This is my salt', 10); -- Result: PlRPdzxpR7 -select id_encode(1234567, 'This is my salt', 10, 'abcdefghijABCDxFGHIJ1234567890'); -- Result: 3GJ956J9B9 -select id_decode('PlRPdzxpR7', 'This is my salt', 10); -- Result: 1234567 -select id_decode('3GJ956J9B9', 'This is my salt', 10, 'abcdefghijABCDxFGHIJ1234567890'); -- Result: 1234567 diff --git a/nix/tests/sql/pg_jsonschema.sql b/nix/tests/sql/pg_jsonschema.sql deleted file mode 100644 index f5d7c8c..0000000 --- a/nix/tests/sql/pg_jsonschema.sql +++ /dev/null @@ -1,68 +0,0 @@ -begin; - --- Test 
json_matches_schema -create table customer( - id serial primary key, - metadata json, - - check ( - json_matches_schema( - '{ - "type": "object", - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string", - "maxLength": 16 - } - } - } - }', - metadata - ) - ) -); - -insert into customer(metadata) -values ('{"tags": ["vip", "darkmode-ui"]}'); - --- Test jsonb_matches_schema -select - jsonb_matches_schema( - '{ - "type": "object", - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string", - "maxLength": 16 - } - } - } - }', - '{"tags": ["vip", "darkmode-ui"]}'::jsonb -); - --- Test jsonschema_is_valid -select - jsonschema_is_valid( - '{ - "type": "object", - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string", - "maxLength": 16 - } - } - } - }'); - --- Test invalid payload -insert into customer(metadata) -values ('{"tags": [1, 3]}'); - -rollback; diff --git a/nix/tests/sql/pg_net.sql b/nix/tests/sql/pg_net.sql deleted file mode 100644 index bf44db5..0000000 --- a/nix/tests/sql/pg_net.sql +++ /dev/null @@ -1,7 +0,0 @@ --- This is a very basic test because you can't get the value returned --- by a pg_net request in the same transaction that created it; - -select - net.http_get ( - 'https://postman-echo.com/get?foo1=bar1&foo2=bar2' - ) as request_id; diff --git a/nix/tests/sql/pg_plan_filter.sql b/nix/tests/sql/pg_plan_filter.sql deleted file mode 100644 index b49834d..0000000 --- a/nix/tests/sql/pg_plan_filter.sql +++ /dev/null @@ -1,22 +0,0 @@ -begin; - load 'plan_filter'; - - create schema v; - - -- create a sample table - create table v.test_table ( - id serial primary key, - data text - ); - - -- insert some test data - insert into v.test_table (data) - values ('sample1'), ('sample2'), ('sample3'); - - set local plan_filter.statement_cost_limit = 0.001; - - select * from v.test_table; - -rollback; - - diff --git a/nix/tests/sql/pg_stat_monitor.sql b/nix/tests/sql/pg_stat_monitor.sql deleted 
file mode 100644 index 69d996b..0000000 --- a/nix/tests/sql/pg_stat_monitor.sql +++ /dev/null @@ -1,6 +0,0 @@ -select - * -from - pg_stat_monitor -where - false; diff --git a/nix/tests/sql/pg_tle.sql b/nix/tests/sql/pg_tle.sql deleted file mode 100644 index 3af1280..0000000 --- a/nix/tests/sql/pg_tle.sql +++ /dev/null @@ -1,70 +0,0 @@ -select - pgtle.install_extension( - 'pg_distance', - '0.1', - 'Distance functions for two points', - $_pg_tle_$ - CREATE FUNCTION dist(x1 float8, y1 float8, x2 float8, y2 float8, norm int) - RETURNS float8 - AS $$ - SELECT (abs(x2 - x1) ^ norm + abs(y2 - y1) ^ norm) ^ (1::float8 / norm); - $$ LANGUAGE SQL; - - CREATE FUNCTION manhattan_dist(x1 float8, y1 float8, x2 float8, y2 float8) - RETURNS float8 - AS $$ - SELECT dist(x1, y1, x2, y2, 1); - $$ LANGUAGE SQL; - - CREATE FUNCTION euclidean_dist(x1 float8, y1 float8, x2 float8, y2 float8) - RETURNS float8 - AS $$ - SELECT dist(x1, y1, x2, y2, 2); - $$ LANGUAGE SQL; - $_pg_tle_$ - ); - -create extension pg_distance; - -select manhattan_dist(1, 1, 5, 5); -select euclidean_dist(1, 1, 5, 5); - -SELECT pgtle.install_update_path( - 'pg_distance', - '0.1', - '0.2', - $_pg_tle_$ - CREATE OR REPLACE FUNCTION dist(x1 float8, y1 float8, x2 float8, y2 float8, norm int) - RETURNS float8 - AS $$ - SELECT (abs(x2 - x1) ^ norm + abs(y2 - y1) ^ norm) ^ (1::float8 / norm); - $$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE; - - CREATE OR REPLACE FUNCTION manhattan_dist(x1 float8, y1 float8, x2 float8, y2 float8) - RETURNS float8 - AS $$ - SELECT dist(x1, y1, x2, y2, 1); - $$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE; - - CREATE OR REPLACE FUNCTION euclidean_dist(x1 float8, y1 float8, x2 float8, y2 float8) - RETURNS float8 - AS $$ - SELECT dist(x1, y1, x2, y2, 2); - $$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE; - $_pg_tle_$ - ); - - -select - pgtle.set_default_version('pg_distance', '0.2'); - -alter extension pg_distance update; - -drop extension pg_distance; - -select - pgtle.uninstall_extension('pg_distance'); - --- 
Restore original state if any of the above fails -drop extension pg_tle cascade; -create extension pg_tle; diff --git a/nix/tests/sql/pgaudit.sql b/nix/tests/sql/pgaudit.sql deleted file mode 100644 index c071c6e..0000000 --- a/nix/tests/sql/pgaudit.sql +++ /dev/null @@ -1,23 +0,0 @@ --- Note: there is no test that the logs were correctly output. Only checking for exceptions -set pgaudit.log = 'write, ddl'; -set pgaudit.log_relation = on; -set pgaudit.log_level = notice; - -create schema v; - -create table v.account( - id int, - name text, - password text, - description text -); - -insert into v.account (id, name, password, description) -values (1, 'user1', 'HASH1', 'blah, blah'); - -select - * -from - v.account; - -drop schema v cascade; diff --git a/nix/tests/sql/pgjwt.sql b/nix/tests/sql/pgjwt.sql deleted file mode 100644 index 24179e7..0000000 --- a/nix/tests/sql/pgjwt.sql +++ /dev/null @@ -1,13 +0,0 @@ -select - sign( - payload := '{"sub":"1234567890","name":"John Doe","iat":1516239022}', - secret := 'secret', - algorithm := 'HS256' - ); - -select - verify( - token := 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoiRm9vIn0.Q8hKjuadCEhnCPuqIj9bfLhTh_9QSxshTRsA5Aq4IuM', - secret := 'secret', - algorithm := 'HS256' - ); diff --git a/nix/tests/sql/pgmq.sql b/nix/tests/sql/pgmq.sql deleted file mode 100644 index cd47cc3..0000000 --- a/nix/tests/sql/pgmq.sql +++ /dev/null @@ -1,90 +0,0 @@ --- Test the standard flow -select - pgmq.create('Foo'); - -select - * -from - pgmq.send( - queue_name:='Foo', - msg:='{"foo": "bar1"}' - ); - --- Test queue is not case sensitive -select - * -from - pgmq.send( - queue_name:='foo', -- note: lowercase useage - msg:='{"foo": "bar2"}', - delay:=5 - ); - -select - msg_id, - read_ct, - message -from - pgmq.read( - queue_name:='Foo', - vt:=30, - qty:=2 - ); - -select - msg_id, - read_ct, - message -from - pgmq.pop('Foo'); - - --- Archive message with msg_id=2. 
-select - pgmq.archive( - queue_name:='Foo', - msg_id:=2 - ); - - -select - pgmq.create('my_queue'); - -select - pgmq.send_batch( - queue_name:='my_queue', - msgs:=array['{"foo": "bar3"}','{"foo": "bar4"}','{"foo": "bar5"}']::jsonb[] -); - -select - pgmq.archive( - queue_name:='my_queue', - msg_ids:=array[3, 4, 5] - ); - -select - pgmq.delete('my_queue', 6); - - -select - pgmq.drop_queue('my_queue'); - -/* --- Disabled until pg_partman goes back into the image -select - pgmq.create_partitioned( - 'my_partitioned_queue', - '5 seconds', - '10 seconds' -); -*/ - - --- Make sure SQLI enabling characters are blocked -select pgmq.create('F--oo'); -select pgmq.create('F$oo'); -select pgmq.create($$F'oo$$); - - - - diff --git a/nix/tests/sql/pgroonga.sql b/nix/tests/sql/pgroonga.sql deleted file mode 100644 index 503f266..0000000 --- a/nix/tests/sql/pgroonga.sql +++ /dev/null @@ -1,48 +0,0 @@ -create schema v; - -create table v.roon( - id serial primary key, - content text -); - - -with tokenizers as ( - select - x - from - jsonb_array_elements( - (select pgroonga_command('tokenizer_list'))::jsonb - ) x(val) - limit - 1 - offset - 1 -- first record is unrelated and not stable -) -select - t.x::jsonb ->> 'name' -from - jsonb_array_elements((select * from tokenizers)) t(x) -order by - t.x::jsonb ->> 'name'; - - -insert into v.roon (content) -values - ('Hello World'), - ('PostgreSQL with PGroonga is a thing'), - ('This is a full-text search test'), - ('PGroonga supports various languages'); - --- Create default index -create index pgroonga_index on v.roon using pgroonga (content); - --- Create mecab tokenizer index since we had a bug with this one once -create index pgroonga_index_mecab on v.roon using pgroonga (content) with (tokenizer='TokenMecab'); - --- Run some queries to test the index -select * from v.roon where content &@~ 'Hello'; -select * from v.roon where content &@~ 'powerful'; -select * from v.roon where content &@~ 'supports'; - - -drop schema v cascade; diff 
--git a/nix/tests/sql/pgrouting.sql b/nix/tests/sql/pgrouting.sql deleted file mode 100644 index e3af562..0000000 --- a/nix/tests/sql/pgrouting.sql +++ /dev/null @@ -1,27 +0,0 @@ -create schema v; - --- create the roads table -create table v.roads ( - id serial primary key, - source integer, - target integer, - cost double precision -); - --- insert sample data into roads table -insert into v.roads (source, target, cost) values -(1, 2, 1.0), -(2, 3, 1.0), -(3, 4, 1.0), -(1, 3, 2.5), -(3, 5, 2.0); - --- create a function to use pgRouting to find the shortest path -select * from pgr_dijkstra( - 'select id, source, target, cost from v.roads', - 1, -- start node - 4 -- end node -); - -drop schema v cascade; - diff --git a/nix/tests/sql/pgsodium.sql b/nix/tests/sql/pgsodium.sql deleted file mode 100644 index cd3c382..0000000 --- a/nix/tests/sql/pgsodium.sql +++ /dev/null @@ -1,4 +0,0 @@ -select - status -from - pgsodium.create_key(); diff --git a/nix/tests/sql/pgtap.sql b/nix/tests/sql/pgtap.sql deleted file mode 100644 index b99976a..0000000 --- a/nix/tests/sql/pgtap.sql +++ /dev/null @@ -1,11 +0,0 @@ -begin; - -select plan(1); - --- Run the tests. -select pass( 'My test passed, w00t!' ); - --- Finish the tests and clean up. 
-select * from finish(); - -rollback; diff --git a/nix/tests/sql/pgvector.sql b/nix/tests/sql/pgvector.sql deleted file mode 100644 index f2de305..0000000 --- a/nix/tests/sql/pgvector.sql +++ /dev/null @@ -1,72 +0,0 @@ -create schema v; - -create table v.items( - id serial primary key, - embedding vector(3), - half_embedding halfvec(3), - bit_embedding bit(3), - sparse_embedding sparsevec(3) -); - --- vector ops -create index on v.items using hnsw (embedding vector_l2_ops); -create index on v.items using hnsw (embedding vector_cosine_ops); -create index on v.items using hnsw (embedding vector_l1_ops); -create index on v.items using ivfflat (embedding vector_l2_ops); -create index on v.items using ivfflat (embedding vector_cosine_ops); - --- halfvec ops -create index on v.items using hnsw (half_embedding halfvec_l2_ops); -create index on v.items using hnsw (half_embedding halfvec_cosine_ops); -create index on v.items using hnsw (half_embedding halfvec_l1_ops); -create index on v.items using ivfflat (half_embedding halfvec_l2_ops); -create index on v.items using ivfflat (half_embedding halfvec_cosine_ops); - --- sparsevec -create index on v.items using hnsw (sparse_embedding sparsevec_l2_ops); -create index on v.items using hnsw (sparse_embedding sparsevec_cosine_ops); -create index on v.items using hnsw (sparse_embedding sparsevec_l1_ops); - --- bit ops -create index on v.items using hnsw (bit_embedding bit_hamming_ops); -create index on v.items using ivfflat (bit_embedding bit_hamming_ops); - --- Populate some records -insert into v.items( - embedding, - half_embedding, - bit_embedding, - sparse_embedding -) -values - ('[1,2,3]', '[1,2,3]', '101', '{1:4}/3'), - ('[2,3,4]', '[2,3,4]', '010', '{1:7,3:0}/3'); - --- Test op types -select - * -from - v.items -order by - embedding <-> '[2,3,5]', - embedding <=> '[2,3,5]', - embedding <+> '[2,3,5]', - embedding <#> '[2,3,5]', - half_embedding <-> '[2,3,5]', - half_embedding <=> '[2,3,5]', - half_embedding <+> '[2,3,5]', - 
half_embedding <#> '[2,3,5]', - sparse_embedding <-> '{2:4,3:1}/3', - sparse_embedding <=> '{2:4,3:1}/3', - sparse_embedding <+> '{2:4,3:1}/3', - sparse_embedding <#> '{2:4,3:1}/3', - bit_embedding <~> '011'; - -select - avg(embedding), - avg(half_embedding) -from - v.items; - --- Cleanup -drop schema v cascade; diff --git a/nix/tests/sql/plpgsql-check.sql b/nix/tests/sql/plpgsql-check.sql deleted file mode 100644 index d54d2c4..0000000 --- a/nix/tests/sql/plpgsql-check.sql +++ /dev/null @@ -1,26 +0,0 @@ -create schema v; - -create table v.t1( - a int, - b int -); - -create or replace function v.f1() - returns void - language plpgsql -as $$ -declare r record; -begin - for r in select * from v.t1 - loop - raise notice '%', r.c; -- there is bug - table t1 missing "c" column - end loop; -end; -$$; - -select * from v.f1(); - --- use plpgsql_check_function to check the function for errors -select * from plpgsql_check_function('v.f1()'); - -drop schema v cascade; diff --git a/nix/tests/sql/plv8.sql b/nix/tests/sql/plv8.sql deleted file mode 100644 index f58360f..0000000 --- a/nix/tests/sql/plv8.sql +++ /dev/null @@ -1,14 +0,0 @@ -create schema v; - --- create a function to perform some JavaScript operations -create function v.multiply_numbers(a integer, b integer) - returns integer - language plv8 -as $$ - return a * b; -$$; - -select - v.multiply_numbers(3, 4); - -drop schema v cascade; diff --git a/nix/tests/sql/postgis.sql b/nix/tests/sql/postgis.sql deleted file mode 100644 index 766844b..0000000 --- a/nix/tests/sql/postgis.sql +++ /dev/null @@ -1,52 +0,0 @@ -create schema v; - --- create a table to store geographic points -create table v.places ( - id serial primary key, - name text, - geom geometry(point, 4326) -- using WGS 84 coordinate system -); - --- insert some sample geographic points into the places table -insert into v.places (name, geom) -values - ('place_a', st_setsrid(st_makepoint(-73.9857, 40.7484), 4326)), -- latitude and longitude for a location - 
('place_b', st_setsrid(st_makepoint(-74.0060, 40.7128), 4326)), -- another location - ('place_c', st_setsrid(st_makepoint(-73.9687, 40.7851), 4326)); -- yet another location - --- calculate the distance between two points (in meters) -select - a.name as place_a, - b.name as place_b, - st_distance(a.geom::geography, b.geom::geography) as distance_meters -from - v.places a, - v.places b -where - a.name = 'place_a' - and b.name = 'place_b'; - --- find all places within a 5km radius of 'place_a' -select - name, - st_distance( - geom::geography, - ( - select - geom - from - v.places - where - name = 'place_a' - )::geography) as distance_meters -from - v.places -where - st_dwithin( - geom::geography, - (select geom from v.places where name = 'place_a')::geography, - 5000 - ) - and name != 'place_a'; - -drop schema v cascade; diff --git a/nix/tests/sql/rum.sql b/nix/tests/sql/rum.sql deleted file mode 100644 index 4686c12..0000000 --- a/nix/tests/sql/rum.sql +++ /dev/null @@ -1,37 +0,0 @@ -create schema v; - -create table v.test_rum( - t text, - a tsvector -); - -create trigger tsvectorupdate - before update or insert on v.test_rum - for each row - execute procedure - tsvector_update_trigger( - 'a', - 'pg_catalog.english', - 't' - ); - -insert into v.test_rum(t) -values - ('the situation is most beautiful'), - ('it is a beautiful'), - ('it looks like a beautiful place'); - -create index rumidx on v.test_rum using rum (a rum_tsvector_ops); - -select - t, - a <=> to_tsquery('english', 'beautiful | place') as rank -from - v.test_rum -where - a @@ to_tsquery('english', 'beautiful | place') -order by - a <=> to_tsquery('english', 'beautiful | place'); - - -drop schema v cascade; diff --git a/nix/tests/sql/timescale.sql b/nix/tests/sql/timescale.sql deleted file mode 100644 index baa96f4..0000000 --- a/nix/tests/sql/timescale.sql +++ /dev/null @@ -1,33 +0,0 @@ --- Confirm we're running the apache version -show timescaledb.license; - --- Create schema v -create schema v; - --- 
Create a table in the v schema -create table v.sensor_data ( - time timestamptz not null, - sensor_id int not null, - temperature double precision not null, - humidity double precision not null -); - --- Convert the table to a hypertable -select create_hypertable('v.sensor_data', 'time'); - --- Insert some data into the hypertable -insert into v.sensor_data (time, sensor_id, temperature, humidity) -values - ('2024-08-09', 1, 22.5, 60.2), - ('2024-08-08', 1, 23.0, 59.1), - ('2024-08-07', 2, 21.7, 63.3); - --- Select data from the hypertable -select - * -from - v.sensor_data; - --- Drop schema v and all its entities -drop schema v cascade; - diff --git a/nix/tests/sql/vault.sql b/nix/tests/sql/vault.sql deleted file mode 100644 index bafcb4d..0000000 --- a/nix/tests/sql/vault.sql +++ /dev/null @@ -1,30 +0,0 @@ -select - 1 -from - vault.create_secret('my_s3kre3t'); - -select - 1 -from - vault.create_secret( - 'another_s3kre3t', - 'unique_name', - 'This is the description' - ); - -insert into vault.secrets (secret) -values - ('s3kre3t_k3y'); - -select - name, - description -from - vault.decrypted_secrets -order by - created_at desc -limit - 3; - - - diff --git a/nix/tests/sql/wal2json.sql b/nix/tests/sql/wal2json.sql deleted file mode 100644 index 6ec4a6d..0000000 --- a/nix/tests/sql/wal2json.sql +++ /dev/null @@ -1,32 +0,0 @@ -create schema v; - -create table v.foo( - id int primary key -); - -select - 1 -from - pg_create_logical_replication_slot('reg_test', 'wal2json', false); - -insert into v.foo(id) values (1); - -select - data -from - pg_logical_slot_get_changes( - 'reg_test', - null, - null, - 'include-pk', '1', - 'include-transaction', 'false', - 'include-timestamp', 'false', - 'include-type-oids', 'false', - 'format-version', '2', - 'actions', 'insert,update,delete' - ) x; - -select - pg_drop_replication_slot('reg_test'); - -drop schema v cascade; diff --git a/nix/tests/util/pgsodium_getkey.sh b/nix/tests/util/pgsodium_getkey.sh deleted file mode 100755 index 
106e3bf..0000000 --- a/nix/tests/util/pgsodium_getkey.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -KEY_FILE="${1:-/tmp/pgsodium.key}" - -if [[ ! -f "${KEY_FILE}" ]]; then - head -c 32 /dev/urandom | od -A n -t x1 | tr -d ' \n' > "${KEY_FILE}" -fi -cat $KEY_FILE \ No newline at end of file diff --git a/nix/tests/util/pgsodium_getkey_arb.sh b/nix/tests/util/pgsodium_getkey_arb.sh deleted file mode 100755 index 446dbba..0000000 --- a/nix/tests/util/pgsodium_getkey_arb.sh +++ /dev/null @@ -1 +0,0 @@ -echo -n 8359dafbba5c05568799c1c24eb6c2fbff497654bc6aa5e9a791c666768875a1 \ No newline at end of file diff --git a/nix/tools/README.md b/nix/tools/README.md deleted file mode 100644 index 2606a57..0000000 --- a/nix/tools/README.md +++ /dev/null @@ -1,2 +0,0 @@ -This directory just contains tools, but you can't run them directly. For the -sake of robustness, you should use `nix run` on this repository to do so. diff --git a/nix/tools/migrate-tool.sh.in b/nix/tools/migrate-tool.sh.in deleted file mode 100644 index 277ee81..0000000 --- a/nix/tools/migrate-tool.sh.in +++ /dev/null @@ -1,123 +0,0 @@ -#!/usr/bin/env bash - -[ ! -z "$DEBUG" ] && set -x - -# first argument is the old version; a path 15 or 16 -if [[ $1 == /nix/store* ]]; then - if [ ! -L "$1/receipt.json" ] || [ ! -e "$1/receipt.json" ]; then - echo "ERROR: $1 does not look like a valid Postgres install" - exit 1 - fi - OLDVER="$1" -elif [ "$1" == "15" ]; then - PSQL15=@PSQL15_BINDIR@ - OLDVER="$PSQL15" -elif [ "$1" == "16" ]; then - PSQL16=@PSQL16_BINDIR@ - OLDVER="$PSQL16" -else - echo "Please provide a valid Postgres version (15 or 16), or a /nix/store path" - exit 1 -fi - -# second argument is the new version; 15 or 16 -if [[ $2 == /nix/store* ]]; then - if [ ! -L "$2/receipt.json" ] || [ ! 
-e "$2/receipt.json" ]; then - echo "ERROR: $1 does not look like a valid Postgres install" - exit 1 - fi - NEWVER="$2" -elif [ "$2" == "15" ]; then - PSQL15=@PSQL15_BINDIR@ - NEWVER="$PSQL15" -elif [ "$2" == "16" ]; then - PSQL16=@PSQL16_BINDIR@ - NEWVER="$PSQL16" - echo "NEWVER IS $NEWVER" -else - echo "Please provide a valid Postgres version (15 or 16), or a /nix/store path" - exit 1 -fi - -# thid argument is the upgrade method: either pg_dumpall or pg_ugprade -if [ "$3" != "pg_dumpall" ] && [ "$3" != "pg_upgrade" ]; then - echo "Please provide a valid upgrade method (pg_dumpall or pg_upgrade)" - exit 1 -fi -UPGRADE_METHOD="$3" - -echo "Old server build: PSQL $1" -echo "New server build: PSQL $2" -echo "Upgrade method: $UPGRADE_METHOD" - -PORTNO="${2:-@PGSQL_DEFAULT_PORT@}" -DATDIR=$(mktemp -d) -NEWDAT=$(mktemp -d) -mkdir -p "$DATDIR" "$NEWDAT" - -echo "NOTE: using temporary directory $DATDIR for PSQL $1 data, which will not be removed" -echo "NOTE: you are free to re-use this data directory at will" -echo - -$OLDVER/bin/initdb -D "$DATDIR" --locale=C --username=tealbase_admin -$NEWVER/bin/initdb -D "$NEWDAT" --locale=C --username=tealbase_admin - -# NOTE (aseipp): we need to patch postgresql.conf to have the right pgsodium_getkey script -PSQL_CONF_FILE=@PSQL_CONF_FILE@ -PGSODIUM_GETKEY_SCRIPT=@PGSODIUM_GETKEY@ -echo "NOTE: patching postgresql.conf files" -for x in "$DATDIR" "$NEWDAT"; do - sed \ - "s#@PGSODIUM_GETKEY_SCRIPT@#$PGSODIUM_GETKEY_SCRIPT#g" \ - $PSQL_CONF_FILE > "$x/postgresql.conf" -done - -echo "NOTE: Starting first server (v${1}) to load data into the system" -$OLDVER/bin/pg_ctl start -D "$DATDIR" - -PRIMING_SCRIPT=@PRIMING_SCRIPT@ -MIGRATION_DATA=@MIGRATION_DATA@ - -$OLDVER/bin/psql -h localhost -d postgres -Xf "$PRIMING_SCRIPT" -$OLDVER/bin/psql -h localhost -d postgres -Xf "$MIGRATION_DATA" - -if [ "$UPGRADE_METHOD" == "pg_upgrade" ]; then - echo "NOTE: Stopping old server (v${1}) to prepare for migration" - $OLDVER/bin/pg_ctl stop -D "$DATDIR" 
- - echo "NOTE: Migrating old data $DATDIR to $NEWDAT using pg_upgrade" - - export PGDATAOLD="$DATDIR" - export PGDATANEW="$NEWDAT" - export PGBINOLD="$OLDVER/bin" - export PGBINNEW="$NEWVER/bin" - - if ! $NEWVER/bin/pg_upgrade --check; then - echo "ERROR: pg_upgrade check failed" - exit 1 - fi - - echo "NOTE: pg_upgrade check passed, proceeding with migration" - $NEWVER/bin/pg_upgrade - rm -f delete_old_cluster.sh # we don't need this - exit 0 -fi - -if [ "$UPGRADE_METHOD" == "pg_dumpall" ]; then - SQLDAT="$DATDIR/dump.sql" - echo "NOTE: Exporting data via pg_dumpall ($SQLDAT)" - $NEWVER/bin/pg_dumpall -h localhost > "$SQLDAT" - - echo "NOTE: Stopping old server (v${1}) to prepare for migration" - $OLDVER/bin/pg_ctl stop -D "$DATDIR" - - echo "NOTE: Starting second server (v${2}) to load data into the system" - $NEWVER/bin/pg_ctl start -D "$NEWDAT" - - echo "NOTE: Loading data into new server (v${2}) via 'cat | psql'" - cat "$SQLDAT" | $NEWVER/bin/psql -h localhost -d postgres - - printf "\n\n\n\n" - echo "NOTE: Done, check logs. 
Stopping the server; new database is located at $NEWDAT" - $NEWVER/bin/pg_ctl stop -D "$NEWDAT" -fi diff --git a/nix/tools/postgresql_schema.sql b/nix/tools/postgresql_schema.sql deleted file mode 100644 index 76518a6..0000000 --- a/nix/tools/postgresql_schema.sql +++ /dev/null @@ -1,11 +0,0 @@ -ALTER DATABASE postgres SET "app.settings.jwt_secret" TO 'my_jwt_secret_which_is_not_so_secret'; -ALTER DATABASE postgres SET "app.settings.jwt_exp" TO 3600; -ALTER USER tealbase_admin WITH PASSWORD 'postgres'; -ALTER USER postgres WITH PASSWORD 'postgres'; -ALTER USER authenticator WITH PASSWORD 'postgres'; -ALTER USER pgbouncer WITH PASSWORD 'postgres'; -ALTER USER tealbase_auth_admin WITH PASSWORD 'postgres'; -ALTER USER tealbase_storage_admin WITH PASSWORD 'postgres'; -ALTER USER tealbase_replication_admin WITH PASSWORD 'postgres'; -ALTER ROLE tealbase_read_only_user WITH PASSWORD 'postgres'; -ALTER ROLE tealbase_admin SET search_path TO "$user",public,auth,extensions; diff --git a/nix/tools/run-client.sh.in b/nix/tools/run-client.sh.in deleted file mode 100644 index f50e605..0000000 --- a/nix/tools/run-client.sh.in +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env bash -# shellcheck shell=bash - -[ ! 
-z "$DEBUG" ] && set -x - -# first argument should be '15' or '16' for the version -if [ "$1" == "15" ]; then - echo "Starting client for PSQL 15" - PSQL15=@PSQL15_BINDIR@ - BINDIR="$PSQL15" -elif [ "$1" == "16" ]; then - echo "Starting client for PSQL 16" - PSQL16=@PSQL16_BINDIR@ - BINDIR="$PSQL16" -elif [ "$1" == "orioledb-16" ]; then - echo "Starting client for PSQL ORIOLEDB 16" - PSQLORIOLEDB16=@PSQLORIOLEDB16_BINDIR@ - BINDIR="$PSQLORIOLEDB16" -else - echo "Please provide a valid Postgres version (15, 16, or orioledb-16)" - exit 1 -fi -#vars for migration.sh -export PATH=$BINDIR/bin:$PATH -export POSTGRES_DB=postgres -export POSTGRES_HOST=localhost -export POSTGRES_PORT=@PGSQL_DEFAULT_PORT@ -PORTNO="${2:-@PGSQL_DEFAULT_PORT@}" -PGSQL_SUPERUSER=@PGSQL_SUPERUSER@ -MIGRATIONS_DIR=@MIGRATIONS_DIR@ -POSTGRESQL_SCHEMA_SQL=@POSTGRESQL_SCHEMA_SQL@ -PGBOUNCER_AUTH_SCHEMA_SQL=@PGBOUNCER_AUTH_SCHEMA_SQL@ -STAT_EXTENSION_SQL=@STAT_EXTENSION_SQL@ -psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U "$PGSQL_SUPERUSER" -p "$PORTNO" -h localhost -d postgres <<-EOSQL - create role postgres superuser login password '$PGPASSWORD'; - alter database postgres owner to postgres; -EOSQL -for sql in "$MIGRATIONS_DIR"/init-scripts/*.sql; do - echo "$0: running $sql" - psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -p "$PORTNO" -h localhost -f "$sql" postgres -done -psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -p "$PORTNO" -h localhost -c "ALTER USER tealbase_admin WITH PASSWORD '$PGPASSWORD'" -psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -p "$PORTNO" -h localhost -d postgres -f "$PGBOUNCER_AUTH_SCHEMA_SQL" -psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -p "$PORTNO" -h localhost -d postgres -f "$STAT_EXTENSION_SQL" -# run migrations as super user - postgres user demoted in post-setup -for sql in "$MIGRATIONS_DIR"/migrations/*.sql; do - echo "$0: running $sql" - psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U 
tealbase_admin -p "$PORTNO" -h localhost -f "$sql" postgres -done -psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U tealbase_admin -p "$PORTNO" -h localhost -f "$POSTGRESQL_SCHEMA_SQL" postgres -# TODO Do we need to reset stats when running migrations locally? -#psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U tealbase_admin -p "$PORTNO" -h localhost -c 'SELECT extensions.pg_stat_statements_reset(); SELECT pg_stat_reset();' postgres || true - -exec psql -U postgres -p "$PORTNO" -h localhost postgres diff --git a/nix/tools/run-replica.sh.in b/nix/tools/run-replica.sh.in deleted file mode 100644 index e2096b1..0000000 --- a/nix/tools/run-replica.sh.in +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash -# shellcheck shell=bash - -[ ! -z "$DEBUG" ] && set -x - -# first argument should be '15' or '16' for the version -if [ "$1" == "15" ]; then - echo "Starting server for PSQL 15" - PSQL15=@PSQL15_BINDIR@ - BINDIR="$PSQL15" -elif [ "$1" == "16" ]; then - echo "Starting server for PSQL 16" - PSQL16=@PSQL16_BINDIR@ - BINDIR="$PSQL16" -elif [ "$1" == "orioledb-16" ]; then - echo "Starting server for PSQL ORIOLEDB 16" - PSQLORIOLEDB16=@PSQLORIOLEDB16_BINDIR@ - BINDIR="$PSQLORIOLEDB16" -else - echo "Please provide a valid Postgres version (15, 16 or orioledb-16)" - exit 1 -fi - -export PATH=$BINDIR/bin:$PATH - -PGSQL_SUPERUSER=@PGSQL_SUPERUSER@ -MASTER_PORTNO="$2" -REPLICA_PORTNO="$3" -REPLICA_SLOT="replica_$RANDOM" -DATDIR=$(mktemp -d) -mkdir -p "$DATDIR" - -echo "NOTE: runing pg_basebackup for server on port $MASTER_PORTNO" -echo "NOTE: using replica slot $REPLICA_SLOT" - -pg_basebackup -p "$MASTER_PORTNO" -h localhost -U "${PGSQL_SUPERUSER}" -X stream -C -S "$REPLICA_SLOT" -v -R -D "$DATDIR" - -echo "NOTE: using port $REPLICA_PORTNO for replica" -echo "NOTE: using temporary directory $DATDIR for data, which will not be removed" -echo "NOTE: you are free to re-use this data directory at will" -echo - -exec postgres -p "$REPLICA_PORTNO" -D "$DATDIR" -k /tmp diff 
--git a/nix/tools/run-server.sh.in b/nix/tools/run-server.sh.in deleted file mode 100644 index 977a437..0000000 --- a/nix/tools/run-server.sh.in +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env bash -# shellcheck shell=bash -[ ! -z "$DEBUG" ] && set -x -# first argument should be '15' or '16' for the version -if [ "$1" == "15" ]; then - echo "Starting server for PSQL 15" - PSQL15=@PSQL15_BINDIR@ - BINDIR="$PSQL15" -elif [ "$1" == "16" ]; then - echo "Starting server for PSQL 16" - PSQL16=@PSQL16_BINDIR@ - BINDIR="$PSQL16" -elif [ "$1" == "orioledb-16" ]; then - echo "Starting server for PSQL ORIOLEDB 16" - PSQLORIOLEDB16=@PSQLORIOLEDB16_BINDIR@ - BINDIR="$PSQLORIOLEDB16" -else - echo "Please provide a valid Postgres version (15, 16 or orioledb-16)" - exit 1 -fi -export PATH=$BINDIR/bin:$PATH -PGSQL_SUPERUSER=@PGSQL_SUPERUSER@ -PSQL_CONF_FILE=@PSQL_CONF_FILE@ -PGSODIUM_GETKEY_SCRIPT=@PGSODIUM_GETKEY@ -PORTNO="${2:-@PGSQL_DEFAULT_PORT@}" -SUPAUTILS_CONFIG_FILE=@SUPAUTILS_CONF_FILE@ -LOGGING_CONFIG_FILE=@LOGGING_CONF_FILE@ -READREPL_CONFIG_FILE=@READREPL_CONF_FILE@ -PG_HBA_FILE=@PG_HBA@ -PG_IDENT_FILE=@PG_IDENT@ -EXTENSION_CUSTOM_SCRIPTS=@EXTENSION_CUSTOM_SCRIPTS_DIR@ -GROONGA=@GROONGA_DIR@ -DATDIR=$(mktemp -d) -LOCALE_ARCHIVE=@LOCALES@ -export LOCALE_ARCHIVE -export LANG=en_US.UTF-8 -export LANGUAGE=en_US.UTF-8 -export LC_ALL=en_US.UTF-8 -export LANG=en_US.UTF-8 -export LC_CTYPE=en_US.UTF-8 -mkdir -p "$DATDIR" -echo "NOTE: using port $PORTNO for server" -echo "NOTE: using temporary directory $DATDIR for data, which will not be removed" -echo "NOTE: you are free to re-use this data directory at will" -initdb -U "$PGSQL_SUPERUSER" -D "$DATDIR" -echo "NOTE: patching postgresql.conf files" -cp "$PG_HBA_FILE" "$DATDIR/pg_hba.conf" -cp "$PG_IDENT_FILE" "$DATDIR/pg_ident.conf" -cp "$READREPL_CONFIG_FILE" "$DATDIR/read-replica.conf" -cp -r "$EXTENSION_CUSTOM_SCRIPTS" "$DATDIR" -sed "s|supautils.privileged_extensions_custom_scripts_path = 
'/etc/postgresql-custom/extension-custom-scripts'|supautils.privileged_extensions_custom_scripts_path = '$DATDIR/extension-custom-scripts'|" "$SUPAUTILS_CONFIG_FILE" > "$DATDIR/supautils.conf" -sed -e "1i\\ -include = '$DATDIR/supautils.conf'" \ --e "\$a\\ -pgsodium.getkey_script = '$PGSODIUM_GETKEY_SCRIPT'" \ --e "s|data_directory = '/var/lib/postgresql/data'|data_directory = '$DATDIR'|" \ --e "s|hba_file = '/etc/postgresql/pg_hba.conf'|hba_file = '$DATDIR/pg_hba.conf'|" \ --e "s|ident_file = '/etc/postgresql/pg_ident.conf'|ident_file = '$DATDIR/pg_ident.conf'|" \ --e "s|include = '/etc/postgresql/logging.conf'|#&|" \ --e "s|include = '/etc/postgresql-custom/read-replica.conf'|include = '$DATDIR/read-replica.conf'|" \ --e "\$a\\ -session_preload_libraries = 'supautils'" \ -"$PSQL_CONF_FILE" > "$DATDIR/postgresql.conf" -export GRN_PLUGINS_DIR=$GROONGA/lib/groonga/plugins -postgres --config-file="$DATDIR/postgresql.conf" -p "$PORTNO" -D "$DATDIR" -k /tmp diff --git a/nix/tools/sync-exts-versions.sh.in b/nix/tools/sync-exts-versions.sh.in deleted file mode 100644 index 1b120e9..0000000 --- a/nix/tools/sync-exts-versions.sh.in +++ /dev/null @@ -1,282 +0,0 @@ -#!/usr/bin/env bash -# shellcheck shell=bash - -[ ! -z "$DEBUG" ] && set -x - -#pass in env vars supplied by nix -yq=@YQ@ -jq=@JQ@ -editor=@NIX_EDITOR@ -ansible_vars=$($yq '.' 
$PWD/ansible/vars.yml) -prefetchurl=@NIXPREFETCHURL@ -_nix=@NIX@ -fetch_source_url() { - local source_url=${1//\"/} # Remove double quotes - source_url=${source_url//\'/} # Remove single quotes - - # Check if the source URL is provided - if [ -z "$source_url" ]; then - echo "Usage: fetch_nix_url " - return 1 - fi - - echo "$source_url" - - # Run nix-prefetch-url command - local initial_hash=$($prefetchurl --type sha256 "$source_url" --unpack | cut -d ' ' -f 2) - #once we can bump up nix version, we can use nix hash convert --hash-algo sha256 - local final_hash=$($_nix hash to-sri --type sha256 $initial_hash) - echo "$final_hash" -} - -sync_version() { - - local package_name=$1 - local version="\"$2\"" - local hash="\"$3\"" - - - # Update the version and hash in the Nix expression - $editor $PWD/nix/ext/$package_name.nix version --inplace -v "$version" - $editor $PWD/nix/ext/$package_name.nix src.hash --inplace -v $hash -} - -run_sync() { - local varname=$1 - local package_name=$2 - - version=$(echo $ansible_vars | $jq -r '.'$varname'') - echo "$key: $version" - url=$($_nix eval .#psql_15/exts/$package_name.src.url) - hash=$(fetch_source_url $url | tail -n 1) - $(sync_version $package_name $version $hash) - echo "synced $package_name to version $version with hash $hash" - - -} - -#for use where nix uses fetchurl -# instead of fetchFromGithub -fetchurl_source_url() { - local source_url=${1//\"/} # Remove double quotes - source_url=${source_url//\'/} # Remove single quotes - - # Check if the source URL is provided - if [ -z "$source_url" ]; then - echo "Usage: fetch_nix_url " - return 1 - fi - - echo "$source_url" - - # Run nix-prefetch-url command - local initial_hash=$($prefetchurl --type sha256 "$source_url" | cut -d ' ' -f 2) - #once we can bump up nix version, we can use nix hash convert --hash-algo sha256 - local final_hash=$($_nix hash to-sri --type sha256 $initial_hash) - echo "$final_hash" -} - -sync_version_fetchurl() { - - local package_name=$1 - local 
version="\"$2\"" - local hash="\"$3\"" - - - # Update the version and hash in the Nix expression - $editor $PWD/nix/ext/$package_name.nix version --inplace -v "$version" - $editor $PWD/nix/ext/$package_name.nix src.sha256 --inplace -v $hash -} - - -run_sync_fetchurl() { - local varname=$1 - local package_name=$2 - - version=$(echo $ansible_vars | $jq -r '.'$varname'') - echo "$key: $version" - url=$($_nix eval .#psql_15/exts/$package_name.src.url) - hash=$(fetchurl_source_url $url | tail -n 1) - $(sync_version_fetchurl $package_name $version $hash) - echo "synced $package_name to version $version with hash $hash" - - -} - -#for use on derivations that use cargoHash -update_cargo_vendor_hash() { - local package_name=$1 - $editor $PWD/nix/ext/$package_name.nix cargoHash --inplace -v "" - output=$($_nix build .#psql_15/exts/$package_name 2>&1) - - # Check if the command exited with an error - if [ $? -ne 0 ]; then - # Extract the hash value after "got: " - hash_value_scraped=$(echo "$output" | grep "got:" | awk '{for (i=1; i<=NF; i++) if ($i ~ /^sha/) print $i}') - hash_value="\"$hash_value_scraped\"" - # Continue using the captured hash value - $editor $PWD/nix/ext/$package_name.nix cargoHash --inplace -v $hash_value - echo "Updated cargoHash for $package_name to $hash_value" - else - echo "$package_name builds successfully, moving on..." 
- fi -} - -#iterate values in ansible vars, case statement -# to match ansible var to package name -keys=$(echo "$ansible_vars" | $jq -r 'keys[]') - -for key in $keys; do - case $key in - "pg_hashids_release") - varname="pg_hashids_release" - package_name="pg_hashids" - run_sync $varname $package_name - ;; - "hypopg_release") - varname="hypopg_release" - package_name="hypopg" - run_sync $varname $package_name - ;; - "pg_graphql_release") - varname="pg_graphql_release" - package_name="pg_graphql" - run_sync $varname $package_name - update_cargo_vendor_hash $package_name - ;; - "pg_cron_release") - varname="pg_cron_release" - package_name="pg_cron" - run_sync $varname $package_name - ;; - "pgsql_http_release") - varname="pgsql_http_release" - package_name="pgsql-http" - run_sync $varname $package_name - ;; - "pg_jsonschema_release") - varname="pg_jsonschema_release" - package_name="pg_jsonschema" - run_sync $varname $package_name - update_cargo_vendor_hash $package_name - ;; - "pg_net_release") - varname="pg_net_release" - package_name="pg_net" - run_sync $varname $package_name - ;; - "pg_plan_filter_release") - varname="pg_plan_filter_release" - package_name="pg_plan_filter" - run_sync $varname $package_name - ;; - "pg_safeupdate_release") - varname="pg_safeupdate_release" - package_name="pg-safeupdate" - run_sync $varname $package_name - ;; - "pgsodium_release") - varname="pgsodium_release" - package_name="pgsodium" - run_sync $varname $package_name - ;; - "pg_repack_release") - varname="pg_repack_release" - package_name="pg_repack" - run_sync $varname $package_name - ;; - "pgrouting_release") - varname="pgrouting_release" - package_name="pgrouting" - run_sync $varname $package_name - ;; - "ptap_release") - varname="pgtap_release" - package_name="pgtap" - run_sync $varname $package_name - ;; - "pg_stat_monitor_release") - varname="pg_stat_monitor_release" - package_name="pg_stat_monitor" - run_sync $varname $package_name - ;; - "pg_tle_release") - 
varname="pg_tle_release" - package_name="pg_tle" - run_sync $varname $package_name - ;; - "pgaudit_release") - varname="pgaudit_release" - package_name="pgaudit" - run_sync $varname $package_name - ;; - "plpgsql_check_release") - varname="plpgsql_check_release" - package_name="plpgsql-check" - run_sync $varname $package_name - ;; - "pgvector_release") - varname="pgvector_release" - package_name="pgvector" - run_sync $varname $package_name - ;; - "pgjwt_release") - varname="pgjwt_release" - package_name="pgjwt" - run_sync $varname $package_name - ;; - "plv8_release") - varname="plv8_release" - package_name="plv8" - run_sync $varname $package_name - ;; - "postgis_release") - varname="postgis_release" - package_name="postgis" - run_sync_fetchurl $varname $package_name - ;; - "pgroonga_release") - varname="pgroonga_release" - package_name="pgroonga" - run_sync_fetchurl $varname $package_name - ;; - "rum_release") - varname="rum_release" - package_name="rum" - run_sync $varname $package_name - ;; - "timescaledb_release") - varname="timescaledb_release" - package_name="timescaledb" - run_sync $varname $package_name - ;; - "supautils_release") - varname="supautils_release" - package_name="supautils" - run_sync $varname $package_name - ;; - "vault_release") - varname="vault_release" - package_name="vault" - run_sync $varname $package_name - ;; - "wal2json_release") - varname="wal2json_release" - package_name="wal2json" - run_sync $varname $package_name - ;; - *) - ;; - esac -done - -# url=$($_nix eval .#psql_16/exts/pgvector.src.url) - -# fetch_nix_url "$url" - -#res=$editor /home/sam/postgres/nix/ext/pgvector.nix src -#echo $res -# url=$($_nix eval .#psql_16/exts/pgvector.src.url) -# #echo $url -# hash=$(fetch_source_url $url | tail -n 1) -# echo "$hash" diff --git a/qemu-arm64-nix.pkr.hcl b/qemu-arm64-nix.pkr.hcl new file mode 100644 index 0000000..d36eeaa --- /dev/null +++ b/qemu-arm64-nix.pkr.hcl @@ -0,0 +1,137 @@ +variable "ansible_arguments" { + type = string + 
default = "--skip-tags install-postgrest,install-pgbouncer,install-tealbase-internal" +} + +variable "environment" { + type = string + default = "prod" +} + +variable "git_sha" { + type = string +} + +locals { + creator = "packer" +} + +variable "postgres-version" { + type = string + default = "" +} + +variable "postgres-major-version" { + type = string + default = "" +} + +variable "git-head-version" { + type = string + default = "unknown" +} + +variable "packer-execution-id" { + type = string + default = "unknown" +} + +packer { + required_plugins { + amazon = { + source = "github.com/hashicorp/amazon" + version = "~> 1" + } + qemu = { + version = "~> 1.0" + source = "github.com/hashicorp/qemu" + } + } +} + +source "null" "dependencies" { + communicator = "none" +} + +build { + name = "cloudimg.deps" + sources = ["source.null.dependencies"] + + provisioner "shell-local" { + inline = [ + "cp /usr/share/AAVMF/AAVMF_VARS.fd AAVMF_VARS.fd", + "cloud-localds seeds-cloudimg.iso user-data-cloudimg meta-data" + ] + inline_shebang = "/bin/bash -e" + } +} + +source "qemu" "cloudimg" { + boot_wait = "2s" + cpus = 8 + disk_image = true + disk_size = "15G" + format = "qcow2" + headless = true + http_directory = "http" + iso_checksum = "file:https://cloud-images.ubuntu.com/noble/current/SHA256SUMS" + iso_url = "https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-arm64.img" + memory = 40000 + qemu_binary = "qemu-system-aarch64" + qemuargs = [ + ["-machine", "virt,gic-version=3"], + ["-cpu", "host"], + ["-device", "virtio-gpu-pci"], + ["-drive", "if=pflash,format=raw,id=ovmf_code,readonly=on,file=/usr/share/AAVMF/AAVMF_CODE.fd"], + ["-drive", "if=pflash,format=raw,id=ovmf_vars,file=AAVMF_VARS.fd"], + ["-drive", "file=output-cloudimg/packer-cloudimg,format=qcow2"], + ["-drive", "file=seeds-cloudimg.iso,format=raw"], + ["--enable-kvm"] + ] + shutdown_command = "sudo -S shutdown -P now" + ssh_handshake_attempts = 500 + ssh_password = "ubuntu" + ssh_timeout = "1h" + 
ssh_username = "ubuntu" + ssh_wait_timeout = "1h" + use_backing_file = false + accelerator = "kvm" +} + +build { + name = "cloudimg.image" + sources = ["source.qemu.cloudimg"] + + # Copy ansible playbook + provisioner "shell" { + inline = ["mkdir /tmp/ansible-playbook"] + } + + provisioner "file" { + source = "ansible" + destination = "/tmp/ansible-playbook" + } + + provisioner "file" { + source = "scripts" + destination = "/tmp/ansible-playbook" + } + + provisioner "file" { + source = "migrations" + destination = "/tmp" + } + + provisioner "shell" { + environment_vars = [ + "POSTGRES_MAJOR_VERSION=${var.postgres-major-version}", + "POSTGRES_tealbase_VERSION=${var.postgres-version}", + "GIT_SHA=${var.git_sha}" + ] + use_env_var_file = true + script = "ebssurrogate/scripts/qemu-bootstrap-nix.sh" + execute_command = "sudo -S sh -c '. {{.EnvVarFile}} && cd /tmp/ansible-playbook && {{.Path}}'" + start_retry_timeout = "5m" + skip_clean = true + } +} diff --git a/qemu_artifact.md b/qemu_artifact.md new file mode 100644 index 0000000..a93dae5 --- /dev/null +++ b/qemu_artifact.md @@ -0,0 +1,52 @@ +# QEMU artifact + +We build a container image that contains a QEMU qcow2 disk image. Container images are a convenient mechanism to ship the disk image to the nodes where they're needed. + +Given the size of the image, the first VM using it on a node might take a while to come up, while the image is being pulled down. The image can be pre-fetched to avoid this; we might also switch to other deployment mechanisms in the future. + +### Build process + +The current AMI process involves a few steps: + +1. nix package is built and published using GHA (`.github/workflows/nix-build.yml`) + - this builds Postgres along with the PG extensions we use. +2. 
"stage1" build (`amazon-arm64-nix.pkr.hcl`, invoked via `.github/workflows/ami-release-nix.yml`) + - uses an upstream Ubuntu image to initialize the AMI + - installs and configures the majority of the software that gets shipped as part of the AMI (e.g. gotrue, postgrest, ...) +3. "stage2" build (`stage2-nix-psql.pkr.hcl`, invoked via `.github/workflows/ami-release-nix.yml`) + - uses the image published from (2) + - installs and configures the software that is built and published using nix in (1) + - cleans up build dependencies etc + +The QEMU artifact process collapses (2) and (3): + +a. nix package is built and published using GHA (`.github/workflows/nix-build.yml`) +b. packer build (`qemu-arm64-nix.pkr.hcl`) + - uses an upstream Ubuntu live image as the base + - performs the work that was performed as part of the "stage1" and "stage2" builds + - this work is executed using `ebssurrogate/scripts/qemu-bootstrap-nix.sh` + +While the AMI build uses the EBS Surrogate Packer builder to create a minimal boot environment that it then adds things to, the QEMU build merely adds things to the Ubuntu Cloud Image. As such, it's likely possible to make something more minimal with a bit more work, but this was deemed unnecessary for now. Collapsing Stage1 and Stage2 was done in the interest of iteration speed, as executing them together is much faster than saving an artifact off stage1, booting another VM off it, and then executing stage2. + +## Publish image for later use + +Following `make init alpine-image`, the generated VM image should be bundled as a container image with the name: `tealbase-postgres-test`. Publish the built docker image to a registry of your choosing, and use the published image with e.g. KubeVirt. + +## Iterating on image + +For faster iteration, it's more convenient to build the image on an ubuntu bare-metal node that's part of the EKS cluster you're using. 
Build the image in the `k8s.io` namespace in order for it to be available for immediate use on that node. + +### Dependencies note + +Installing `docker.io` on an EKS node might interfere with the k8s setup of the node. You can instead install `nerdctl` and `buildkit`: + +```bash +curl -L -O https://github.com/containerd/nerdctl/releases/download/v2.0.0/nerdctl-2.0.0-linux-arm64.tar.gz +tar -xzf nerdctl-2.0.0-linux-arm64.tar.gz +mv ./nerdctl /usr/local/bin/ +curl -O -L https://github.com/moby/buildkit/releases/download/v0.17.1/buildkit-v0.17.1.linux-arm64.tar.gz +tar -xzf buildkit-v0.17.1.linux-arm64.tar.gz +mv bin/* /usr/local/bin/ +``` + +You'll need to run buildkit: `buildkitd` diff --git a/scripts/90-cleanup-qemu.sh b/scripts/90-cleanup-qemu.sh new file mode 100644 index 0000000..c70c1d2 --- /dev/null +++ b/scripts/90-cleanup-qemu.sh @@ -0,0 +1,62 @@ +#!/bin/bash + +# DigitalOcean Marketplace Image Validation Tool +# © 2021 DigitalOcean LLC. +# This code is licensed under Apache 2.0 license (see LICENSE.md for details) + +set -o errexit + +# Ensure /tmp exists and has the proper permissions before +# checking for security updates +# https://github.com/digitalocean/marketplace-partners/issues/94 +if [[ ! 
-d /tmp ]]; then + mkdir /tmp +fi +chmod 1777 /tmp + +if [ -n "$(command -v yum)" ]; then + yum update -y + yum clean all +elif [ -n "$(command -v apt-get)" ]; then + # Cleanup more packages + apt-get -y remove --purge \ + automake \ + autoconf \ + autotools-dev \ + cmake-data \ + cpp-9 \ + cpp-10 \ + gcc-9 \ + gcc-10 \ + git \ + git-man \ + ansible \ + libicu-dev \ + libcgal-dev \ + libgcc-9-dev \ + libgcc-8-dev \ + ansible \ + snapd + + add-apt-repository --yes --remove ppa:ansible/ansible + + source /etc/os-release + + apt-get -y update + apt-get -y upgrade + apt-get -y autoremove + apt-get -y autoclean +fi +rm -rf /tmp/* /var/tmp/* +history -c +cat /dev/null > /root/.bash_history +unset HISTFILE +find /var/log -mtime -1 -type f -exec truncate -s 0 {} \; +rm -rf /var/log/*.gz /var/log/*.[0-9] /var/log/*-???????? +rm -rf /var/lib/cloud/instances/* +rm -f /root/.ssh/authorized_keys /etc/ssh/*key* +touch /etc/ssh/revoked_keys +chmod 600 /etc/ssh/revoked_keys + +cat /dev/null > /var/log/lastlog +cat /dev/null > /var/log/wtmp diff --git a/scripts/90-cleanup.sh b/scripts/90-cleanup.sh index f2e1968..644e5f7 100644 --- a/scripts/90-cleanup.sh +++ b/scripts/90-cleanup.sh @@ -24,10 +24,8 @@ elif [ -n "$(command -v apt-get)" ]; then autoconf \ autotools-dev \ cmake-data \ - cpp-8 \ cpp-9 \ cpp-10 \ - gcc-8 \ gcc-9 \ gcc-10 \ git \ @@ -36,14 +34,12 @@ elif [ -n "$(command -v apt-get)" ]; then libicu-dev \ libcgal-dev \ libgcc-9-dev \ - libgcc-8-dev \ ansible add-apt-repository --yes --remove ppa:ansible/ansible source /etc/os-release - apt-get -y remove --purge linux-headers-5.11.0-1021-aws - + apt-get -y update apt-get -y upgrade apt-get -y autoremove diff --git a/scripts/99-img_check.sh b/scripts/99-img_check.sh index 00b5476..ac958a5 100755 --- a/scripts/99-img_check.sh +++ b/scripts/99-img_check.sh @@ -569,7 +569,7 @@ osv=0 if [[ $OS == "Ubuntu" ]]; then ost=1 - if [[ $VER == "20.04" ]]; then + if [[ $VER == "24.04" ]]; then osv=1 elif [[ $VER == "18.04" ]]; then osv=1 
diff --git a/scripts/nix-provision.sh b/scripts/nix-provision.sh index 5ed678d..6515eca 100644 --- a/scripts/nix-provision.sh +++ b/scripts/nix-provision.sh @@ -6,11 +6,18 @@ set -o pipefail set -o xtrace function install_packages { - # Setup Ansible on host VM - sudo apt-get update && sudo apt-get install software-properties-common -y - sudo add-apt-repository --yes --update ppa:ansible/ansible && sudo apt-get install ansible -y - ansible-galaxy collection install community.general + # Setup Ansible on host VM + sudo apt-get update && sudo apt-get install -y software-properties-common + # Manually add GPG key with explicit keyserver + sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 93C4A3FD7BB9C367 + + # Add repository and install + sudo add-apt-repository --yes ppa:ansible/ansible + sudo apt-get update + sudo apt-get install -y ansible + + ansible-galaxy collection install community.general } @@ -25,16 +32,23 @@ function install_nix() { function execute_stage2_playbook { + echo "POSTGRES_MAJOR_VERSION: ${POSTGRES_MAJOR_VERSION}" + echo "GIT_SHA: ${GIT_SHA}" sudo tee /etc/ansible/ansible.cfg < str: - inspect_results = docker_client.api.inspect_container(container.name) - return inspect_results["State"]["Health"]["Status"] - - attempts = 0 - - # containers might appear healthy but crash during bootstrap - sleep(3) - - while True: - health = get_health(container) - if health == "healthy": - break - if attempts > 60 or health == "exited": - # print container logs for debugging - print(container.logs().decode("utf-8")) - - # write logs to file to be displayed in GHA output - with open("testinfra-aio-container-logs.log", "w") as f: - f.write(container.logs().decode("utf-8")) - - raise TimeoutError("Container failed to become healthy.") - attempts += 1 - sleep(1) - - # return a testinfra connection to the container - yield testinfra.get_host("docker://" + cast(str, container.name)) - - # at the end of the test suite, destroy the container - 
container.remove(v=True, force=True) - - -@pytest.mark.parametrize("service_name", [ - 'adminapi', - 'lsn-checkpoint-push', - 'pg_egress_collect', - 'postgresql', - 'logrotate', - 'supa-shutdown', - 'services:kong', - 'services:postgrest', - 'services:gotrue', -]) -def test_service_is_running(host, service_name): - assert host.supervisor(service_name).is_running - - -def test_postgrest_responds_to_requests(): - res = requests.get( - "http://localhost:8000/rest/v1/", - headers={ - "apikey": all_in_one_envs["ANON_KEY"], - "authorization": f"Bearer {all_in_one_envs['ANON_KEY']}", - }, - ) - assert res.ok - - -def test_postgrest_can_connect_to_db(): - res = requests.get( - "http://localhost:8000/rest/v1/buckets", - headers={ - "apikey": all_in_one_envs["SERVICE_ROLE_KEY"], - "authorization": f"Bearer {all_in_one_envs['SERVICE_ROLE_KEY']}", - "accept-profile": "storage", - }, - ) - assert res.ok diff --git a/testinfra/test_ami.py b/testinfra/test_ami.py deleted file mode 100644 index 314daa9..0000000 --- a/testinfra/test_ami.py +++ /dev/null @@ -1,439 +0,0 @@ -import base64 -import boto3 -import gzip -import logging -import os -import pytest -import requests -import socket -import testinfra -from ec2instanceconnectcli.EC2InstanceConnectLogger import EC2InstanceConnectLogger -from ec2instanceconnectcli.EC2InstanceConnectKey import EC2InstanceConnectKey -from time import sleep - -# if GITHUB_RUN_ID is not set, use a default value that includes the user and hostname -RUN_ID = os.environ.get("GITHUB_RUN_ID", "unknown-ci-run-" + os.environ.get("USER", "unknown-user") + '@' + socket.gethostname()) - -postgresql_schema_sql_content = """ -ALTER DATABASE postgres SET "app.settings.jwt_secret" TO 'my_jwt_secret_which_is_not_so_secret'; -ALTER DATABASE postgres SET "app.settings.jwt_exp" TO 3600; - -ALTER USER tealbase_admin WITH PASSWORD 'postgres'; -ALTER USER postgres WITH PASSWORD 'postgres'; -ALTER USER authenticator WITH PASSWORD 'postgres'; -ALTER USER pgbouncer WITH 
PASSWORD 'postgres'; -ALTER USER tealbase_auth_admin WITH PASSWORD 'postgres'; -ALTER USER tealbase_storage_admin WITH PASSWORD 'postgres'; -ALTER USER tealbase_replication_admin WITH PASSWORD 'postgres'; -ALTER ROLE tealbase_read_only_user WITH PASSWORD 'postgres'; -ALTER ROLE tealbase_admin SET search_path TO "$user",public,auth,extensions; -""" -realtime_env_content = "" -adminapi_yaml_content = """ -port: 8085 -host: 0.0.0.0 -ref: aaaaaaaaaaaaaaaaaaaa -jwt_secret: my_jwt_secret_which_is_not_so_secret -metric_collectors: - - filesystem - - meminfo - - netdev - - loadavg - - cpu - - diskstats - - vmstat -node_exporter_additional_args: - - '--collector.filesystem.ignored-mount-points=^/(boot|sys|dev|run).*' - - '--collector.netdev.device-exclude=lo' -cert_path: /etc/ssl/adminapi/server.crt -key_path: /etc/ssl/adminapi/server.key -upstream_metrics_refresh_duration: 60s -pgbouncer_endpoints: - - 'postgres://pgbouncer:postgres@localhost:6543/pgbouncer' -fail2ban_socket: /var/run/fail2ban/fail2ban.sock -upstream_metrics_sources: - - - name: system - url: 'https://localhost:8085/metrics' - labels_to_attach: [{name: tealbase_project_ref, value: aaaaaaaaaaaaaaaaaaaa}, {name: service_type, value: db}] - skip_tls_verify: true - - - name: postgresql - url: 'http://localhost:9187/metrics' - labels_to_attach: [{name: tealbase_project_ref, value: aaaaaaaaaaaaaaaaaaaa}, {name: service_type, value: postgresql}] - - - name: gotrue - url: 'http://localhost:9122/metrics' - labels_to_attach: [{name: tealbase_project_ref, value: aaaaaaaaaaaaaaaaaaaa}, {name: service_type, value: gotrue}] -monitoring: - disk_usage: - enabled: true -firewall: - enabled: true - internal_ports: - - 9187 - - 8085 - - 9122 - privileged_ports: - - 22 - privileged_ports_allowlist: - - 0.0.0.0/0 - filtered_ports: - - 5432 - - 6543 - unfiltered_ports: - - 80 - - 443 - managed_rules_file: /etc/nftables/tealbase_managed.conf -pg_egress_collect_path: /tmp/pg_egress_collect.txt -aws_config: - creds: - enabled: 
false - check_frequency: 1h - refresh_buffer_duration: 6h -""" -pgsodium_root_key_content = ( - "0000000000000000000000000000000000000000000000000000000000000000" -) -postgrest_base_conf_content = """ -db-uri = "postgres://authenticator:postgres@localhost:5432/postgres?application_name=postgrest" -db-schema = "public, storage, graphql_public" -db-anon-role = "anon" -jwt-secret = "my_jwt_secret_which_is_not_so_secret" -role-claim-key = ".role" -openapi-mode = "ignore-privileges" -db-use-legacy-gucs = true -admin-server-port = 3001 -server-host = "*6" -db-pool-acquisition-timeout = 10 -max-rows = 1000 -db-extra-search-path = "public, extensions" -""" -gotrue_env_content = """ -API_EXTERNAL_URL=http://localhost -GOTRUE_API_HOST=0.0.0.0 -GOTRUE_SITE_URL= -GOTRUE_DB_DRIVER=postgres -GOTRUE_DB_DATABASE_URL=postgres://tealbase_auth_admin@localhost/postgres?sslmode=disable -GOTRUE_JWT_ADMIN_ROLES=tealbase_admin,service_role -GOTRUE_JWT_AUD=authenticated -GOTRUE_JWT_SECRET=my_jwt_secret_which_is_not_so_secret -""" -walg_config_json_content = """ -{ - "AWS_REGION": "ap-southeast-1", - "WALG_S3_PREFIX": "", - "PGDATABASE": "postgres", - "PGUSER": "tealbase_admin", - "PGPORT": 5432, - "WALG_DELTA_MAX_STEPS": 6, - "WALG_COMPRESSION_METHOD": "lz4" -} -""" -anon_key = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6ImFhYWFhYWFhYWFhYWFhYWFhYWFhIiwicm9sZSI6ImFub24iLCJpYXQiOjE2OTYyMjQ5NjYsImV4cCI6MjAxMTgwMDk2Nn0.QW95aRPA-4QuLzuvaIeeoFKlJP9J2hvAIpJ3WJ6G5zo" -service_role_key = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6ImFhYWFhYWFhYWFhYWFhYWFhYWFhIiwicm9sZSI6InNlcnZpY2Vfcm9sZSIsImlhdCI6MTY5NjIyNDk2NiwiZXhwIjoyMDExODAwOTY2fQ.Om7yqv15gC3mLGitBmvFRB3M4IsLsX9fXzTQnFM7lu0" -tealbase_admin_key = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6ImFhYWFhYWFhYWFhYWFhYWFhYWFhIiwicm9sZSI6InN1cGFiYXNlX2FkbWluIiwiaWF0IjoxNjk2MjI0OTY2LCJleHAiOjIwMTE4MDA5NjZ9.jrD3j2rBWiIx0vhVZzd1CXFv7qkAP392nBMadvXxk1c" -init_json_content = f""" 
-{{ - "jwt_secret": "my_jwt_secret_which_is_not_so_secret", - "project_ref": "aaaaaaaaaaaaaaaaaaaa", - "logflare_api_key": "", - "logflare_pitr_errors_source": "", - "logflare_postgrest_source": "", - "logflare_pgbouncer_source": "", - "logflare_db_source": "", - "logflare_gotrue_source": "", - "anon_key": "{anon_key}", - "service_key": "{service_role_key}", - "tealbase_admin_key": "{tealbase_admin_key}", - "common_name": "db.aaaaaaaaaaaaaaaaaaaa.tealbase.red", - "region": "ap-southeast-1", - "init_database_only": false -}} -""" - -logger = logging.getLogger("ami-tests") -handler = logging.StreamHandler() -formatter = logging.Formatter( - '%(asctime)s %(name)-12s %(levelname)-8s %(message)s') -handler.setFormatter(formatter) -logger.addHandler(handler) -logger.setLevel(logging.DEBUG) - -# scope='session' uses the same container for all the tests; -# scope='function' uses a new container per test function. -@pytest.fixture(scope="session") -def host(): - ec2 = boto3.resource("ec2", region_name="ap-southeast-1") - images = list( - ec2.images.filter( - Filters=[{"Name": "name", "Values": ["tealbase-postgres-ci-ami-test"]}] - ) - ) - assert len(images) == 1 - image = images[0] - - def gzip_then_base64_encode(s: str) -> str: - return base64.b64encode(gzip.compress(s.encode())).decode() - - instance = list( - ec2.create_instances( - BlockDeviceMappings=[ - { - "DeviceName": "/dev/sda1", - "Ebs": { - "VolumeSize": 8, # gb - "Encrypted": True, - "DeleteOnTermination": True, - "VolumeType": "gp3", - }, - }, - ], - MetadataOptions={ - "HttpTokens": "required", - "HttpEndpoint": "enabled", - }, - IamInstanceProfile={"Name": "pg-ap-southeast-1"}, - InstanceType="t4g.micro", - MinCount=1, - MaxCount=1, - ImageId=image.id, - NetworkInterfaces=[ - { - "DeviceIndex": 0, - "AssociatePublicIpAddress": True, - "Groups": ["sg-0a883ca614ebfbae0", "sg-014d326be5a1627dc"], - } - ], - UserData=f"""#cloud-config -hostname: db-aaaaaaaaaaaaaaaaaaaa -write_files: - - {{path: 
/etc/postgresql.schema.sql, content: {gzip_then_base64_encode(postgresql_schema_sql_content)}, permissions: '0600', encoding: gz+b64}} - - {{path: /etc/realtime.env, content: {gzip_then_base64_encode(realtime_env_content)}, permissions: '0664', encoding: gz+b64}} - - {{path: /etc/adminapi/adminapi.yaml, content: {gzip_then_base64_encode(adminapi_yaml_content)}, permissions: '0600', owner: 'adminapi:root', encoding: gz+b64}} - - {{path: /etc/postgresql-custom/pgsodium_root.key, content: {gzip_then_base64_encode(pgsodium_root_key_content)}, permissions: '0600', owner: 'postgres:postgres', encoding: gz+b64}} - - {{path: /etc/postgrest/base.conf, content: {gzip_then_base64_encode(postgrest_base_conf_content)}, permissions: '0664', encoding: gz+b64}} - - {{path: /etc/gotrue.env, content: {gzip_then_base64_encode(gotrue_env_content)}, permissions: '0664', encoding: gz+b64}} - - {{path: /etc/wal-g/config.json, content: {gzip_then_base64_encode(walg_config_json_content)}, permissions: '0664', owner: 'wal-g:wal-g', encoding: gz+b64}} - - {{path: /tmp/init.json, content: {gzip_then_base64_encode(init_json_content)}, permissions: '0600', encoding: gz+b64}} -runcmd: - - 'sudo echo \"pgbouncer\" \"postgres\" >> /etc/pgbouncer/userlist.txt' - - 'cd /tmp && aws s3 cp --region ap-southeast-1 s3://init-scripts-staging/project/init.sh .' 
- - 'bash init.sh "staging"' - - 'rm -rf /tmp/*' -""", - TagSpecifications=[ - { - "ResourceType": "instance", - "Tags": [ - {"Key": "Name", "Value": "ci-ami-test"}, - {"Key": "creator", "Value": "testinfra-ci"}, - {"Key": "testinfra-run-id", "Value": RUN_ID} - ], - } - ], - ) - )[0] - instance.wait_until_running() - - ec2logger = EC2InstanceConnectLogger(debug=False) - temp_key = EC2InstanceConnectKey(ec2logger.get_logger()) - ec2ic = boto3.client("ec2-instance-connect", region_name="ap-southeast-1") - response = ec2ic.send_ssh_public_key( - InstanceId=instance.id, - InstanceOSUser="ubuntu", - SSHPublicKey=temp_key.get_pub_key(), - ) - assert response["Success"] - - # instance doesn't have public ip yet - while not instance.public_ip_address: - logger.warning("waiting for ip to be available") - sleep(5) - instance.reload() - - while True: - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - if sock.connect_ex((instance.public_ip_address, 22)) == 0: - break - else: - logger.warning("waiting for ssh to be available") - sleep(10) - - host = testinfra.get_host( - # paramiko is an ssh backend - f"paramiko://ubuntu@{instance.public_ip_address}?timeout=60", - ssh_identity_file=temp_key.get_priv_key_file(), - ) - - def is_healthy(host) -> bool: - cmd = host.run("pg_isready -U postgres") - if cmd.failed is True: - logger.warning("pg not ready") - return False - - cmd = host.run(f"curl -sf -k --connect-timeout 30 --max-time 60 https://localhost:8085/health -H 'apikey: {tealbase_admin_key}'") - if cmd.failed is True: - logger.warning("adminapi not ready") - return False - - cmd = host.run("curl -sf --connect-timeout 30 --max-time 60 http://localhost:3001/ready") - if cmd.failed is True: - logger.warning("postgrest not ready") - return False - - cmd = host.run("curl -sf --connect-timeout 30 --max-time 60 http://localhost:8081/health") - if cmd.failed is True: - logger.warning("gotrue not ready") - return False - - # TODO(thebengeu): switch to checking Envoy once it's 
the default. - cmd = host.run("sudo kong health") - if cmd.failed is True: - logger.warning("kong not ready") - return False - - cmd = host.run("sudo fail2ban-client status") - if cmd.failed is True: - logger.warning("fail2ban not ready") - return False - - return True - - while True: - if is_healthy(host): - break - sleep(1) - - # return a testinfra connection to the instance - yield host - - # at the end of the test suite, destroy the instance - instance.terminate() - - -def test_postgrest_is_running(host): - postgrest = host.service("postgrest") - assert postgrest.is_running - - -def test_postgrest_responds_to_requests(host): - res = requests.get( - f"http://{host.backend.get_hostname()}/rest/v1/", - headers={ - "apikey": anon_key, - "authorization": f"Bearer {anon_key}", - }, - ) - assert res.ok - - -def test_postgrest_can_connect_to_db(host): - res = requests.get( - f"http://{host.backend.get_hostname()}/rest/v1/buckets", - headers={ - "apikey": service_role_key, - "authorization": f"Bearer {service_role_key}", - "accept-profile": "storage", - }, - ) - assert res.ok - - -# There would be an error if the `apikey` query parameter isn't removed, -# since PostgREST treats query parameters as conditions. -# -# Worth testing since remove_apikey_query_parameters uses regexp instead -# of parsed query parameters. 
-def test_postgrest_starting_apikey_query_parameter_is_removed(host): - res = requests.get( - f"http://{host.backend.get_hostname()}/rest/v1/buckets", - headers={ - "accept-profile": "storage", - }, - params={ - "apikey": service_role_key, - "id": "eq.absent", - "name": "eq.absent", - }, - ) - assert res.ok - - -def test_postgrest_middle_apikey_query_parameter_is_removed(host): - res = requests.get( - f"http://{host.backend.get_hostname()}/rest/v1/buckets", - headers={ - "accept-profile": "storage", - }, - params={ - "id": "eq.absent", - "apikey": service_role_key, - "name": "eq.absent", - }, - ) - assert res.ok - - -def test_postgrest_ending_apikey_query_parameter_is_removed(host): - res = requests.get( - f"http://{host.backend.get_hostname()}/rest/v1/buckets", - headers={ - "accept-profile": "storage", - }, - params={ - "id": "eq.absent", - "name": "eq.absent", - "apikey": service_role_key, - }, - ) - assert res.ok - -# There would be an error if the empty key query parameter isn't removed, -# since PostgREST treats empty key query parameters as malformed input. -# -# Worth testing since remove_apikey_and_empty_key_query_parameters uses regexp instead -# of parsed query parameters. 
-def test_postgrest_starting_empty_key_query_parameter_is_removed(host): - res = requests.get( - f"http://{host.backend.get_hostname()}/rest/v1/buckets", - headers={ - "accept-profile": "storage", - }, - params={ - "": "empty_key", - "id": "eq.absent", - "apikey": service_role_key, - }, - ) - assert res.ok - - -def test_postgrest_middle_empty_key_query_parameter_is_removed(host): - res = requests.get( - f"http://{host.backend.get_hostname()}/rest/v1/buckets", - headers={ - "accept-profile": "storage", - }, - params={ - "apikey": service_role_key, - "": "empty_key", - "id": "eq.absent", - }, - ) - assert res.ok - - -def test_postgrest_ending_empty_key_query_parameter_is_removed(host): - res = requests.get( - f"http://{host.backend.get_hostname()}/rest/v1/buckets", - headers={ - "accept-profile": "storage", - }, - params={ - "id": "eq.absent", - "apikey": service_role_key, - "": "empty_key", - }, - ) - assert res.ok diff --git a/testinfra/test_ami_nix.py b/testinfra/test_ami_nix.py index fe4be1a..b100f5d 100644 --- a/testinfra/test_ami_nix.py +++ b/testinfra/test_ami_nix.py @@ -6,14 +6,21 @@ import pytest import requests import socket -import testinfra from ec2instanceconnectcli.EC2InstanceConnectLogger import EC2InstanceConnectLogger from ec2instanceconnectcli.EC2InstanceConnectKey import EC2InstanceConnectKey from time import sleep +import subprocess +import paramiko # if GITHUB_RUN_ID is not set, use a default value that includes the user and hostname -RUN_ID = os.environ.get("GITHUB_RUN_ID", "unknown-ci-run-" + os.environ.get("USER", "unknown-user") + '@' + socket.gethostname()) -AMI_NAME = os.environ.get('AMI_NAME') +RUN_ID = os.environ.get( + "GITHUB_RUN_ID", + "unknown-ci-run-" + + os.environ.get("USER", "unknown-user") + + "@" + + socket.gethostname(), +) +AMI_NAME = os.environ.get("AMI_NAME") postgresql_schema_sql_content = """ ALTER DATABASE postgres SET "app.settings.jwt_secret" TO 'my_jwt_secret_which_is_not_so_secret'; ALTER DATABASE postgres SET 
"app.settings.jwt_exp" TO 3600; @@ -65,6 +72,10 @@ name: gotrue url: 'http://localhost:9122/metrics' labels_to_attach: [{name: tealbase_project_ref, value: aaaaaaaaaaaaaaaaaaaa}, {name: service_type, value: gotrue}] + - + name: postgrest + url: 'http://localhost:3001/metrics' + labels_to_attach: [{name: tealbase_project_ref, value: aaaaaaaaaaaaaaaaaaaa}, {name: service_type, value: postgrest}] monitoring: disk_usage: enabled: true @@ -154,12 +165,57 @@ logger = logging.getLogger("ami-tests") handler = logging.StreamHandler() -formatter = logging.Formatter( - '%(asctime)s %(name)-12s %(levelname)-8s %(message)s') +formatter = logging.Formatter("%(asctime)s %(name)-12s %(levelname)-8s %(message)s") handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.DEBUG) + +def get_ssh_connection(instance_ip, ssh_identity_file, max_retries=10): + """Create and return a single SSH connection that can be reused.""" + for attempt in range(max_retries): + try: + # Create SSH client + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + + # Connect with our working parameters + ssh.connect( + hostname=instance_ip, + username='ubuntu', + key_filename=ssh_identity_file, + timeout=10, + banner_timeout=10 + ) + + # Test the connection + stdin, stdout, stderr = ssh.exec_command('echo "SSH test"') + if stdout.channel.recv_exit_status() == 0 and "SSH test" in stdout.read().decode(): + logger.info("SSH connection established successfully") + return ssh + else: + raise Exception("SSH test command failed") + + except Exception as e: + if attempt == max_retries - 1: + raise + logger.warning( + f"Ssh connection failed, retrying: {attempt + 1}/{max_retries} failed, retrying ..." 
+ ) + sleep(5) + + +def run_ssh_command(ssh, command): + """Run a command over the established SSH connection.""" + stdin, stdout, stderr = ssh.exec_command(command) + exit_code = stdout.channel.recv_exit_status() + return { + 'succeeded': exit_code == 0, + 'stdout': stdout.read().decode(), + 'stderr': stderr.read().decode() + } + + # scope='session' uses the same container for all the tests; # scope='function' uses a new container per test function. @pytest.fixture(scope="session") @@ -220,6 +276,7 @@ def gzip_then_base64_encode(s: str) -> str: - 'sudo echo \"pgbouncer\" \"postgres\" >> /etc/pgbouncer/userlist.txt' - 'cd /tmp && aws s3 cp --region ap-southeast-1 s3://init-scripts-staging/project/init.sh .' - 'bash init.sh "staging"' + - 'touch /var/lib/init-complete' - 'rm -rf /tmp/*' """, TagSpecifications=[ @@ -228,7 +285,7 @@ def gzip_then_base64_encode(s: str) -> str: "Tags": [ {"Key": "Name", "Value": "ci-ami-test-nix"}, {"Key": "creator", "Value": "testinfra-ci"}, - {"Key": "testinfra-run-id", "Value": RUN_ID} + {"Key": "testinfra-run-id", "Value": RUN_ID}, ], } ], @@ -236,6 +293,9 @@ def gzip_then_base64_encode(s: str) -> str: )[0] instance.wait_until_running() + # Increase wait time before starting health checks + sleep(30) # Wait for 30 seconds to allow services to start + ec2logger = EC2InstanceConnectLogger(debug=False) temp_key = EC2InstanceConnectKey(ec2logger.get_logger()) ec2ic = boto3.client("ec2-instance-connect", region_name="ap-southeast-1") @@ -246,80 +306,95 @@ def gzip_then_base64_encode(s: str) -> str: ) assert response["Success"] - # instance doesn't have public ip yet + # Wait for instance to have public IP while not instance.public_ip_address: logger.warning("waiting for ip to be available") sleep(5) instance.reload() - while True: - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - if sock.connect_ex((instance.public_ip_address, 22)) == 0: - break - else: - logger.warning("waiting for ssh to be available") - sleep(10) - - host = 
testinfra.get_host( - # paramiko is an ssh backend - f"paramiko://ubuntu@{instance.public_ip_address}?timeout=60", - ssh_identity_file=temp_key.get_priv_key_file(), + # Create single SSH connection + ssh = get_ssh_connection( + instance.public_ip_address, + temp_key.get_priv_key_file(), ) - def is_healthy(host) -> bool: - cmd = host.run("sudo -u postgres /usr/bin/pg_isready -U postgres") - if cmd.failed is True: - logger.warning("pg not ready") - return False - - cmd = host.run(f"curl -sf -k --connect-timeout 30 --max-time 60 https://localhost:8085/health -H 'apikey: {tealbase_admin_key}'") - if cmd.failed is True: - logger.warning("adminapi not ready") - return False - - cmd = host.run("curl -sf --connect-timeout 30 --max-time 60 http://localhost:3001/ready") - if cmd.failed is True: - logger.warning("postgrest not ready") - return False - - cmd = host.run("curl -sf --connect-timeout 30 --max-time 60 http://localhost:8081/health") - if cmd.failed is True: - logger.warning("gotrue not ready") - return False - - # TODO(thebengeu): switch to checking Envoy once it's the default. 
- cmd = host.run("sudo kong health") - if cmd.failed is True: - logger.warning("kong not ready") - return False - - cmd = host.run("sudo fail2ban-client status") - if cmd.failed is True: - logger.warning("fail2ban not ready") - return False + # Check PostgreSQL data directory + logger.info("Checking PostgreSQL data directory...") + result = run_ssh_command(ssh, "ls -la /var/lib/postgresql") + if result['succeeded']: + logger.info("PostgreSQL data directory contents:\n" + result['stdout']) + else: + logger.warning("Failed to list PostgreSQL data directory: " + result['stderr']) + + # Wait for init.sh to complete + logger.info("Waiting for init.sh to complete...") + max_attempts = 60 # 5 minutes + attempt = 0 + while attempt < max_attempts: + try: + result = run_ssh_command(ssh, "test -f /var/lib/init-complete") + if result['succeeded']: + logger.info("init.sh has completed") + break + except Exception as e: + logger.warning(f"Error checking init.sh status: {str(e)}") + + attempt += 1 + logger.warning(f"Waiting for init.sh to complete (attempt {attempt}/{max_attempts})") + sleep(5) + + if attempt >= max_attempts: + logger.error("init.sh failed to complete within the timeout period") + instance.terminate() + raise TimeoutError("init.sh failed to complete within the timeout period") + + def is_healthy(ssh) -> bool: + health_checks = [ + ("postgres", "sudo -u postgres /usr/bin/pg_isready -U postgres"), + ("adminapi", f"curl -sf -k --connect-timeout 30 --max-time 60 https://localhost:8085/health -H 'apikey: {tealbase_admin_key}'"), + ("postgrest", "curl -sf --connect-timeout 30 --max-time 60 http://localhost:3001/ready"), + ("gotrue", "curl -sf --connect-timeout 30 --max-time 60 http://localhost:8081/health"), + ("kong", "sudo kong health"), + ("fail2ban", "sudo fail2ban-client status"), + ] + + for service, command in health_checks: + try: + result = run_ssh_command(ssh, command) + if not result['succeeded']: + logger.warning(f"{service} not ready") + return False + 
except Exception: + logger.warning(f"Connection failed during {service} check") + return False return True while True: - if is_healthy(host): + if is_healthy(ssh): break sleep(1) - # return a testinfra connection to the instance - yield host + # Return both the SSH connection and instance IP for use in tests + yield { + 'ssh': ssh, + 'ip': instance.public_ip_address + } # at the end of the test suite, destroy the instance instance.terminate() def test_postgrest_is_running(host): - postgrest = host.service("postgrest") - assert postgrest.is_running + """Check if postgrest service is running using our SSH connection.""" + result = run_ssh_command(host['ssh'], "systemctl is-active postgrest") + assert result['succeeded'] and result['stdout'].strip() == 'active', "PostgREST service is not running" def test_postgrest_responds_to_requests(host): + """Test if PostgREST responds to requests.""" res = requests.get( - f"http://{host.backend.get_hostname()}/rest/v1/", + f"http://{host['ip']}/rest/v1/", headers={ "apikey": anon_key, "authorization": f"Bearer {anon_key}", @@ -329,8 +404,9 @@ def test_postgrest_responds_to_requests(host): def test_postgrest_can_connect_to_db(host): + """Test if PostgREST can connect to the database.""" res = requests.get( - f"http://{host.backend.get_hostname()}/rest/v1/buckets", + f"http://{host['ip']}/rest/v1/buckets", headers={ "apikey": service_role_key, "authorization": f"Bearer {service_role_key}", @@ -340,14 +416,10 @@ def test_postgrest_can_connect_to_db(host): assert res.ok -# There would be an error if the `apikey` query parameter isn't removed, -# since PostgREST treats query parameters as conditions. -# -# Worth testing since remove_apikey_query_parameters uses regexp instead -# of parsed query parameters. 
def test_postgrest_starting_apikey_query_parameter_is_removed(host): + """Test if PostgREST removes apikey query parameter at start.""" res = requests.get( - f"http://{host.backend.get_hostname()}/rest/v1/buckets", + f"http://{host['ip']}/rest/v1/buckets", headers={ "accept-profile": "storage", }, @@ -361,8 +433,9 @@ def test_postgrest_starting_apikey_query_parameter_is_removed(host): def test_postgrest_middle_apikey_query_parameter_is_removed(host): + """Test if PostgREST removes apikey query parameter in middle.""" res = requests.get( - f"http://{host.backend.get_hostname()}/rest/v1/buckets", + f"http://{host['ip']}/rest/v1/buckets", headers={ "accept-profile": "storage", }, @@ -376,8 +449,9 @@ def test_postgrest_middle_apikey_query_parameter_is_removed(host): def test_postgrest_ending_apikey_query_parameter_is_removed(host): + """Test if PostgREST removes apikey query parameter at end.""" res = requests.get( - f"http://{host.backend.get_hostname()}/rest/v1/buckets", + f"http://{host['ip']}/rest/v1/buckets", headers={ "accept-profile": "storage", }, @@ -389,14 +463,11 @@ def test_postgrest_ending_apikey_query_parameter_is_removed(host): ) assert res.ok -# There would be an error if the empty key query parameter isn't removed, -# since PostgREST treats empty key query parameters as malformed input. -# -# Worth testing since remove_apikey_and_empty_key_query_parameters uses regexp instead -# of parsed query parameters. 
+ def test_postgrest_starting_empty_key_query_parameter_is_removed(host): + """Test if PostgREST removes empty key query parameter at start.""" res = requests.get( - f"http://{host.backend.get_hostname()}/rest/v1/buckets", + f"http://{host['ip']}/rest/v1/buckets", headers={ "accept-profile": "storage", }, @@ -410,8 +481,9 @@ def test_postgrest_starting_empty_key_query_parameter_is_removed(host): def test_postgrest_middle_empty_key_query_parameter_is_removed(host): + """Test if PostgREST removes empty key query parameter in middle.""" res = requests.get( - f"http://{host.backend.get_hostname()}/rest/v1/buckets", + f"http://{host['ip']}/rest/v1/buckets", headers={ "accept-profile": "storage", }, @@ -425,8 +497,9 @@ def test_postgrest_middle_empty_key_query_parameter_is_removed(host): def test_postgrest_ending_empty_key_query_parameter_is_removed(host): + """Test if PostgREST removes empty key query parameter at end.""" res = requests.get( - f"http://{host.backend.get_hostname()}/rest/v1/buckets", + f"http://{host['ip']}/rest/v1/buckets", headers={ "accept-profile": "storage", }, @@ -437,3 +510,233 @@ def test_postgrest_ending_empty_key_query_parameter_is_removed(host): }, ) assert res.ok + + +def test_postgresql_version(host): + """Print the PostgreSQL version being tested and ensure it's >= 14.""" + result = run_ssh_command(host['ssh'], "sudo -u postgres psql -c 'SELECT version();'") + if result['succeeded']: + print(f"\nPostgreSQL Version:\n{result['stdout']}") + # Extract version number from the output + version_line = result['stdout'].strip().split('\n')[2] # Skip header and get the actual version + # Extract major version number (e.g., "15.8" -> 15) + import re + version_match = re.search(r'PostgreSQL (\d+)\.', version_line) + if version_match: + major_version = int(version_match.group(1)) + print(f"PostgreSQL major version: {major_version}") + assert major_version >= 14, f"PostgreSQL version {major_version} is less than 14" + else: + assert False, "Could not 
parse PostgreSQL version number" + else: + print(f"\nFailed to get PostgreSQL version: {result['stderr']}") + assert False, "Failed to get PostgreSQL version" + + # Also get the version from the command line + result = run_ssh_command(host['ssh'], "sudo -u postgres psql --version") + if result['succeeded']: + print(f"PostgreSQL Client Version: {result['stdout'].strip()}") + else: + print(f"Failed to get PostgreSQL client version: {result['stderr']}") + + print("✓ PostgreSQL version is >= 14") + + +def test_libpq5_version(host): + """Print the libpq5 version installed and ensure it's >= 14.""" + # Try different package managers to find libpq5 + result = run_ssh_command(host['ssh'], "dpkg -l | grep libpq5 || true") + if result['succeeded'] and result['stdout'].strip(): + print(f"\nlibpq5 package info:\n{result['stdout']}") + # Extract version from dpkg output (format: ii libpq5:arm64 17.5-1.pgdg20.04+1) + import re + version_match = re.search(r'libpq5[^ ]* +(\d+)\.', result['stdout']) + if version_match: + major_version = int(version_match.group(1)) + print(f"libpq5 major version: {major_version}") + assert major_version >= 14, f"libpq5 version {major_version} is less than 14" + else: + print("Could not parse libpq5 version from dpkg output") + else: + print("\nlibpq5 not found via dpkg") + + # Also try to find libpq.so files + result = run_ssh_command(host['ssh'], "find /usr -name '*libpq*' -type f 2>/dev/null | head -10") + if result['succeeded'] and result['stdout'].strip(): + print(f"\nlibpq files found:\n{result['stdout']}") + else: + print("\nNo libpq files found") + + # Check if we can get version from a libpq file + result = run_ssh_command(host['ssh'], "ldd /usr/bin/psql | grep libpq || true") + if result['succeeded'] and result['stdout'].strip(): + print(f"\npsql libpq dependency:\n{result['stdout']}") + else: + print("\nCould not find libpq dependency for psql") + + # Try to get version from libpq directly + result = run_ssh_command(host['ssh'], "psql 
--version 2>&1 | head -1") + if result['succeeded'] and result['stdout'].strip(): + print(f"\npsql version output: {result['stdout'].strip()}") + # The psql version should match the libpq version + import re + version_match = re.search(r'psql \(PostgreSQL\) (\d+)\.', result['stdout']) + if version_match: + major_version = int(version_match.group(1)) + print(f"psql/libpq major version: {major_version}") + assert major_version >= 14, f"psql/libpq version {major_version} is less than 14" + else: + print("Could not parse psql version") + + print("✓ libpq5 version is >= 14") + + +def test_postgrest_read_only_session_attrs(host): + """Test PostgREST with target_session_attrs=read-only and check for session errors.""" + # First, check if PostgreSQL is configured for read-only mode + result = run_ssh_command(host['ssh'], "sudo -u postgres psql -c \"SHOW default_transaction_read_only;\"") + if result['succeeded']: + default_read_only = result['stdout'].strip() + print(f"PostgreSQL default_transaction_read_only: {default_read_only}") + else: + print("Could not check PostgreSQL read-only setting") + default_read_only = "unknown" + + # Check if PostgreSQL is in recovery mode (standby) + result = run_ssh_command(host['ssh'], "sudo -u postgres psql -c \"SELECT pg_is_in_recovery();\"") + if result['succeeded']: + in_recovery = result['stdout'].strip() + print(f"PostgreSQL pg_is_in_recovery: {in_recovery}") + else: + print("Could not check PostgreSQL recovery status") + in_recovery = "unknown" + + # Find PostgreSQL configuration file + result = run_ssh_command(host['ssh'], "sudo -u postgres psql -c \"SHOW config_file;\"") + if result['succeeded']: + config_file = result['stdout'].strip().split('\n')[2].strip() # Skip header and get the actual path + print(f"PostgreSQL config file: {config_file}") + else: + print("Could not find PostgreSQL config file") + config_file = "/etc/postgresql/15/main/postgresql.conf" # Default fallback + + # Backup PostgreSQL config + result = 
run_ssh_command(host['ssh'], f"sudo cp {config_file} {config_file}.backup") + assert result['succeeded'], "Failed to backup PostgreSQL config" + + # Add read-only setting to PostgreSQL config + result = run_ssh_command(host['ssh'], f"echo 'default_transaction_read_only = on' | sudo tee -a {config_file}") + assert result['succeeded'], "Failed to add read-only setting to PostgreSQL config" + + # Restart PostgreSQL to apply the new configuration + result = run_ssh_command(host['ssh'], "sudo systemctl restart postgresql") + assert result['succeeded'], "Failed to restart PostgreSQL" + + # Wait for PostgreSQL to start up + sleep(5) + + # Verify the change took effect + result = run_ssh_command(host['ssh'], "sudo -u postgres psql -c \"SHOW default_transaction_read_only;\"") + if result['succeeded']: + new_default_read_only = result['stdout'].strip() + print(f"PostgreSQL default_transaction_read_only after change: {new_default_read_only}") + else: + print("Could not verify PostgreSQL read-only setting change") + + # First, backup the current PostgREST config + result = run_ssh_command(host['ssh'], "sudo cp /etc/postgrest/base.conf /etc/postgrest/base.conf.backup") + assert result['succeeded'], "Failed to backup PostgREST config" + + try: + # Read the current config to get the db-uri + result = run_ssh_command(host['ssh'], "sudo cat /etc/postgrest/base.conf | grep '^db-uri'") + assert result['succeeded'], "Failed to read current db-uri" + + current_db_uri = result['stdout'].strip() + print(f"Current db-uri: {current_db_uri}") + + # Extract just the URI part (remove the db-uri = " prefix and trailing quote) + uri_start = current_db_uri.find('"') + 1 + uri_end = current_db_uri.rfind('"') + base_uri = current_db_uri[uri_start:uri_end] + + # Modify the URI to add target_session_attrs=read-only + if '?' 
in base_uri: + # URI already has parameters, add target_session_attrs + modified_uri = base_uri + "&target_session_attrs=read-only" + else: + # URI has no parameters, add target_session_attrs + modified_uri = base_uri + "?target_session_attrs=read-only" + + print(f"Modified URI: {modified_uri}") + + # Use awk to replace the db-uri line more reliably + result = run_ssh_command(host['ssh'], f"sudo awk '{{if ($1 == \"db-uri\") print \"db-uri = \\\"{modified_uri}\\\"\"; else print $0}}' /etc/postgrest/base.conf > /tmp/new_base.conf && sudo mv /tmp/new_base.conf /etc/postgrest/base.conf") + assert result['succeeded'], "Failed to update db-uri in config" + + # Verify the change was made correctly + result = run_ssh_command(host['ssh'], "sudo cat /etc/postgrest/base.conf | grep '^db-uri'") + print(f"Updated db-uri line: {result['stdout'].strip()}") + + # Also show the full config to debug + result = run_ssh_command(host['ssh'], "sudo cat /etc/postgrest/base.conf") + print(f"Full config after change:\n{result['stdout']}") + + # Restart PostgREST to apply the new configuration + result = run_ssh_command(host['ssh'], "sudo systemctl restart postgrest") + assert result['succeeded'], "Failed to restart PostgREST" + + # Wait a moment for PostgREST to start up + sleep(5) + + # Check if PostgREST is running + result = run_ssh_command(host['ssh'], "sudo systemctl is-active postgrest") + if not (result['succeeded'] and result['stdout'].strip() == 'active'): + # If PostgREST failed to start, check the logs to see why + log_result = run_ssh_command(host['ssh'], "sudo journalctl -u postgrest --since '5 seconds ago' --no-pager") + print(f"PostgREST failed to start. 
Recent logs:\n{log_result['stdout']}") + assert False, "PostgREST failed to start after config change" + + # Make a test request to trigger any potential session errors + try: + response = requests.get( + f"http://{host['ip']}/rest/v1/", + headers={"apikey": anon_key, "authorization": f"Bearer {anon_key}"}, + timeout=10 + ) + print(f"Test request status: {response.status_code}") + except Exception as e: + print(f"Test request failed: {str(e)}") + + # Check PostgREST logs for "session is not read-only" errors + result = run_ssh_command(host['ssh'], "sudo journalctl -u postgrest --since '5 seconds ago' | grep -i 'session is not read-only' || true") + + if result['stdout'].strip(): + print(f"\nFound 'session is not read-only' errors in PostgREST logs:\n{result['stdout']}") + assert False, "PostgREST logs contain 'session is not read-only' errors even though PostgreSQL is configured for read-only mode" + else: + print("\nNo 'session is not read-only' errors found in PostgREST logs") + + finally: + # Restore the original configuration + result = run_ssh_command(host['ssh'], "sudo cp /etc/postgrest/base.conf.backup /etc/postgrest/base.conf") + if result['succeeded']: + result = run_ssh_command(host['ssh'], "sudo systemctl restart postgrest") + if result['succeeded']: + print("Restored original PostgREST configuration") + else: + print("Warning: Failed to restart PostgREST after restoring config") + else: + print("Warning: Failed to restore original PostgREST configuration") + + # Restore PostgreSQL to original configuration + result = run_ssh_command(host['ssh'], f"sudo cp {config_file}.backup {config_file}") + if result['succeeded']: + result = run_ssh_command(host['ssh'], "sudo systemctl restart postgresql") + if result['succeeded']: + print("Restored PostgreSQL to original configuration") + else: + print("Warning: Failed to restart PostgreSQL after restoring config") + else: + print("Warning: Failed to restore PostgreSQL configuration") + diff --git 
a/tests/pg_upgrade/debug.sh b/tests/pg_upgrade/debug.sh index eff9dbb..d8a47e7 100755 --- a/tests/pg_upgrade/debug.sh +++ b/tests/pg_upgrade/debug.sh @@ -24,7 +24,7 @@ if [ ! -f "$LATEST_VERSION_SCRIPTS" ]; then fi if [ ! -f "$LATEST_VERSION_BIN" ]; then - aws s3 cp "s3://${ARTIFACTS_BUCKET_NAME}/upgrades/postgres/tealbase-postgres-${LATEST_PG_VERSION}/20.04.tar.gz" "$LATEST_VERSION_BIN" + aws s3 cp "s3://${ARTIFACTS_BUCKET_NAME}/upgrades/postgres/tealbase-postgres-${LATEST_PG_VERSION}/24.04.tar.gz" "$LATEST_VERSION_BIN" fi rm -rf scripts/pg_upgrade_scripts diff --git a/user-data-cloudimg b/user-data-cloudimg new file mode 100644 index 0000000..9a74d23 --- /dev/null +++ b/user-data-cloudimg @@ -0,0 +1,16 @@ +#cloud-config +users: + - name: root + lock_passwd: false + ssh_redirect_user: true + hashed_passwd: "$6$canonical.$0zWaW71A9ke9ASsaOcFTdQ2tx1gSmLxMPrsH0rF0Yb.2AEKNPV1lrF94n6YuPJmnUy2K2/JSDtxuiBDey6Lpa/" + - name: ubuntu + lock_passwd: false + hashed_passwd: "$6$canonical.$0zWaW71A9ke9ASsaOcFTdQ2tx1gSmLxMPrsH0rF0Yb.2AEKNPV1lrF94n6YuPJmnUy2K2/JSDtxuiBDey6Lpa/" + ssh_redired_user: false + sudo: "ALL=(ALL) NOPASSWD:ALL" + shell: /usr/bin/bash + groups: [adm, audio, cdrom, dialout, dip, floppy, lxd, netdev, plugdev, sudo, video] +ssh_pwauth: True +disable_root: false +preserve_hostname: true From da8eaa1f579354235b0a988d9c06ee93ec95763e Mon Sep 17 00:00:00 2001 From: Ekjot Singh Date: Mon, 28 Jul 2025 01:39:42 +0530 Subject: [PATCH 9/9] feat: 15.8.1.123 --- {aogithub => .github}/CODEOWNERS | 0 {aogithub => .github}/FUNDING.yml | 0 {aogithub => .github}/PULL_REQUEST_TEMPLATE/default.md | 0 {aogithub => .github}/PULL_REQUEST_TEMPLATE/extension_upgrade.md | 0 {aogithub => .github}/actions/shared-checkout/action.yml | 0 {aogithub => .github}/pull_request_template.md | 0 {aogithub => .github}/workflows/ami-release-nix-single.yml | 0 {aogithub => .github}/workflows/ami-release-nix.yml | 0 {aogithub => .github}/workflows/check-shellscripts.yml | 0 {aogithub => 
.github}/workflows/ci.yml | 0 {aogithub => .github}/workflows/dockerhub-release-matrix.yml | 0 {aogithub => .github}/workflows/manual-docker-release.yml | 0 {aogithub => .github}/workflows/mirror-postgrest.yml | 0 {aogithub => .github}/workflows/mirror.yml | 0 {aogithub => .github}/workflows/nix-build.yml | 0 {aogithub => .github}/workflows/publish-migrations-prod.yml | 0 {aogithub => .github}/workflows/publish-migrations-staging.yml | 0 .../workflows/publish-nix-pgupgrade-bin-flake-version.yml | 0 {aogithub => .github}/workflows/publish-nix-pgupgrade-scripts.yml | 0 {aogithub => .github}/workflows/qemu-image-build.yml | 0 {aogithub => .github}/workflows/test.yml | 0 {aogithub => .github}/workflows/testinfra-ami-build.yml | 0 22 files changed, 0 insertions(+), 0 deletions(-) rename {aogithub => .github}/CODEOWNERS (100%) rename {aogithub => .github}/FUNDING.yml (100%) rename {aogithub => .github}/PULL_REQUEST_TEMPLATE/default.md (100%) rename {aogithub => .github}/PULL_REQUEST_TEMPLATE/extension_upgrade.md (100%) rename {aogithub => .github}/actions/shared-checkout/action.yml (100%) rename {aogithub => .github}/pull_request_template.md (100%) rename {aogithub => .github}/workflows/ami-release-nix-single.yml (100%) rename {aogithub => .github}/workflows/ami-release-nix.yml (100%) rename {aogithub => .github}/workflows/check-shellscripts.yml (100%) rename {aogithub => .github}/workflows/ci.yml (100%) rename {aogithub => .github}/workflows/dockerhub-release-matrix.yml (100%) rename {aogithub => .github}/workflows/manual-docker-release.yml (100%) rename {aogithub => .github}/workflows/mirror-postgrest.yml (100%) rename {aogithub => .github}/workflows/mirror.yml (100%) rename {aogithub => .github}/workflows/nix-build.yml (100%) rename {aogithub => .github}/workflows/publish-migrations-prod.yml (100%) rename {aogithub => .github}/workflows/publish-migrations-staging.yml (100%) rename {aogithub => .github}/workflows/publish-nix-pgupgrade-bin-flake-version.yml (100%) 
rename {aogithub => .github}/workflows/publish-nix-pgupgrade-scripts.yml (100%) rename {aogithub => .github}/workflows/qemu-image-build.yml (100%) rename {aogithub => .github}/workflows/test.yml (100%) rename {aogithub => .github}/workflows/testinfra-ami-build.yml (100%) diff --git a/aogithub/CODEOWNERS b/.github/CODEOWNERS similarity index 100% rename from aogithub/CODEOWNERS rename to .github/CODEOWNERS diff --git a/aogithub/FUNDING.yml b/.github/FUNDING.yml similarity index 100% rename from aogithub/FUNDING.yml rename to .github/FUNDING.yml diff --git a/aogithub/PULL_REQUEST_TEMPLATE/default.md b/.github/PULL_REQUEST_TEMPLATE/default.md similarity index 100% rename from aogithub/PULL_REQUEST_TEMPLATE/default.md rename to .github/PULL_REQUEST_TEMPLATE/default.md diff --git a/aogithub/PULL_REQUEST_TEMPLATE/extension_upgrade.md b/.github/PULL_REQUEST_TEMPLATE/extension_upgrade.md similarity index 100% rename from aogithub/PULL_REQUEST_TEMPLATE/extension_upgrade.md rename to .github/PULL_REQUEST_TEMPLATE/extension_upgrade.md diff --git a/aogithub/actions/shared-checkout/action.yml b/.github/actions/shared-checkout/action.yml similarity index 100% rename from aogithub/actions/shared-checkout/action.yml rename to .github/actions/shared-checkout/action.yml diff --git a/aogithub/pull_request_template.md b/.github/pull_request_template.md similarity index 100% rename from aogithub/pull_request_template.md rename to .github/pull_request_template.md diff --git a/aogithub/workflows/ami-release-nix-single.yml b/.github/workflows/ami-release-nix-single.yml similarity index 100% rename from aogithub/workflows/ami-release-nix-single.yml rename to .github/workflows/ami-release-nix-single.yml diff --git a/aogithub/workflows/ami-release-nix.yml b/.github/workflows/ami-release-nix.yml similarity index 100% rename from aogithub/workflows/ami-release-nix.yml rename to .github/workflows/ami-release-nix.yml diff --git a/aogithub/workflows/check-shellscripts.yml 
b/.github/workflows/check-shellscripts.yml similarity index 100% rename from aogithub/workflows/check-shellscripts.yml rename to .github/workflows/check-shellscripts.yml diff --git a/aogithub/workflows/ci.yml b/.github/workflows/ci.yml similarity index 100% rename from aogithub/workflows/ci.yml rename to .github/workflows/ci.yml diff --git a/aogithub/workflows/dockerhub-release-matrix.yml b/.github/workflows/dockerhub-release-matrix.yml similarity index 100% rename from aogithub/workflows/dockerhub-release-matrix.yml rename to .github/workflows/dockerhub-release-matrix.yml diff --git a/aogithub/workflows/manual-docker-release.yml b/.github/workflows/manual-docker-release.yml similarity index 100% rename from aogithub/workflows/manual-docker-release.yml rename to .github/workflows/manual-docker-release.yml diff --git a/aogithub/workflows/mirror-postgrest.yml b/.github/workflows/mirror-postgrest.yml similarity index 100% rename from aogithub/workflows/mirror-postgrest.yml rename to .github/workflows/mirror-postgrest.yml diff --git a/aogithub/workflows/mirror.yml b/.github/workflows/mirror.yml similarity index 100% rename from aogithub/workflows/mirror.yml rename to .github/workflows/mirror.yml diff --git a/aogithub/workflows/nix-build.yml b/.github/workflows/nix-build.yml similarity index 100% rename from aogithub/workflows/nix-build.yml rename to .github/workflows/nix-build.yml diff --git a/aogithub/workflows/publish-migrations-prod.yml b/.github/workflows/publish-migrations-prod.yml similarity index 100% rename from aogithub/workflows/publish-migrations-prod.yml rename to .github/workflows/publish-migrations-prod.yml diff --git a/aogithub/workflows/publish-migrations-staging.yml b/.github/workflows/publish-migrations-staging.yml similarity index 100% rename from aogithub/workflows/publish-migrations-staging.yml rename to .github/workflows/publish-migrations-staging.yml diff --git a/aogithub/workflows/publish-nix-pgupgrade-bin-flake-version.yml 
b/.github/workflows/publish-nix-pgupgrade-bin-flake-version.yml similarity index 100% rename from aogithub/workflows/publish-nix-pgupgrade-bin-flake-version.yml rename to .github/workflows/publish-nix-pgupgrade-bin-flake-version.yml diff --git a/aogithub/workflows/publish-nix-pgupgrade-scripts.yml b/.github/workflows/publish-nix-pgupgrade-scripts.yml similarity index 100% rename from aogithub/workflows/publish-nix-pgupgrade-scripts.yml rename to .github/workflows/publish-nix-pgupgrade-scripts.yml diff --git a/aogithub/workflows/qemu-image-build.yml b/.github/workflows/qemu-image-build.yml similarity index 100% rename from aogithub/workflows/qemu-image-build.yml rename to .github/workflows/qemu-image-build.yml diff --git a/aogithub/workflows/test.yml b/.github/workflows/test.yml similarity index 100% rename from aogithub/workflows/test.yml rename to .github/workflows/test.yml diff --git a/aogithub/workflows/testinfra-ami-build.yml b/.github/workflows/testinfra-ami-build.yml similarity index 100% rename from aogithub/workflows/testinfra-ami-build.yml rename to .github/workflows/testinfra-ami-build.yml