diff --git a/.github/workflows/build-cloudberry-rocky8.yml b/.github/workflows/build-cloudberry-rocky8.yml new file mode 100644 index 00000000000..2abf88060e3 --- /dev/null +++ b/.github/workflows/build-cloudberry-rocky8.yml @@ -0,0 +1,1909 @@ +# -------------------------------------------------------------------- +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed +# with this work for additional information regarding copyright +# ownership. The ASF licenses this file to You under the Apache +# License, Version 2.0 (the "License"); you may not use this file +# except in compliance with the License. You may obtain a copy of the +# License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +# +# -------------------------------------------------------------------- +# GitHub Actions Workflow: Apache Cloudberry Build Pipeline (Rocky 8) +# -------------------------------------------------------------------- +# Description: +# +# This workflow builds, tests, and packages Apache Cloudberry on +# Rocky Linux 8. It ensures artifact integrity, performs installation +# tests, validates key operations, and provides detailed test reports, +# including handling for ignored test cases. +# +# Workflow Overview: +# 1. **Check Skip**: +# - Dynamically determines if the workflow should run based on CI skip flags. +# - Evaluates the following fields for skip flags: +# - **Pull Request Events**: PR title and PR body. +# - **Push Events**: Commit message of the head commit. 
+# - Supports the following skip patterns (case-insensitive): +# - `[skip ci]` +# - `[ci skip]` +# - `[no ci]` +# - **Example Usage**: +# - Add `[skip ci]` to a commit message, PR title, or body to skip the workflow. +# +# 2. **Build Job**: +# - Configures and builds Apache Cloudberry. +# - Supports debug build configuration via ENABLE_DEBUG flag. +# - Runs unit tests and verifies build artifacts. +# - Creates RPM packages (regular or debug), source tarballs, and logs. +# - **Key Artifacts**: RPM package, source tarball, build logs. +# +# 3. **RPM Install Test Job**: +# - Verifies RPM integrity and installs Cloudberry. +# - Validates successful installation. +# - **Key Artifacts**: Installation logs, verification results. +# +# 4. **Test Job (Matrix)**: +# - Executes a test matrix to validate different scenarios. +# - Creates a demo cluster and runs installcheck tests. +# - Parses and reports test results, including failed and ignored tests. +# - Detects and analyzes any core dumps generated during tests. +# - **Key Features**: +# - Regression diffs are displayed if found, aiding quick debugging. +# - Both failed and ignored test names are logged and reported. +# - Core dumps are analyzed using GDB for stack traces. +# - **Key Artifacts**: Test logs, regression files, test summaries, core analyses. +# +# 5. **Report Job**: +# - Aggregates job results into a final report. +# - Sends failure notifications if any step fails. +# +# Execution Environment: +# - **Runs On**: ubuntu-22.04 with Rocky Linux 8 containers. +# - **Resource Requirements**: +# - Disk: Minimum 20GB free space. +# - Memory: Minimum 8GB RAM. +# - CPU: Recommended 4+ cores. +# +# Triggers: +# - Push to `main` branch. +# - Pull request that modifies this workflow file. +# - Scheduled: Every Monday at 02:00 UTC. +# - Manual workflow dispatch. 
+# +# Container Images: +# - **Build**: `apache/incubator-cloudberry:cbdb-build-rocky8-latest` +# - **Test**: `apache/incubator-cloudberry:cbdb-test-rocky8-latest` +# +# Artifacts: +# - RPM Package (retention: ${{ env.LOG_RETENTION_DAYS }} days). +# - Source Tarball (retention: ${{ env.LOG_RETENTION_DAYS }} days). +# - Logs and Test Results (retention: ${{ env.LOG_RETENTION_DAYS }} days). +# - Regression Diffs (retention: ${{ env.LOG_RETENTION_DAYS }} days). +# - Core Dump Analyses (retention: ${{ env.LOG_RETENTION_DAYS }} days). +# +# Notes: +# - Supports concurrent job execution. +# - Includes robust skip logic for pull requests and pushes. +# - Handles ignored test cases, ensuring results are comprehensive. +# - Provides detailed logs and error handling for failed and ignored tests. +# - Analyzes core dumps generated during test execution. +# - Supports debug builds with preserved symbols. +# -------------------------------------------------------------------- + +name: Apache Cloudberry Build (Rocky 8) + +on: + push: + branches: [main, REL_2_STABLE] + pull_request: + paths: + - '.github/workflows/build-cloudberry-rocky8.yml' + # We can enable the PR test when needed + # branches: [main, REL_2_STABLE] + # types: [opened, synchronize, reopened, edited] + schedule: + # Run every Monday at 02:00 UTC + - cron: '0 2 * * 1' + workflow_dispatch: + inputs: + test_selection: + description: 'Select tests to run (comma-separated). Examples: ic-good-opt-off,ic-contrib' + required: false + default: 'all' + type: string + reuse_artifacts_from_run_id: + description: 'Reuse build artifacts from a previous run ID (leave empty to build fresh)' + required: false + default: '' + type: string + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: false + +# Note: Step details, logs, and artifacts require users to be logged into GitHub +# even for public repositories. This is a GitHub security feature and cannot +# be overridden by permissions. 
+ +permissions: + # READ permissions allow viewing repository contents + contents: read # Required for checking out code and reading repository files + + # READ permissions for packages (Container registry, etc) + packages: read # Allows reading from GitHub package registry + + # WRITE permissions for actions includes read access to: + # - Workflow runs + # - Artifacts (requires GitHub login) + # - Logs (requires GitHub login) + actions: write + + # READ permissions for checks API: + # - Step details visibility (requires GitHub login) + # - Check run status and details + checks: read + + # READ permissions for pull request metadata: + # - PR status + # - Associated checks + # - Review states + pull-requests: read + +env: + LOG_RETENTION_DAYS: 7 + ENABLE_DEBUG: false + +jobs: + + ## ====================================================================== + ## Job: check-skip + ## ====================================================================== + + check-skip: + runs-on: ubuntu-22.04 + outputs: + should_skip: ${{ steps.skip-check.outputs.should_skip }} + steps: + - id: skip-check + shell: bash + env: + EVENT_NAME: ${{ github.event_name }} + PR_TITLE: ${{ github.event.pull_request.title || '' }} + PR_BODY: ${{ github.event.pull_request.body || '' }} + run: | + # Default to not skipping + echo "should_skip=false" >> "$GITHUB_OUTPUT" + + # Apply skip logic only for pull_request events + if [[ "$EVENT_NAME" == "pull_request" ]]; then + # Combine PR title and body for skip check + MESSAGE="${PR_TITLE}\n${PR_BODY}" + + # Escape special characters using printf %s + ESCAPED_MESSAGE=$(printf "%s" "$MESSAGE") + + echo "Checking PR title and body (escaped): $ESCAPED_MESSAGE" + + # Check for skip patterns + if echo -e "$ESCAPED_MESSAGE" | grep -qEi '\[skip[ -]ci\]|\[ci[ -]skip\]|\[no[ -]ci\]'; then + echo "should_skip=true" >> "$GITHUB_OUTPUT" + fi + else + echo "Skip logic is not applied for $EVENT_NAME events." 
+ fi + + - name: Report Skip Status + if: steps.skip-check.outputs.should_skip == 'true' + run: | + echo "CI Skip flag detected in PR - skipping all checks." + exit 0 + + ## ====================================================================== + ## Job: prepare-test-matrix + ## ====================================================================== + + prepare-test-matrix: + runs-on: ubuntu-22.04 + needs: [check-skip] + if: needs.check-skip.outputs.should_skip != 'true' + outputs: + test-matrix: ${{ steps.set-matrix.outputs.matrix }} + + steps: + - id: set-matrix + run: | + echo "=== Matrix Preparation Diagnostics ===" + echo "Event type: ${{ github.event_name }}" + echo "Test selection input: '${{ github.event.inputs.test_selection }}'" + + # Define defaults + DEFAULT_NUM_PRIMARY_MIRROR_PAIRS=3 + DEFAULT_ENABLE_CGROUPS=false + DEFAULT_ENABLE_CORE_CHECK=true + DEFAULT_PG_SETTINGS_OPTIMIZER="" + + # Define base test configurations + ALL_TESTS='{ + "include": [ + {"test":"ic-good-opt-off", + "make_configs":["src/test/regress:installcheck-good"], + "pg_settings":{"optimizer":"off"} + }, + {"test":"ic-good-opt-on", + "make_configs":["src/test/regress:installcheck-good"], + "pg_settings":{"optimizer":"on"} + }, + {"test":"pax-ic-good-opt-off", + "make_configs":[ + "contrib/pax_storage/:pax-test", + "contrib/pax_storage/:regress_test" + ], + "pg_settings":{ + "optimizer":"off", + "default_table_access_method":"pax" + } + }, + {"test":"pax-ic-good-opt-on", + "make_configs":[ + "contrib/pax_storage/:pax-test", + "contrib/pax_storage/:regress_test" + ], + "pg_settings":{ + "optimizer":"on", + "default_table_access_method":"pax" + } + }, + {"test":"pax-ic-isolation2-opt-off", + "make_configs":["contrib/pax_storage/:isolation2_test"], + "pg_settings":{ + "optimizer":"off", + "default_table_access_method":"pax" + }, + "enable_core_check":false + }, + {"test":"pax-ic-isolation2-opt-on", + "make_configs":["contrib/pax_storage/:isolation2_test"], + "pg_settings":{ + 
"optimizer":"on", + "default_table_access_method":"pax" + }, + "enable_core_check":false + }, + {"test":"ic-expandshrink", + "make_configs":["src/test/isolation2:installcheck-expandshrink"] + }, + {"test":"ic-singlenode", + "make_configs":["src/test/isolation:installcheck-singlenode", + "src/test/singlenode_regress:installcheck-singlenode", + "src/test/singlenode_isolation2:installcheck-singlenode"], + "num_primary_mirror_pairs":0 + }, + {"test":"ic-resgroup-v2", + "make_configs":["src/test/isolation2:installcheck-resgroup-v2"], + "enable_cgroups":true + }, + {"test":"ic-contrib", + "make_configs":["contrib/auto_explain:installcheck", + "contrib/amcheck:installcheck", + "contrib/citext:installcheck", + "contrib/btree_gin:installcheck", + "contrib/btree_gist:installcheck", + "contrib/dblink:installcheck", + "contrib/dict_int:installcheck", + "contrib/dict_xsyn:installcheck", + "contrib/extprotocol:installcheck", + "contrib/file_fdw:installcheck", + "contrib/formatter_fixedwidth:installcheck", + "contrib/hstore:installcheck", + "contrib/indexscan:installcheck", + "contrib/pg_trgm:installcheck", + "contrib/indexscan:installcheck", + "contrib/pgcrypto:installcheck", + "contrib/pgstattuple:installcheck", + "contrib/tablefunc:installcheck", + "contrib/passwordcheck:installcheck", + "contrib/pg_buffercache:installcheck", + "contrib/sslinfo:installcheck"] + }, + {"test":"ic-gpcontrib", + "make_configs":["gpcontrib/orafce:installcheck", + "gpcontrib/zstd:installcheck", + "gpcontrib/gp_sparse_vector:installcheck", + "gpcontrib/gp_toolkit:installcheck"] + }, + {"test":"ic-fixme", + "make_configs":["src/test/regress:installcheck-fixme"], + "enable_core_check":false + }, + {"test":"ic-isolation2", + "make_configs":["src/test/isolation2:installcheck-isolation2"] + }, + {"test":"ic-isolation2-hot-standby", + "make_configs":["src/test/isolation2:installcheck-hot-standby"] + }, + {"test":"ic-isolation2-crash", + "make_configs":["src/test/isolation2:installcheck-isolation2-crash"], 
+ "enable_core_check":false + }, + {"test":"ic-parallel-retrieve-cursor", + "make_configs":["src/test/isolation2:installcheck-parallel-retrieve-cursor"] + }, + {"test":"ic-cbdb-parallel", + "make_configs":["src/test/regress:installcheck-cbdb-parallel"] + } + ] + }' + + # Function to apply defaults + apply_defaults() { + echo "$1" | jq --arg npm "$DEFAULT_NUM_PRIMARY_MIRROR_PAIRS" \ + --argjson ec "$DEFAULT_ENABLE_CGROUPS" \ + --argjson ecc "$DEFAULT_ENABLE_CORE_CHECK" \ + --arg opt "$DEFAULT_PG_SETTINGS_OPTIMIZER" \ + 'def get_defaults: + { + num_primary_mirror_pairs: ($npm|tonumber), + enable_cgroups: $ec, + enable_core_check: $ecc, + pg_settings: { + optimizer: $opt + } + }; + get_defaults * .' + } + + # Extract all valid test names from ALL_TESTS + VALID_TESTS=$(echo "$ALL_TESTS" | jq -r '.include[].test') + + # Parse input test selection + IFS=',' read -ra SELECTED_TESTS <<< "${{ github.event.inputs.test_selection }}" + + # Default to all tests if selection is empty or 'all' + if [[ "${SELECTED_TESTS[*]}" == "all" || -z "${SELECTED_TESTS[*]}" ]]; then + mapfile -t SELECTED_TESTS <<< "$VALID_TESTS" + fi + + # Validate and filter selected tests + INVALID_TESTS=() + FILTERED_TESTS=() + for TEST in "${SELECTED_TESTS[@]}"; do + TEST=$(echo "$TEST" | tr -d '[:space:]') # Trim whitespace + if echo "$VALID_TESTS" | grep -qw "$TEST"; then + FILTERED_TESTS+=("$TEST") + else + INVALID_TESTS+=("$TEST") + fi + done + + # Handle invalid tests + if [[ ${#INVALID_TESTS[@]} -gt 0 ]]; then + echo "::error::Invalid test(s) selected: ${INVALID_TESTS[*]}" + echo "Valid tests are: $(echo "$VALID_TESTS" | tr '\n' ', ')" + exit 1 + fi + + # Build result JSON with defaults applied + RESULT='{"include":[' + FIRST=true + for TEST in "${FILTERED_TESTS[@]}"; do + CONFIG=$(jq -c --arg test "$TEST" '.include[] | select(.test == $test)' <<< "$ALL_TESTS") + FILTERED_WITH_DEFAULTS=$(apply_defaults "$CONFIG") + if [[ "$FIRST" == true ]]; then + FIRST=false + else + RESULT="${RESULT}," + fi + 
RESULT="${RESULT}${FILTERED_WITH_DEFAULTS}" + done + RESULT="${RESULT}]}" + + # Output the matrix for GitHub Actions + echo "Final matrix configuration:" + echo "$RESULT" | jq . + + # Fix: Use block redirection + { + echo "matrix<> "$GITHUB_OUTPUT" + + echo "=== Matrix Preparation Complete ===" + + ## ====================================================================== + ## Job: build + ## ====================================================================== + + build: + name: Build Apache Cloudberry RPM (Rocky 8) + env: + JOB_TYPE: build + needs: [check-skip] + runs-on: ubuntu-22.04 + timeout-minutes: 120 + if: github.event.inputs.reuse_artifacts_from_run_id == '' + outputs: + build_timestamp: ${{ steps.set_timestamp.outputs.timestamp }} + + container: + image: apache/incubator-cloudberry:cbdb-build-rocky8-latest + options: >- + --user root + -h cdw + -v /usr/share:/host_usr_share + -v /usr/local:/host_usr_local + -v /opt:/host_opt + + steps: + - name: Free Disk Space + if: needs.check-skip.outputs.should_skip != 'true' + run: | + echo "=== Disk space before cleanup ===" + df -h / + + # Remove pre-installed tools from host to free disk space + rm -rf /host_opt/hostedtoolcache || true # GitHub Actions tool cache + rm -rf /host_usr_local/lib/android || true # Android SDK + rm -rf /host_usr_share/dotnet || true # .NET SDK + rm -rf /host_opt/ghc || true # Haskell GHC + rm -rf /host_usr_local/.ghcup || true # Haskell GHCup + rm -rf /host_usr_share/swift || true # Swift + rm -rf /host_usr_local/share/powershell || true # PowerShell + rm -rf /host_usr_local/share/chromium || true # Chromium + rm -rf /host_usr_share/miniconda || true # Miniconda + rm -rf /host_opt/az || true # Azure CLI + rm -rf /host_usr_share/sbt || true # Scala Build Tool + + echo "=== Disk space after cleanup ===" + df -h / + + - name: Skip Check + if: needs.check-skip.outputs.should_skip == 'true' + run: | + echo "Build skipped via CI skip flag" >> "$GITHUB_STEP_SUMMARY" + exit 0 + + - name: Set 
build timestamp + if: needs.check-skip.outputs.should_skip != 'true' + id: set_timestamp # Add an ID to reference this step + run: | + timestamp=$(date +'%Y%m%d_%H%M%S') + echo "timestamp=$timestamp" | tee -a "$GITHUB_OUTPUT" # Use GITHUB_OUTPUT for job outputs + echo "BUILD_TIMESTAMP=$timestamp" | tee -a "$GITHUB_ENV" # Also set as environment variable + + - name: Checkout Apache Cloudberry + if: needs.check-skip.outputs.should_skip != 'true' + uses: actions/checkout@v4 + with: + fetch-depth: 1 + submodules: true + + - name: Cloudberry Environment Initialization + if: needs.check-skip.outputs.should_skip != 'true' + env: + LOGS_DIR: build-logs + run: | + set -eo pipefail + if ! su - gpadmin -c "/tmp/init_system.sh"; then + echo "::error::Container initialization failed" + exit 1 + fi + + mkdir -p "${LOGS_DIR}/details" + chown -R gpadmin:gpadmin . + chmod -R 755 . + chmod 777 "${LOGS_DIR}" + + df -kh / + rm -rf /__t/* + df -kh / + + df -h | tee -a "${LOGS_DIR}/details/disk-usage.log" + free -h | tee -a "${LOGS_DIR}/details/memory-usage.log" + + { + echo "=== Environment Information ===" + uname -a + df -h + free -h + env + } | tee -a "${LOGS_DIR}/details/environment.log" + + echo "SRC_DIR=${GITHUB_WORKSPACE}" | tee -a "$GITHUB_ENV" + + - name: Generate Build Job Summary Start + if: needs.check-skip.outputs.should_skip != 'true' + run: | + { + echo "# Build Job Summary" + echo "## Environment" + echo "- Start Time: $(date -u +'%Y-%m-%d %H:%M:%S UTC')" + echo "- ENABLE_DEBUG: ${{ env.ENABLE_DEBUG }}" + echo "- OS Version: $(cat /etc/redhat-release)" + echo "- GCC Version: $(gcc --version | head -n1)" + } >> "$GITHUB_STEP_SUMMARY" + + - name: Run Apache Cloudberry configure script + if: needs.check-skip.outputs.should_skip != 'true' + env: + SRC_DIR: ${{ github.workspace }} + run: | + set -eo pipefail + chmod +x "${SRC_DIR}"/devops/build/automation/cloudberry/scripts/configure-cloudberry.sh + if ! 
time su - gpadmin -c "cd ${SRC_DIR} && SRC_DIR=${SRC_DIR} ENABLE_DEBUG=${{ env.ENABLE_DEBUG }} ${SRC_DIR}/devops/build/automation/cloudberry/scripts/configure-cloudberry.sh"; then + echo "::error::Configure script failed" + exit 1 + fi + + - name: Run Apache Cloudberry build script + if: needs.check-skip.outputs.should_skip != 'true' + env: + SRC_DIR: ${{ github.workspace }} + run: | + set -eo pipefail + + chmod +x "${SRC_DIR}"/devops/build/automation/cloudberry/scripts/build-cloudberry.sh + if ! time su - gpadmin -c "cd ${SRC_DIR} && SRC_DIR=${SRC_DIR} ${SRC_DIR}/devops/build/automation/cloudberry/scripts/build-cloudberry.sh"; then + echo "::error::Build script failed" + exit 1 + fi + + - name: Verify build artifacts + if: needs.check-skip.outputs.should_skip != 'true' + run: | + set -eo pipefail + + echo "Verifying build artifacts..." + { + echo "=== Build Artifacts Verification ===" + echo "Timestamp: $(date -u)" + + if [ ! -d "/usr/local/cloudberry-db" ]; then + echo "::error::Build artifacts directory not found" + exit 1 + fi + + # Verify critical binaries + critical_binaries=( + "/usr/local/cloudberry-db/bin/postgres" + "/usr/local/cloudberry-db/bin/psql" + ) + + echo "Checking critical binaries..." + for binary in "${critical_binaries[@]}"; do + if [ ! -f "$binary" ]; then + echo "::error::Critical binary missing: $binary" + exit 1 + fi + if [ ! -x "$binary" ]; then + echo "::error::Binary not executable: $binary" + exit 1 + fi + echo "Binary verified: $binary" + ls -l "$binary" + done + + # Test binary execution + echo "Testing binary execution..." + if ! /usr/local/cloudberry-db/bin/postgres --version; then + echo "::error::postgres binary verification failed" + exit 1 + fi + if ! 
/usr/local/cloudberry-db/bin/psql --version; then + echo "::error::psql binary verification failed" + exit 1 + fi + + echo "All build artifacts verified successfully" + } 2>&1 | tee -a build-logs/details/build-verification.log + + - name: Create Source tarball, create RPM and verify artifacts + if: needs.check-skip.outputs.should_skip != 'true' + env: + CBDB_VERSION: 99.0.0 + BUILD_NUMBER: 1 + SRC_DIR: ${{ github.workspace }} + run: | + set -eo pipefail + + { + echo "=== Artifact Creation Log ===" + echo "Timestamp: $(date -u)" + + # Create source tarball + echo "Creating source tarball..." + tar czf "${SRC_DIR}"/../apache-cloudberry-incubating-src.tgz -C "${SRC_DIR}"/.. ./cloudberry + mv "${SRC_DIR}"/../apache-cloudberry-incubating-src.tgz "${SRC_DIR}" + + # Verify tarball contents + echo "Verifying source tarball contents..." + if ! tar tzf "${SRC_DIR}"/apache-cloudberry-incubating-src.tgz > /dev/null; then + echo "::error::Source tarball verification failed" + exit 1 + fi + + # Create RPM + echo "Creating RPM package..." 
+ rpmdev-setuptree + ln -s "${SRC_DIR}"/devops/build/packaging/rpm/apache-cloudberry-db-incubating.spec "${HOME}"/rpmbuild/SPECS/apache-cloudberry-db-incubating.spec + cp "${SRC_DIR}"/LICENSE /usr/local/cloudberry-db + + DEBUG_RPMBUILD_OPT="" + DEBUG_IDENTIFIER="" + if [ "${{ env.ENABLE_DEBUG }}" = "true" ]; then + DEBUG_RPMBUILD_OPT="--with-debug" + DEBUG_IDENTIFIER=".debug" + fi + + "${SRC_DIR}"/devops/build/packaging/rpm/build-rpm.sh --version "${CBDB_VERSION}" --release "${BUILD_NUMBER}" "${DEBUG_RPMBUILD_OPT}" + + # Get OS version and move RPM + os_version=$(grep -oP '(?<=^VERSION_ID=")[0-9]' /etc/os-release) + RPM_FILE="${HOME}"/rpmbuild/RPMS/x86_64/apache-cloudberry-db-incubating-"${CBDB_VERSION}"-"${BUILD_NUMBER}""${DEBUG_IDENTIFIER}".el"${os_version}".x86_64.rpm + cp "${RPM_FILE}" "${SRC_DIR}" + RPM_DEBUG="${HOME}"/rpmbuild/RPMS/x86_64/apache-cloudberry-db-incubating-debuginfo-"${CBDB_VERSION}"-"${BUILD_NUMBER}""${DEBUG_IDENTIFIER}".el"${os_version}".x86_64.rpm + cp "${RPM_DEBUG}" "${SRC_DIR}" + + # Get package information + echo "Package Information:" + rpm -qip "${RPM_FILE}" + + # Verify critical files in RPM + echo "Verifying critical files in RPM..." + for binary in "bin/postgres" "bin/psql"; do + if ! rpm -qlp "${RPM_FILE}" | grep -q "${binary}$"; then + echo "::error::Critical binary '${binary}' not found in RPM" + exit 1 + fi + done + + # Record checksums + echo "Calculating checksums..." + sha256sum "${RPM_FILE}" | tee -a build-logs/details/checksums.log + sha256sum "${SRC_DIR}"/apache-cloudberry-incubating-src.tgz | tee -a build-logs/details/checksums.log + + echo "Artifacts created and verified successfully" + + } 2>&1 | tee -a build-logs/details/artifact-creation.log + + - name: Run Apache Cloudberry unittest script + if: needs.check-skip.outputs.should_skip != 'true' + env: + SRC_DIR: ${{ github.workspace }} + run: | + set -eo pipefail + chmod +x "${SRC_DIR}"/devops/build/automation/cloudberry/scripts/unittest-cloudberry.sh + if ! 
time su - gpadmin -c "cd ${SRC_DIR} && SRC_DIR=${SRC_DIR} ${SRC_DIR}/devops/build/automation/cloudberry/scripts/unittest-cloudberry.sh"; then + echo "::error::Unittest script failed" + exit 1 + fi + + - name: Generate Build Job Summary End + if: always() + run: | + { + echo "## Build Results" + echo "- End Time: $(date -u +'%Y-%m-%d %H:%M:%S UTC')" + } >> "$GITHUB_STEP_SUMMARY" + + - name: Upload build logs + if: needs.check-skip.outputs.should_skip != 'true' + uses: actions/upload-artifact@v4 + with: + name: build-logs-rocky8-${{ env.BUILD_TIMESTAMP }} + path: | + build-logs/ + retention-days: ${{ env.LOG_RETENTION_DAYS }} + + - name: Upload Cloudberry RPM build artifacts + if: needs.check-skip.outputs.should_skip != 'true' + uses: actions/upload-artifact@v4 + with: + name: apache-cloudberry-db-incubating-rpm-build-artifacts-rocky8 + retention-days: ${{ env.LOG_RETENTION_DAYS }} + if-no-files-found: error + path: | + *.rpm + + - name: Upload Cloudberry source build artifacts + if: needs.check-skip.outputs.should_skip != 'true' + uses: actions/upload-artifact@v4 + with: + name: apache-cloudberry-db-incubating-source-build-artifacts-rocky8 + retention-days: ${{ env.LOG_RETENTION_DAYS }} + if-no-files-found: error + path: | + apache-cloudberry-incubating-src.tgz + + ## ====================================================================== + ## Job: rpm-install-test + ## ====================================================================== + + rpm-install-test: + name: RPM Install Test Apache Cloudberry (Rocky 8) + needs: [check-skip, build] + if: | + !cancelled() && + (needs.build.result == 'success' || needs.build.result == 'skipped') && + github.event.inputs.reuse_artifacts_from_run_id == '' + runs-on: ubuntu-22.04 + timeout-minutes: 120 + + container: + image: apache/incubator-cloudberry:cbdb-test-rocky8-latest + options: >- + --user root + -h cdw + -v /usr/share:/host_usr_share + -v /usr/local:/host_usr_local + -v /opt:/host_opt + + steps: + - name: Free Disk 
Space + if: needs.check-skip.outputs.should_skip != 'true' + run: | + echo "=== Disk space before cleanup ===" + df -h / + + # Remove pre-installed tools from host to free disk space + rm -rf /host_opt/hostedtoolcache || true # GitHub Actions tool cache + rm -rf /host_usr_local/lib/android || true # Android SDK + rm -rf /host_usr_share/dotnet || true # .NET SDK + rm -rf /host_opt/ghc || true # Haskell GHC + rm -rf /host_usr_local/.ghcup || true # Haskell GHCup + rm -rf /host_usr_share/swift || true # Swift + rm -rf /host_usr_local/share/powershell || true # PowerShell + rm -rf /host_usr_local/share/chromium || true # Chromium + rm -rf /host_usr_share/miniconda || true # Miniconda + rm -rf /host_opt/az || true # Azure CLI + rm -rf /host_usr_share/sbt || true # Scala Build Tool + + echo "=== Disk space after cleanup ===" + df -h / + + - name: Skip Check + if: needs.check-skip.outputs.should_skip == 'true' + run: | + echo "RPM install test skipped via CI skip flag" >> "$GITHUB_STEP_SUMMARY" + exit 0 + + - name: Download Cloudberry RPM build artifacts + if: needs.check-skip.outputs.should_skip != 'true' + uses: actions/download-artifact@v4 + with: + name: apache-cloudberry-db-incubating-rpm-build-artifacts-rocky8 + path: ${{ github.workspace }}/rpm_build_artifacts + merge-multiple: false + run-id: ${{ github.event.inputs.reuse_artifacts_from_run_id || github.run_id }} + github-token: ${{ secrets.GITHUB_TOKEN }} + + - name: Cloudberry Environment Initialization + if: needs.check-skip.outputs.should_skip != 'true' + env: + LOGS_DIR: install-logs + run: | + set -eo pipefail + if ! su - gpadmin -c "/tmp/init_system.sh"; then + echo "::error::Container initialization failed" + exit 1 + fi + + mkdir -p "${LOGS_DIR}/details" + chown -R gpadmin:gpadmin . + chmod -R 755 . 
+ chmod 777 "${LOGS_DIR}" + + df -kh / + rm -rf /__t/* + df -kh / + + df -h | tee -a "${LOGS_DIR}/details/disk-usage.log" + free -h | tee -a "${LOGS_DIR}/details/memory-usage.log" + + { + echo "=== Environment Information ===" + uname -a + df -h + free -h + env + } | tee -a "${LOGS_DIR}/details/environment.log" + + echo "SRC_DIR=${GITHUB_WORKSPACE}" | tee -a "$GITHUB_ENV" + + - name: Verify RPM artifacts + if: needs.check-skip.outputs.should_skip != 'true' + id: verify-artifacts + run: | + set -eo pipefail + + RPM_FILE=$(ls "${GITHUB_WORKSPACE}"/rpm_build_artifacts/apache-cloudberry-db-incubating-[0-9]*.rpm | grep -v "debuginfo") + if [ ! -f "${RPM_FILE}" ]; then + echo "::error::RPM file not found" + exit 1 + fi + + echo "rpm_file=${RPM_FILE}" >> "$GITHUB_OUTPUT" + + echo "Verifying RPM artifacts..." + { + echo "=== RPM Verification Summary ===" + echo "Timestamp: $(date -u)" + echo "RPM File: ${RPM_FILE}" + + # Get RPM metadata and verify contents + echo "Package Information:" + rpm -qip "${RPM_FILE}" + + # Get key RPM attributes for verification + RPM_VERSION=$(rpm -qp --queryformat "%{VERSION}" "${RPM_FILE}") + RPM_RELEASE=$(rpm -qp --queryformat "%{RELEASE}" "${RPM_FILE}") + echo "version=${RPM_VERSION}" >> "$GITHUB_OUTPUT" + echo "release=${RPM_RELEASE}" >> "$GITHUB_OUTPUT" + + # Verify expected binaries are in the RPM + echo "Verifying critical files in RPM..." + for binary in "bin/postgres" "bin/psql"; do + if ! 
rpm -qlp "${RPM_FILE}" | grep -q "${binary}$"; then + echo "::error::Critical binary '${binary}' not found in RPM" + exit 1 + fi + done + + echo "RPM Details:" + echo "- Version: ${RPM_VERSION}" + echo "- Release: ${RPM_RELEASE}" + + # Calculate and store checksum + echo "Checksum:" + sha256sum "${RPM_FILE}" + + } 2>&1 | tee -a install-logs/details/rpm-verification.log + + - name: Install Cloudberry RPM + if: success() && needs.check-skip.outputs.should_skip != 'true' + env: + RPM_FILE: ${{ steps.verify-artifacts.outputs.rpm_file }} + RPM_VERSION: ${{ steps.verify-artifacts.outputs.version }} + RPM_RELEASE: ${{ steps.verify-artifacts.outputs.release }} + run: | + set -eo pipefail + + if [ -z "${RPM_FILE}" ]; then + echo "::error::RPM_FILE environment variable is not set" + exit 1 + fi + + { + echo "=== RPM Installation Log ===" + echo "Timestamp: $(date -u)" + echo "RPM File: ${RPM_FILE}" + echo "Version: ${RPM_VERSION}" + echo "Release: ${RPM_RELEASE}" + + # Refresh repository metadata to avoid mirror issues + echo "Refreshing repository metadata..." + dnf clean all + dnf makecache --refresh || dnf makecache + + # Clean install location + rm -rf /usr/local/cloudberry-db + + # Install RPM with retry logic for mirror issues + # Use --releasever=8 to pin to stable Rocky Linux 8 repos (not bleeding-edge 8.10) + echo "Starting installation..." + if ! 
time dnf install -y --setopt=retries=10 --releasever=8 "${RPM_FILE}"; then + echo "::error::RPM installation failed" + exit 1 + fi + + echo "Installation completed successfully" + rpm -qi apache-cloudberry-db-incubating + echo "Installed files:" + rpm -ql apache-cloudberry-db-incubating + } 2>&1 | tee -a install-logs/details/rpm-installation.log + + - name: Upload install logs + if: needs.check-skip.outputs.should_skip != 'true' + uses: actions/upload-artifact@v4 + with: + name: install-logs-rocky8-${{ needs.build.outputs.build_timestamp }} + path: | + install-logs/ + retention-days: ${{ env.LOG_RETENTION_DAYS }} + + - name: Generate Install Test Job Summary End + if: always() + shell: bash {0} + run: | + { + echo "# Installed Package Summary" + echo "\`\`\`" + + rpm -qi apache-cloudberry-db-incubating + echo "\`\`\`" + } >> "$GITHUB_STEP_SUMMARY" || true + + ## ====================================================================== + ## Job: test + ## ====================================================================== + + test: + name: ${{ matrix.test }} (Rocky 8) + needs: [check-skip, build, prepare-test-matrix] + if: | + !cancelled() && + (needs.build.result == 'success' || needs.build.result == 'skipped') + runs-on: ubuntu-22.04 + timeout-minutes: 120 + # actionlint-allow matrix[*].pg_settings + strategy: + fail-fast: false # Continue with other tests if one fails + matrix: ${{ fromJson(needs.prepare-test-matrix.outputs.test-matrix) }} + + container: + image: apache/incubator-cloudberry:cbdb-build-rocky8-latest + options: >- + --privileged + --user root + --hostname cdw + --shm-size=2gb + --ulimit core=-1 + --cgroupns=host + -v /sys/fs/cgroup:/sys/fs/cgroup:rw + -v /usr/share:/host_usr_share + -v /usr/local:/host_usr_local + -v /opt:/host_opt + + steps: + - name: Free Disk Space + if: needs.check-skip.outputs.should_skip != 'true' + run: | + echo "=== Disk space before cleanup ===" + df -h / + + # Remove pre-installed tools from host to free disk space + rm 
-rf /host_opt/hostedtoolcache || true # GitHub Actions tool cache + rm -rf /host_usr_local/lib/android || true # Android SDK + rm -rf /host_usr_share/dotnet || true # .NET SDK + rm -rf /host_opt/ghc || true # Haskell GHC + rm -rf /host_usr_local/.ghcup || true # Haskell GHCup + rm -rf /host_usr_share/swift || true # Swift + rm -rf /host_usr_local/share/powershell || true # PowerShell + rm -rf /host_usr_local/share/chromium || true # Chromium + rm -rf /host_usr_share/miniconda || true # Miniconda + rm -rf /host_opt/az || true # Azure CLI + rm -rf /host_usr_share/sbt || true # Scala Build Tool + + echo "=== Disk space after cleanup ===" + df -h / + + - name: Skip Check + if: needs.check-skip.outputs.should_skip == 'true' + run: | + echo "Test ${{ matrix.test }} skipped via CI skip flag" >> "$GITHUB_STEP_SUMMARY" + exit 0 + + - name: Use timestamp from previous job + if: needs.check-skip.outputs.should_skip != 'true' + run: | + echo "Timestamp from output: ${{ needs.build.outputs.build_timestamp }}" + + - name: Cloudberry Environment Initialization + env: + LOGS_DIR: build-logs + run: | + set -eo pipefail + if ! su - gpadmin -c "/tmp/init_system.sh"; then + echo "::error::Container initialization failed" + exit 1 + fi + + mkdir -p "${LOGS_DIR}/details" + chown -R gpadmin:gpadmin . + chmod -R 755 . + chmod 777 "${LOGS_DIR}" + + df -kh / + rm -rf /__t/* + df -kh / + + df -h | tee -a "${LOGS_DIR}/details/disk-usage.log" + free -h | tee -a "${LOGS_DIR}/details/memory-usage.log" + + { + echo "=== Environment Information ===" + uname -a + df -h + free -h + env + } | tee -a "${LOGS_DIR}/details/environment.log" + + echo "SRC_DIR=${GITHUB_WORKSPACE}" | tee -a "$GITHUB_ENV" + + - name: Setup cgroups + if: needs.check-skip.outputs.should_skip != 'true' + shell: bash + run: | + set -uxo pipefail + + if [ "${{ matrix.enable_cgroups }}" = "true" ]; then + + echo "Current mounts:" + mount | grep cgroup + + CGROUP_BASEDIR=/sys/fs/cgroup + + # 1. 
Basic setup with permissions + sudo chmod -R 777 ${CGROUP_BASEDIR}/ + sudo mkdir -p ${CGROUP_BASEDIR}/gpdb + sudo chmod -R 777 ${CGROUP_BASEDIR}/gpdb + sudo chown -R gpadmin:gpadmin ${CGROUP_BASEDIR}/gpdb + + # 2. Enable controllers + sudo bash -c "echo '+cpu +cpuset +memory +io' > ${CGROUP_BASEDIR}/cgroup.subtree_control" || true + sudo bash -c "echo '+cpu +cpuset +memory +io' > ${CGROUP_BASEDIR}/gpdb/cgroup.subtree_control" || true + + # 3. CPU settings + sudo bash -c "echo 'max 100000' > ${CGROUP_BASEDIR}/gpdb/cpu.max" || true + sudo bash -c "echo '100' > ${CGROUP_BASEDIR}/gpdb/cpu.weight" || true + sudo bash -c "echo '0' > ${CGROUP_BASEDIR}/gpdb/cpu.weight.nice" || true + sudo bash -c "echo 0-$(( $(nproc) - 1 )) > ${CGROUP_BASEDIR}/gpdb/cpuset.cpus" || true + sudo bash -c "echo '0' > ${CGROUP_BASEDIR}/gpdb/cpuset.mems" || true + + # 4. Memory settings + sudo bash -c "echo 'max' > ${CGROUP_BASEDIR}/gpdb/memory.max" || true + sudo bash -c "echo '0' > ${CGROUP_BASEDIR}/gpdb/memory.min" || true + sudo bash -c "echo 'max' > ${CGROUP_BASEDIR}/gpdb/memory.high" || true + + # 5. IO settings + echo "Available block devices:" + lsblk + + sudo bash -c " + if [ -f \${CGROUP_BASEDIR}/gpdb/io.stat ]; then + echo 'Detected IO devices:' + cat \${CGROUP_BASEDIR}/gpdb/io.stat + fi + echo '' > \${CGROUP_BASEDIR}/gpdb/io.max || true + " + + # 6. Fix permissions again after all writes + sudo chmod -R 777 ${CGROUP_BASEDIR}/gpdb + sudo chown -R gpadmin:gpadmin ${CGROUP_BASEDIR}/gpdb + + # 7. Check required files + echo "Checking required files:" + required_files=( + "cgroup.procs" + "cpu.max" + "cpu.pressure" + "cpu.weight" + "cpu.weight.nice" + "cpu.stat" + "cpuset.cpus" + "cpuset.mems" + "cpuset.cpus.effective" + "cpuset.mems.effective" + "memory.current" + "io.max" + ) + + for file in "${required_files[@]}"; do + if [ -f "${CGROUP_BASEDIR}/gpdb/$file" ]; then + echo "✓ $file exists" + ls -l "${CGROUP_BASEDIR}/gpdb/$file" + else + echo "✗ $file missing" + fi + done + + # 8. 
Test subdirectory creation + echo "Testing subdirectory creation..." + sudo -u gpadmin bash -c " + TEST_DIR=\${CGROUP_BASEDIR}/gpdb/test6448 + if mkdir -p \$TEST_DIR; then + echo 'Created test directory' + sudo chmod -R 777 \$TEST_DIR + if echo \$\$ > \$TEST_DIR/cgroup.procs; then + echo 'Successfully wrote to cgroup.procs' + cat \$TEST_DIR/cgroup.procs + # Move processes back to parent before cleanup + echo \$\$ > \${CGROUP_BASEDIR}/gpdb/cgroup.procs + else + echo 'Failed to write to cgroup.procs' + ls -la \$TEST_DIR/cgroup.procs + fi + ls -la \$TEST_DIR/ + rmdir \$TEST_DIR || { + echo 'Moving all processes to parent before cleanup' + cat \$TEST_DIR/cgroup.procs | while read pid; do + echo \$pid > \${CGROUP_BASEDIR}/gpdb/cgroup.procs 2>/dev/null || true + done + rmdir \$TEST_DIR + } + else + echo 'Failed to create test directory' + fi + " + + # 9. Verify setup as gpadmin user + echo "Testing cgroup access as gpadmin..." + sudo -u gpadmin bash -c " + echo 'Checking mounts...' + mount | grep cgroup + + echo 'Checking /proc/self/mounts...' + cat /proc/self/mounts | grep cgroup + + if ! grep -q cgroup2 /proc/self/mounts; then + echo 'ERROR: cgroup2 mount NOT visible to gpadmin' + exit 1 + fi + echo 'SUCCESS: cgroup2 mount visible to gpadmin' + + if ! [ -w ${CGROUP_BASEDIR}/gpdb ]; then + echo 'ERROR: gpadmin cannot write to gpdb cgroup' + exit 1 + fi + echo 'SUCCESS: gpadmin can write to gpdb cgroup' + + echo 'Verifying key files content:' + echo 'cpu.max:' + cat ${CGROUP_BASEDIR}/gpdb/cpu.max || echo 'Failed to read cpu.max' + echo 'cpuset.cpus:' + cat ${CGROUP_BASEDIR}/gpdb/cpuset.cpus || echo 'Failed to read cpuset.cpus' + echo 'cgroup.subtree_control:' + cat ${CGROUP_BASEDIR}/gpdb/cgroup.subtree_control || echo 'Failed to read cgroup.subtree_control' + " + + # 10. 
Show final state + echo "Final cgroup state:" + ls -la ${CGROUP_BASEDIR}/gpdb/ + echo "Cgroup setup completed successfully" + else + echo "Cgroup setup skipped" + fi + + - name: "Generate Test Job Summary Start: ${{ matrix.test }}" + if: always() + run: | + { + echo "# Test Job Summary: ${{ matrix.test }} (Rocky 8)" + echo "## Environment" + echo "- Start Time: $(date -u +'%Y-%m-%d %H:%M:%S UTC')" + + if [[ "${{ needs.check-skip.outputs.should_skip }}" == "true" ]]; then + echo "## Skip Status" + echo "✓ Test execution skipped via CI skip flag" + else + echo "- OS Version: $(cat /etc/redhat-release)" + fi + } >> "$GITHUB_STEP_SUMMARY" + + - name: Download Cloudberry RPM build artifacts + if: needs.check-skip.outputs.should_skip != 'true' + uses: actions/download-artifact@v4 + with: + name: apache-cloudberry-db-incubating-rpm-build-artifacts-rocky8 + path: ${{ github.workspace }}/rpm_build_artifacts + merge-multiple: false + run-id: ${{ github.event.inputs.reuse_artifacts_from_run_id || github.run_id }} + github-token: ${{ secrets.GITHUB_TOKEN }} + + - name: Download Cloudberry Source build artifacts + if: needs.check-skip.outputs.should_skip != 'true' + uses: actions/download-artifact@v4 + with: + name: apache-cloudberry-db-incubating-source-build-artifacts-rocky8 + path: ${{ github.workspace }}/source_build_artifacts + merge-multiple: false + run-id: ${{ github.event.inputs.reuse_artifacts_from_run_id || github.run_id }} + github-token: ${{ secrets.GITHUB_TOKEN }} + + - name: Verify downloaded artifacts + if: needs.check-skip.outputs.should_skip != 'true' + id: verify-artifacts + run: | + set -eo pipefail + + SRC_TARBALL_FILE=$(ls "${GITHUB_WORKSPACE}"/source_build_artifacts/apache-cloudberry-incubating-src.tgz) + if [ ! -f "${SRC_TARBALL_FILE}" ]; then + echo "::error::SRC TARBALL file not found" + exit 1 + fi + + echo "src_tarball_file=${SRC_TARBALL_FILE}" >> "$GITHUB_OUTPUT" + + echo "Verifying SRC TARBALL artifacts..." 
+ { + echo "=== SRC TARBALL Verification Summary ===" + echo "Timestamp: $(date -u)" + echo "SRC TARBALL File: ${SRC_TARBALL_FILE}" + + # Calculate and store checksum + echo "Checksum:" + sha256sum "${SRC_TARBALL_FILE}" + + } 2>&1 | tee -a build-logs/details/src-tarball-verification.log + + RPM_FILE=$(ls "${GITHUB_WORKSPACE}"/rpm_build_artifacts/apache-cloudberry-db-incubating-[0-9]*.rpm | grep -v "debuginfo") + if [ ! -f "${RPM_FILE}" ]; then + echo "::error::RPM file not found" + exit 1 + fi + + echo "rpm_file=${RPM_FILE}" >> "$GITHUB_OUTPUT" + + echo "Verifying RPM artifacts..." + { + echo "=== RPM Verification Summary ===" + echo "Timestamp: $(date -u)" + echo "RPM File: ${RPM_FILE}" + + # Get RPM metadata and verify contents + echo "Package Information:" + rpm -qip "${RPM_FILE}" + + # Get key RPM attributes for verification + RPM_VERSION=$(rpm -qp --queryformat "%{VERSION}" "${RPM_FILE}") + RPM_RELEASE=$(rpm -qp --queryformat "%{RELEASE}" "${RPM_FILE}") + echo "version=${RPM_VERSION}" >> "$GITHUB_OUTPUT" + echo "release=${RPM_RELEASE}" >> "$GITHUB_OUTPUT" + + # Verify expected binaries are in the RPM + echo "Verifying critical files in RPM..." + for binary in "bin/postgres" "bin/psql"; do + if ! 
rpm -qlp "${RPM_FILE}" | grep -q "${binary}$"; then + echo "::error::Critical binary '${binary}' not found in RPM" + exit 1 + fi + done + + echo "RPM Details:" + echo "- Version: ${RPM_VERSION}" + echo "- Release: ${RPM_RELEASE}" + + # Calculate and store checksum + echo "Checksum:" + sha256sum "${RPM_FILE}" + + } 2>&1 | tee -a build-logs/details/rpm-verification.log + + - name: Install Cloudberry RPM + if: success() && needs.check-skip.outputs.should_skip != 'true' + env: + RPM_FILE: ${{ steps.verify-artifacts.outputs.rpm_file }} + RPM_VERSION: ${{ steps.verify-artifacts.outputs.version }} + RPM_RELEASE: ${{ steps.verify-artifacts.outputs.release }} + run: | + set -eo pipefail + + if [ -z "${RPM_FILE}" ]; then + echo "::error::RPM_FILE environment variable is not set" + exit 1 + fi + + { + echo "=== RPM Installation Log ===" + echo "Timestamp: $(date -u)" + echo "RPM File: ${RPM_FILE}" + echo "Version: ${RPM_VERSION}" + echo "Release: ${RPM_RELEASE}" + + # Refresh repository metadata to avoid mirror issues + echo "Refreshing repository metadata..." + dnf clean all + dnf makecache --refresh || dnf makecache + + # Clean install location + rm -rf /usr/local/cloudberry-db + + # Install RPM with retry logic for mirror issues + # Use --releasever=8 to pin to stable Rocky Linux 8 repos (not bleeding-edge 8.10) + echo "Starting installation..." + if ! time dnf install -y --setopt=retries=10 --releasever=8 "${RPM_FILE}"; then + echo "::error::RPM installation failed" + exit 1 + fi + + echo "Installation completed successfully" + rpm -qi apache-cloudberry-db-incubating + } 2>&1 | tee -a build-logs/details/rpm-installation.log + + # Clean up downloaded RPM artifacts to free disk space + echo "=== Disk space before RPM cleanup ===" + echo "Human readable:" + df -kh / + echo "Exact KB:" + df -k / + echo "RPM artifacts size:" + du -sh "${GITHUB_WORKSPACE}"/rpm_build_artifacts || true + echo "Cleaning up RPM artifacts to free disk space..." 
+ rm -rf "${GITHUB_WORKSPACE}"/rpm_build_artifacts + echo "=== Disk space after RPM cleanup ===" + echo "Human readable:" + df -kh / + echo "Exact KB:" + df -k / + + - name: Extract source tarball + if: success() && needs.check-skip.outputs.should_skip != 'true' + env: + SRC_TARBALL_FILE: ${{ steps.verify-artifacts.outputs.src_tarball_file }} + SRC_DIR: ${{ github.workspace }} + run: | + set -eo pipefail + + { + echo "=== Source Extraction Log ===" + echo "Timestamp: $(date -u)" + + echo "Starting extraction..." + if ! time tar zxf "${SRC_TARBALL_FILE}" -C "${SRC_DIR}"/.. ; then + echo "::error::Source extraction failed" + exit 1 + fi + + echo "Extraction completed successfully" + echo "Extracted contents:" + ls -la "${SRC_DIR}/../cloudberry" + echo "Directory size:" + du -sh "${SRC_DIR}/../cloudberry" + } 2>&1 | tee -a build-logs/details/source-extraction.log + + # Clean up source tarball to free disk space + echo "=== Disk space before source tarball cleanup ===" + echo "Human readable:" + df -kh / + echo "Exact KB:" + df -k / + echo "Source tarball artifacts size:" + du -sh "${GITHUB_WORKSPACE}"/source_build_artifacts || true + echo "Cleaning up source tarball to free disk space..." + rm -rf "${GITHUB_WORKSPACE}"/source_build_artifacts + echo "=== Disk space after source tarball cleanup ===" + echo "Human readable:" + df -kh / + echo "Exact KB:" + df -k / + + - name: Create Apache Cloudberry demo cluster + if: success() && needs.check-skip.outputs.should_skip != 'true' + env: + SRC_DIR: ${{ github.workspace }} + run: | + set -eo pipefail + + { + chmod +x "${SRC_DIR}"/devops/build/automation/cloudberry/scripts/create-cloudberry-demo-cluster.sh + if ! 
time su - gpadmin -c "cd ${SRC_DIR} && NUM_PRIMARY_MIRROR_PAIRS='${{ matrix.num_primary_mirror_pairs }}' SRC_DIR=${SRC_DIR} ${SRC_DIR}/devops/build/automation/cloudberry/scripts/create-cloudberry-demo-cluster.sh"; then + echo "::error::Demo cluster creation failed" + exit 1 + fi + + } 2>&1 | tee -a build-logs/details/create-cloudberry-demo-cluster.log + + - name: "Run Tests: ${{ matrix.test }}" + if: success() && needs.check-skip.outputs.should_skip != 'true' + env: + SRC_DIR: ${{ github.workspace }} + shell: bash {0} + run: | + set -o pipefail + + # Initialize test status + overall_status=0 + + # Create logs directory structure + mkdir -p build-logs/details + + # Core file config + mkdir -p "/tmp/cloudberry-cores" + chmod 1777 "/tmp/cloudberry-cores" + sysctl -w kernel.core_pattern="/tmp/cloudberry-cores/core-%e-%s-%u-%g-%p-%t" + sysctl kernel.core_pattern + su - gpadmin -c "ulimit -c" + + # WARNING: PostgreSQL Settings + # When adding new pg_settings key/value pairs: + # 1. Add a new check below for the setting + # 2. Follow the same pattern as optimizer + # 3. 
Update matrix entries to include the new setting + + # Set PostgreSQL options if defined + PG_OPTS="" + if [[ "${{ matrix.pg_settings.optimizer != '' }}" == "true" ]]; then + PG_OPTS="$PG_OPTS -c optimizer=${{ matrix.pg_settings.optimizer }}" + fi + + if [[ "${{ matrix.pg_settings.default_table_access_method != '' }}" == "true" ]]; then + PG_OPTS="$PG_OPTS -c default_table_access_method=${{ matrix.pg_settings.default_table_access_method }}" + fi + + # Read configs into array + IFS=' ' read -r -a configs <<< "${{ join(matrix.make_configs, ' ') }}" + + echo "=== Starting test execution for ${{ matrix.test }} ===" + echo "Number of configurations to execute: ${#configs[@]}" + echo "" + + # Execute each config separately + for ((i=0; i<${#configs[@]}; i++)); do + config="${configs[$i]}" + IFS=':' read -r dir target <<< "$config" + + echo "=== Executing configuration $((i+1))/${#configs[@]} ===" + echo "Make command: make -C $dir $target" + echo "Environment:" + echo "- PGOPTIONS: ${PG_OPTS}" + + # Create unique log file for this configuration + config_log="build-logs/details/make-${{ matrix.test }}-config$i.log" + + # Clean up any existing core files + echo "Cleaning up existing core files..." + rm -f /tmp/cloudberry-cores/core-* + + # Execute test script with proper environment setup + if ! 
time su - gpadmin -c "cd ${SRC_DIR} && \ + MAKE_NAME='${{ matrix.test }}-config$i' \ + MAKE_TARGET='$target' \ + MAKE_DIRECTORY='-C $dir' \ + PGOPTIONS='${PG_OPTS}' \ + SRC_DIR='${SRC_DIR}' \ + ${SRC_DIR}/devops/build/automation/cloudberry/scripts/test-cloudberry.sh" \ + 2>&1 | tee "$config_log"; then + echo "::warning::Test execution failed for configuration $((i+1)): make -C $dir $target" + overall_status=1 + fi + + # Check for results directory + results_dir="${dir}/results" + + if [[ -d "$results_dir" ]]; then + echo "-----------------------------------------" | tee -a build-logs/details/make-${{ matrix.test }}-config$i-results.log + echo "Found results directory: $results_dir" | tee -a build-logs/details/make-${{ matrix.test }}-config$i-results.log + echo "Contents of results directory:" | tee -a build-logs/details/make-${{ matrix.test }}-config$i-results.log + + find "$results_dir" -type f -ls >> "$log_file" 2>&1 | tee -a build-logs/details/make-${{ matrix.test }}-config$i-results.log + echo "-----------------------------------------" | tee -a build-logs/details/make-${{ matrix.test }}-config$i-results.log + else + echo "-----------------------------------------" + echo "Results directory $results_dir does not exit" + echo "-----------------------------------------" + fi + + # Analyze any core files generated by this test configuration + echo "Analyzing core files for configuration ${{ matrix.test }}-config$i..." + test_id="${{ matrix.test }}-config$i" + + # List the cores directory + echo "-----------------------------------------" + echo "Cores directory: /tmp/cloudberry-cores" + echo "Contents of cores directory:" + ls -Rl "/tmp/cloudberry-cores" + echo "-----------------------------------------" + + "${SRC_DIR}"/devops/build/automation/cloudberry/scripts/analyze_core_dumps.sh "$test_id" + core_analysis_rc=$? 
+ case "$core_analysis_rc" in + 0) echo "No core dumps found for this configuration" ;; + 1) echo "Core dumps were found and analyzed successfully" ;; + 2) echo "::warning::Issues encountered during core dump analysis" ;; + *) echo "::error::Unexpected return code from core dump analysis: $core_analysis_rc" ;; + esac + + echo "Log file: $config_log" + echo "=== End configuration $((i+1)) execution ===" + echo "" + done + + echo "=== Test execution completed ===" + echo "Log files:" + ls -l build-logs/details/ + + # Store number of configurations for parsing step + echo "NUM_CONFIGS=${#configs[@]}" >> "$GITHUB_ENV" + + # Report overall status + if [ $overall_status -eq 0 ]; then + echo "All test executions completed successfully" + else + echo "::warning::Some test executions failed, check individual logs for details" + fi + + exit $overall_status + + - name: "Parse Test Results: ${{ matrix.test }}" + id: test-results + if: always() && needs.check-skip.outputs.should_skip != 'true' + env: + SRC_DIR: ${{ github.workspace }} + shell: bash {0} + run: | + set -o pipefail + + overall_status=0 + + # Get configs array to create context for results + IFS=' ' read -r -a configs <<< "${{ join(matrix.make_configs, ' ') }}" + + echo "=== Starting results parsing for ${{ matrix.test }} ===" + echo "Number of configurations to parse: ${#configs[@]}" + echo "" + + # Parse each configuration's results independently + for ((i=0; i "test_results.$i.txt" + overall_status=1 + continue + fi + + # Parse this configuration's results + + MAKE_NAME="${{ matrix.test }}-config$i" \ + "${SRC_DIR}"/devops/build/automation/cloudberry/scripts/parse-test-results.sh "$config_log" + status_code=$? 
+ + { + echo "SUITE_NAME=${{ matrix.test }}" + echo "DIR=${dir}" + echo "TARGET=${target}" + } >> test_results.txt + + # Process return code + case $status_code in + 0) # All tests passed + echo "All tests passed successfully" + if [ -f test_results.txt ]; then + (echo "MAKE_COMMAND=\"make -C $dir $target\""; cat test_results.txt) | tee "test_results.${{ matrix.test }}.$i.txt" + rm test_results.txt + fi + ;; + 1) # Tests failed but parsed successfully + echo "Test failures detected but properly parsed" + if [ -f test_results.txt ]; then + (echo "MAKE_COMMAND=\"make -C $dir $target\""; cat test_results.txt) | tee "test_results.${{ matrix.test }}.$i.txt" + rm test_results.txt + fi + overall_status=1 + ;; + 2) # Parse error or missing file + echo "::warning::Could not parse test results properly for configuration $((i+1))" + { + echo "MAKE_COMMAND=\"make -C $dir $target\"" + echo "STATUS=parse_error" + echo "TOTAL_TESTS=0" + echo "FAILED_TESTS=0" + echo "PASSED_TESTS=0" + echo "IGNORED_TESTS=0" + } | tee "test_results.${{ matrix.test }}.$i.txt" + overall_status=1 + ;; + *) # Unexpected error + echo "::warning::Unexpected error during test results parsing for configuration $((i+1))" + { + echo "MAKE_COMMAND=\"make -C $dir $target\"" + echo "STATUS=unknown_error" + echo "TOTAL_TESTS=0" + echo "FAILED_TESTS=0" + echo "PASSED_TESTS=0" + echo "IGNORED_TESTS=0" + } | tee "test_results.${{ matrix.test }}.$i.txt" + overall_status=1 + ;; + esac + + echo "Results stored in test_results.$i.txt" + echo "=== End parsing for configuration $((i+1)) ===" + echo "" + done + + # Report status of results files + echo "=== Results file status ===" + echo "Generated results files:" + for ((i=0; i> "$GITHUB_STEP_SUMMARY" || true + + - name: Upload test logs + if: always() + uses: actions/upload-artifact@v4 + with: + name: test-logs-${{ matrix.test }}-rocky8-${{ needs.build.outputs.build_timestamp }} + path: | + build-logs/ + retention-days: ${{ env.LOG_RETENTION_DAYS }} + + - name: Upload 
Test Metadata + if: always() + uses: actions/upload-artifact@v4 + with: + name: test-metadata-${{ matrix.test }}-rocky8 + path: | + test_results*.txt + retention-days: ${{ env.LOG_RETENTION_DAYS }} + + - name: Upload test results files + uses: actions/upload-artifact@v4 + with: + name: results-${{ matrix.test }}-rocky8-${{ needs.build.outputs.build_timestamp }} + path: | + **/regression.out + **/regression.diffs + **/results/ + retention-days: ${{ env.LOG_RETENTION_DAYS }} + + - name: Upload test regression logs + if: failure() || cancelled() + uses: actions/upload-artifact@v4 + with: + name: regression-logs-${{ matrix.test }}-rocky8-${{ needs.build.outputs.build_timestamp }} + path: | + **/regression.out + **/regression.diffs + **/results/ + gpAux/gpdemo/datadirs/standby/log/ + gpAux/gpdemo/datadirs/qddir/demoDataDir-1/log/ + gpAux/gpdemo/datadirs/dbfast1/demoDataDir0/log/ + gpAux/gpdemo/datadirs/dbfast2/demoDataDir1/log/ + gpAux/gpdemo/datadirs/dbfast3/demoDataDir2/log/ + gpAux/gpdemo/datadirs/dbfast_mirror1/demoDataDir0/log/ + gpAux/gpdemo/datadirs/dbfast_mirror2/demoDataDir1/log/ + gpAux/gpdemo/datadirs/dbfast_mirror3/demoDataDir2/log/ + retention-days: ${{ env.LOG_RETENTION_DAYS }} + + ## ====================================================================== + ## Job: report + ## ====================================================================== + + report: + name: Generate Apache Cloudberry Build Report (Rocky 8) + needs: [check-skip, build, prepare-test-matrix, rpm-install-test, test] + if: always() + runs-on: ubuntu-22.04 + steps: + - name: Generate Final Report + run: | + { + echo "# Apache Cloudberry Build Pipeline Report (Rocky 8)" + + if [[ "${{ needs.check-skip.outputs.should_skip }}" == "true" ]]; then + echo "## CI Skip Status" + echo "✅ CI checks skipped via skip flag" + echo "- Completion Time: $(date -u +'%Y-%m-%d %H:%M:%S UTC')" + else + echo "## Job Status" + echo "- Build Job: ${{ needs.build.result }}" + echo "- Test Job: ${{ 
needs.test.result }}" + echo "- Completion Time: $(date -u +'%Y-%m-%d %H:%M:%S UTC')" + + if [[ "${{ needs.build.result }}" == "success" && "${{ needs.test.result }}" == "success" ]]; then + echo "✅ Pipeline completed successfully" + else + echo "⚠️ Pipeline completed with failures" + + if [[ "${{ needs.build.result }}" != "success" ]]; then + echo "### Build Job Failure" + echo "Check build logs for details" + fi + + if [[ "${{ needs.test.result }}" != "success" ]]; then + echo "### Test Job Failure" + echo "Check test logs and regression files for details" + fi + fi + fi + } >> "$GITHUB_STEP_SUMMARY" + + - name: Notify on failure + if: | + needs.check-skip.outputs.should_skip != 'true' && + (needs.build.result != 'success' || needs.test.result != 'success') + run: | + echo "::error::Build/Test pipeline failed! Check job summaries and logs for details" + echo "Timestamp: $(date -u +'%Y-%m-%d %H:%M:%S UTC')" + echo "Build Result: ${{ needs.build.result }}" + echo "Test Result: ${{ needs.test.result }}" diff --git a/.github/workflows/build-cloudberry.yml b/.github/workflows/build-cloudberry.yml index 9d44d06bbdc..f4104e9ced1 100644 --- a/.github/workflows/build-cloudberry.yml +++ b/.github/workflows/build-cloudberry.yml @@ -309,10 +309,10 @@ jobs: }, {"test":"ic-gpcontrib", "make_configs":["gpcontrib/orafce:installcheck", - "gpcontrib/pxf_fdw:installcheck", "gpcontrib/zstd:installcheck", "gpcontrib/gp_sparse_vector:installcheck", - "gpcontrib/gp_toolkit:installcheck"] + "gpcontrib/gp_toolkit:installcheck", + "gpcontrib/gp_exttable_fdw:installcheck"] }, {"test":"ic-fixme", "make_configs":["src/test/regress:installcheck-fixme"], diff --git a/.github/workflows/build-deb-cloudberry.yml b/.github/workflows/build-deb-cloudberry.yml index be28fff9e77..705e82ca87f 100644 --- a/.github/workflows/build-deb-cloudberry.yml +++ b/.github/workflows/build-deb-cloudberry.yml @@ -248,7 +248,6 @@ jobs: }, {"test":"ic-deb-gpcontrib", "make_configs":["gpcontrib/orafce:installcheck", - 
"gpcontrib/pxf_fdw:installcheck", "gpcontrib/zstd:installcheck", "gpcontrib/gp_sparse_vector:installcheck", "gpcontrib/gp_toolkit:installcheck"] @@ -555,8 +554,15 @@ jobs: exit 1 fi - ARCH="amd64" - CBDB_PKG_VERSION=${CBDB_VERSION}-${BUILD_NUMBER}-$(git --git-dir=.git rev-list HEAD --count).$(git --git-dir=.git rev-parse --short HEAD) + ARCH=$(dpkg --print-architecture) + # Detect OS distribution (e.g., ubuntu22.04, debian12) + if [ -f /etc/os-release ]; then + . /etc/os-release + OS_DISTRO=$(echo "${ID}${VERSION_ID}" | tr '[:upper:]' '[:lower:]') + else + OS_DISTRO="unknown" + fi + CBDB_PKG_VERSION=${CBDB_VERSION}-${BUILD_NUMBER}-${OS_DISTRO} echo "Produced artifacts" ls -l ../ diff --git a/.gitignore b/.gitignore index 44a59792420..5c21989c4ab 100644 --- a/.gitignore +++ b/.gitignore @@ -45,7 +45,6 @@ config.status.lineno autom4te.cache configure.lineno Debug -Release pgsql.sln cscope.* build.sh diff --git a/configure b/configure index 567eaba5ec8..58481caefc8 100755 --- a/configure +++ b/configure @@ -698,6 +698,7 @@ BISON MKDIR_P LN_S TAR +USE_MDBLOCALES install_bin INSTALL_DATA INSTALL_SCRIPT @@ -946,6 +947,7 @@ with_rt with_libcurl with_apr_config with_gnu_ld +with_mdblocales with_ssl with_openssl enable_openssl_redirect @@ -1705,6 +1707,7 @@ Optional Packages: --without-libcurl do not use libcurl --with-apr-config=PATH path to apr-1-config utility --with-gnu-ld assume the C compiler uses GNU ld [default=no] + --without-mdblocales build without MDB locales --with-ssl=LIB use LIB for SSL/TLS support (openssl) --with-openssl obsolete spelling of --with-ssl=openssl @@ -2921,7 +2924,6 @@ PG_PACKAGE_VERSION=14.4 - ac_aux_dir= for ac_dir in config "$srcdir"/config; do if test -f "$ac_dir/install-sh"; then @@ -12220,6 +12222,38 @@ case $INSTALL in esac +# +# MDB locales +# + + + + +# Check whether --with-mdblocales was given. 
+if test "${with_mdblocales+set}" = set; then : + withval=$with_mdblocales; + case $withval in + yes) + +$as_echo "#define USE_MDBLOCALES 1" >>confdefs.h + + ;; + no) + : + ;; + *) + as_fn_error $? "no argument expected for --with-mdblocales option" "$LINENO" 5 + ;; + esac + +else + with_mdblocales=no + +fi + + + + if test -z "$TAR"; then for ac_prog in tar do @@ -12856,6 +12890,56 @@ $as_echo "${python_libspec} ${python_additional_libs}" >&6; } +fi + +if test "$with_mdblocales" = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for mdb_setlocale in -lmdblocales" >&5 +$as_echo_n "checking for mdb_setlocale in -lmdblocales... " >&6; } +if ${ac_cv_lib_mdblocales_mdb_setlocale+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lmdblocales $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char mdb_setlocale (); +int +main () +{ +return mdb_setlocale (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_mdblocales_mdb_setlocale=yes +else + ac_cv_lib_mdblocales_mdb_setlocale=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_mdblocales_mdb_setlocale" >&5 +$as_echo "$ac_cv_lib_mdblocales_mdb_setlocale" >&6; } +if test "x$ac_cv_lib_mdblocales_mdb_setlocale" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBMDBLOCALES 1 +_ACEOF + + LIBS="-lmdblocales $LIBS" + +else + as_fn_error $? 
"mdblocales library not found" "$LINENO" 5 +fi + fi if test x"$cross_compiling" = x"yes" && test -z "$with_system_tzdata"; then @@ -17077,6 +17161,17 @@ fi done +fi + +if test "$with_mdblocales" = yes; then + ac_fn_c_check_header_mongrel "$LINENO" "mdblocales.h" "ac_cv_header_mdblocales_h" "$ac_includes_default" +if test "x$ac_cv_header_mdblocales_h" = xyes; then : + +else + as_fn_error $? "mdblocales header not found." "$LINENO" 5 +fi + + fi if test "$with_gssapi" = yes ; then diff --git a/configure.ac b/configure.ac index 271a4d57a05..46ab6f50d21 100644 --- a/configure.ac +++ b/configure.ac @@ -1462,6 +1462,14 @@ case $INSTALL in esac AC_SUBST(install_bin) +# +# MDB locales +# + +PGAC_ARG_BOOL(with, mdblocales, yes, [build without MDB locales], + [AC_DEFINE([USE_MDBLOCALES], 1, [Define to 1 to build with MDB locales. (--with-mdblocales)])]) +AC_SUBST(USE_MDBLOCALES) + PGAC_PATH_PROGS(TAR, tar) AC_PROG_LN_S AC_PROG_MKDIR_P @@ -1620,6 +1628,11 @@ failure. It is possible the compiler isn't looking in the proper directory. 
Use --without-zlib to disable zlib support.])]) fi +if test "$with_mdblocales" = yes; then + AC_CHECK_LIB(mdblocales, mdb_setlocale, [], + [AC_MSG_ERROR([mdblocales library not found])]) +fi + if test "$enable_external_fts" = yes; then AC_CHECK_LIB(jansson, jansson_version_str, [], [AC_MSG_ERROR([jansson library not found or version is too old, version must >= 2.13])]) @@ -1999,6 +2012,10 @@ if test "$with_lz4" = yes; then AC_CHECK_HEADERS(lz4.h, [], [AC_MSG_ERROR([lz4.h header file is required for LZ4])]) fi +if test "$with_mdblocales" = yes; then + AC_CHECK_HEADER(mdblocales.h, [], [AC_MSG_ERROR([mdblocales header not found.])]) +fi + if test "$with_gssapi" = yes ; then AC_CHECK_HEADERS(gssapi/gssapi.h, [], [AC_CHECK_HEADERS(gssapi.h, [], [AC_MSG_ERROR([gssapi.h header file is required for GSSAPI])])]) diff --git a/contrib/citext/Makefile b/contrib/citext/Makefile index b9b43ee787f..f8bb56c0975 100644 --- a/contrib/citext/Makefile +++ b/contrib/citext/Makefile @@ -11,7 +11,7 @@ DATA = citext--1.4.sql \ citext--1.0--1.1.sql PGFILEDESC = "citext - case-insensitive character string data type" -REGRESS = citext +REGRESS = create_index_acl citext REGRESS_OPTS += --init-file=$(top_srcdir)/src/test/regress/init_file ifdef USE_PGXS diff --git a/contrib/citext/expected/create_index_acl.out b/contrib/citext/expected/create_index_acl.out new file mode 100644 index 00000000000..28602ccfdf3 --- /dev/null +++ b/contrib/citext/expected/create_index_acl.out @@ -0,0 +1,80 @@ +-- Each DefineIndex() ACL check uses either the original userid or the table +-- owner userid; see its header comment. Here, confirm that DefineIndex() +-- uses its original userid where necessary. The test works by creating +-- indexes that refer to as many sorts of objects as possible, with the table +-- owner having as few applicable privileges as possible. 
(The privileges.sql +-- regress_sro_user tests look for the opposite defect; they confirm that +-- DefineIndex() uses the table owner userid where necessary.) +-- Don't override tablespaces; this version lacks allow_in_place_tablespaces. +BEGIN; +SET allow_segment_DML TO true; +CREATE ROLE regress_minimal; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE SCHEMA s; +CREATE EXTENSION citext SCHEMA s; +-- Revoke all conceivably-relevant ACLs within the extension. The system +-- doesn't check all these ACLs, but this will provide some coverage if that +-- ever changes. +REVOKE ALL ON TYPE s.citext FROM PUBLIC; +REVOKE ALL ON FUNCTION s.citext_pattern_lt FROM PUBLIC; +REVOKE ALL ON FUNCTION s.citext_pattern_le FROM PUBLIC; +REVOKE ALL ON FUNCTION s.citext_eq FROM PUBLIC; +REVOKE ALL ON FUNCTION s.citext_pattern_ge FROM PUBLIC; +REVOKE ALL ON FUNCTION s.citext_pattern_gt FROM PUBLIC; +REVOKE ALL ON FUNCTION s.citext_pattern_cmp FROM PUBLIC; +-- Functions sufficient for making an index column that has the side effect of +-- changing search_path at expression planning time. +CREATE FUNCTION public.setter() RETURNS bool VOLATILE + LANGUAGE SQL AS $$SET search_path = s; SELECT true$$; +CREATE FUNCTION s.const() RETURNS bool IMMUTABLE + LANGUAGE SQL AS $$SELECT public.setter()$$; +CREATE FUNCTION s.index_this_expr(s.citext, bool) RETURNS s.citext IMMUTABLE + LANGUAGE SQL AS $$SELECT $1$$; +REVOKE ALL ON FUNCTION public.setter FROM PUBLIC; +REVOKE ALL ON FUNCTION s.const FROM PUBLIC; +REVOKE ALL ON FUNCTION s.index_this_expr FROM PUBLIC; +-- Even for an empty table, expression planning calls s.const & public.setter. +GRANT EXECUTE ON FUNCTION public.setter TO regress_minimal; +GRANT EXECUTE ON FUNCTION s.const TO regress_minimal; +-- Function for index predicate. 
+CREATE FUNCTION s.index_row_if(s.citext) RETURNS bool IMMUTABLE + LANGUAGE SQL AS $$SELECT $1 IS NOT NULL$$; +REVOKE ALL ON FUNCTION s.index_row_if FROM PUBLIC; +-- Even for an empty table, CREATE INDEX checks ii_Predicate permissions. +GRANT EXECUTE ON FUNCTION s.index_row_if TO regress_minimal; +-- Non-extension, non-function objects. +CREATE COLLATION s.coll (LOCALE="C"); +CREATE TABLE s.x (y s.citext) DISTRIBUTED REPLICATED; +ALTER TABLE s.x OWNER TO regress_minimal; +-- Empty-table DefineIndex() +CREATE UNIQUE INDEX u0rows ON s.x USING btree + ((s.index_this_expr(y, s.const())) COLLATE s.coll s.citext_pattern_ops) + WHERE s.index_row_if(y); +ALTER TABLE s.x ADD CONSTRAINT e0rows EXCLUDE USING btree + ((s.index_this_expr(y, s.const())) COLLATE s.coll WITH s.=) + WHERE (s.index_row_if(y)); +-- Make the table nonempty. +INSERT INTO s.x VALUES ('foo'), ('bar'); +-- If the INSERT runs the planner on index expressions, a search_path change +-- survives. As of 2022-06, the INSERT reuses a cached plan. It does so even +-- under debug_discard_caches, since each index is new-in-transaction. If +-- future work changes a cache lifecycle, this RESET may become necessary. +RESET search_path; +-- For a nonempty table, owner needs permissions throughout ii_Expressions. +GRANT EXECUTE ON FUNCTION s.index_this_expr TO regress_minimal; +CREATE UNIQUE INDEX u2rows ON s.x USING btree + ((s.index_this_expr(y, s.const())) COLLATE s.coll s.citext_pattern_ops) + WHERE s.index_row_if(y); +ALTER TABLE s.x ADD CONSTRAINT e2rows EXCLUDE USING btree + ((s.index_this_expr(y, s.const())) COLLATE s.coll WITH s.=) + WHERE (s.index_row_if(y)); +-- Shall not find s.coll via search_path, despite the s.const->public.setter +-- call having set search_path=s during expression planning. Suppress the +-- message itself, which depends on the database encoding. 
+\set VERBOSITY sqlstate +ALTER TABLE s.x ADD CONSTRAINT underqualified EXCLUDE USING btree + ((s.index_this_expr(y, s.const())) COLLATE coll WITH s.=) + WHERE (s.index_row_if(y)); +ERROR: 42704 +\set VERBOSITY default +ROLLBACK; diff --git a/contrib/citext/sql/create_index_acl.sql b/contrib/citext/sql/create_index_acl.sql new file mode 100644 index 00000000000..aedb9d625fc --- /dev/null +++ b/contrib/citext/sql/create_index_acl.sql @@ -0,0 +1,80 @@ +-- Each DefineIndex() ACL check uses either the original userid or the table +-- owner userid; see its header comment. Here, confirm that DefineIndex() +-- uses its original userid where necessary. The test works by creating +-- indexes that refer to as many sorts of objects as possible, with the table +-- owner having as few applicable privileges as possible. (The privileges.sql +-- regress_sro_user tests look for the opposite defect; they confirm that +-- DefineIndex() uses the table owner userid where necessary.) + +-- Don't override tablespaces; this version lacks allow_in_place_tablespaces. + +BEGIN; +SET allow_segment_DML TO true; +CREATE ROLE regress_minimal; +CREATE SCHEMA s; +CREATE EXTENSION citext SCHEMA s; +-- Revoke all conceivably-relevant ACLs within the extension. The system +-- doesn't check all these ACLs, but this will provide some coverage if that +-- ever changes. +REVOKE ALL ON TYPE s.citext FROM PUBLIC; +REVOKE ALL ON FUNCTION s.citext_pattern_lt FROM PUBLIC; +REVOKE ALL ON FUNCTION s.citext_pattern_le FROM PUBLIC; +REVOKE ALL ON FUNCTION s.citext_eq FROM PUBLIC; +REVOKE ALL ON FUNCTION s.citext_pattern_ge FROM PUBLIC; +REVOKE ALL ON FUNCTION s.citext_pattern_gt FROM PUBLIC; +REVOKE ALL ON FUNCTION s.citext_pattern_cmp FROM PUBLIC; +-- Functions sufficient for making an index column that has the side effect of +-- changing search_path at expression planning time. 
+CREATE FUNCTION public.setter() RETURNS bool VOLATILE + LANGUAGE SQL AS $$SET search_path = s; SELECT true$$; +CREATE FUNCTION s.const() RETURNS bool IMMUTABLE + LANGUAGE SQL AS $$SELECT public.setter()$$; +CREATE FUNCTION s.index_this_expr(s.citext, bool) RETURNS s.citext IMMUTABLE + LANGUAGE SQL AS $$SELECT $1$$; +REVOKE ALL ON FUNCTION public.setter FROM PUBLIC; +REVOKE ALL ON FUNCTION s.const FROM PUBLIC; +REVOKE ALL ON FUNCTION s.index_this_expr FROM PUBLIC; +-- Even for an empty table, expression planning calls s.const & public.setter. +GRANT EXECUTE ON FUNCTION public.setter TO regress_minimal; +GRANT EXECUTE ON FUNCTION s.const TO regress_minimal; +-- Function for index predicate. +CREATE FUNCTION s.index_row_if(s.citext) RETURNS bool IMMUTABLE + LANGUAGE SQL AS $$SELECT $1 IS NOT NULL$$; +REVOKE ALL ON FUNCTION s.index_row_if FROM PUBLIC; +-- Even for an empty table, CREATE INDEX checks ii_Predicate permissions. +GRANT EXECUTE ON FUNCTION s.index_row_if TO regress_minimal; +-- Non-extension, non-function objects. +CREATE COLLATION s.coll (LOCALE="C"); +CREATE TABLE s.x (y s.citext) DISTRIBUTED REPLICATED; +ALTER TABLE s.x OWNER TO regress_minimal; +-- Empty-table DefineIndex() +CREATE UNIQUE INDEX u0rows ON s.x USING btree + ((s.index_this_expr(y, s.const())) COLLATE s.coll s.citext_pattern_ops) + WHERE s.index_row_if(y); +ALTER TABLE s.x ADD CONSTRAINT e0rows EXCLUDE USING btree + ((s.index_this_expr(y, s.const())) COLLATE s.coll WITH s.=) + WHERE (s.index_row_if(y)); +-- Make the table nonempty. +INSERT INTO s.x VALUES ('foo'), ('bar'); +-- If the INSERT runs the planner on index expressions, a search_path change +-- survives. As of 2022-06, the INSERT reuses a cached plan. It does so even +-- under debug_discard_caches, since each index is new-in-transaction. If +-- future work changes a cache lifecycle, this RESET may become necessary. +RESET search_path; +-- For a nonempty table, owner needs permissions throughout ii_Expressions. 
+GRANT EXECUTE ON FUNCTION s.index_this_expr TO regress_minimal; +CREATE UNIQUE INDEX u2rows ON s.x USING btree + ((s.index_this_expr(y, s.const())) COLLATE s.coll s.citext_pattern_ops) + WHERE s.index_row_if(y); +ALTER TABLE s.x ADD CONSTRAINT e2rows EXCLUDE USING btree + ((s.index_this_expr(y, s.const())) COLLATE s.coll WITH s.=) + WHERE (s.index_row_if(y)); +-- Shall not find s.coll via search_path, despite the s.const->public.setter +-- call having set search_path=s during expression planning. Suppress the +-- message itself, which depends on the database encoding. +\set VERBOSITY sqlstate +ALTER TABLE s.x ADD CONSTRAINT underqualified EXCLUDE USING btree + ((s.index_this_expr(y, s.const())) COLLATE coll WITH s.=) + WHERE (s.index_row_if(y)); +\set VERBOSITY default +ROLLBACK; diff --git a/contrib/pax_storage/src/cpp/storage/oper/pax_oper.cc b/contrib/pax_storage/src/cpp/storage/oper/pax_oper.cc index 44d4e49d7f8..d08c7a445b9 100644 --- a/contrib/pax_storage/src/cpp/storage/oper/pax_oper.cc +++ b/contrib/pax_storage/src/cpp/storage/oper/pax_oper.cc @@ -25,6 +25,7 @@ *------------------------------------------------------------------------- */ +#include "common/mdb_locale.h" #include "storage/oper/pax_oper.h" #include "comm/cbdb_wrappers.h" @@ -588,9 +589,9 @@ static inline bool LocaleIsC(Oid collation) { return (bool)result; } - localeptr = setlocale(LC_COLLATE, NULL); + localeptr = SETLOCALE(LC_COLLATE, NULL); CBDB_CHECK(localeptr, cbdb::CException::ExType::kExTypeCError, - fmt("Invalid locale, fail to `setlocale`, errno: %d", errno)); + fmt("Invalid locale, fail to `SETLOCALE`, errno: %d", errno)); if (strcmp(localeptr, "C") == 0 || // cut line strcmp(localeptr, "POSIX") == 0) { diff --git a/contrib/pax_storage/src/test/regress/expected/create_function_3.out b/contrib/pax_storage/src/test/regress/expected/create_function_3.out index 8380df1591f..7842a3c1c82 100644 --- a/contrib/pax_storage/src/test/regress/expected/create_function_3.out +++ 
b/contrib/pax_storage/src/test/regress/expected/create_function_3.out @@ -166,10 +166,10 @@ SET SESSION AUTHORIZATION regress_unpriv_user; SET search_path TO temp_func_test, public; ALTER FUNCTION functest_E_1(int) NOT LEAKPROOF; ALTER FUNCTION functest_E_2(int) LEAKPROOF; -ERROR: only superuser can define a leakproof function +ERROR: only superuser or mdb_admin can define a leakproof function CREATE FUNCTION functest_E_3(int) RETURNS bool LANGUAGE 'sql' LEAKPROOF AS 'SELECT $1 < 200'; -- fail -ERROR: only superuser can define a leakproof function +ERROR: only superuser or mdb_admin can define a leakproof function RESET SESSION AUTHORIZATION; -- -- CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT diff --git a/contrib/pax_storage/src/test/regress/expected/create_function_3_optimizer.out b/contrib/pax_storage/src/test/regress/expected/create_function_3_optimizer.out index 3ae669d518a..3256709e1aa 100644 --- a/contrib/pax_storage/src/test/regress/expected/create_function_3_optimizer.out +++ b/contrib/pax_storage/src/test/regress/expected/create_function_3_optimizer.out @@ -166,10 +166,10 @@ SET SESSION AUTHORIZATION regress_unpriv_user; SET search_path TO temp_func_test, public; ALTER FUNCTION functest_E_1(int) NOT LEAKPROOF; ALTER FUNCTION functest_E_2(int) LEAKPROOF; -ERROR: only superuser can define a leakproof function +ERROR: only superuser or mdb_admin can define a leakproof function CREATE FUNCTION functest_E_3(int) RETURNS bool LANGUAGE 'sql' LEAKPROOF AS 'SELECT $1 < 200'; -- fail -ERROR: only superuser can define a leakproof function +ERROR: only superuser or mdb_admin can define a leakproof function RESET SESSION AUTHORIZATION; -- -- CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT diff --git a/contrib/pax_storage/src/test/regress/expected/create_view.out b/contrib/pax_storage/src/test/regress/expected/create_view.out index d35d3a61066..077dc1afc91 100644 --- a/contrib/pax_storage/src/test/regress/expected/create_view.out +++ 
b/contrib/pax_storage/src/test/regress/expected/create_view.out @@ -1551,16 +1551,25 @@ select * from tt14v; begin; -- this perhaps should be rejected, but it isn't: alter table tt14t drop column f3; --- f3 is still in the view ... +-- column f3 is still in the view, sort of ... select pg_get_viewdef('tt14v', true); - pg_get_viewdef --------------------------------- - SELECT t.f1, + - t.f3, + - t.f4 + - FROM tt14f() t(f1, f3, f4); + pg_get_viewdef +--------------------------------- + SELECT t.f1, + + t."?dropped?column?" AS f3,+ + t.f4 + + FROM tt14f() t(f1, f4); (1 row) +-- ... and you can even EXPLAIN it ... +explain (verbose, costs off) select * from tt14v; + QUERY PLAN +---------------------------------------- + Function Scan on testviewschm2.tt14f t + Output: t.f1, t.f3, t.f4 + Function Call: tt14f() +(3 rows) + -- but will fail at execution select f1, f4 from tt14v; f1 | f4 diff --git a/contrib/pax_storage/src/test/regress/expected/create_view_optimizer.out b/contrib/pax_storage/src/test/regress/expected/create_view_optimizer.out index de91254a5ba..e19d51b0069 100755 --- a/contrib/pax_storage/src/test/regress/expected/create_view_optimizer.out +++ b/contrib/pax_storage/src/test/regress/expected/create_view_optimizer.out @@ -1550,16 +1550,25 @@ select * from tt14v; begin; -- this perhaps should be rejected, but it isn't: alter table tt14t drop column f3; --- f3 is still in the view ... +-- column f3 is still in the view, sort of ... select pg_get_viewdef('tt14v', true); - pg_get_viewdef --------------------------------- - SELECT t.f1, + - t.f3, + - t.f4 + - FROM tt14f() t(f1, f3, f4); + pg_get_viewdef +--------------------------------- + SELECT t.f1, + + t."?dropped?column?" AS f3,+ + t.f4 + + FROM tt14f() t(f1, f4); (1 row) +-- ... and you can even EXPLAIN it ... 
+explain (verbose, costs off) select * from tt14v; + QUERY PLAN +---------------------------------------- + Function Scan on testviewschm2.tt14f t + Output: t.f1, t.f3, t.f4 + Function Call: tt14f() +(3 rows) + -- but will fail at execution select f1, f4 from tt14v; f1 | f4 diff --git a/contrib/pax_storage/src/test/regress/sql/create_view.sql b/contrib/pax_storage/src/test/regress/sql/create_view.sql index e1b013fe7a5..5cd91e5189e 100644 --- a/contrib/pax_storage/src/test/regress/sql/create_view.sql +++ b/contrib/pax_storage/src/test/regress/sql/create_view.sql @@ -533,8 +533,10 @@ begin; -- this perhaps should be rejected, but it isn't: alter table tt14t drop column f3; --- f3 is still in the view ... +-- column f3 is still in the view, sort of ... select pg_get_viewdef('tt14v', true); +-- ... and you can even EXPLAIN it ... +explain (verbose, costs off) select * from tt14v; -- but will fail at execution select f1, f4 from tt14v; select * from tt14v; diff --git a/contrib/pgcrypto/expected/fips_2.out b/contrib/pgcrypto/expected/fips_2.out index 51957b898da..19ba00f8bf8 100644 --- a/contrib/pgcrypto/expected/fips_2.out +++ b/contrib/pgcrypto/expected/fips_2.out @@ -55,7 +55,7 @@ SELECT 'Test gen_salt : EXPECTED FAIL FIPS' as comment; (1 row) UPDATE fipstest SET salt = gen_salt('md5'); -ERROR: requested functionality not allowed in FIPS mode (pgcrypto.c:213) +ERROR: requested functionality not allowed in FIPS mode (pgcrypto.c:XXX) SELECT 'Test crypt : EXPECTED FAIL FIPS' as comment; comment --------------------------------- @@ -63,9 +63,9 @@ SELECT 'Test crypt : EXPECTED FAIL FIPS' as comment; (1 row) UPDATE fipstest SET res = crypt(data, salt); -ERROR: requested functionality not allowed in FIPS mode (pgcrypto.c:266) +ERROR: requested functionality not allowed in FIPS mode (pgcrypto.c:XXX) SELECT res = crypt(data, res) AS "worked" FROM fipstest; -ERROR: requested functionality not allowed in FIPS mode (pgcrypto.c:266) +ERROR: requested functionality not allowed in 
FIPS mode (pgcrypto.c:XXX) SELECT 'Test pgp : EXPECTED PASS' as comment; comment -------------------------- @@ -73,7 +73,7 @@ SELECT 'Test pgp : EXPECTED PASS' as comment; (1 row) select pgp_sym_decrypt(pgp_sym_encrypt('santa clause', 'mypass', 'cipher-algo=aes256'), 'mypass'); -ERROR: requested functionality not allowed in FIPS mode (openssl.c:772) +ERROR: requested functionality not allowed in FIPS mode (openssl.c:XXX) SELECT 'Test pgp : EXPECTED FAIL FIPS' as comment; comment ------------------------------- @@ -89,7 +89,7 @@ SELECT 'Test raw encrypt : EXPECTED PASS' as comment; (1 row) SELECT encrypt('santa claus', 'mypass', 'aes') as raw_aes; -ERROR: requested functionality not allowed in FIPS mode (openssl.c:772) +ERROR: requested functionality not allowed in FIPS mode (openssl.c:XXX) SELECT 'Test raw encrypt : EXPECTED FAIL FIPS' as comment; comment --------------------------------------- @@ -97,5 +97,5 @@ SELECT 'Test raw encrypt : EXPECTED FAIL FIPS' as comment; (1 row) SELECT encrypt('santa claus', 'mypass', 'bf') as raw_blowfish; -ERROR: requested functionality not allowed in FIPS mode (openssl.c:772) +ERROR: requested functionality not allowed in FIPS mode (openssl.c:XXX) DROP TABLE fipstest; diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c index fbbd867c239..58599c7aeaa 100644 --- a/contrib/postgres_fdw/postgres_fdw.c +++ b/contrib/postgres_fdw/postgres_fdw.c @@ -7167,7 +7167,7 @@ fetch_more_data_begin(AsyncRequest *areq) snprintf(sql, sizeof(sql), "FETCH %d FROM c%u", fsstate->fetch_size, fsstate->cursor_number); - if (PQsendQuery(fsstate->conn, sql) < 0) + if (!PQsendQuery(fsstate->conn, sql)) pgfdw_report_error(ERROR, NULL, fsstate->conn, false, fsstate->query); /* Remember that the request is in process */ diff --git a/devops/build/automation/cloudberry/scripts/configure-cloudberry.sh b/devops/build/automation/cloudberry/scripts/configure-cloudberry.sh index bc046695032..32a9f3d8657 100755 --- 
a/devops/build/automation/cloudberry/scripts/configure-cloudberry.sh +++ b/devops/build/automation/cloudberry/scripts/configure-cloudberry.sh @@ -62,6 +62,12 @@ # --enable-cassert # --enable-debug-extensions # +# ENABLE_MDBLOCALES - Enable custom locales (true/false, defaults to +# false) +# +# When true, add option: +# --with-mdblocales +# # Prerequisites: # - System dependencies must be installed: # * xerces-c development files @@ -138,6 +144,11 @@ if [ "${ENABLE_DEBUG:-false}" = "true" ]; then --enable-debug-extensions" fi +CONFIGURE_MDBLOCALES_OPTS="--without-mdblocales" +if [ "${ENABLE_MDBLOCALES:-false}" = "true" ]; then + CONFIGURE_MDBLOCALES_OPTS="--with-mdblocales" +fi + # Configure build log_section "Configure" execute_cmd ./configure --prefix=${BUILD_DESTINATION} \ @@ -148,7 +159,7 @@ execute_cmd ./configure --prefix=${BUILD_DESTINATION} \ --enable-orafce \ --enable-orca \ --enable-pax \ - --enable-pxf \ + --disable-pxf \ --enable-tap-tests \ ${CONFIGURE_DEBUG_OPTS} \ --with-gssapi \ @@ -164,6 +175,7 @@ execute_cmd ./configure --prefix=${BUILD_DESTINATION} \ --with-ssl=openssl \ --with-openssl \ --with-uuid=e2fs \ + ${CONFIGURE_MDBLOCALES_OPTS} \ --with-includes=/usr/local/xerces-c/include \ --with-libraries=${BUILD_DESTINATION}/lib || exit 4 log_section_end "Configure" diff --git a/devops/build/packaging/deb/build-deb.sh b/devops/build/packaging/deb/build-deb.sh index 2e7312be53f..1f5aef2258a 100755 --- a/devops/build/packaging/deb/build-deb.sh +++ b/devops/build/packaging/deb/build-deb.sh @@ -124,7 +124,17 @@ if [ -z ${BUILD_USER+x} ]; then export BUILD_USER=github fi -export CBDB_PKG_VERSION=${CBDB_FULL_VERSION}-${BUILD_NUMBER}-$(git --git-dir=.git rev-list HEAD --count).$(git --git-dir=.git rev-parse --short HEAD) +# Detect OS distribution (e.g., ubuntu22.04, debian12) +if [ -z ${OS_DISTRO+x} ]; then + if [ -f /etc/os-release ]; then + . 
/etc/os-release + OS_DISTRO=$(echo "${ID}${VERSION_ID}" | tr '[:upper:]' '[:lower:]') + else + OS_DISTRO="unknown" + fi +fi + +export CBDB_PKG_VERSION=${CBDB_FULL_VERSION}-${BUILD_NUMBER}-${OS_DISTRO} # Check if required commands are available check_commands diff --git a/devops/build/packaging/deb/ubuntu22.04/control b/devops/build/packaging/deb/ubuntu22.04/control index 70e4eda77d3..4bc5d90b84d 100644 --- a/devops/build/packaging/deb/ubuntu22.04/control +++ b/devops/build/packaging/deb/ubuntu22.04/control @@ -43,7 +43,7 @@ Build-Depends: debhelper (>= 9), Package: apache-cloudberry-db-incubating Provides: apache-cloudberry-db -Architecture: amd64 +Architecture: any Depends: curl, cgroup-tools, iputils-ping, diff --git a/devops/build/packaging/deb/ubuntu22.04/rules b/devops/build/packaging/deb/ubuntu22.04/rules index 6213985b48c..cb387d209e6 100755 --- a/devops/build/packaging/deb/ubuntu22.04/rules +++ b/devops/build/packaging/deb/ubuntu22.04/rules @@ -19,7 +19,7 @@ include /usr/share/dpkg/default.mk dh $@ --parallel gpinstall: - make install + make install DESTDIR=${DEBIAN_DESTINATION} prefix= override_dh_auto_install: gpinstall # the staging directory for creating a debian is NOT the right GPHOME. 
@@ -43,7 +43,7 @@ override_dh_gencontrol: dh_gencontrol -- -v${CBDB_PKG_VERSION} -p${PACKAGE_CBDB} override_dh_shlibdeps: - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib/x86_64-linux-gnu/libfakeroot:${DEBIAN_DESTINATION}/lib dh_shlibdeps --dpkg-shlibdeps-params=--ignore-missing-info + LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib/$(DEB_HOST_MULTIARCH)/libfakeroot:${DEBIAN_DESTINATION}/lib dh_shlibdeps --dpkg-shlibdeps-params=--ignore-missing-info clean_dev_local: rm -rf ${DEBIAN_DESTINATION} diff --git a/devops/release/cloudberry-release.sh b/devops/release/cloudberry-release.sh index 5fd579b481e..641d435f26a 100755 --- a/devops/release/cloudberry-release.sh +++ b/devops/release/cloudberry-release.sh @@ -32,11 +32,13 @@ # - Verifies Git identity (user.name and user.email) prior to tagging # - Creates a BUILD_NUMBER file (currently hardcoded as 1) in the release tarball # - Recursively archives all submodules into the source tarball -# - Generates SHA-512 checksum (.sha512) for the source tarball +# - Generates SHA-512 checksum (.sha512) using sha512sum for cross-platform consistency # - Generates GPG signature (.asc) for the source tarball, unless --skip-signing is used # - Moves signed artifacts into a dedicated artifacts/ directory # - Verifies integrity and authenticity of artifacts via SHA-512 checksum and GPG signature # - Allows skipping of upstream remote URL validation (e.g., for forks) via --skip-remote-check +# - Excludes macOS extended attribute files (._*, .DS_Store, __MACOSX) for cross-platform compatibility +# - Validates availability of required tools (sha512sum, gtar, gpg) with platform-specific guidance # # Usage: # ./cloudberry-release.sh --stage --tag 2.0.0-incubating-rc1 --gpg-user your@apache.org @@ -56,6 +58,8 @@ # or the path must be explicitly provided using --repo # - Git user.name and user.email must be configured # - Repository remote must be: git@github.com:apache/cloudberry.git +# - Required tools: sha512sum, tar (gtar on macOS), gpg, xmllint 
+# - On macOS: brew install coreutils gnu-tar gnupg # # Examples: # ./cloudberry-release.sh -s -t 2.0.0-incubating-rc1 --gpg-user your@apache.org @@ -70,6 +74,101 @@ set -euo pipefail +# Global variables for detected platform and tools +DETECTED_PLATFORM="" +DETECTED_SHA_TOOL="" +DETECTED_TAR_TOOL="" + +# Platform detection and tool check +check_platform_and_tools() { + local has_errors=false + + # Detect platform + case "$(uname -s)" in + Linux*) DETECTED_PLATFORM="Linux" ;; + Darwin*) DETECTED_PLATFORM="macOS" ;; + CYGWIN*|MINGW*|MSYS*) DETECTED_PLATFORM="Windows" ;; + *) DETECTED_PLATFORM="Unknown" ;; + esac + + echo "Platform detected: $DETECTED_PLATFORM" + echo + + # Check sha512sum + if command -v sha512sum >/dev/null 2>&1; then + DETECTED_SHA_TOOL="sha512sum" + echo "[OK] SHA-512 tool: $DETECTED_SHA_TOOL" + else + echo "[ERROR] SHA-512 tool: sha512sum not found" + has_errors=true + fi + + # Check tar tool + if [[ "$DETECTED_PLATFORM" == "macOS" ]]; then + if command -v gtar >/dev/null 2>&1; then + DETECTED_TAR_TOOL="gtar" + echo "[OK] Tar tool: $DETECTED_TAR_TOOL (GNU tar)" + else + echo "[ERROR] Tar tool: gtar not found (GNU tar required on macOS)" + has_errors=true + fi + else + if command -v tar >/dev/null 2>&1; then + DETECTED_TAR_TOOL="tar" + echo "[OK] Tar tool: $DETECTED_TAR_TOOL" + else + echo "[ERROR] Tar tool: tar not found" + has_errors=true + fi + fi + + # Check GPG tool (only when signing is required) + if [[ "$SKIP_SIGNING" == true ]]; then + echo "- GPG tool: skipped (--skip-signing enabled)" + else + if command -v gpg >/dev/null 2>&1; then + local gpg_version=$(gpg --version | head -n1 | sed 's/gpg (GnuPG) //') + echo "[OK] GPG tool: gpg $gpg_version" + else + echo "[ERROR] GPG tool: gpg not found" + has_errors=true + fi + fi + + # Check xmllint tool + if command -v xmllint >/dev/null 2>&1; then + echo "[OK] XML tool: xmllint" + else + echo "[ERROR] XML tool: xmllint not found" + has_errors=true + fi + + # Show installation guidance if there 
are errors + if [[ "$has_errors" == true ]]; then + echo + echo "Missing required tools. Installation guidance:" + case "$DETECTED_PLATFORM" in + Linux) + echo " Please install required packages: coreutils tar gnupg libxml2-utils" + ;; + macOS) + echo " brew install coreutils gnu-tar gnupg" + ;; + Windows) + echo " Please use Git Bash or install GNU tools" + ;; + *) + echo " Please install GNU coreutils, tar, GnuPG, and libxml2" + ;; + esac + echo + echo "These tools ensure consistent cross-platform behavior and secure signing." + return 1 + fi + + return 0 +} + confirm() { read -r -p "$1 [y/N] " response case "$response" in @@ -78,6 +177,25 @@ confirm() { esac } +# Interactive step confirmation +confirm_next_step() { + echo + read -r -p "Press Enter or type y/yes to continue, or 'n' to exit: " response + case "$response" in + ""|[yY][eE][sS]|[yY]) + return 0 + ;; + [nN]|[nN][oO]) + echo "Process stopped by user." + exit 0 + ;; + *) + echo "Invalid input. Please press Enter or type y/yes to continue, or 'n' to exit." + confirm_next_step + ;; + esac +} + section() { echo echo "=================================================================" @@ -199,22 +317,16 @@ else fi if [[ -n "$REPO_ARG" ]]; then - if [[ -n "$REPO_ARG" ]]; then - if [[ ! -d "$REPO_ARG" || ! -f "$REPO_ARG/configure.ac" ]]; then - echo "ERROR: '$REPO_ARG' does not appear to be a valid Cloudberry source directory." - echo "Expected to find a 'configure.ac' file but it is missing." - echo - echo "Hint: Make sure you passed the correct --repo path to a valid Git clone." - exit 1 - fi - cd "$REPO_ARG" - elif [[ ! -f configure.ac ]]; then - echo "ERROR: No Cloudberry source directory specified and no 'configure.ac' found in the current directory." + # Validate the specified repository path + if [[ ! -d "$REPO_ARG" || ! -f "$REPO_ARG/configure.ac" ]]; then + echo "ERROR: '$REPO_ARG' does not appear to be a valid Cloudberry source directory." 
+ echo "Expected to find a 'configure.ac' file but it is missing." echo - echo "Hint: Either run this script from the root of a Cloudberry Git clone," - echo "or use the --repo option to specify the source directory." + echo "Hint: Make sure you passed the correct --repo path to a valid Git clone." exit 1 fi + + # Change to the specified repository directory cd "$REPO_ARG" if [[ ! -d ".git" ]]; then @@ -258,6 +370,15 @@ if $STAGE && [[ -z "$TAG" ]]; then show_help fi +# Check platform and required tools early +if $STAGE; then + section "Platform and Tool Detection" + if ! check_platform_and_tools; then + exit 1 + fi + confirm_next_step +fi + section "Validating Version Consistency" # Extract version from configure.ac @@ -272,6 +393,16 @@ if ! [[ "$TAG" =~ $SEMVER_REGEX ]]; then exit 1 fi +# Extract base version from tag (remove -rc suffix if present) +BASE_VERSION="$TAG" +if [[ "$TAG" =~ ^(.+)-rc[0-9]+$ ]]; then + BASE_VERSION="${BASH_REMATCH[1]}" +fi + +echo "Version validation strategy:" +echo " Tag: $TAG" +echo " Base version (for source files): $BASE_VERSION" + # Check gpversion.py consistency PY_LINE=$(grep "^MAIN_VERSION" gpMgmt/bin/gppylib/gpversion.py | sed -E 's/#.*//' | tr -d '[:space:]') @@ -281,28 +412,24 @@ if [[ "$PY_LINE" != "MAIN_VERSION=$EXPECTED" ]]; then exit 1 fi -# For final releases (non-RC), ensure configure.ac version matches tag exactly -if [[ "$TAG" != *-rc* && "$CONFIGURE_AC_VERSION" != "$TAG" ]]; then - echo "ERROR: configure.ac version ($CONFIGURE_AC_VERSION) does not match final release tag ($TAG)" - echo "Please update configure.ac to match the tag before proceeding." 
+# Ensure configure.ac version matches base version (without -rc suffix) +if [[ "$CONFIGURE_AC_VERSION" != "$BASE_VERSION" ]]; then + echo "ERROR: configure.ac version ($CONFIGURE_AC_VERSION) does not match base version ($BASE_VERSION)" + echo "For RC tags like '$TAG', configure.ac should contain the base version '$BASE_VERSION'" exit 1 fi -# Ensure the generated 'configure' script is up to date +# Ensure the generated 'configure' script matches base version CONFIGURE_VERSION_LINE=$(grep "^PACKAGE_VERSION=" configure || true) CONFIGURE_VERSION=$(echo "$CONFIGURE_VERSION_LINE" | sed -E "s/^PACKAGE_VERSION='([^']+)'.*/\1/") -if [[ "$CONFIGURE_VERSION" != "$TAG" ]]; then - echo "ERROR: Version in generated 'configure' script ($CONFIGURE_VERSION) does not match release tag ($TAG)." - echo "This likely means autoconf was not run after updating configure.ac." +if [[ "$CONFIGURE_VERSION" != "$BASE_VERSION" ]]; then + echo "ERROR: Version in generated 'configure' script ($CONFIGURE_VERSION) does not match base version ($BASE_VERSION)." + echo "This likely means autoconf was not run after updating configure.ac to the base version." exit 1 fi -# Ensure xmllint is available -if ! command -v xmllint >/dev/null 2>&1; then - echo "ERROR: xmllint is required but not installed." - exit 1 -fi + # Extract version from pom.xml using xmllint with namespace stripping POM_VERSION=$(xmllint --xpath '//*[local-name()="project"]/*[local-name()="version"]/text()' pom.xml 2>/dev/null || true) @@ -312,9 +439,9 @@ if [[ -z "$POM_VERSION" ]]; then exit 1 fi -if [[ "$POM_VERSION" != "$TAG" ]]; then - echo "ERROR: Version in pom.xml ($POM_VERSION) does not match release tag ($TAG)." - echo "Please update pom.xml before tagging." +if [[ "$POM_VERSION" != "$BASE_VERSION" ]]; then + echo "ERROR: Version in pom.xml ($POM_VERSION) does not match base version ($BASE_VERSION)." + echo "For RC tags like '$TAG', pom.xml should contain the base version '$BASE_VERSION'" exit 1 fi @@ -324,12 +451,14 @@ if ! 
git diff-index --quiet HEAD --; then exit 1 fi -echo "MAIN_VERSION verified" +echo "Version consistency verified" printf " %-14s: %s\n" "Release Tag" "$TAG" +printf " %-14s: %s\n" "Base Version" "$BASE_VERSION" printf " %-14s: %s\n" "configure.ac" "$CONFIGURE_AC_VERSION" printf " %-14s: %s\n" "configure" "$CONFIGURE_VERSION" printf " %-14s: %s\n" "pom.xml" "$POM_VERSION" printf " %-14s: %s\n" "gpversion.py" "${EXPECTED//[\[\]]}" +confirm_next_step section "Checking the state of the Tag" @@ -357,6 +486,8 @@ else echo "INFO: Tag '$TAG' does not yet exist. It will be created during staging." fi +confirm_next_step + # Check and display submodule initialization status if [ -s .gitmodules ]; then section "Checking Git Submodules" @@ -417,8 +548,9 @@ section "Staging release: $TAG" echo "INFO: Reusing existing tag '$TAG'; skipping tag creation." fi - echo "Creating BUILD_NUMBER file with value of 1" - echo "1" > BUILD_NUMBER + echo "Creating BUILD_NUMBER file with commit hash" + build_num=$(git rev-parse --short HEAD) + echo "$build_num" > BUILD_NUMBER echo -e "\nTag Summary" TAG_OBJECT=$(git rev-parse "$TAG") @@ -426,6 +558,7 @@ section "Staging release: $TAG" echo "$TAG (tag object): $TAG_OBJECT" echo " Points to commit: $TAG_COMMIT" git log -1 --format="%C(auto)%h %d" "$TAG" + confirm_next_step section "Creating Source Tarball" @@ -433,6 +566,10 @@ section "Staging release: $TAG" TMP_DIR=$(mktemp -d) trap 'rm -rf "$TMP_DIR"' EXIT + # Set environment variables to prevent macOS extended attributes + export COPYFILE_DISABLE=1 + export COPY_EXTENDED_ATTRIBUTES_DISABLE=1 + git archive --format=tar --prefix="apache-cloudberry-${TAG}/" "$TAG" | tar -x -C "$TMP_DIR" cp BUILD_NUMBER "$TMP_DIR/apache-cloudberry-${TAG}/" @@ -447,16 +584,66 @@ section "Staging release: $TAG" " fi - tar -czf "$TAR_NAME" -C "$TMP_DIR" "apache-cloudberry-${TAG}" + # Clean up macOS extended attributes if on macOS + if [[ "$DETECTED_PLATFORM" == "macOS" ]]; then + echo "Cleaning macOS extended 
attributes from extracted files..." + # Remove all extended attributes recursively + if command -v xattr >/dev/null 2>&1; then + find "$TMP_DIR/apache-cloudberry-${TAG}" -type f -exec xattr -c {} \; 2>/dev/null || true + echo "[OK] Extended attributes cleaned using xattr" + fi + + # Remove any ._* files that might have been created + find "$TMP_DIR/apache-cloudberry-${TAG}" -name '._*' -delete 2>/dev/null || true + find "$TMP_DIR/apache-cloudberry-${TAG}" -name '.DS_Store' -delete 2>/dev/null || true + find "$TMP_DIR/apache-cloudberry-${TAG}" -name '__MACOSX' -type d -exec rm -rf {} \; 2>/dev/null || true + echo "[OK] macOS-specific files removed" + fi + + # Create tarball using the detected tar tool + if [[ "$DETECTED_PLATFORM" == "macOS" ]]; then + echo "Using GNU tar for cross-platform compatibility..." + $DETECTED_TAR_TOOL --exclude='._*' --exclude='.DS_Store' --exclude='__MACOSX' -czf "$TAR_NAME" -C "$TMP_DIR" "apache-cloudberry-${TAG}" + echo "INFO: macOS detected - applied extended attribute cleanup and GNU tar" + else + # On other platforms, use standard tar + $DETECTED_TAR_TOOL -czf "$TAR_NAME" -C "$TMP_DIR" "apache-cloudberry-${TAG}" + fi + rm -rf "$TMP_DIR" echo -e "Archive saved to: $TAR_NAME" + + # Verify that no macOS extended attribute files are included + if [[ "$DETECTED_PLATFORM" == "macOS" ]]; then + echo "Verifying tarball does not contain macOS-specific files..." + MACOS_FILES=$($DETECTED_TAR_TOOL -tzf "$TAR_NAME" | grep -E '\._|\.DS_Store|__MACOSX' || true) + if [[ -n "$MACOS_FILES" ]]; then + echo "WARNING: Found macOS-specific files in tarball:" + echo "$MACOS_FILES" + echo "This may cause compilation issues on Linux systems." + else + echo "[OK] Tarball verified clean of macOS-specific files" + fi + + # Additional check for extended attributes in tar headers + echo "Checking for extended attribute headers in tarball..." 
+ if $DETECTED_TAR_TOOL -tvf "$TAR_NAME" 2>&1 | grep -q "LIBARCHIVE.xattr" 2>/dev/null; then + echo "WARNING: Tarball may still contain extended attribute headers" + echo "This could cause 'Ignoring unknown extended header keyword' warnings on Linux" + else + echo "[OK] No extended attribute headers detected in tarball (GNU tar used)" + fi + fi + + confirm_next_step # Generate SHA-512 checksum section "Generating SHA-512 Checksum" echo -e "\nGenerating SHA-512 checksum" - shasum -a 512 "$TAR_NAME" > "${TAR_NAME}.sha512" + sha512sum "$TAR_NAME" > "${TAR_NAME}.sha512" echo "Checksum saved to: ${TAR_NAME}.sha512" + confirm_next_step section "Signing with GPG key: $GPG_USER" # Conditionally generate GPG signature @@ -469,8 +656,10 @@ section "Staging release: $TAG" fi # Move artifacts to top-level artifacts directory - - ARTIFACTS_DIR="$(cd "$(dirname "$REPO_ARG")" && cd .. && pwd)/artifacts" + # At this point, we're always in the cloudberry repository directory + # (either we started there, or we cd'd there via --repo) + ARTIFACTS_DIR="$(cd .. && pwd)/artifacts" + mkdir -p "$ARTIFACTS_DIR" section "Moving Artifacts to $ARTIFACTS_DIR" @@ -479,10 +668,11 @@ section "Staging release: $TAG" mv -vf "$TAR_NAME" "$ARTIFACTS_DIR/" mv -vf "${TAR_NAME}.sha512" "$ARTIFACTS_DIR/" [[ -f "${TAR_NAME}.asc" ]] && mv -vf "${TAR_NAME}.asc" "$ARTIFACTS_DIR/" + confirm_next_step section "Verifying sha512 ($ARTIFACTS_DIR/${TAR_NAME}.sha512) Release Artifact" - cd "$ARTIFACTS_DIR" - sha512sum -c "$ARTIFACTS_DIR/${TAR_NAME}.sha512" + (cd "$ARTIFACTS_DIR" && sha512sum -c "${TAR_NAME}.sha512") + confirm_next_step section "Verifying GPG Signature ($ARTIFACTS_DIR/${TAR_NAME}.asc) Release Artifact" @@ -491,6 +681,7 @@ section "Staging release: $TAG" else echo "INFO: Signature verification skipped (--skip-signing). Signature is only available when generated via this script." 
fi + confirm_next_step section "Release candidate for $TAG staged successfully" fi diff --git a/devops/sandbox/Dockerfile.RELEASE.rockylinux9 b/devops/sandbox/Dockerfile.RELEASE.rockylinux9 index f9f422f57f6..ac394c6cb60 100644 --- a/devops/sandbox/Dockerfile.RELEASE.rockylinux9 +++ b/devops/sandbox/Dockerfile.RELEASE.rockylinux9 @@ -152,7 +152,7 @@ RUN cd /home/gpadmin/cloudberry && \ --enable-orafce \ --enable-orca \ --enable-pax \ - --enable-pxf \ + --disable-pxf \ --enable-tap-tests \ --with-gssapi \ --with-ldap \ diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml index 27b1f16b6a8..533856b12d7 100644 --- a/doc/src/sgml/catalogs.sgml +++ b/doc/src/sgml/catalogs.sgml @@ -7480,8 +7480,7 @@ SCRAM-SHA-256$<iteration count>:&l is a publicly readable view on pg_statistic_ext_data (after joining with pg_statistic_ext) that only exposes - information about those tables and columns that are readable by the - current user. + information about tables the current user owns. @@ -12925,7 +12924,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx and pg_statistic_ext_data catalogs. This view allows access only to rows of pg_statistic_ext and pg_statistic_ext_data - that correspond to tables the user has permission to read, and therefore + that correspond to tables the user owns, and therefore it is safe to allow public read access to this view. @@ -13125,7 +13124,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx and pg_statistic_ext_data catalogs. This view allows access only to rows of pg_statistic_ext and pg_statistic_ext_data - that correspond to tables the user has permission to read, and therefore + that correspond to tables the user owns, and therefore it is safe to allow public read access to this view. 
diff --git a/doc/src/sgml/client-auth.sgml b/doc/src/sgml/client-auth.sgml index 02f04891129..eb5e9f48db1 100644 --- a/doc/src/sgml/client-auth.sgml +++ b/doc/src/sgml/client-auth.sgml @@ -417,7 +417,9 @@ hostnogssenc databaseuser Specifies the authentication method to use when a connection matches this record. The possible choices are summarized here; details - are in . + are in . All the options + are lower case and treated case sensitively, so even acronyms like + ldap must be specified as lower case. diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index bd61286e042..bc3d0d1bd14 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -10458,6 +10458,25 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' + + allow_in_place_tablespaces (boolean) + + allow_in_place_tablespaces configuration parameter + + + + + Allows tablespaces to be created as directories inside + pg_tblspc, when an empty location string + is provided to the CREATE TABLESPACE command. This + is intended to allow testing replication scenarios where primary and + standby servers are running on the same machine. Such directories + are likely to confuse backup tools that expect to find only symbolic + links in that location. Only superusers can change this setting. + + + + allow_system_table_mods (boolean) diff --git a/doc/src/sgml/ddl.sgml b/doc/src/sgml/ddl.sgml index c358bff56d9..c85e92b3a2f 100644 --- a/doc/src/sgml/ddl.sgml +++ b/doc/src/sgml/ddl.sgml @@ -557,7 +557,7 @@ CREATE TABLE products ( tests, it cannot guarantee that the database will not reach a state in which the constraint condition is false (due to subsequent changes of the other row(s) involved). This would cause a database dump and - reload to fail. The reload could fail even when the complete + restore to fail. 
The restore could fail even when the complete database state is consistent with the constraint, due to rows not being loaded in an order that will satisfy the constraint. If possible, use UNIQUE, EXCLUDE, @@ -569,10 +569,10 @@ CREATE TABLE products ( If what you desire is a one-time check against other rows at row insertion, rather than a continuously-maintained consistency guarantee, a custom trigger can be used - to implement that. (This approach avoids the dump/reload problem because + to implement that. (This approach avoids the dump/restore problem because pg_dump does not reinstall triggers until after - reloading data, so that the check will not be enforced during a - dump/reload.) + restoring data, so that the check will not be enforced during a + dump/restore.) @@ -594,7 +594,7 @@ CREATE TABLE products ( function. PostgreSQL does not disallow that, but it will not notice if there are rows in the table that now violate the CHECK constraint. That would cause a - subsequent database dump and reload to fail. + subsequent database dump and restore to fail. The recommended way to handle such a change is to drop the constraint (using ALTER TABLE), adjust the function definition, and re-add the constraint, thereby rechecking it against all table rows. diff --git a/doc/src/sgml/ecpg.sgml b/doc/src/sgml/ecpg.sgml index 5a2dc4a8ae8..9df09df4d77 100644 --- a/doc/src/sgml/ecpg.sgml +++ b/doc/src/sgml/ecpg.sgml @@ -1890,7 +1890,8 @@ EXEC SQL SELECT b INTO :val :val_ind FROM test1; The indicator variable val_ind will be zero if the value was not null, and it will be negative if the value was - null. + null. (See to enable + Oracle-specific behavior.) @@ -9801,6 +9802,42 @@ risnull(CINTTYPE, (char *) &i); + + <productname>Oracle</productname> Compatibility Mode + + ecpg can be run in a so-called Oracle + compatibility mode. If this mode is active, it tries to + behave as if it were Oracle Pro*C. 
+ + + + Specifically, this mode changes ecpg in three ways: + + + + + Pad character arrays receiving character string types with + trailing spaces to the specified length + + + + + + Zero byte terminate these character arrays, and set the indicator + variable if truncation occurs + + + + + + Set the null indicator to -1 when character + arrays receive empty character string types + + + + + + Internals diff --git a/doc/src/sgml/extend.sgml b/doc/src/sgml/extend.sgml index bb0b2679bbb..8b8ccd9d4c0 100644 --- a/doc/src/sgml/extend.sgml +++ b/doc/src/sgml/extend.sgml @@ -982,7 +982,7 @@ SET LOCAL search_path TO @extschema@, pg_temp; pg_dump. But that behavior is undesirable for a configuration table; any data changes made by the user need to be included in dumps, or the extension will behave differently after a dump - and reload. + and restore. diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml index 8266615ea47..16ad120dd23 100644 --- a/doc/src/sgml/func.sgml +++ b/doc/src/sgml/func.sgml @@ -1840,6 +1840,9 @@ repeat('Pg', 4) PgPgPgPg subsequent random() calls in the current session can be repeated by re-issuing setseed() with the same argument. + Without any prior setseed() call in the same + session, the first random() call obtains a seed + from a platform-dependent source of random bits. @@ -15446,6 +15449,10 @@ table2-mapping json_array_length('[1,2,3,{"f1":1,"f2":[5,6]},4]') 5 + + + jsonb_array_length('[]') + 0 @@ -17887,10 +17894,19 @@ SELECT NULLIF(value, '(none)') ... Returns the length of the requested array dimension. + (Produces NULL instead of 0 for empty or missing array dimensions.) 
array_length(array[1,2,3], 1) 3 + + + array_length(array[]::int[], 1) + NULL + + + array_length(array['text'], 2) + NULL diff --git a/doc/src/sgml/high-availability.sgml b/doc/src/sgml/high-availability.sgml index a265409f025..eaa6f4b53cc 100644 --- a/doc/src/sgml/high-availability.sgml +++ b/doc/src/sgml/high-availability.sgml @@ -2194,7 +2194,7 @@ HINT: You can then restart the server after making the necessary configuration Currently, temporary table creation is not allowed during read-only transactions, so in some cases existing scripts will not run correctly. This restriction might be relaxed in a later release. This is - both an SQL Standard compliance issue and a technical issue. + both an SQL standard compliance issue and a technical issue. diff --git a/doc/src/sgml/json.sgml b/doc/src/sgml/json.sgml index a173368229b..c421d4ba75a 100644 --- a/doc/src/sgml/json.sgml +++ b/doc/src/sgml/json.sgml @@ -701,10 +701,10 @@ UPDATE table_name SET jsonb_field[2] = '2'; assigned value can be placed. --- Where jsonb_field was {}, it is now {'a': [{'b': 1}]} +-- Where jsonb_field was {}, it is now {"a": [{"b": 1}]} UPDATE table_name SET jsonb_field['a'][0]['b'] = '1'; --- Where jsonb_field was [], it is now [null, {'a': 1}] +-- Where jsonb_field was [], it is now [null, {"a": 1}] UPDATE table_name SET jsonb_field[1]['a'] = '1'; diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml index 9c67c9d1c50..949bba7c768 100644 --- a/doc/src/sgml/monitoring.sgml +++ b/doc/src/sgml/monitoring.sgml @@ -5607,7 +5607,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid,
- ANALYZE phases + ANALYZE Phases @@ -6537,7 +6537,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid,
- Base backup phases + Base Backup Phases diff --git a/doc/src/sgml/mvcc.sgml b/doc/src/sgml/mvcc.sgml index 6c94f6a9429..d357799e53b 100644 --- a/doc/src/sgml/mvcc.sgml +++ b/doc/src/sgml/mvcc.sgml @@ -277,9 +277,10 @@ The table also shows that PostgreSQL's Repeatable Read implementation - does not allow phantom reads. Stricter behavior is permitted by the - SQL standard: the four isolation levels only define which phenomena - must not happen, not which phenomena must happen. + does not allow phantom reads. This is acceptable under the SQL + standard because the standard specifies which anomalies must + not occur at certain isolation levels; higher + guarantees are acceptable. The behavior of the available isolation levels is detailed in the following subsections. @@ -874,7 +875,7 @@ ERROR: could not serialize access due to read/write dependencies among transact Table-Level Lock Modes - ACCESS SHARE + ACCESS SHARE (AccessShareLock) @@ -892,7 +893,7 @@ ERROR: could not serialize access due to read/write dependencies among transact - ROW SHARE + ROW SHARE (RowShareLock) @@ -913,7 +914,7 @@ ERROR: could not serialize access due to read/write dependencies among transact - ROW EXCLUSIVE + ROW EXCLUSIVE (RowExclusiveLock) @@ -935,7 +936,7 @@ ERROR: could not serialize access due to read/write dependencies among transact - SHARE UPDATE EXCLUSIVE + SHARE UPDATE EXCLUSIVE (ShareUpdateExclusiveLock) @@ -961,7 +962,7 @@ ERROR: could not serialize access due to read/write dependencies among transact - SHARE + SHARE (ShareLock) @@ -981,7 +982,7 @@ ERROR: could not serialize access due to read/write dependencies among transact - SHARE ROW EXCLUSIVE + SHARE ROW EXCLUSIVE (ShareRowExclusiveLock) @@ -1003,7 +1004,7 @@ ERROR: could not serialize access due to read/write dependencies among transact - EXCLUSIVE + EXCLUSIVE (ExclusiveLock) @@ -1025,7 +1026,7 @@ ERROR: could not serialize access due to read/write dependencies among transact - ACCESS EXCLUSIVE + ACCESS EXCLUSIVE 
(AccessExclusiveLock) diff --git a/doc/src/sgml/perform.sgml b/doc/src/sgml/perform.sgml index 9cf8ebea808..749d4693744 100644 --- a/doc/src/sgml/perform.sgml +++ b/doc/src/sgml/perform.sgml @@ -1785,7 +1785,7 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse; Dump scripts generated by pg_dump automatically apply - several, but not all, of the above guidelines. To reload a + several, but not all, of the above guidelines. To restore a pg_dump dump as quickly as possible, you need to do a few extra things manually. (Note that these points apply while restoring a dump, not while creating it. diff --git a/doc/src/sgml/plhandler.sgml b/doc/src/sgml/plhandler.sgml index 40ee59de9f3..980c95ecf39 100644 --- a/doc/src/sgml/plhandler.sgml +++ b/doc/src/sgml/plhandler.sgml @@ -156,7 +156,7 @@ attached to a function when check_function_bodies is on. Therefore, checks whose results might be affected by GUC parameters definitely should be skipped when check_function_bodies is - off, to avoid false failures when reloading a dump. + off, to avoid false failures when restoring a dump. diff --git a/doc/src/sgml/plpgsql.sgml b/doc/src/sgml/plpgsql.sgml index 4cd4bcba802..22fa317f7b5 100644 --- a/doc/src/sgml/plpgsql.sgml +++ b/doc/src/sgml/plpgsql.sgml @@ -3767,7 +3767,7 @@ RAISE ; After level if any, - you can write a format + you can specify a format string (which must be a simple string literal, not an expression). The format string specifies the error message text to be reported. The format string can be followed diff --git a/doc/src/sgml/ref/alter_function.sgml b/doc/src/sgml/ref/alter_function.sgml index 3c99b450e0a..ee94c34ae38 100644 --- a/doc/src/sgml/ref/alter_function.sgml +++ b/doc/src/sgml/ref/alter_function.sgml @@ -161,8 +161,10 @@ ALTER FUNCTION name [ ( [ [ extension_name - The name of the extension that the procedure is to depend on. 
+ This form marks the procedure as dependent on the extension, or no longer + dependent on the extension if NO is specified. + A procedure that's marked as dependent on an extension is dropped when the + extension is dropped, even if cascade is not specified. + A procedure can depend upon multiple extensions, and will be dropped when + any one of those extensions is dropped. diff --git a/doc/src/sgml/ref/alter_type.sgml b/doc/src/sgml/ref/alter_type.sgml index 21887e88a0f..146065144f5 100644 --- a/doc/src/sgml/ref/alter_type.sgml +++ b/doc/src/sgml/ref/alter_type.sgml @@ -411,7 +411,7 @@ ALTER TYPE name SET ( since the original creation of the enum type). The slowdown is usually insignificant; but if it matters, optimal performance can be regained by dropping and recreating the enum type, or by dumping and - reloading the database. + restoring the database. diff --git a/doc/src/sgml/ref/create_domain.sgml b/doc/src/sgml/ref/create_domain.sgml index e4b856d630c..82a0b874929 100644 --- a/doc/src/sgml/ref/create_domain.sgml +++ b/doc/src/sgml/ref/create_domain.sgml @@ -234,7 +234,7 @@ INSERT INTO tab (domcol) VALUES ((SELECT domcol FROM tab WHERE false)); function. PostgreSQL does not disallow that, but it will not notice if there are stored values of the domain type that now violate the CHECK constraint. That would cause a - subsequent database dump and reload to fail. The recommended way to + subsequent database dump and restore to fail. The recommended way to handle such a change is to drop the constraint (using ALTER DOMAIN), adjust the function definition, and re-add the constraint, thereby rechecking it against stored data. diff --git a/doc/src/sgml/ref/drop_extension.sgml b/doc/src/sgml/ref/drop_extension.sgml index 5e507dec928..c01ddace84c 100644 --- a/doc/src/sgml/ref/drop_extension.sgml +++ b/doc/src/sgml/ref/drop_extension.sgml @@ -30,7 +30,9 @@ DROP EXTENSION [ IF EXISTS ] name [ DROP EXTENSION removes extensions from the database. 
- Dropping an extension causes its component objects to be dropped as well. + Dropping an extension causes its component objects, and other explicitly + dependent routines (see , + the depends on extension action), to be dropped as well. + @@ -77,9 +79,9 @@ DROP EXTENSION [ IF EXISTS ] name [ RESTRICT - Refuse to drop the extension if any objects depend on it (other than - its own member objects and other extensions listed in the same - DROP command). This is the default. + This option prevents the specified extensions from being dropped + if there exist non-extension-member objects that depend on any + of the extensions. This is the default. diff --git a/doc/src/sgml/ref/insert.sgml b/doc/src/sgml/ref/insert.sgml index 2973b72b815..c3f49f73980 100644 --- a/doc/src/sgml/ref/insert.sgml +++ b/doc/src/sgml/ref/insert.sgml @@ -75,6 +75,11 @@ INSERT INTO table_name [ AS + INSERT into tables that lack unique indexes will + not be blocked by concurrent activity. Tables with unique indexes + might block if concurrent sessions perform actions that lock or modify + rows matching the unique index values being inserted; the details + are covered in . ON CONFLICT can be used to specify an alternative action to raising a unique constraint or exclusion constraint violation error. (See below.) @@ -176,7 +181,7 @@ INSERT INTO table_name [ AS ON CONFLICT DO UPDATE targets a table named excluded, since that will otherwise - be taken as the name of the special table representing rows proposed + be taken as the name of the special table representing the row proposed for insertion. @@ -396,7 +401,7 @@ INSERT INTO table_name [ AS SET and WHERE clauses in ON CONFLICT DO UPDATE have access to the existing row using the - table's name (or an alias), and to rows proposed for insertion + table's name (or an alias), and to the row proposed for insertion using the special excluded table.
SELECT privilege is required on any column in the target table where corresponding excluded diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml index ca6ff8cdc65..956f97e2537 100644 --- a/doc/src/sgml/ref/pg_dump.sgml +++ b/doc/src/sgml/ref/pg_dump.sgml @@ -371,9 +371,9 @@ PostgreSQL documentation Requesting exclusive locks on database objects while running a parallel dump could - cause the dump to fail. The reason is that the pg_dump coordinator process - requests shared locks on the objects that the worker processes are going to dump later - in order to + cause the dump to fail. The reason is that the pg_dump leader process + requests shared locks (ACCESS SHARE) on the + objects that the worker processes are going to dump later in order to make sure that nobody deletes them and makes them go away while the dump is running. If another client then requests an exclusive lock on a table, that lock will not be granted but will be queued waiting for the shared lock of the coordinator process to be @@ -694,7 +694,7 @@ PostgreSQL documentation ...). This will make restoration very slow; it is mainly useful for making dumps that can be loaded into non-PostgreSQL databases. - Any error during reloading will cause only rows that are part of the + Any error during restoring will cause only rows that are part of the problematic INSERT to be lost, rather than the entire table contents. @@ -718,9 +718,9 @@ PostgreSQL documentation This option is relevant only when creating a data-only dump. It instructs pg_dump to include commands to temporarily disable triggers on the target tables while - the data is reloaded. Use this if you have referential + the data is restored. Use this if you have referential integrity checks or other triggers on the tables that you - do not want to invoke during data reload. + do not want to invoke during data restore. @@ -838,7 +838,7 @@ PostgreSQL documentation than COPY). 
This will make restoration very slow; it is mainly useful for making dumps that can be loaded into non-PostgreSQL databases. - Any error during reloading will cause only rows that are part of the + Any error during restoring will cause only rows that are part of the problematic INSERT to be lost, rather than the entire table contents. Note that the restore might fail altogether if you have rearranged column order. The @@ -857,12 +857,22 @@ PostgreSQL documentation target the root of the partitioning hierarchy that contains it, rather than the partition itself. This causes the appropriate partition to be re-determined for each row when the data is loaded. This may be - useful when reloading data on a server where rows do not always fall + useful when restoring data on a server where rows do not always fall into the same partitions as they did on the original server. That could happen, for example, if the partitioning column is of type text and the two systems have different definitions of the collation used to sort the partitioning column. + + + It is best not to use parallelism when restoring from an archive made + with this option, because pg_restore will + not know exactly which partition(s) a given archive data item will + load data into. This could result in inefficiency due to lock + conflicts between parallel jobs, or perhaps even restore failures due + to foreign key constraints being set up before all the relevant data + is loaded. + @@ -1021,7 +1031,7 @@ PostgreSQL documentation Dump data as INSERT commands (rather than COPY). Controls the maximum number of rows per INSERT command. The value specified must be a - number greater than zero. Any error during reloading will cause only + number greater than zero. Any error during restoring will cause only rows that are part of the problematic INSERT to be lost, rather than the entire table contents. 
diff --git a/doc/src/sgml/ref/pg_dumpall.sgml b/doc/src/sgml/ref/pg_dumpall.sgml index 5bde886c453..ae632f739cd 100644 --- a/doc/src/sgml/ref/pg_dumpall.sgml +++ b/doc/src/sgml/ref/pg_dumpall.sgml @@ -310,9 +310,9 @@ PostgreSQL documentation This option is relevant only when creating a data-only dump. It instructs pg_dumpall to include commands to temporarily disable triggers on the target tables while - the data is reloaded. Use this if you have referential + the data is restored. Use this if you have referential integrity checks or other triggers on the tables that you - do not want to invoke during data reload. + do not want to invoke during data restore. @@ -389,7 +389,7 @@ PostgreSQL documentation target the root of the partitioning hierarchy that contains it, rather than the partition itself. This causes the appropriate partition to be re-determined for each row when the data is loaded. This may be - useful when reloading data on a server where rows do not always fall + useful when restoring data on a server where rows do not always fall into the same partitions as they did on the original server. That could happen, for example, if the partitioning column is of type text and the two systems have different definitions of the collation used @@ -549,7 +549,7 @@ PostgreSQL documentation Dump data as INSERT commands (rather than COPY). Controls the maximum number of rows per INSERT command. The value specified must be a - number greater than zero. Any error during reloading will cause only + number greater than zero. Any error during restoring will cause only rows that are part of the problematic INSERT to be lost, rather than the entire table contents. 
@@ -824,7 +824,7 @@ PostgreSQL documentation - To reload database(s) from this file, you can use: + To restore database(s) from this file, you can use: $ psql -f db.out postgres diff --git a/doc/src/sgml/ref/pg_resetwal.sgml b/doc/src/sgml/ref/pg_resetwal.sgml index 3e4882cdc65..fd539f56043 100644 --- a/doc/src/sgml/ref/pg_resetwal.sgml +++ b/doc/src/sgml/ref/pg_resetwal.sgml @@ -55,7 +55,7 @@ PostgreSQL documentation After running this command, it should be possible to start the server, but bear in mind that the database might contain inconsistent data due to partially-committed transactions. You should immediately dump your data, - run initdb, and reload. After reload, check for + run initdb, and restore. After restore, check for inconsistencies and repair as needed. @@ -78,7 +78,7 @@ PostgreSQL documentation discussed below. If you are not able to determine correct values for all these fields, can still be used, but the recovered database must be treated with even more suspicion than - usual: an immediate dump and reload is imperative. Do not + usual: an immediate dump and restore is imperative. Do not execute any data-modifying operations in the database before you dump, as any such action is likely to make the corruption worse. diff --git a/doc/src/sgml/ref/pg_restore.sgml b/doc/src/sgml/ref/pg_restore.sgml index 93ea937ac8e..1b56a4afb36 100644 --- a/doc/src/sgml/ref/pg_restore.sgml +++ b/doc/src/sgml/ref/pg_restore.sgml @@ -538,9 +538,9 @@ PostgreSQL documentation This option is relevant only when performing a data-only restore. It instructs pg_restore to execute commands to temporarily disable triggers on the target tables while - the data is reloaded. Use this if you have referential + the data is restored. Use this if you have referential integrity checks or other triggers on the tables that you - do not want to invoke during data reload. + do not want to invoke during data restore. 
@@ -958,7 +958,7 @@ CREATE DATABASE foo WITH TEMPLATE template0; - To reload the dump into a new database called newdb: + To restore the dump into a new database called newdb: $ createdb -T template0 newdb diff --git a/doc/src/sgml/ref/pgupgrade.sgml b/doc/src/sgml/ref/pgupgrade.sgml index f21563fb5b9..6069063b481 100644 --- a/doc/src/sgml/ref/pgupgrade.sgml +++ b/doc/src/sgml/ref/pgupgrade.sgml @@ -39,7 +39,7 @@ PostgreSQL documentation pg_upgrade (formerly called pg_migrator) allows data stored in PostgreSQL data files to be upgraded to a later PostgreSQL - major version without the data dump/reload typically required for + major version without the data dump/restore typically required for major version upgrades, e.g., from 9.5.8 to 9.6.4 or from 10.7 to 11.2. It is not required for minor version upgrades, e.g., from 9.6.2 to 9.6.3 or from 10.1 to 10.2. @@ -415,7 +415,7 @@ NET STOP postgresql-&majorversion; The option allows multiple CPU cores to be used - for copying/linking of files and to dump and reload database schemas + for copying/linking of files and to dump and restore database schemas in parallel; a good place to start is the maximum of the number of CPU cores and tablespaces. This option can dramatically reduce the time to upgrade a multi-database server running on a multiprocessor @@ -614,7 +614,8 @@ rsync --archive --delete --hard-links --size-only --no-inc-recursive /vol1/pg_tb Configure the servers for log shipping. (You do not need to run pg_start_backup() and pg_stop_backup() or take a file system backup as the standbys are still synchronized - with the primary.) + with the primary.) Replication slots are not copied and must + be recreated. 
diff --git a/doc/src/sgml/ref/release_savepoint.sgml b/doc/src/sgml/ref/release_savepoint.sgml index 39665d28efa..daf8eb9a436 100644 --- a/doc/src/sgml/ref/release_savepoint.sgml +++ b/doc/src/sgml/ref/release_savepoint.sgml @@ -82,8 +82,9 @@ RELEASE [ SAVEPOINT ] savepoint_name - If multiple savepoints have the same name, only the one that was most - recently defined is released. + If multiple savepoints have the same name, only the most recently defined + unreleased one is released. Repeated commands will release progressively + older savepoints. diff --git a/doc/src/sgml/ref/savepoint.sgml b/doc/src/sgml/ref/savepoint.sgml index b17342a1ee6..f84ac3d167f 100644 --- a/doc/src/sgml/ref/savepoint.sgml +++ b/doc/src/sgml/ref/savepoint.sgml @@ -53,7 +53,9 @@ SAVEPOINT savepoint_name savepoint_name - The name to give to the new savepoint. + The name to give to the new savepoint. If savepoints with the + same name already exist, they will be inaccessible until newer + identically-named savepoints are released. @@ -106,6 +108,32 @@ COMMIT; The above transaction will insert both 3 and 4. + + + To use a single savepoint name: + +BEGIN; + INSERT INTO table1 VALUES (1); + SAVEPOINT my_savepoint; + INSERT INTO table1 VALUES (2); + SAVEPOINT my_savepoint; + INSERT INTO table1 VALUES (3); + + -- rollback to the second savepoint + ROLLBACK TO SAVEPOINT my_savepoint; + SELECT * FROM table1; -- shows rows 1 and 2 + + -- release the second savepoint + RELEASE SAVEPOINT my_savepoint; + + -- rollback to the first savepoint + ROLLBACK TO SAVEPOINT my_savepoint; + SELECT * FROM table1; -- shows only row 1 +COMMIT; + + The above transaction shows row 3 being rolled back first, then row 2. 
+ + diff --git a/doc/src/sgml/ref/set.sgml b/doc/src/sgml/ref/set.sgml index 339ee9eec94..c4aab56a2d3 100644 --- a/doc/src/sgml/ref/set.sgml +++ b/doc/src/sgml/ref/set.sgml @@ -175,8 +175,7 @@ SET [ SESSION | LOCAL ] TIME ZONE { timezone Sets the internal seed for the random number generator (the function random). Allowed values are - floating-point numbers between -1 and 1, which are then - multiplied by 231-1. + floating-point numbers between -1 and 1 inclusive. diff --git a/doc/src/sgml/runtime.sgml b/doc/src/sgml/runtime.sgml index cf2630c3fc3..375644059db 100644 --- a/doc/src/sgml/runtime.sgml +++ b/doc/src/sgml/runtime.sgml @@ -1678,7 +1678,7 @@ $ kill -INT `head -1 /usr/local/pgsql/data/postmaster.pid`major releases of PostgreSQL, the internal data storage format is subject to change, thus complicating upgrades. The traditional method for moving data to a new major version - is to dump and reload the database, though this can be slow. A + is to dump and restore the database, though this can be slow. A faster method is . Replication methods are also available, as discussed below. (If you are using a pre-packaged version @@ -1764,7 +1764,7 @@ $ kill -INT `head -1 /usr/local/pgsql/data/postmaster.pid` One upgrade method is to dump data from one major version of - PostgreSQL and reload it in another — to do + PostgreSQL and restore it in another — to do this, you must use a logical backup tool like pg_dumpall; file system level backup methods will not work. (There are checks in place that prevent diff --git a/doc/src/sgml/textsearch.sgml b/doc/src/sgml/textsearch.sgml index 6afaf9e62c4..fbe049f0636 100644 --- a/doc/src/sgml/textsearch.sgml +++ b/doc/src/sgml/textsearch.sgml @@ -1974,7 +1974,7 @@ CREATE TRIGGER tsvectorupdate BEFORE INSERT OR UPDATE explicitly when creating tsvector values inside triggers, so that the column's contents will not be affected by changes to default_text_search_config. 
Failure to do this is likely to - lead to problems such as search results changing after a dump and reload. + lead to problems such as search results changing after a dump and restore. diff --git a/gpcontrib/gp_exttable_fdw/input/gp_exttable_fdw.source b/gpcontrib/gp_exttable_fdw/input/gp_exttable_fdw.source index 033dddd5215..41012e73c81 100644 --- a/gpcontrib/gp_exttable_fdw/input/gp_exttable_fdw.source +++ b/gpcontrib/gp_exttable_fdw/input/gp_exttable_fdw.source @@ -72,7 +72,7 @@ SELECT * FROM tableless_ext_fdw; -- When using CREATE FOREIGN TABLE syntax, '|' and '\' need to be escaped as '|' --> '\|' and '|' --> '\\'. CREATE FOREIGN TABLE ext_special_uri(a int, b int) SERVER gp_exttable_server -OPTIONS (format 'csv', delimiter ',', +OPTIONS (format_type 'c', delimiter ',', location_uris 'file://@hostname@@abs_srcdir@/data/spe\\cial1\||file://@hostname@@abs_srcdir@/data/\|special2\\'); \a SELECT urilocation FROM pg_exttable WHERE reloid = 'public.ext_special_uri'::regclass; diff --git a/gpcontrib/gp_exttable_fdw/output/gp_exttable_fdw.source b/gpcontrib/gp_exttable_fdw/output/gp_exttable_fdw.source index 337d21a99e8..a3191eb0853 100644 --- a/gpcontrib/gp_exttable_fdw/output/gp_exttable_fdw.source +++ b/gpcontrib/gp_exttable_fdw/output/gp_exttable_fdw.source @@ -76,7 +76,7 @@ SELECT * FROM tableless_ext_fdw; -- When using CREATE FOREIGN TABLE syntax, '|' and '\' need to be escaped as '|' --> '\|' and '|' --> '\\'. 
CREATE FOREIGN TABLE ext_special_uri(a int, b int) SERVER gp_exttable_server -OPTIONS (format 'csv', delimiter ',', +OPTIONS (format_type 'c', delimiter ',', location_uris 'file://@hostname@@abs_srcdir@/data/spe\\cial1\||file://@hostname@@abs_srcdir@/data/\|special2\\'); \a SELECT urilocation FROM pg_exttable WHERE reloid = 'public.ext_special_uri'::regclass; @@ -85,7 +85,7 @@ urilocation (1 row) SELECT ftoptions FROM pg_foreign_table WHERE ftrelid='public.ext_special_uri'::regclass; ftoptions -{format=csv,"delimiter=,","location_uris=file://@hostname@@abs_srcdir@/data/spe\\\\cial1\\||file://@hostname@@abs_srcdir@/data/\\|special2\\\\"} +{format_type=c,"delimiter=,","location_uris=file://@hostname@@abs_srcdir@/data/spe\\\\cial1\\||file://@hostname@@abs_srcdir@/data/\\|special2\\\\"} (1 row) \a SELECT * FROM ext_special_uri ORDER BY a; diff --git a/gpcontrib/orafce/others.c b/gpcontrib/orafce/others.c index 2fb612efe19..5bf8b650e4c 100644 --- a/gpcontrib/orafce/others.c +++ b/gpcontrib/orafce/others.c @@ -45,6 +45,7 @@ #include "utils/uuid.h" #include "orafce.h" #include "builtins.h" +#include "common/mdb_locale.h" /* * Source code for nlssort is taken from postgresql-nls-string @@ -322,7 +323,7 @@ _nls_run_strxfrm(text *string, text *locale) */ if (!lc_collate_cache) { - if ((lc_collate_cache = setlocale(LC_COLLATE, NULL))) + if ((lc_collate_cache = SETLOCALE(LC_COLLATE, NULL))) /* Make a copy of the locale name string. */ #ifdef _MSC_VER lc_collate_cache = _strdup(lc_collate_cache); @@ -364,7 +365,7 @@ _nls_run_strxfrm(text *string, text *locale) * If setlocale failed, we know the default stayed the same, * co we can safely elog. 
*/ - if (!setlocale(LC_COLLATE, locale_str)) + if (!SETLOCALE(LC_COLLATE, locale_str)) elog(ERROR, "failed to set the requested LC_COLLATE value [%s]", locale_str); changed_locale = true; @@ -409,7 +410,7 @@ _nls_run_strxfrm(text *string, text *locale) /* * Set original locale */ - if (!setlocale(LC_COLLATE, lc_collate_cache)) + if (!SETLOCALE(LC_COLLATE, lc_collate_cache)) elog(FATAL, "failed to set back the default LC_COLLATE value [%s]", lc_collate_cache); } @@ -422,7 +423,7 @@ _nls_run_strxfrm(text *string, text *locale) /* * Set original locale */ - if (!setlocale(LC_COLLATE, lc_collate_cache)) + if (!SETLOCALE(LC_COLLATE, lc_collate_cache)) elog(FATAL, "failed to set back the default LC_COLLATE value [%s]", lc_collate_cache); pfree(locale_str); } diff --git a/src/backend/access/appendonly/appendonlyam.c b/src/backend/access/appendonly/appendonlyam.c index b5f6a17a597..defa5ccc80e 100755 --- a/src/backend/access/appendonly/appendonlyam.c +++ b/src/backend/access/appendonly/appendonlyam.c @@ -1983,7 +1983,7 @@ appendonly_endscan(TableScanDesc scan) static pg_attribute_hot_inline bool appendonly_getnextslot_noqual(AppendOnlyScanDesc aoscan, ScanDirection direction, TupleTableSlot *slot) { - while (appendonlygettup(aoscan, direction, aoscan->rs_base.rs_nkeys, aoscan->aos_key, slot)) + if (appendonlygettup(aoscan, direction, aoscan->rs_base.rs_nkeys, aoscan->aos_key, slot)) { pgstat_count_heap_getnext(aoscan->aos_rd); return true; diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c index 1c881550b65..f7c8e1b3466 100644 --- a/src/backend/access/transam/transam.c +++ b/src/backend/access/transam/transam.c @@ -249,10 +249,15 @@ TransactionIdDidAbortForReader(TransactionId transactionId) * * This does NOT look into pg_xact but merely probes our local cache * (and so it's not named TransactionIdDidComplete, which would be the - * appropriate name for a function that worked that way). 
The intended - * use is just to short-circuit TransactionIdIsInProgress calls when doing - * repeated heapam_visibility.c checks for the same XID. If this isn't - * extremely fast then it will be counterproductive. + * appropriate name for a function that worked that way). + * + * NB: This is unused, and will be removed in v15. This was used to + * short-circuit TransactionIdIsInProgress, but that was wrong for a + * transaction that was known to be marked as committed in CLOG but not + * yet removed from the proc array. This is kept in backbranches just in + * case it is still used by extensions. However, extensions doing + * something similar to tuple visibility checks should also be careful to + * check the proc array first! * * Note: * Assumes transaction identifier is valid. diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index cff69879aa1..07831e9b098 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -11918,6 +11918,14 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, snprintf(fullpath, sizeof(fullpath), "pg_tblspc/%s", de->d_name); + /* + * Skip anything that isn't a symlink/junction. For testing only, + * we sometimes use allow_in_place_tablespaces to create + * directories directly under pg_tblspc, which would fail below. 
+ */ + if (get_dirent_type(fullpath, de, false, ERROR) != PGFILETYPE_LNK) + continue; + #if defined(HAVE_READLINK) || defined(WIN32) rllen = readlink(fullpath, linkpath, sizeof(linkpath)); if (rllen < 0) diff --git a/src/backend/catalog/Makefile b/src/backend/catalog/Makefile index 8a58b8e5897..b95f92f2e6e 100644 --- a/src/backend/catalog/Makefile +++ b/src/backend/catalog/Makefile @@ -200,6 +200,7 @@ endif $(INSTALL_DATA) $(srcdir)/information_schema.sql '$(DESTDIR)$(datadir)/information_schema.sql' $(INSTALL_DATA) $(call vpathsearch,cdb_schema.sql) '$(DESTDIR)$(datadir)/cdb_init.d/cdb_schema.sql' $(INSTALL_DATA) $(srcdir)/sql_features.txt '$(DESTDIR)$(datadir)/sql_features.txt' + $(INSTALL_DATA) $(srcdir)/fix-CVE-2024-4317.sql '$(DESTDIR)$(datadir)/fix-CVE-2024-4317.sql' installdirs: $(MKDIR_P) '$(DESTDIR)$(datadir)' @@ -207,7 +208,7 @@ installdirs: .PHONY: uninstall-data uninstall-data: - rm -f $(addprefix '$(DESTDIR)$(datadir)'/, postgres.bki system_constraints.sql system_functions.sql system_views.sql information_schema.sql cdb_init.d/cdb_schema.sql cdb_init.d/gp_toolkit.sql sql_features.txt) + rm -f $(addprefix '$(DESTDIR)$(datadir)'/, postgres.bki system_constraints.sql system_functions.sql system_views.sql information_schema.sql cdb_init.d/cdb_schema.sql cdb_init.d/gp_toolkit.sql sql_features.txt fix-CVE-2024-4317.sql) ifeq ($(USE_INTERNAL_FTS_FOUND), false) rm -f $(addprefix '$(DESTDIR)$(datadir)'/, external_fts.sql) endif diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c index 6c38ca470f6..39994474faf 100644 --- a/src/backend/catalog/dependency.c +++ b/src/backend/catalog/dependency.c @@ -1972,6 +1972,13 @@ find_expr_references_walker(Node *node, add_object_address(OCLASS_TYPE, objoid, 0, context->addrs); break; + case REGCOLLATIONOID: + objoid = DatumGetObjectId(con->constvalue); + if (SearchSysCacheExists1(COLLOID, + ObjectIdGetDatum(objoid))) + add_object_address(OCLASS_COLLATION, objoid, 0, + context->addrs); + break; 
case REGCONFIGOID: objoid = DatumGetObjectId(con->constvalue); if (SearchSysCacheExists1(TSCONFIGOID, diff --git a/src/backend/catalog/fix-CVE-2024-4317.sql b/src/backend/catalog/fix-CVE-2024-4317.sql new file mode 100644 index 00000000000..9e78c44c410 --- /dev/null +++ b/src/backend/catalog/fix-CVE-2024-4317.sql @@ -0,0 +1,115 @@ +/* + * fix-CVE-2024-4317.sql + * + * Copyright (c) 2024, PostgreSQL Global Development Group + * + * src/backend/catalog/fix-CVE-2024-4317.sql + * + * This file should be run in every database in the cluster to address + * CVE-2024-4317. + */ + +SET search_path = pg_catalog; + +CREATE OR REPLACE VIEW pg_stats_ext WITH (security_barrier) AS + SELECT cn.nspname AS schemaname, + c.relname AS tablename, + sn.nspname AS statistics_schemaname, + s.stxname AS statistics_name, + pg_get_userbyid(s.stxowner) AS statistics_owner, + ( SELECT array_agg(a.attname ORDER BY a.attnum) + FROM unnest(s.stxkeys) k + JOIN pg_attribute a + ON (a.attrelid = s.stxrelid AND a.attnum = k) + ) AS attnames, + pg_get_statisticsobjdef_expressions(s.oid) as exprs, + s.stxkind AS kinds, + sd.stxdndistinct AS n_distinct, + sd.stxddependencies AS dependencies, + m.most_common_vals, + m.most_common_val_nulls, + m.most_common_freqs, + m.most_common_base_freqs + FROM pg_statistic_ext s JOIN pg_class c ON (c.oid = s.stxrelid) + JOIN pg_statistic_ext_data sd ON (s.oid = sd.stxoid) + LEFT JOIN pg_namespace cn ON (cn.oid = c.relnamespace) + LEFT JOIN pg_namespace sn ON (sn.oid = s.stxnamespace) + LEFT JOIN LATERAL + ( SELECT array_agg(values) AS most_common_vals, + array_agg(nulls) AS most_common_val_nulls, + array_agg(frequency) AS most_common_freqs, + array_agg(base_frequency) AS most_common_base_freqs + FROM pg_mcv_list_items(sd.stxdmcv) + ) m ON sd.stxdmcv IS NOT NULL + WHERE pg_has_role(c.relowner, 'USAGE') + AND (c.relrowsecurity = false OR NOT row_security_active(c.oid)); + +CREATE OR REPLACE VIEW pg_stats_ext_exprs WITH (security_barrier) AS + SELECT cn.nspname AS 
schemaname, + c.relname AS tablename, + sn.nspname AS statistics_schemaname, + s.stxname AS statistics_name, + pg_get_userbyid(s.stxowner) AS statistics_owner, + stat.expr, + (stat.a).stanullfrac AS null_frac, + (stat.a).stawidth AS avg_width, + (stat.a).stadistinct AS n_distinct, + (CASE + WHEN (stat.a).stakind1 = 1 THEN (stat.a).stavalues1 + WHEN (stat.a).stakind2 = 1 THEN (stat.a).stavalues2 + WHEN (stat.a).stakind3 = 1 THEN (stat.a).stavalues3 + WHEN (stat.a).stakind4 = 1 THEN (stat.a).stavalues4 + WHEN (stat.a).stakind5 = 1 THEN (stat.a).stavalues5 + END) AS most_common_vals, + (CASE + WHEN (stat.a).stakind1 = 1 THEN (stat.a).stanumbers1 + WHEN (stat.a).stakind2 = 1 THEN (stat.a).stanumbers2 + WHEN (stat.a).stakind3 = 1 THEN (stat.a).stanumbers3 + WHEN (stat.a).stakind4 = 1 THEN (stat.a).stanumbers4 + WHEN (stat.a).stakind5 = 1 THEN (stat.a).stanumbers5 + END) AS most_common_freqs, + (CASE + WHEN (stat.a).stakind1 = 2 THEN (stat.a).stavalues1 + WHEN (stat.a).stakind2 = 2 THEN (stat.a).stavalues2 + WHEN (stat.a).stakind3 = 2 THEN (stat.a).stavalues3 + WHEN (stat.a).stakind4 = 2 THEN (stat.a).stavalues4 + WHEN (stat.a).stakind5 = 2 THEN (stat.a).stavalues5 + END) AS histogram_bounds, + (CASE + WHEN (stat.a).stakind1 = 3 THEN (stat.a).stanumbers1[1] + WHEN (stat.a).stakind2 = 3 THEN (stat.a).stanumbers2[1] + WHEN (stat.a).stakind3 = 3 THEN (stat.a).stanumbers3[1] + WHEN (stat.a).stakind4 = 3 THEN (stat.a).stanumbers4[1] + WHEN (stat.a).stakind5 = 3 THEN (stat.a).stanumbers5[1] + END) correlation, + (CASE + WHEN (stat.a).stakind1 = 4 THEN (stat.a).stavalues1 + WHEN (stat.a).stakind2 = 4 THEN (stat.a).stavalues2 + WHEN (stat.a).stakind3 = 4 THEN (stat.a).stavalues3 + WHEN (stat.a).stakind4 = 4 THEN (stat.a).stavalues4 + WHEN (stat.a).stakind5 = 4 THEN (stat.a).stavalues5 + END) AS most_common_elems, + (CASE + WHEN (stat.a).stakind1 = 4 THEN (stat.a).stanumbers1 + WHEN (stat.a).stakind2 = 4 THEN (stat.a).stanumbers2 + WHEN (stat.a).stakind3 = 4 THEN 
(stat.a).stanumbers3 + WHEN (stat.a).stakind4 = 4 THEN (stat.a).stanumbers4 + WHEN (stat.a).stakind5 = 4 THEN (stat.a).stanumbers5 + END) AS most_common_elem_freqs, + (CASE + WHEN (stat.a).stakind1 = 5 THEN (stat.a).stanumbers1 + WHEN (stat.a).stakind2 = 5 THEN (stat.a).stanumbers2 + WHEN (stat.a).stakind3 = 5 THEN (stat.a).stanumbers3 + WHEN (stat.a).stakind4 = 5 THEN (stat.a).stanumbers4 + WHEN (stat.a).stakind5 = 5 THEN (stat.a).stanumbers5 + END) AS elem_count_histogram + FROM pg_statistic_ext s JOIN pg_class c ON (c.oid = s.stxrelid) + LEFT JOIN pg_statistic_ext_data sd ON (s.oid = sd.stxoid) + LEFT JOIN pg_namespace cn ON (cn.oid = c.relnamespace) + LEFT JOIN pg_namespace sn ON (sn.oid = s.stxnamespace) + JOIN LATERAL ( + SELECT unnest(pg_get_statisticsobjdef_expressions(s.oid)) AS expr, + unnest(sd.stxdexpr)::pg_statistic AS a + ) stat ON (stat.expr IS NOT NULL) + WHERE pg_has_role(c.relowner, 'USAGE') + AND (c.relrowsecurity = false OR NOT row_security_active(c.oid)); diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c index f367b00a675..be09847022b 100644 --- a/src/backend/catalog/namespace.c +++ b/src/backend/catalog/namespace.c @@ -2971,7 +2971,6 @@ LookupExplicitNamespace(const char *nspname, bool missing_ok) { Oid namespaceId; AclResult aclresult; - /* check for pg_temp alias */ if (strcmp(nspname, "pg_temp") == 0) { @@ -2989,7 +2988,24 @@ LookupExplicitNamespace(const char *nspname, bool missing_ok) if (missing_ok && !OidIsValid(namespaceId)) return InvalidOid; - aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(), ACL_USAGE); + HeapTuple tuple; + Oid ownerId; + + tuple = SearchSysCache1(NAMESPACEOID, ObjectIdGetDatum(namespaceId)); + if (!HeapTupleIsValid(tuple)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_SCHEMA), + errmsg("schema with OID %u does not exist", namespaceId))); + + ownerId = ((Form_pg_namespace) GETSTRUCT(tuple))->nspowner; + + ReleaseSysCache(tuple); + + if 
(!mdb_admin_allow_bypass_owner_checks(GetUserId(), ownerId)) { + aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(), ACL_USAGE); + } else { + aclresult = ACLCHECK_OK; + } if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, nspname); diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql index 4cb9a9b57d6..86e938a3b87 100644 --- a/src/backend/catalog/system_views.sql +++ b/src/backend/catalog/system_views.sql @@ -310,12 +310,7 @@ CREATE VIEW pg_stats_ext WITH (security_barrier) AS array_agg(base_frequency) AS most_common_base_freqs FROM pg_mcv_list_items(sd.stxdmcv) ) m ON sd.stxdmcv IS NOT NULL - WHERE NOT EXISTS - ( SELECT 1 - FROM unnest(stxkeys) k - JOIN pg_attribute a - ON (a.attrelid = s.stxrelid AND a.attnum = k) - WHERE NOT has_column_privilege(c.oid, a.attnum, 'select') ) + WHERE pg_has_role(c.relowner, 'USAGE') AND (c.relrowsecurity = false OR NOT row_security_active(c.oid)); CREATE VIEW pg_stats_ext_exprs WITH (security_barrier) AS @@ -384,7 +379,9 @@ CREATE VIEW pg_stats_ext_exprs WITH (security_barrier) AS JOIN LATERAL ( SELECT unnest(pg_get_statisticsobjdef_expressions(s.oid)) AS expr, unnest(sd.stxdexpr)::pg_statistic AS a - ) stat ON (stat.expr IS NOT NULL); + ) stat ON (stat.expr IS NOT NULL) + WHERE pg_has_role(c.relowner, 'USAGE') + AND (c.relrowsecurity = false OR NOT row_security_active(c.oid)); -- unprivileged users may read pg_statistic_ext but not pg_statistic_ext_data REVOKE ALL ON pg_statistic_ext_data FROM public; diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c index f5dfd6ff126..6f370a2c9aa 100644 --- a/src/backend/commands/alter.c +++ b/src/backend/commands/alter.c @@ -1085,7 +1085,8 @@ AlterObjectOwner_internal(Relation rel, Oid objectId, Oid new_ownerId) if (!superuser()) { /* must be owner */ - if (!has_privs_of_role(GetUserId(), old_ownerId)) + if (!has_privs_of_role(GetUserId(), old_ownerId) + && !mdb_admin_allow_bypass_owner_checks(GetUserId(), 
old_ownerId)) { char *objname; char namebuf[NAMEDATALEN]; @@ -1105,14 +1106,13 @@ AlterObjectOwner_internal(Relation rel, Oid objectId, Oid new_ownerId) aclcheck_error(ACLCHECK_NOT_OWNER, get_object_type(classId, objectId), objname); } - /* Must be able to become new owner */ - check_is_member_of_role(GetUserId(), new_ownerId); + + check_mdb_admin_is_member_of_role(GetUserId(), new_ownerId); /* New owner must have CREATE privilege on namespace */ if (OidIsValid(namespaceId)) { AclResult aclresult; - aclresult = pg_namespace_aclcheck(namespaceId, new_ownerId, ACL_CREATE); if (aclresult != ACLCHECK_OK) diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c index 0b6261dc61f..00fc25b2439 100644 --- a/src/backend/commands/analyze.c +++ b/src/backend/commands/analyze.c @@ -192,7 +192,7 @@ static int gp_acquire_sample_rows_func(Relation onerel, int elevel, static BlockNumber acquire_index_number_of_blocks(Relation indexrel, Relation tablerel); static void gp_acquire_correlations_dispatcher(Oid relOid, bool inh, float4 *correlations, bool *correlationsIsNull); -static int compare_rows(const void *a, const void *b); +static int compare_rows(const void *a, const void *b, void *arg); static int acquire_inherited_sample_rows(Relation onerel, int elevel, HeapTuple *rows, int targrows, double *totalrows, double *totaldeadrows); @@ -1910,7 +1910,8 @@ acquire_sample_rows(Relation onerel, int elevel, * tuples are already sorted. 
*/ if (numrows == targrows) - qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows); + qsort_interruptible((void *) rows, numrows, sizeof(HeapTuple), + compare_rows, NULL); /* * Estimate total numbers of live and dead rows in relation, extrapolating @@ -1946,10 +1947,10 @@ acquire_sample_rows(Relation onerel, int elevel, } /* - * qsort comparator for sorting rows[] array + * Comparator for sorting rows[] array */ static int -compare_rows(const void *a, const void *b) +compare_rows(const void *a, const void *b, void *arg) { HeapTuple ha = *(const HeapTuple *) a; HeapTuple hb = *(const HeapTuple *) b; @@ -3307,7 +3308,7 @@ static void merge_leaf_stats(VacAttrStatsP stats, int samplerows, double totalrows); static int compare_scalars(const void *a, const void *b, void *arg); -static int compare_mcvs(const void *a, const void *b); +static int compare_mcvs(const void *a, const void *b, void *arg); static int analyze_mcv_list(int *mcv_counts, int num_mcv, double stadistinct, @@ -3977,8 +3978,8 @@ compute_scalar_stats(VacAttrStatsP stats, /* Sort the collected values */ cxt.ssup = &ssup; cxt.tupnoLink = tupnoLink; - qsort_arg((void *) values, values_cnt, sizeof(ScalarItem), - compare_scalars, (void *) &cxt); + qsort_interruptible((void *) values, values_cnt, sizeof(ScalarItem), + compare_scalars, (void *) &cxt); /* * Now scan the values in order, find the most common ones, and also @@ -4245,8 +4246,8 @@ compute_scalar_stats(VacAttrStatsP stats, deltafrac; /* Sort the MCV items into position order to speed next loop */ - qsort((void *) track, num_mcv, - sizeof(ScalarMCVItem), compare_mcvs); + qsort_interruptible((void *) track, num_mcv, sizeof(ScalarMCVItem), + compare_mcvs, NULL); /* * Collapse out the MCV items from the values[] array. 
@@ -5004,7 +5005,7 @@ merge_leaf_stats(VacAttrStatsP stats, } /* - * qsort_arg comparator for sorting ScalarItems + * Comparator for sorting ScalarItems * * Aside from sorting the items, we update the tupnoLink[] array * whenever two ScalarItems are found to contain equal datums. The array @@ -5041,10 +5042,10 @@ compare_scalars(const void *a, const void *b, void *arg) } /* - * qsort comparator for sorting ScalarMCVItems by position + * Comparator for sorting ScalarMCVItems by position */ static int -compare_mcvs(const void *a, const void *b) +compare_mcvs(const void *a, const void *b, void *arg) { int da = ((const ScalarMCVItem *) a)->first; int db = ((const ScalarMCVItem *) b)->first; diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c index b99b2419fcc..1ab3b36dd59 100644 --- a/src/backend/commands/functioncmds.c +++ b/src/backend/commands/functioncmds.c @@ -1525,9 +1525,13 @@ CreateFunction(ParseState *pstate, CreateFunctionStmt *stmt) * by security barrier views or row-level security policies. 
*/ if (isLeakProof && !superuser()) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("only superuser can define a leakproof function"))); + { + Oid role = get_role_oid("mdb_admin", true /*if nobody created mdb_admin role in this database*/); + if (!is_member_of_role(GetUserId(), role)) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("only superuser or mdb_admin can define a leakproof function"))); + } if (transformDefElem) { @@ -1852,9 +1856,13 @@ AlterFunction(ParseState *pstate, AlterFunctionStmt *stmt) { procForm->proleakproof = intVal(leakproof_item->arg); if (procForm->proleakproof && !superuser()) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("only superuser can define a leakproof function"))); + { + Oid role = get_role_oid("mdb_admin", true /*if nobody created mdb_admin role in this database*/); + if (!is_member_of_role(GetUserId(), role)) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("only superuser or mdb_admin can define a leakproof function"))); + } } if (cost_item) { diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index 2065667ce42..7d91d604443 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -98,7 +98,10 @@ static void ComputeIndexAttrs(IndexInfo *indexInfo, Oid relId, const char *accessMethodName, Oid accessMethodId, bool amcanorder, - bool isconstraint); + bool isconstraint, + Oid ddl_userid, + int ddl_sec_context, + int *ddl_save_nestlevel); static char *ChooseIndexName(const char *tabname, Oid namespaceId, List *colnames, List *exclusionOpNames, bool primary, bool isconstraint); @@ -397,9 +400,8 @@ CheckIndexCompatible(Oid oldId, * Compute the operator classes, collations, and exclusion operators for * the new index, so we can test whether it's compatible with the existing * one. 
Note that ComputeIndexAttrs might fail here, but that's OK: - * DefineIndex would have called this function with the same arguments - * later on, and it would have failed then anyway. Our attributeList - * contains only key attributes, thus we're filling ii_NumIndexAttrs and + * DefineIndex would have failed later. Our attributeList contains only + * key attributes, thus we're filling ii_NumIndexAttrs and * ii_NumIndexKeyAttrs with same value. */ indexInfo = makeIndexInfo(numberOfAttributes, numberOfAttributes, @@ -413,7 +415,7 @@ CheckIndexCompatible(Oid oldId, coloptions, attributeList, exclusionOpNames, relationId, accessMethodName, accessMethodId, - amcanorder, isconstraint); + amcanorder, isconstraint, InvalidOid, 0, NULL); /* Get the soon-obsolete pg_index tuple. */ @@ -659,6 +661,19 @@ WaitForOlderSnapshots(TransactionId limitXmin, bool progress) * DefineIndex * Creates a new index. * + * This function manages the current userid according to the needs of pg_dump. + * Recreating old-database catalog entries in new-database is fine, regardless + * of which users would have permission to recreate those entries now. That's + * just preservation of state. Running opaque expressions, like calling a + * function named in a catalog entry or evaluating a pg_node_tree in a catalog + * entry, as anyone other than the object owner, is not fine. To adhere to + * those principles and to remain fail-safe, use the table owner userid for + * most ACL checks. Use the original userid for ACL checks reached without + * traversing opaque expressions. (pg_dump can predict such ACL checks from + * catalogs.) Overall, this is a mess. Future DDL development should + * consider offering one DDL command for catalog setup and a separate DDL + * command for steps that run opaque expressions. + * * 'relationId': the OID of the heap relation on which the index is to be * created * 'stmt': IndexStmt describing the properties of the new index. 
@@ -1184,7 +1199,8 @@ DefineIndex(Oid relationId, coloptions, allIndexParams, stmt->excludeOpNames, relationId, accessMethodName, accessMethodId, - amcanorder, stmt->isconstraint); + amcanorder, stmt->isconstraint, root_save_userid, + root_save_sec_context, &root_save_nestlevel); /* * We disallow unique indexes on IVM columns of IMMVs. @@ -1623,11 +1639,8 @@ DefineIndex(Oid relationId, /* * Roll back any GUC changes executed by index functions, and keep - * subsequent changes local to this command. It's barely possible that - * some index function changed a behavior-affecting GUC, e.g. xmloption, - * that affects subsequent steps. This improves bug-compatibility with - * older PostgreSQL versions. They did the AtEOXact_GUC() here for the - * purpose of clearing the above default_tablespace change. + * subsequent changes local to this command. This is essential if some + * index function changed a behavior-affecting GUC, e.g. search_path. */ AtEOXact_GUC(false, root_save_nestlevel); root_save_nestlevel = NewGUCNestLevel(); @@ -1890,6 +1903,10 @@ DefineIndex(Oid relationId, } stmt->idxname = indexRelationName; + + AtEOXact_GUC(false, root_save_nestlevel); + SetUserIdAndSecContext(root_save_userid, root_save_sec_context); + if (shouldDispatch) { /* make sure the QE uses the same index name that we chose */ @@ -1910,8 +1927,6 @@ DefineIndex(Oid relationId, * Indexes on partitioned tables are not themselves built, so we're * done here. 
*/ - AtEOXact_GUC(false, root_save_nestlevel); - SetUserIdAndSecContext(root_save_userid, root_save_sec_context); table_close(rel, NoLock); if (!OidIsValid(parentIndexId)) pgstat_progress_end_command(); @@ -1919,6 +1934,10 @@ DefineIndex(Oid relationId, } stmt->idxname = indexRelationName; + + AtEOXact_GUC(false, root_save_nestlevel); + SetUserIdAndSecContext(root_save_userid, root_save_sec_context); + if (shouldDispatch) { int flags = DF_CANCEL_ON_ERROR | DF_WITH_SNAPSHOT; @@ -1928,6 +1947,7 @@ DefineIndex(Oid relationId, /* make sure the QE uses the same index name that we chose */ stmt->oldNode = InvalidOid; Assert(stmt->relation != NULL); + CdbDispatchUtilityStatement((Node *) stmt, flags, GetAssignedOidsForDispatch(), NULL); @@ -1937,9 +1957,6 @@ DefineIndex(Oid relationId, cdb_sync_indcheckxmin_with_segments(indexRelationId); } - AtEOXact_GUC(false, root_save_nestlevel); - SetUserIdAndSecContext(root_save_userid, root_save_sec_context); - if (!concurrent || Gp_role == GP_ROLE_EXECUTE) { /* Close the heap and we're done, in the non-concurrent case */ @@ -2282,6 +2299,10 @@ CheckPredicate(Expr *predicate) * Compute per-index-column information, including indexed column numbers * or index expressions, opclasses and their options. Note, all output vectors * should be allocated for all columns, including "including" ones. + * + * If the caller switched to the table owner, ddl_userid is the role for ACL + * checks reached without traversing opaque expressions. Otherwise, it's + * InvalidOid, and other ddl_* arguments are undefined. 
*/ static void ComputeIndexAttrs(IndexInfo *indexInfo, @@ -2295,12 +2316,17 @@ ComputeIndexAttrs(IndexInfo *indexInfo, const char *accessMethodName, Oid accessMethodId, bool amcanorder, - bool isconstraint) + bool isconstraint, + Oid ddl_userid, + int ddl_sec_context, + int *ddl_save_nestlevel) { ListCell *nextExclOp; ListCell *lc; int attn; int nkeycols = indexInfo->ii_NumIndexKeyAttrs; + Oid save_userid; + int save_sec_context; /* Allocate space for exclusion operator info, if needed */ if (exclusionOpNames) @@ -2314,6 +2340,9 @@ ComputeIndexAttrs(IndexInfo *indexInfo, else nextExclOp = NULL; + if (OidIsValid(ddl_userid)) + GetUserIdAndSecContext(&save_userid, &save_sec_context); + /* * process attributeList */ @@ -2450,10 +2479,24 @@ ComputeIndexAttrs(IndexInfo *indexInfo, } /* - * Apply collation override if any + * Apply collation override if any. Use of ddl_userid is necessary + * due to ACL checks therein, and it's safe because collations don't + * contain opaque expressions (or non-opaque expressions). */ if (attribute->collation) + { + if (OidIsValid(ddl_userid)) + { + AtEOXact_GUC(false, *ddl_save_nestlevel); + SetUserIdAndSecContext(ddl_userid, ddl_sec_context); + } attcollation = get_collation_oid(attribute->collation, false); + if (OidIsValid(ddl_userid)) + { + SetUserIdAndSecContext(save_userid, save_sec_context); + *ddl_save_nestlevel = NewGUCNestLevel(); + } + } /* * Check we have a collation iff it's a collatable type. The only @@ -2481,12 +2524,25 @@ ComputeIndexAttrs(IndexInfo *indexInfo, collationOidP[attn] = attcollation; /* - * Identify the opclass to use. + * Identify the opclass to use. Use of ddl_userid is necessary due to + * ACL checks therein. This is safe despite opclasses containing + * opaque expressions (specifically, functions), because only + * superusers can define opclasses. 
*/ + if (OidIsValid(ddl_userid)) + { + AtEOXact_GUC(false, *ddl_save_nestlevel); + SetUserIdAndSecContext(ddl_userid, ddl_sec_context); + } classOidP[attn] = ResolveOpClass(attribute->opclass, atttype, accessMethodName, accessMethodId); + if (OidIsValid(ddl_userid)) + { + SetUserIdAndSecContext(save_userid, save_sec_context); + *ddl_save_nestlevel = NewGUCNestLevel(); + } /* * Identify the exclusion operator, if any. @@ -2500,9 +2556,23 @@ ComputeIndexAttrs(IndexInfo *indexInfo, /* * Find the operator --- it must accept the column datatype - * without runtime coercion (but binary compatibility is OK) + * without runtime coercion (but binary compatibility is OK). + * Operators contain opaque expressions (specifically, functions). + * compatible_oper_opid() boils down to oper() and + * IsBinaryCoercible(). PostgreSQL would have security problems + * elsewhere if oper() started calling opaque expressions. */ + if (OidIsValid(ddl_userid)) + { + AtEOXact_GUC(false, *ddl_save_nestlevel); + SetUserIdAndSecContext(ddl_userid, ddl_sec_context); + } opid = compatible_oper_opid(opname, atttype, atttype, false); + if (OidIsValid(ddl_userid)) + { + SetUserIdAndSecContext(save_userid, save_sec_context); + *ddl_save_nestlevel = NewGUCNestLevel(); + } /* * Only allow commutative operators to be used in exclusion diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c index 96757eaa814..03f96bb6499 100644 --- a/src/backend/commands/schemacmds.c +++ b/src/backend/commands/schemacmds.c @@ -598,12 +598,12 @@ AlterSchemaOwner_internal(HeapTuple tup, Relation rel, Oid newOwnerId) AclResult aclresult; /* Otherwise, must be owner of the existing object */ - if (!pg_namespace_ownercheck(nspForm->oid, GetUserId())) + if (!mdb_admin_allow_bypass_owner_checks(GetUserId(), nspForm->nspowner) + && !pg_namespace_ownercheck(nspForm->oid, GetUserId())) aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_SCHEMA, NameStr(nspForm->nspname)); - /* Must be able to become new owner */ - 
check_is_member_of_role(GetUserId(), newOwnerId); + check_mdb_admin_is_member_of_role(GetUserId(), newOwnerId); /* * must have create-schema rights @@ -614,8 +614,13 @@ AlterSchemaOwner_internal(HeapTuple tup, Relation rel, Oid newOwnerId) * schemas. Because superusers will always have this right, we need * no special case for them. */ - aclresult = pg_database_aclcheck(MyDatabaseId, GetUserId(), + if (mdb_admin_allow_bypass_owner_checks(GetUserId(), nspForm->nspowner)) { + aclresult = ACLCHECK_OK; + } else { + aclresult = pg_database_aclcheck(MyDatabaseId, GetUserId(), ACL_CREATE); + } + if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_DATABASE, get_database_name(MyDatabaseId)); diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 42e00efe81d..07f00a212b0 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -15704,13 +15704,14 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing, LOCKMODE lock AclResult aclresult; /* Otherwise, must be owner of the existing object */ - if (!pg_class_ownercheck(relationOid, GetUserId())) + if (!mdb_admin_allow_bypass_owner_checks(GetUserId(), tuple_class->relowner) + && !pg_class_ownercheck(relationOid, GetUserId())) aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(relationOid)), RelationGetRelationName(target_rel)); - /* Must be able to become new owner */ - check_is_member_of_role(GetUserId(), newOwnerId); + check_mdb_admin_is_member_of_role(GetUserId(), newOwnerId); + /* New owner must have CREATE privilege on namespace */ aclresult = pg_namespace_aclcheck(namespaceOid, newOwnerId, ACL_CREATE); @@ -20791,7 +20792,7 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid, Form_pg_class classform; AclResult aclresult; char relkind; - + tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); if (!HeapTupleIsValid(tuple)) return; /* concurrently dropped */ @@ -20799,7 +20800,8 @@ 
RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid, relkind = classform->relkind; /* Must own relation. */ - if (!pg_class_ownercheck(relid, GetUserId())) + if (!mdb_admin_allow_bypass_owner_checks(GetUserId(), classform->relowner) + && !pg_class_ownercheck(relid, GetUserId())) aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(relid)), rv->relname); /* No system table modifications unless explicitly allowed. */ diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c index 9175ebfb5ba..3d7d040c462 100644 --- a/src/backend/commands/tablespace.c +++ b/src/backend/commands/tablespace.c @@ -113,6 +113,7 @@ /* GUC variables */ char *default_tablespace = NULL; char *temp_tablespaces = NULL; +bool allow_in_place_tablespaces = false; static void create_tablespace_directories(const char *location, @@ -295,6 +296,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) Datum newOptions; List *nonContentOptions = NIL; char *fileHandler = NULL; + bool in_place; /* Must be super user */ if (!superuser()) @@ -362,12 +364,15 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) (errcode(ERRCODE_INVALID_NAME), errmsg("tablespace location cannot contain single quotes"))); + in_place = allow_in_place_tablespaces && strlen(location) == 0; + /* * Allowing relative paths seems risky * - * this also helps us ensure that location is not empty or whitespace + * This also helps us ensure that location is not empty or whitespace, + * unless specifying a developer-only in-place tablespace. 
*/ - if (!is_absolute_path(location)) + if (!in_place && !is_absolute_path(location)) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), errmsg("tablespace location must be an absolute path"))); @@ -862,20 +867,40 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid) char *location_with_dbid_dir; char *location_with_version_dir; struct stat st; + bool in_place; elog(DEBUG5, "creating tablespace directories for tablespaceoid %d on dbid %d", tablespaceoid, GpIdentity.dbid); linkloc = psprintf("pg_tblspc/%u", tablespaceoid); + + /* + * If we're asked to make an 'in place' tablespace, create the directory + * directly where the symlink would normally go. This is a developer-only + * option for now, to facilitate regression testing. + */ + in_place = strlen(location) == 0; + + if (in_place) + { + if (MakePGDirectory(linkloc) < 0 && errno != EEXIST) + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not create directory \"%s\": %m", + linkloc))); + } + location_with_dbid_dir = psprintf("%s/%d", location, GpIdentity.dbid); - location_with_version_dir = psprintf("%s/%s", location_with_dbid_dir, + location_with_version_dir = psprintf("%s/%s", in_place ? linkloc : location_with_dbid_dir, GP_TABLESPACE_VERSION_DIRECTORY); /* * Attempt to coerce target directory to safe permissions. If this fails, - * it doesn't exist or has the wrong owner. + * it doesn't exist or has the wrong owner. Not needed for in-place mode, + * because in that case we created the directory with the desired + * permissions. */ - if (chmod(location, pg_dir_create_mode) != 0) + if (!in_place && chmod(location, pg_dir_create_mode) != 0) { if (errno == ENOENT) ereport(ERROR, @@ -949,13 +974,13 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid) /* * In recovery, remove old symlink, in case it points to the wrong place. 
*/ - if (InRecovery) + if (!in_place && InRecovery) remove_tablespace_symlink(linkloc); /* * Create the symlink under PGDATA */ - if (symlink(location_with_dbid_dir, linkloc) < 0) + if (!in_place && symlink(location_with_dbid_dir, linkloc) < 0) ereport(ERROR, (errcode_for_file_access(), errmsg("could not create symbolic link \"%s\": %m", diff --git a/src/backend/gporca/libgpdbcost/src/CCostModelGPDB.cpp b/src/backend/gporca/libgpdbcost/src/CCostModelGPDB.cpp index 73330059c72..0bf5e167469 100644 --- a/src/backend/gporca/libgpdbcost/src/CCostModelGPDB.cpp +++ b/src/backend/gporca/libgpdbcost/src/CCostModelGPDB.cpp @@ -1622,21 +1622,24 @@ CCostModelGPDB::CostSequenceProject(CMemoryPool *mp, CExpressionHandle &exprhdl, } // we process (sorted window of) input tuples to compute window function values + // Use at least 1 to account for the base cost of evaluating window functions + // even when there are no sort columns (e.g., no ORDER BY in the window spec) + ULONG ulCostFactor = std::max(ulSortCols, (ULONG) 1); CCost costLocal = - CCost(pci->NumRebinds() * (ulSortCols * num_rows_outer * dWidthOuter * + CCost(pci->NumRebinds() * (ulCostFactor * num_rows_outer * dWidthOuter * dTupDefaultProcCostUnit)); CCost costChild = CostChildren(mp, exprhdl, pci, pcmgpdb->GetCostModelParams()); - + return costLocal + costChild; } //--------------------------------------------------------------------------- // @function: -// CCostModelGPDB::CostSequenceProject +// CCostModelGPDB::CostHashSequenceProject // // @doc: -// Cost of sequence project +// Cost of hash sequence project // //--------------------------------------------------------------------------- CCost @@ -1675,8 +1678,11 @@ CCostModelGPDB::CostHashSequenceProject(CMemoryPool *mp, CExpressionHandle &expr } // we process (sorted window of) input tuples to compute window function values + // Use at least 1 to account for the base cost of evaluating window functions + // even when there are no sort columns (e.g., no ORDER BY 
in the window spec) + ULONG ulCostFactor = std::max(ulSortCols, (ULONG) 1); CCost costLocal = - CCost(pci->NumRebinds() * (ulSortCols * num_rows_outer * dWidthOuter * + CCost(pci->NumRebinds() * (ulCostFactor * num_rows_outer * dWidthOuter * dTupDefaultProcCostUnit)); CCost costChild = CostChildren(mp, exprhdl, pci, pcmgpdb->GetCostModelParams()); diff --git a/src/backend/gporca/libgpos/server/src/unittest/gpos/string/CWStringTest.cpp b/src/backend/gporca/libgpos/server/src/unittest/gpos/string/CWStringTest.cpp index 60bccf59341..bb086954403 100644 --- a/src/backend/gporca/libgpos/server/src/unittest/gpos/string/CWStringTest.cpp +++ b/src/backend/gporca/libgpos/server/src/unittest/gpos/string/CWStringTest.cpp @@ -12,6 +12,7 @@ #include "unittest/gpos/string/CWStringTest.h" #include +#include "common/mdb_locale.h" #include "gpos/base.h" #include "gpos/error/CAutoTrace.h" @@ -177,18 +178,18 @@ CWStringTest::EresUnittest_AppendFormatInvalidLocale() CWStringDynamic *expected = GPOS_NEW(mp) CWStringDynamic(mp, GPOS_WSZ_LIT("UNKNOWN")); - CHAR *oldLocale = setlocale(LC_CTYPE, nullptr); + CHAR *oldLocale = SETLOCALE(LC_CTYPE, nullptr); CWStringDynamic *pstr1 = GPOS_NEW(mp) CWStringDynamic(mp); GPOS_RESULT eres = GPOS_OK; - setlocale(LC_CTYPE, "C"); + SETLOCALE(LC_CTYPE, "C"); pstr1->AppendFormat(GPOS_WSZ_LIT("%s"), (CHAR *) "ÃË", 123); pstr1->Equals(expected); // cleanup - setlocale(LC_CTYPE, oldLocale); + SETLOCALE(LC_CTYPE, oldLocale); GPOS_DELETE(pstr1); GPOS_DELETE(expected); diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c index 9e82bd85c75..dfe348c1f40 100644 --- a/src/backend/parser/parse_relation.c +++ b/src/backend/parser/parse_relation.c @@ -3487,6 +3487,9 @@ expandNSItemAttrs(ParseState *pstate, ParseNamespaceItem *nsitem, * * "*" is returned if the given attnum is InvalidAttrNumber --- this case * occurs when a Var represents a whole tuple of a relation. 
+ * + * It is caller's responsibility to not call this on a dropped attribute. + * (You will get some answer for such cases, but it might not be sensible.) */ char * get_rte_attribute_name(RangeTblEntry *rte, AttrNumber attnum) diff --git a/src/backend/replication/logical/relation.c b/src/backend/replication/logical/relation.c index fad8c92b2ef..bd303546cce 100644 --- a/src/backend/replication/logical/relation.c +++ b/src/backend/replication/logical/relation.c @@ -249,6 +249,67 @@ logicalrep_report_missing_attrs(LogicalRepRelation *remoterel, } } +/* + * Check if replica identity matches and mark the updatable flag. + * + * We allow for stricter replica identity (fewer columns) on subscriber as + * that will not stop us from finding unique tuple. IE, if publisher has + * identity (id,timestamp) and subscriber just (id) this will not be a + * problem, but in the opposite scenario it will. + * + * We just mark the relation entry as not updatable here if the local + * replica identity is found to be insufficient for applying + * updates/deletes (inserts don't care!) and leave it to + * check_relation_updatable() to throw the actual error if needed. + */ +static void +logicalrep_rel_mark_updatable(LogicalRepRelMapEntry *entry) +{ + Bitmapset *idkey; + LogicalRepRelation *remoterel = &entry->remoterel; + int i; + + entry->updatable = true; + + idkey = RelationGetIndexAttrBitmap(entry->localrel, + INDEX_ATTR_BITMAP_IDENTITY_KEY); + /* fallback to PK if no replica identity */ + if (idkey == NULL) + { + idkey = RelationGetIndexAttrBitmap(entry->localrel, + INDEX_ATTR_BITMAP_PRIMARY_KEY); + + /* + * If no replica identity index and no PK, the published table must + * have replica identity FULL. 
+ */ + if (idkey == NULL && remoterel->replident != REPLICA_IDENTITY_FULL) + entry->updatable = false; + } + + i = -1; + while ((i = bms_next_member(idkey, i)) >= 0) + { + int attnum = i + FirstLowInvalidHeapAttributeNumber; + + if (!AttrNumberIsForUserDefinedAttr(attnum)) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("logical replication target relation \"%s.%s\" uses " + "system columns in REPLICA IDENTITY index", + remoterel->nspname, remoterel->relname))); + + attnum = AttrNumberGetAttrOffset(attnum); + + if (entry->attrmap->attnums[attnum] < 0 || + !bms_is_member(entry->attrmap->attnums[attnum], remoterel->attkeys)) + { + entry->updatable = false; + break; + } + } +} + /* * Open the local relation associated with the remote one. * @@ -307,7 +368,6 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode) if (!entry->localrelvalid) { Oid relid; - Bitmapset *idkey; TupleDesc desc; MemoryContext oldctx; int i; @@ -316,7 +376,7 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode) /* Release the no-longer-useful attrmap, if any. */ if (entry->attrmap) { - pfree(entry->attrmap); + free_attrmap(entry->attrmap); entry->attrmap = NULL; } @@ -373,54 +433,10 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode) bms_free(missingatts); /* - * Check that replica identity matches. We allow for stricter replica - * identity (fewer columns) on subscriber as that will not stop us - * from finding unique tuple. IE, if publisher has identity - * (id,timestamp) and subscriber just (id) this will not be a problem, - * but in the opposite scenario it will. - * - * Don't throw any error here just mark the relation entry as not - * updatable, as replica identity is only for updates and deletes but - * inserts can be replicated even without it. + * Set if the table's replica identity is enough to apply + * update/delete. 
*/ - entry->updatable = true; - idkey = RelationGetIndexAttrBitmap(entry->localrel, - INDEX_ATTR_BITMAP_IDENTITY_KEY); - /* fallback to PK if no replica identity */ - if (idkey == NULL) - { - idkey = RelationGetIndexAttrBitmap(entry->localrel, - INDEX_ATTR_BITMAP_PRIMARY_KEY); - - /* - * If no replica identity index and no PK, the published table - * must have replica identity FULL. - */ - if (idkey == NULL && remoterel->replident != REPLICA_IDENTITY_FULL) - entry->updatable = false; - } - - i = -1; - while ((i = bms_next_member(idkey, i)) >= 0) - { - int attnum = i + FirstLowInvalidHeapAttributeNumber; - - if (!AttrNumberIsForUserDefinedAttr(attnum)) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("logical replication target relation \"%s.%s\" uses " - "system columns in REPLICA IDENTITY index", - remoterel->nspname, remoterel->relname))); - - attnum = AttrNumberGetAttrOffset(attnum); - - if (entry->attrmap->attnums[attnum] < 0 || - !bms_is_member(entry->attrmap->attnums[attnum], remoterel->attkeys)) - { - entry->updatable = false; - break; - } - } + logicalrep_rel_mark_updatable(entry); entry->localrelvalid = true; } @@ -493,6 +509,40 @@ logicalrep_partmap_invalidate_cb(Datum arg, Oid reloid) } } +/* + * Reset the entries in the partition map that refer to remoterel. + * + * Called when new relation mapping is sent by the publisher to update our + * expected view of incoming data from said publisher. + * + * Note that we don't update the remoterel information in the entry here, + * we will update the information in logicalrep_partition_open to avoid + * unnecessary work. 
+ */ +void +logicalrep_partmap_reset_relmap(LogicalRepRelation *remoterel) +{ + HASH_SEQ_STATUS status; + LogicalRepPartMapEntry *part_entry; + LogicalRepRelMapEntry *entry; + + if (LogicalRepPartMap == NULL) + return; + + hash_seq_init(&status, LogicalRepPartMap); + while ((part_entry = (LogicalRepPartMapEntry *) hash_seq_search(&status)) != NULL) + { + entry = &part_entry->relmapentry; + + if (entry->remoterel.remoteid != remoterel->remoteid) + continue; + + logicalrep_relmap_free_entry(entry); + + memset(entry, 0, sizeof(LogicalRepRelMapEntry)); + } +} + /* * Initialize the partition map cache. */ @@ -553,8 +603,20 @@ logicalrep_partition_open(LogicalRepRelMapEntry *root, entry = &part_entry->relmapentry; + /* + * We must always overwrite entry->localrel with the latest partition + * Relation pointer, because the Relation pointed to by the old value may + * have been cleared after the caller would have closed the partition + * relation after the last use of this entry. Note that localrelvalid is + * only updated by the relcache invalidation callback, so it may still be + * true irrespective of whether the Relation pointed to by localrel has + * been cleared or not. + */ if (found && entry->localrelvalid) + { + entry->localrel = partrel; return entry; + } /* Switch to longer-lived context. */ oldctx = MemoryContextSwitchTo(LogicalRepPartMapContext); @@ -565,6 +627,13 @@ logicalrep_partition_open(LogicalRepRelMapEntry *root, part_entry->partoid = partOid; } + /* Release the no-longer-useful attrmap, if any. */ + if (entry->attrmap) + { + free_attrmap(entry->attrmap); + entry->attrmap = NULL; + } + if (!entry->remoterel.remoteid) { int i; @@ -624,7 +693,8 @@ logicalrep_partition_open(LogicalRepRelMapEntry *root, attrmap->maplen * sizeof(AttrNumber)); } - entry->updatable = root->updatable; + /* Set if the table's replica identity is enough to apply update/delete. 
*/ + logicalrep_rel_mark_updatable(entry); entry->localrelvalid = true; diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c index adcbc36ecef..7190dd94ebf 100644 --- a/src/backend/replication/logical/worker.c +++ b/src/backend/replication/logical/worker.c @@ -1191,6 +1191,9 @@ apply_handle_relation(StringInfo s) rel = logicalrep_read_rel(s); logicalrep_relmap_update(rel); + + /* Also reset all entries in the partition map that refer to remoterel. */ + logicalrep_partmap_reset_relmap(rel); } /* @@ -1320,6 +1323,13 @@ apply_handle_insert_internal(ApplyExecutionData *edata, static void check_relation_updatable(LogicalRepRelMapEntry *rel) { + /* + * For partitioned tables, we only need to care if the target partition is + * updatable (aka has PK or RI defined for it). + */ + if (rel->localrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + return; + /* Updatable, no error. */ if (rel->updatable) return; @@ -1673,6 +1683,8 @@ apply_handle_tuple_routing(ApplyExecutionData *edata, TupleTableSlot *remoteslot_part; TupleConversionMap *map; MemoryContext oldctx; + LogicalRepRelMapEntry *part_entry = NULL; + AttrMap *attrmap = NULL; /* ModifyTableState is needed for ExecFindPartition(). */ edata->mtstate = mtstate = makeNode(ModifyTableState); @@ -1704,8 +1716,11 @@ apply_handle_tuple_routing(ApplyExecutionData *edata, remoteslot_part = table_slot_create(partrel, &estate->es_tupleTable); map = partrelinfo->ri_RootToPartitionMap; if (map != NULL) - remoteslot_part = execute_attr_map_slot(map->attrMap, remoteslot, + { + attrmap = map->attrMap; + remoteslot_part = execute_attr_map_slot(attrmap, remoteslot, remoteslot_part); + } else { remoteslot_part = ExecCopySlot(remoteslot_part, remoteslot); @@ -1713,6 +1728,14 @@ apply_handle_tuple_routing(ApplyExecutionData *edata, } MemoryContextSwitchTo(oldctx); + /* Check if we can do the update or delete on the leaf partition. 
*/ + if (operation == CMD_UPDATE || operation == CMD_DELETE) + { + part_entry = logicalrep_partition_open(relmapentry, partrel, + attrmap); + check_relation_updatable(part_entry); + } + switch (operation) { case CMD_INSERT: @@ -1734,15 +1757,10 @@ apply_handle_tuple_routing(ApplyExecutionData *edata, * suitable partition. */ { - AttrMap *attrmap = map ? map->attrMap : NULL; - LogicalRepRelMapEntry *part_entry; TupleTableSlot *localslot; ResultRelInfo *partrelinfo_new; bool found; - part_entry = logicalrep_partition_open(relmapentry, partrel, - attrmap); - /* Get the matching local tuple from the partition. */ found = FindReplTupleInLocalRel(estate, partrel, &part_entry->remoterel, diff --git a/src/backend/statistics/extended_stats.c b/src/backend/statistics/extended_stats.c index d3561b779ab..ee1c25416bd 100644 --- a/src/backend/statistics/extended_stats.c +++ b/src/backend/statistics/extended_stats.c @@ -1135,8 +1135,8 @@ build_sorted_items(StatsBuildData *data, int *nitems, } /* do the sort, using the multi-sort */ - qsort_arg((void *) items, nrows, sizeof(SortItem), - multi_sort_compare, mss); + qsort_interruptible((void *) items, nrows, sizeof(SortItem), + multi_sort_compare, mss); return items; } @@ -1318,10 +1318,38 @@ choose_best_statistics(List *stats, char requiredkind, * statext_is_compatible_clause_internal * Determines if the clause is compatible with MCV lists. * - * Does the heavy lifting of actually inspecting the clauses for - * statext_is_compatible_clause. It needs to be split like this because - * of recursion. The attnums bitmap is an input/output parameter collecting - * attribute numbers from all compatible clauses (recursively). + * To be compatible, the given clause must be a combination of supported + * clauses built from Vars or sub-expressions (where a sub-expression is + * something that exactly matches an expression found in statistics objects). 
+ * This function recursively examines the clause and extracts any + * sub-expressions that will need to be matched against statistics. + * + * Currently, we only support the following types of clauses: + * + * (a) OpExprs of the form (Var/Expr op Const), or (Const op Var/Expr), where + * the op is one of ("=", "<", ">", ">=", "<=") + * + * (b) (Var/Expr IS [NOT] NULL) + * + * (c) combinations using AND/OR/NOT + * + * (d) ScalarArrayOpExprs of the form (Var/Expr op ANY (array)) or (Var/Expr + * op ALL (array)) + * + * In the future, the range of supported clauses may be expanded to more + * complex cases, for example (Var op Var). + * + * Arguments: + * clause: (sub)clause to be inspected (bare clause, not a RestrictInfo) + * relid: rel that all Vars in clause must belong to + * *attnums: input/output parameter collecting attribute numbers of all + * mentioned Vars. Note that we do not offset the attribute numbers, + * so we can't cope with system columns. + * *exprs: input/output parameter collecting primitive subclauses within + * the clause tree + * + * Returns false if there is something we definitively can't handle. + * On true return, we can proceed to match the *exprs against statistics. */ static bool statext_is_compatible_clause_internal(PlannerInfo *root, Node *clause, @@ -1345,10 +1373,14 @@ statext_is_compatible_clause_internal(PlannerInfo *root, Node *clause, if (var->varlevelsup > 0) return false; - /* Also skip system attributes (we don't allow stats on those). */ + /* + * Also reject system attributes and whole-row Vars (we don't allow + * stats on those). + */ if (!AttrNumberIsForUserDefinedAttr(var->varattno)) return false; + /* OK, record the attnum for later permissions checks. 
*/ *attnums = bms_add_member(*attnums, var->varattno); return true; @@ -1503,7 +1535,7 @@ statext_is_compatible_clause_internal(PlannerInfo *root, Node *clause, foreach(lc, expr->args) { /* - * Had we found incompatible clause in the arguments, treat the + * If we find an incompatible clause in the arguments, treat the * whole clause as incompatible. */ if (!statext_is_compatible_clause_internal(root, @@ -1542,27 +1574,28 @@ statext_is_compatible_clause_internal(PlannerInfo *root, Node *clause, * statext_is_compatible_clause * Determines if the clause is compatible with MCV lists. * - * Currently, we only support the following types of clauses: + * See statext_is_compatible_clause_internal, above, for the basic rules. + * This layer deals with RestrictInfo superstructure and applies permissions + * checks to verify that it's okay to examine all mentioned Vars. * - * (a) OpExprs of the form (Var/Expr op Const), or (Const op Var/Expr), where - * the op is one of ("=", "<", ">", ">=", "<=") + * Arguments: + * clause: clause to be inspected (in RestrictInfo form) + * relid: rel that all Vars in clause must belong to + * *attnums: input/output parameter collecting attribute numbers of all + * mentioned Vars. Note that we do not offset the attribute numbers, + * so we can't cope with system columns. + * *exprs: input/output parameter collecting primitive subclauses within + * the clause tree * - * (b) (Var/Expr IS [NOT] NULL) - * - * (c) combinations using AND/OR/NOT - * - * (d) ScalarArrayOpExprs of the form (Var/Expr op ANY (array)) or (Var/Expr - * op ALL (array)) - * - * In the future, the range of supported clauses may be expanded to more - * complex cases, for example (Var op Var). + * Returns false if there is something we definitively can't handle. + * On true return, we can proceed to match the *exprs against statistics. 
*/ static bool statext_is_compatible_clause(PlannerInfo *root, Node *clause, Index relid, Bitmapset **attnums, List **exprs) { RangeTblEntry *rte = root->simple_rte_array[relid]; - RestrictInfo *rinfo = (RestrictInfo *) clause; + RestrictInfo *rinfo; int clause_relid; Oid userid; @@ -1591,8 +1624,9 @@ statext_is_compatible_clause(PlannerInfo *root, Node *clause, Index relid, } /* Otherwise it must be a RestrictInfo. */ - if (!IsA(rinfo, RestrictInfo)) + if (!IsA(clause, RestrictInfo)) return false; + rinfo = (RestrictInfo *) clause; /* Pseudoconstants are not really interesting here. */ if (rinfo->pseudoconstant) @@ -1614,34 +1648,48 @@ statext_is_compatible_clause(PlannerInfo *root, Node *clause, Index relid, */ userid = rte->checkAsUser ? rte->checkAsUser : GetUserId(); + /* Table-level SELECT privilege is sufficient for all columns */ if (pg_class_aclcheck(rte->relid, userid, ACL_SELECT) != ACLCHECK_OK) { Bitmapset *clause_attnums = NULL; + int attnum = -1; - /* Don't have table privilege, must check individual columns */ - if (*exprs != NIL) + /* + * We have to check per-column privileges. *attnums has the attnums + * for individual Vars we saw, but there may also be Vars within + * subexpressions in *exprs. We can use pull_varattnos() to extract + * those, but there's an impedance mismatch: attnums returned by + * pull_varattnos() are offset by FirstLowInvalidHeapAttributeNumber, + * while attnums within *attnums aren't. Convert *attnums to the + * offset style so we can combine the results. 
+ */ + while ((attnum = bms_next_member(*attnums, attnum)) >= 0) { - pull_varattnos((Node *) exprs, relid, &clause_attnums); - clause_attnums = bms_add_members(clause_attnums, *attnums); + clause_attnums = + bms_add_member(clause_attnums, + attnum - FirstLowInvalidHeapAttributeNumber); } - else - clause_attnums = *attnums; - if (bms_is_member(InvalidAttrNumber, clause_attnums)) - { - /* Have a whole-row reference, must have access to all columns */ - if (pg_attribute_aclcheck_all(rte->relid, userid, ACL_SELECT, - ACLMASK_ALL) != ACLCHECK_OK) - return false; - } - else + /* Now merge attnums from *exprs into clause_attnums */ + if (*exprs != NIL) + pull_varattnos((Node *) *exprs, relid, &clause_attnums); + + attnum = -1; + while ((attnum = bms_next_member(clause_attnums, attnum)) >= 0) { - /* Check the columns referenced by the clause */ - int attnum = -1; + /* Undo the offset */ + AttrNumber attno = attnum + FirstLowInvalidHeapAttributeNumber; - while ((attnum = bms_next_member(clause_attnums, attnum)) >= 0) + if (attno == InvalidAttrNumber) + { + /* Whole-row reference, so must have access to all columns */ + if (pg_attribute_aclcheck_all(rte->relid, userid, ACL_SELECT, + ACLMASK_ALL) != ACLCHECK_OK) + return false; + } + else { - if (pg_attribute_aclcheck(rte->relid, attnum, userid, + if (pg_attribute_aclcheck(rte->relid, attno, userid, ACL_SELECT) != ACLCHECK_OK) return false; } diff --git a/src/backend/statistics/mcv.c b/src/backend/statistics/mcv.c index ef118952c74..e6a60865282 100644 --- a/src/backend/statistics/mcv.c +++ b/src/backend/statistics/mcv.c @@ -404,7 +404,7 @@ count_distinct_groups(int numrows, SortItem *items, MultiSortSupport mss) * order. 
*/ static int -compare_sort_item_count(const void *a, const void *b) +compare_sort_item_count(const void *a, const void *b, void *arg) { SortItem *ia = (SortItem *) a; SortItem *ib = (SortItem *) b; @@ -457,8 +457,8 @@ build_distinct_groups(int numrows, SortItem *items, MultiSortSupport mss, Assert(j + 1 == ngroups); /* Sort the distinct groups by frequency (in descending order). */ - pg_qsort((void *) groups, ngroups, sizeof(SortItem), - compare_sort_item_count); + qsort_interruptible((void *) groups, ngroups, sizeof(SortItem), + compare_sort_item_count, NULL); *ndistinct = ngroups; return groups; @@ -528,8 +528,8 @@ build_column_frequencies(SortItem *groups, int ngroups, } /* sort the values, deduplicate */ - qsort_arg((void *) result[dim], ngroups, sizeof(SortItem), - sort_item_compare, ssup); + qsort_interruptible((void *) result[dim], ngroups, sizeof(SortItem), + sort_item_compare, ssup); /* * Identify distinct values, compute frequency (there might be @@ -695,8 +695,8 @@ statext_mcv_serialize(MCVList *mcvlist, VacAttrStats **stats) PrepareSortSupportFromOrderingOp(typentry->lt_opr, &ssup[dim]); - qsort_arg(values[dim], counts[dim], sizeof(Datum), - compare_scalars_simple, &ssup[dim]); + qsort_interruptible(values[dim], counts[dim], sizeof(Datum), + compare_scalars_simple, &ssup[dim]); /* * Walk through the array and eliminate duplicate values, but keep the diff --git a/src/backend/statistics/mvdistinct.c b/src/backend/statistics/mvdistinct.c index 4481312d61d..4b4ecec9361 100644 --- a/src/backend/statistics/mvdistinct.c +++ b/src/backend/statistics/mvdistinct.c @@ -488,8 +488,8 @@ ndistinct_for_combination(double totalrows, StatsBuildData *data, } /* We can sort the array now ... */ - qsort_arg((void *) items, numrows, sizeof(SortItem), - multi_sort_compare, mss); + qsort_interruptible((void *) items, numrows, sizeof(SortItem), + multi_sort_compare, mss); /* ... 
and count the number of distinct combinations */ diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index 6d0afd34356..dd16c3df60a 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -718,18 +718,28 @@ ReadRecentBuffer(RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum, if (BufferIsLocal(recent_buffer)) { - bufHdr = GetBufferDescriptor(-recent_buffer - 1); + int b = -recent_buffer - 1; + + bufHdr = GetLocalBufferDescriptor(b); buf_state = pg_atomic_read_u32(&bufHdr->state); /* Is it still valid and holding the right tag? */ if ((buf_state & BM_VALID) && BUFFERTAGS_EQUAL(tag, bufHdr->tag)) { - /* Bump local buffer's ref and usage counts. */ + /* + * Bump buffer's ref and usage counts. This is equivalent of + * PinBuffer for a shared buffer. + */ + if (LocalRefCount[b] == 0) + { + if (BUF_STATE_GET_USAGECOUNT(buf_state) < BM_MAX_USAGE_COUNT) + { + buf_state += BUF_USAGECOUNT_ONE; + pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state); + } + } + LocalRefCount[b]++; ResourceOwnerRememberBuffer(CurrentResourceOwner, recent_buffer); - LocalRefCount[-recent_buffer - 1]++; - if (BUF_STATE_GET_USAGECOUNT(buf_state) < BM_MAX_USAGE_COUNT) - pg_atomic_write_u32(&bufHdr->state, - buf_state + BUF_USAGECOUNT_ONE); return true; } diff --git a/src/backend/storage/ipc/latch.c b/src/backend/storage/ipc/latch.c index e81041ae029..7ecd3afe1b9 100644 --- a/src/backend/storage/ipc/latch.c +++ b/src/backend/storage/ipc/latch.c @@ -72,7 +72,7 @@ #if defined(WAIT_USE_EPOLL) || defined(WAIT_USE_POLL) || \ defined(WAIT_USE_KQUEUE) || defined(WAIT_USE_WIN32) /* don't overwrite manual choice */ -#elif defined(HAVE_SYS_EPOLL_H) && defined(HAVE_SYS_SIGNALFD_H) +#elif defined(HAVE_SYS_EPOLL_H) #define WAIT_USE_EPOLL #elif defined(HAVE_KQUEUE) #define WAIT_USE_KQUEUE @@ -84,6 +84,22 @@ #error "no wait set implementation available" #endif +/* + * By default, we use a self-pipe with poll() and a signalfd with 
epoll(), if + * available. We avoid signalfd on illumos for now based on problem reports. + * For testing the choice can also be manually specified. + */ +#if defined(WAIT_USE_POLL) || defined(WAIT_USE_EPOLL) +#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD) +/* don't overwrite manual choice */ +#elif defined(WAIT_USE_EPOLL) && defined(HAVE_SYS_SIGNALFD_H) && \ + !defined(__illumos__) +#define WAIT_USE_SIGNALFD +#else +#define WAIT_USE_SELF_PIPE +#endif +#endif + /* typedef in latch.h */ struct WaitEventSet { @@ -146,12 +162,12 @@ static WaitEventSet *LatchWaitSet; static volatile sig_atomic_t waiting = false; #endif -#ifdef WAIT_USE_EPOLL +#ifdef WAIT_USE_SIGNALFD /* On Linux, we'll receive SIGURG via a signalfd file descriptor. */ static int signal_fd = -1; #endif -#if defined(WAIT_USE_POLL) +#ifdef WAIT_USE_SELF_PIPE /* Read and write ends of the self-pipe */ static int selfpipe_readfd = -1; static int selfpipe_writefd = -1; @@ -164,7 +180,7 @@ static void latch_sigurg_handler(SIGNAL_ARGS); static void sendSelfPipeByte(void); #endif -#if defined(WAIT_USE_POLL) || defined(WAIT_USE_EPOLL) +#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD) static void drain(void); #endif @@ -190,7 +206,7 @@ static inline int WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout, void InitializeLatchSupport(void) { -#if defined(WAIT_USE_POLL) +#if defined(WAIT_USE_SELF_PIPE) int pipefd[2]; if (IsUnderPostmaster) @@ -264,7 +280,7 @@ InitializeLatchSupport(void) pqsignal(SIGURG, latch_sigurg_handler); #endif -#ifdef WAIT_USE_EPOLL +#ifdef WAIT_USE_SIGNALFD sigset_t signalfd_mask; /* Block SIGURG, because we'll receive it through a signalfd. 
*/ @@ -316,7 +332,7 @@ ShutdownLatchSupport(void) LatchWaitSet = NULL; } -#if defined(WAIT_USE_POLL) +#if defined(WAIT_USE_SELF_PIPE) close(selfpipe_readfd); close(selfpipe_writefd); selfpipe_readfd = -1; @@ -324,7 +340,7 @@ ShutdownLatchSupport(void) selfpipe_owner_pid = InvalidPid; #endif -#if defined(WAIT_USE_EPOLL) +#if defined(WAIT_USE_SIGNALFD) close(signal_fd); signal_fd = -1; #endif @@ -341,9 +357,12 @@ InitLatch(Latch *latch) latch->owner_pid = MyProcPid; latch->is_shared = false; -#if defined(WAIT_USE_POLL) +#if defined(WAIT_USE_SELF_PIPE) /* Assert InitializeLatchSupport has been called in this process */ Assert(selfpipe_readfd >= 0 && selfpipe_owner_pid == MyProcPid); +#elif defined(WAIT_USE_SIGNALFD) + /* Assert InitializeLatchSupport has been called in this process */ + Assert(signal_fd >= 0); #elif defined(WAIT_USE_WIN32) latch->event = CreateEvent(NULL, TRUE, FALSE, NULL); if (latch->event == NULL) @@ -405,9 +424,12 @@ OwnLatch(Latch *latch) /* Sanity checks */ Assert(latch->is_shared); -#if defined(WAIT_USE_POLL) +#if defined(WAIT_USE_SELF_PIPE) /* Assert InitializeLatchSupport has been called in this process */ Assert(selfpipe_readfd >= 0 && selfpipe_owner_pid == MyProcPid); +#elif defined(WAIT_USE_SIGNALFD) + /* Assert InitializeLatchSupport has been called in this process */ + Assert(signal_fd >= 0); #endif if (latch->owner_pid != 0) @@ -618,7 +640,7 @@ SetLatch(Latch *latch) return; else if (owner_pid == MyProcPid) { -#if defined(WAIT_USE_POLL) +#if defined(WAIT_USE_SELF_PIPE) if (waiting) sendSelfPipeByte(); #else @@ -983,9 +1005,9 @@ AddWaitEventToSet(WaitEventSet *set, uint32 events, pgsocket fd, Latch *latch, { set->latch = latch; set->latch_pos = event->pos; -#if defined(WAIT_USE_POLL) +#if defined(WAIT_USE_SELF_PIPE) event->fd = selfpipe_readfd; -#elif defined(WAIT_USE_EPOLL) +#elif defined(WAIT_USE_SIGNALFD) event->fd = signal_fd; #else event->fd = PGINVALID_SOCKET; @@ -2102,7 +2124,7 @@ GetNumRegisteredWaitEvents(WaitEventSet *set) 
return set->nevents; } -#if defined(WAIT_USE_POLL) +#if defined(WAIT_USE_SELF_PIPE) /* * SetLatch uses SIGURG to wake up the process waiting on the latch. @@ -2153,7 +2175,7 @@ sendSelfPipeByte(void) #endif -#if defined(WAIT_USE_POLL) || defined(WAIT_USE_EPOLL) +#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD) /* * Read all available data from self-pipe or signalfd. @@ -2169,7 +2191,7 @@ drain(void) int rc; int fd; -#ifdef WAIT_USE_POLL +#ifdef WAIT_USE_SELF_PIPE fd = selfpipe_readfd; #else fd = signal_fd; @@ -2187,7 +2209,7 @@ drain(void) else { waiting = false; -#ifdef WAIT_USE_POLL +#ifdef WAIT_USE_SELF_PIPE elog(ERROR, "read() on self-pipe failed: %m"); #else elog(ERROR, "read() on signalfd failed: %m"); @@ -2197,7 +2219,7 @@ drain(void) else if (rc == 0) { waiting = false; -#ifdef WAIT_USE_POLL +#ifdef WAIT_USE_SELF_PIPE elog(ERROR, "unexpected EOF on self-pipe"); #else elog(ERROR, "unexpected EOF on signalfd"); diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c index 57c03cce7d9..89fafdbedc1 100644 --- a/src/backend/storage/ipc/procarray.c +++ b/src/backend/storage/ipc/procarray.c @@ -277,6 +277,11 @@ static ProcArrayStruct *procArray; static PGPROC *allProcs; static TMGXACT *allTmGxact; +/* + * Cache to reduce overhead of repeated calls to TransactionIdIsInProgress() + */ +static TransactionId cachedXidIsNotInProgress = InvalidTransactionId; + /* * Bookkeeping for tracking emulated transactions in recovery */ @@ -1486,7 +1491,7 @@ TransactionIdIsInProgress(TransactionId xid) * already known to be completed, we can fall out without any access to * shared memory. 
*/ - if (TransactionIdIsKnownCompleted(xid)) + if (TransactionIdEquals(cachedXidIsNotInProgress, xid)) { xc_by_known_xact_inc(); return false; @@ -1644,6 +1649,7 @@ TransactionIdIsInProgress(TransactionId xid) if (nxids == 0) { xc_no_overflow_inc(); + cachedXidIsNotInProgress = xid; return false; } @@ -1658,7 +1664,10 @@ TransactionIdIsInProgress(TransactionId xid) xc_slow_answer_inc(); if (TransactionIdDidAbort(xid)) + { + cachedXidIsNotInProgress = xid; return false; + } /* * It isn't aborted, so check whether the transaction tree it belongs to @@ -1676,6 +1685,7 @@ TransactionIdIsInProgress(TransactionId xid) } } + cachedXidIsNotInProgress = xid; return false; } diff --git a/src/backend/storage/ipc/signalfuncs.c b/src/backend/storage/ipc/signalfuncs.c index 0d5ccaa201d..7f8e420a6a5 100644 --- a/src/backend/storage/ipc/signalfuncs.c +++ b/src/backend/storage/ipc/signalfuncs.c @@ -72,8 +72,13 @@ pg_signal_backend(int pid, int sig, char *msg) return SIGNAL_BACKEND_ERROR; } - /* Only allow superusers to signal superuser-owned backends. */ - if (superuser_arg(proc->roleId) && !superuser()) + /* + * Only allow superusers to signal superuser-owned backends. Any process + * not advertising a role might have the importance of a superuser-owned + * backend, so treat it that way. + */ + if ((!OidIsValid(proc->roleId) || superuser_arg(proc->roleId)) && + !superuser()) return SIGNAL_BACKEND_NOSUPERUSER; /* Users can signal backends they have role membership in. 
*/ diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c index 8eefbf93b88..72d4fc4c89b 100644 --- a/src/backend/tcop/utility.c +++ b/src/backend/tcop/utility.c @@ -2049,7 +2049,7 @@ ProcessUtilitySlow(ParseState *pstate, /* * The QD might have looked up the OID of the base table - * already, and stashed it in stmt->relid + * already, and stashed it in stmt->relationOid */ if (stmt->relationOid) relid = stmt->relationOid; diff --git a/src/backend/tsearch/ts_typanalyze.c b/src/backend/tsearch/ts_typanalyze.c index 1ebba4b3f56..504ba1569ee 100644 --- a/src/backend/tsearch/ts_typanalyze.c +++ b/src/backend/tsearch/ts_typanalyze.c @@ -44,8 +44,10 @@ static void prune_lexemes_hashtable(HTAB *lexemes_tab, int b_current); static uint32 lexeme_hash(const void *key, Size keysize); static int lexeme_match(const void *key1, const void *key2, Size keysize); static int lexeme_compare(const void *key1, const void *key2); -static int trackitem_compare_frequencies_desc(const void *e1, const void *e2); -static int trackitem_compare_lexemes(const void *e1, const void *e2); +static int trackitem_compare_frequencies_desc(const void *e1, const void *e2, + void *arg); +static int trackitem_compare_lexemes(const void *e1, const void *e2, + void *arg); /* @@ -347,8 +349,8 @@ compute_tsvector_stats(VacAttrStats *stats, */ if (num_mcelem < track_len) { - qsort(sort_table, track_len, sizeof(TrackItem *), - trackitem_compare_frequencies_desc); + qsort_interruptible(sort_table, track_len, sizeof(TrackItem *), + trackitem_compare_frequencies_desc, NULL); /* reset minfreq to the smallest frequency we're keeping */ minfreq = sort_table[num_mcelem - 1]->frequency; } @@ -376,8 +378,8 @@ compute_tsvector_stats(VacAttrStats *stats, * presorted we can employ binary search for that. See * ts_selfuncs.c for a real usage scenario. 
*/ - qsort(sort_table, num_mcelem, sizeof(TrackItem *), - trackitem_compare_lexemes); + qsort_interruptible(sort_table, num_mcelem, sizeof(TrackItem *), + trackitem_compare_lexemes, NULL); /* Must copy the target values into anl_context */ old_context = MemoryContextSwitchTo(stats->anl_context); @@ -510,10 +512,10 @@ lexeme_compare(const void *key1, const void *key2) } /* - * qsort() comparator for sorting TrackItems on frequencies (descending sort) + * Comparator for sorting TrackItems on frequencies (descending sort) */ static int -trackitem_compare_frequencies_desc(const void *e1, const void *e2) +trackitem_compare_frequencies_desc(const void *e1, const void *e2, void *arg) { const TrackItem *const *t1 = (const TrackItem *const *) e1; const TrackItem *const *t2 = (const TrackItem *const *) e2; @@ -522,10 +524,10 @@ trackitem_compare_frequencies_desc(const void *e1, const void *e2) } /* - * qsort() comparator for sorting TrackItems on lexemes + * Comparator for sorting TrackItems on lexemes */ static int -trackitem_compare_lexemes(const void *e1, const void *e2) +trackitem_compare_lexemes(const void *e1, const void *e2, void *arg) { const TrackItem *const *t1 = (const TrackItem *const *) e1; const TrackItem *const *t2 = (const TrackItem *const *) e2; diff --git a/src/backend/utils/activity/backend_status.c b/src/backend/utils/activity/backend_status.c index 9a0918bceff..217483c1c61 100644 --- a/src/backend/utils/activity/backend_status.c +++ b/src/backend/utils/activity/backend_status.c @@ -1102,6 +1102,22 @@ pgstat_fetch_stat_local_beentry(int beid) return &localBackendStatusTable[beid - 1]; } +/* -- mdb admin patch -- */ +LocalPgBackendStatus * +pgstat_fetch_stat_local_beentry_by_pid(int pid) +{ + pgstat_read_current_status(); + + for (int i = 1; i <= localNumBackends; ++i) { + if (localBackendStatusTable[i - 1].backendStatus.st_procpid == pid) { + return &localBackendStatusTable[i - 1]; + } + } + + return NULL; +} + +/* -- mdb admin patch end -- */ /* 
---------- * pgstat_fetch_stat_numbackends() - diff --git a/src/backend/utils/adt/Makefile b/src/backend/utils/adt/Makefile index bd5479c546b..58dd15a6f8b 100644 --- a/src/backend/utils/adt/Makefile +++ b/src/backend/utils/adt/Makefile @@ -117,7 +117,8 @@ OBJS = \ windowfuncs.o \ xid.o \ xid8funcs.o \ - xml.o + xml.o \ + mdb.o jsonpath_scan.c: FLEXFLAGS = -CF -p -p jsonpath_scan.c: FLEX_NO_BACKUP=yes diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c index 714a536e93d..e3463f636ae 100644 --- a/src/backend/utils/adt/acl.c +++ b/src/backend/utils/adt/acl.c @@ -116,6 +116,7 @@ static AclResult pg_role_aclcheck(Oid role_oid, Oid roleid, AclMode mode); static void RoleMembershipCacheCallback(Datum arg, int cacheid, uint32 hashvalue); +static bool has_privs_of_unwanted_system_role(Oid role); /* * getid @@ -4991,9 +4992,65 @@ roles_is_member_of(Oid roleid, enum RoleRecurseType type, * set; for such roles, membership implies the ability to do SET ROLE, but * the privileges are not available until you've done so. */ + +/* +* This is basically original postgresql privs-check function +*/ + +// -- mdb_superuser patch + +bool +has_privs_of_role_strict(Oid member, Oid role) +{ + /* Fast path for simple case */ + if (member == role) + return true; + + /* Superusers have every privilege, so are part of every role */ + if (superuser_arg(member)) + return true; + + /* + * Find all the roles that member has the privileges of, including + * multi-level recursion, then see if target role is any one of them. 
+ */ + return list_member_oid(roles_is_member_of(member, ROLERECURSE_PRIVS, + InvalidOid, NULL), + role); +} + +/* +* Check that role is either one of the "dangerous" system roles +* or has "strict" (not through mdb_admin or mdb_superuser) +* privs of this role +*/ + +static bool +has_privs_of_unwanted_system_role(Oid role) { + if (has_privs_of_role_strict(role, ROLE_PG_READ_SERVER_FILES)) { + return true; + } + if (has_privs_of_role_strict(role, ROLE_PG_WRITE_SERVER_FILES)) { + return true; + } + if (has_privs_of_role_strict(role, ROLE_PG_EXECUTE_SERVER_PROGRAM)) { + return true; + } + if (has_privs_of_role_strict(role, ROLE_PG_READ_ALL_DATA)) { + return true; + } + if (has_privs_of_role_strict(role, ROLE_PG_WRITE_ALL_DATA)) { + return true; + } + + return false; +} + bool has_privs_of_role(Oid member, Oid role) { + Oid mdb_superuser_roleoid; + /* Fast path for simple case */ if (member == role) return true; @@ -5002,6 +5059,23 @@ has_privs_of_role(Oid member, Oid role) if (superuser_arg(member)) return true; + mdb_superuser_roleoid = get_role_oid("mdb_superuser", true /*if nobody created mdb_superuser role in this database*/); + + if (is_member_of_role(member, mdb_superuser_roleoid)) { + /* if target role is superuser, disallow */ + if (!superuser_arg(role)) { + /* we want mdb_roles_admin to bypass + * has_priv_of_roles test + * if target role is neither superuser nor + * some dangerous system role + */ + if (!has_privs_of_unwanted_system_role(role)) { + return true; + } + } + } + + /* * Find all the roles that member has the privileges of, including * multi-level recursion, then see if target role is any one of them. @@ -5011,6 +5085,49 @@ has_privs_of_role(Oid member, Oid role) role); } + +// -- mdb_superuser patch + +// -- non-upstream patch begin +/* + * Is userId allowed to bypass ownership check + * and transfer ownership to ownerId role? 
+ */ +bool +mdb_admin_allow_bypass_owner_checks(Oid userId, Oid ownerId) +{ + Oid mdb_admin_roleoid; + /* + * Never allow anyone to grant objects to + * superusers. + * This can result in various CVEs. + * For paranoid reasons, check this even before + * membership of mdb_admin role. + */ + if (superuser_arg(ownerId)) { + return false; + } + + mdb_admin_roleoid = get_role_oid("mdb_admin", true /*if nobody created mdb_admin role in this database*/); + /* Is userId actually member of mdb admin? */ + if (!is_member_of_role(userId, mdb_admin_roleoid)) { + /* if no, disallow. */ + return false; + } + + /* + * Now, we need to check if ownerId + * is some dangerous role to transfer membership to. + * + * For now, we check that ownerId does not have + * privilege to execute server program or/and + * read/write server files, or/and pg read/write all data + */ + + /* All checks passed, hope will not be hacked here (again) */ + return !has_privs_of_unwanted_system_role(ownerId); +} + +// -- non-upstream patch end /* * Is member a member of role (directly or indirectly)? @@ -5051,6 +5168,53 @@ check_is_member_of_role(Oid member, Oid role) GetUserNameFromId(role, false)))); } +// -- mdb admin patch +/* + * check_mdb_admin_is_member_of_role + * is_member_of_role with a standard permission-violation error if not in usual case + * In case `member` is in mdb_admin we check that role is neither of superuser, pg_read/write + * server files nor pg_execute_server_program or pg_read/write all data + */ +void +check_mdb_admin_is_member_of_role(Oid member, Oid role) +{ + Oid mdb_admin_roleoid; + /* fast path - if we are superuser, it's ok */ + if (superuser_arg(member)) { + return; + } + + mdb_admin_roleoid = get_role_oid("mdb_admin", true /*if nobody created mdb_admin role in this database*/); + /* Is userId actually member of mdb admin? 
*/ + if (is_member_of_role(member, mdb_admin_roleoid)) { + + /* member is mdb admin */ + if (superuser_arg(role)) { + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("cannot transfer ownership to superuser \"%s\"", + GetUserNameFromId(role, false)))); + } + + if (has_privs_of_unwanted_system_role(role)) { + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("forbidden to transfer ownership to this system role in Cloud"))); + } + } else { + /* if no, check membership transfer in usual way. */ + + if (!is_member_of_role(member, role)) { + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be member of role \"%s\"", + GetUserNameFromId(role, false)))); + } + } +} + +// -- mdb admin patch + /* * Is member a member of role, not considering superuserness? * @@ -5175,6 +5339,53 @@ select_best_grantor(Oid roleId, AclMode privileges, List *roles_list; int nrights; ListCell *l; + Oid mdb_superuser_roleoid; /* * The object owner is always treated as having all grant options, so if @@ -5189,6 +5354,16 @@ select_best_grantor(Oid roleId, AclMode privileges, return; } + mdb_superuser_roleoid = get_role_oid("mdb_superuser", true /*if nobody created mdb_superuser role in this database*/); + + if (is_member_of_role(GetUserId(), mdb_superuser_roleoid) + && has_privs_of_role(GetUserId(), ownerId)) { + *grantorId = mdb_superuser_roleoid; + AclMode mdb_superuser_allowed_privs = needed_goptions; + *grantOptions = mdb_superuser_allowed_privs; + return; + } + /* * Otherwise we have to do a careful search to see if roleId has the * privileges of any suitable role. 
Note: we can hang onto the result of @@ -5197,7 +5372,6 @@ select_best_grantor(Oid roleId, AclMode privileges, */ roles_list = roles_is_member_of(roleId, ROLERECURSE_PRIVS, InvalidOid, NULL); - /* initialize candidate result as default */ *grantorId = roleId; *grantOptions = ACL_NO_RIGHTS; diff --git a/src/backend/utils/adt/array_typanalyze.c b/src/backend/utils/adt/array_typanalyze.c index c5008a0c169..e873d228592 100644 --- a/src/backend/utils/adt/array_typanalyze.c +++ b/src/backend/utils/adt/array_typanalyze.c @@ -86,9 +86,9 @@ static void prune_element_hashtable(HTAB *elements_tab, int b_current); static uint32 element_hash(const void *key, Size keysize); static int element_match(const void *key1, const void *key2, Size keysize); static int element_compare(const void *key1, const void *key2); -static int trackitem_compare_frequencies_desc(const void *e1, const void *e2); -static int trackitem_compare_element(const void *e1, const void *e2); -static int countitem_compare_count(const void *e1, const void *e2); +static int trackitem_compare_frequencies_desc(const void *e1, const void *e2, void *arg); +static int trackitem_compare_element(const void *e1, const void *e2, void *arg); +static int countitem_compare_count(const void *e1, const void *e2, void *arg); /* @@ -502,8 +502,8 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, */ if (num_mcelem < track_len) { - qsort(sort_table, track_len, sizeof(TrackItem *), - trackitem_compare_frequencies_desc); + qsort_interruptible(sort_table, track_len, sizeof(TrackItem *), + trackitem_compare_frequencies_desc, NULL); /* reset minfreq to the smallest frequency we're keeping */ minfreq = sort_table[num_mcelem - 1]->frequency; } @@ -522,8 +522,8 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, * the element type's default comparison function. This permits * fast binary searches in selectivity estimation functions. 
*/ - qsort(sort_table, num_mcelem, sizeof(TrackItem *), - trackitem_compare_element); + qsort_interruptible(sort_table, num_mcelem, sizeof(TrackItem *), + trackitem_compare_element, NULL); /* Must copy the target values into anl_context */ old_context = MemoryContextSwitchTo(stats->anl_context); @@ -599,8 +599,9 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, { sorted_count_items[j++] = count_item; } - qsort(sorted_count_items, count_items_count, - sizeof(DECountItem *), countitem_compare_count); + qsort_interruptible(sorted_count_items, count_items_count, + sizeof(DECountItem *), + countitem_compare_count, NULL); /* * Prepare to fill stanumbers with the histogram, followed by the @@ -751,10 +752,10 @@ element_compare(const void *key1, const void *key2) } /* - * qsort() comparator for sorting TrackItems by frequencies (descending sort) + * Comparator for sorting TrackItems by frequencies (descending sort) */ static int -trackitem_compare_frequencies_desc(const void *e1, const void *e2) +trackitem_compare_frequencies_desc(const void *e1, const void *e2, void *arg) { const TrackItem *const *t1 = (const TrackItem *const *) e1; const TrackItem *const *t2 = (const TrackItem *const *) e2; @@ -763,10 +764,10 @@ trackitem_compare_frequencies_desc(const void *e1, const void *e2) } /* - * qsort() comparator for sorting TrackItems by element values + * Comparator for sorting TrackItems by element values */ static int -trackitem_compare_element(const void *e1, const void *e2) +trackitem_compare_element(const void *e1, const void *e2, void *arg) { const TrackItem *const *t1 = (const TrackItem *const *) e1; const TrackItem *const *t2 = (const TrackItem *const *) e2; @@ -775,10 +776,10 @@ trackitem_compare_element(const void *e1, const void *e2) } /* - * qsort() comparator for sorting DECountItems by count + * Comparator for sorting DECountItems by count */ static int -countitem_compare_count(const void *e1, const void *e2) 
+countitem_compare_count(const void *e1, const void *e2, void *arg) { + const DECountItem *const *t1 = (const DECountItem *const *) e1; + const DECountItem *const *t2 = (const DECountItem *const *) e2; diff --git a/src/backend/utils/adt/mdb.c b/src/backend/utils/adt/mdb.c new file mode 100644 index 00000000000..e5c695de1b6 --- /dev/null +++ b/src/backend/utils/adt/mdb.c @@ -0,0 +1,37 @@ +/*------------------------------------------------------------------------- + * + * mdb.c + * mdb routines + * + * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * src/backend/utils/adt/mdb.c + * + *------------------------------------------------------------------------- + */ + + +#include "postgres.h" +#include "fmgr.h" +#include "utils/fmgrprotos.h" + +/* + * mdb_locale_enabled + * Check that mdb locale patch is enabled + */ +Datum +mdb_locale_enabled(PG_FUNCTION_ARGS) +{ + bool res; + +#if USE_MDBLOCALES + res = true; +#else + res = false; +#endif + + PG_RETURN_BOOL(res); +} diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c index eb8877fd7e8..eba029daa74 100644 --- a/src/backend/utils/adt/misc.c +++ b/src/backend/utils/adt/misc.c @@ -15,6 +15,7 @@ #include "postgres.h" #include +#include #include #include #include @@ -312,6 +313,9 @@ pg_tablespace_location(PG_FUNCTION_ARGS) char sourcepath[MAXPGPATH]; char targetpath[MAXPGPATH]; int rllen; +#ifndef WIN32 + struct stat st; +#endif /* * It's useful to apply this function to pg_class.reltablespace, wherein @@ -336,6 +340,31 @@ pg_tablespace_location(PG_FUNCTION_ARGS) */ snprintf(sourcepath, sizeof(sourcepath), "pg_tblspc/%u", tablespaceOid); + /* + * Before reading the link, check if the source path is a link or a + * junction point. Note that a directory is possible for a tablespace + * created with allow_in_place_tablespaces enabled. 
If a directory is + * found, a relative path to the data directory is returned. + */ +#ifdef WIN32 + if (!pgwin32_is_junction(sourcepath)) + PG_RETURN_TEXT_P(cstring_to_text(sourcepath)); +#else + if (lstat(sourcepath, &st) < 0) + { + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not stat file \"%s\": %m", + sourcepath))); + } + + if (!S_ISLNK(st.st_mode)) + PG_RETURN_TEXT_P(cstring_to_text(sourcepath)); +#endif + + /* + * In presence of a link or a junction point, return the path pointing to. + */ rllen = readlink(sourcepath, targetpath, sizeof(targetpath)); if (rllen < 0) ereport(ERROR, diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c index 11392891538..a9acb875eee 100644 --- a/src/backend/utils/adt/pg_locale.c +++ b/src/backend/utils/adt/pg_locale.c @@ -66,6 +66,7 @@ #include "utils/memutils.h" #include "utils/pg_locale.h" #include "utils/syscache.h" +#include "common/mdb_locale.h" #ifdef USE_ICU #include @@ -147,7 +148,7 @@ pg_perm_setlocale(int category, const char *locale) const char *envvar; #ifndef WIN32 - result = setlocale(category, locale); + result = SETLOCALE(category, locale); #else /* @@ -165,7 +166,7 @@ pg_perm_setlocale(int category, const char *locale) } else #endif - result = setlocale(category, locale); + result = SETLOCALE(category, locale); #endif /* WIN32 */ if (result == NULL) @@ -252,7 +253,7 @@ check_locale(int category, const char *locale, char **canonname) if (canonname) *canonname = NULL; /* in case of failure */ - save = setlocale(category, NULL); + save = SETLOCALE(category, NULL); if (!save) return false; /* won't happen, we hope */ @@ -260,14 +261,14 @@ check_locale(int category, const char *locale, char **canonname) save = pstrdup(save); /* set the locale with setlocale, to see if it accepts it. */ - res = setlocale(category, locale); + res = SETLOCALE(category, locale); /* save canonical name if requested. */ if (res && canonname) *canonname = pstrdup(res); /* restore old value. 
*/ - if (!setlocale(category, save)) + if (!SETLOCALE(category, save)) elog(WARNING, "failed to restore old locale \"%s\"", save); pfree(save); @@ -501,12 +502,12 @@ PGLC_localeconv(void) memset(&worklconv, 0, sizeof(worklconv)); /* Save prevailing values of monetary and numeric locales */ - save_lc_monetary = setlocale(LC_MONETARY, NULL); + save_lc_monetary = SETLOCALE(LC_MONETARY, NULL); if (!save_lc_monetary) elog(ERROR, "setlocale(NULL) failed"); save_lc_monetary = pstrdup(save_lc_monetary); - save_lc_numeric = setlocale(LC_NUMERIC, NULL); + save_lc_numeric = SETLOCALE(LC_NUMERIC, NULL); if (!save_lc_numeric) elog(ERROR, "setlocale(NULL) failed"); save_lc_numeric = pstrdup(save_lc_numeric); @@ -528,7 +529,7 @@ PGLC_localeconv(void) */ /* Save prevailing value of ctype locale */ - save_lc_ctype = setlocale(LC_CTYPE, NULL); + save_lc_ctype = SETLOCALE(LC_CTYPE, NULL); if (!save_lc_ctype) elog(ERROR, "setlocale(NULL) failed"); save_lc_ctype = pstrdup(save_lc_ctype); @@ -536,11 +537,11 @@ PGLC_localeconv(void) /* Here begins the critical section where we must not throw error */ /* use numeric to set the ctype */ - setlocale(LC_CTYPE, locale_numeric); + SETLOCALE(LC_CTYPE, locale_numeric); #endif /* Get formatting information for numeric */ - setlocale(LC_NUMERIC, locale_numeric); + SETLOCALE(LC_NUMERIC, locale_numeric); extlconv = localeconv(); /* Must copy data now in case setlocale() overwrites it */ @@ -550,11 +551,11 @@ PGLC_localeconv(void) #ifdef WIN32 /* use monetary to set the ctype */ - setlocale(LC_CTYPE, locale_monetary); + SETLOCALE(LC_CTYPE, locale_monetary); #endif /* Get formatting information for monetary */ - setlocale(LC_MONETARY, locale_monetary); + SETLOCALE(LC_MONETARY, locale_monetary); extlconv = localeconv(); /* Must copy data now in case setlocale() overwrites it */ @@ -584,12 +585,12 @@ PGLC_localeconv(void) * should fail. 
*/ #ifdef WIN32 - if (!setlocale(LC_CTYPE, save_lc_ctype)) + if (!SETLOCALE(LC_CTYPE, save_lc_ctype)) elog(FATAL, "failed to restore LC_CTYPE to \"%s\"", save_lc_ctype); #endif - if (!setlocale(LC_MONETARY, save_lc_monetary)) + if (!SETLOCALE(LC_MONETARY, save_lc_monetary)) elog(FATAL, "failed to restore LC_MONETARY to \"%s\"", save_lc_monetary); - if (!setlocale(LC_NUMERIC, save_lc_numeric)) + if (!SETLOCALE(LC_NUMERIC, save_lc_numeric)) elog(FATAL, "failed to restore LC_NUMERIC to \"%s\"", save_lc_numeric); /* @@ -773,7 +774,7 @@ cache_locale_time(void) */ /* Save prevailing value of time locale */ - save_lc_time = setlocale(LC_TIME, NULL); + save_lc_time = SETLOCALE(LC_TIME, NULL); if (!save_lc_time) elog(ERROR, "setlocale(NULL) failed"); save_lc_time = pstrdup(save_lc_time); @@ -788,16 +789,16 @@ cache_locale_time(void) */ /* Save prevailing value of ctype locale */ - save_lc_ctype = setlocale(LC_CTYPE, NULL); + save_lc_ctype = SETLOCALE(LC_CTYPE, NULL); if (!save_lc_ctype) elog(ERROR, "setlocale(NULL) failed"); save_lc_ctype = pstrdup(save_lc_ctype); /* use lc_time to set the ctype */ - setlocale(LC_CTYPE, locale_time); + SETLOCALE(LC_CTYPE, locale_time); #endif - setlocale(LC_TIME, locale_time); + SETLOCALE(LC_TIME, locale_time); /* We use times close to current time as data for strftime(). */ timenow = time(NULL); @@ -846,10 +847,10 @@ cache_locale_time(void) * failure to do so is fatal. 
*/ #ifdef WIN32 - if (!setlocale(LC_CTYPE, save_lc_ctype)) + if (!SETLOCALE(LC_CTYPE, save_lc_ctype)) elog(FATAL, "failed to restore LC_CTYPE to \"%s\"", save_lc_ctype); #endif - if (!setlocale(LC_TIME, save_lc_time)) + if (!SETLOCALE(LC_TIME, save_lc_time)) elog(FATAL, "failed to restore LC_TIME to \"%s\"", save_lc_time); /* @@ -1225,7 +1226,7 @@ check_strxfrm_bug(void) ereport(ERROR, (errcode(ERRCODE_SYSTEM_ERROR), errmsg_internal("strxfrm(), in locale \"%s\", writes past the specified array length", - setlocale(LC_COLLATE, NULL)), + SETLOCALE(LC_COLLATE, NULL)), errhint("Apply system library package updates."))); } @@ -1339,7 +1340,7 @@ lc_collate_is_c(Oid collation) if (result >= 0) return (bool) result; - localeptr = setlocale(LC_COLLATE, NULL); + localeptr = SETLOCALE(LC_COLLATE, NULL); if (!localeptr) elog(ERROR, "invalid LC_COLLATE setting"); @@ -1389,7 +1390,7 @@ lc_ctype_is_c(Oid collation) if (result >= 0) return (bool) result; - localeptr = setlocale(LC_CTYPE, NULL); + localeptr = SETLOCALE(LC_CTYPE, NULL); if (!localeptr) elog(ERROR, "invalid LC_CTYPE setting"); @@ -1518,8 +1519,10 @@ pg_newlocale_from_collation(Oid collid) /* Normal case where they're the same */ errno = 0; #ifndef WIN32 - loc = newlocale(LC_COLLATE_MASK | LC_CTYPE_MASK, collcollate, + + loc = NEWLOCALE(LC_COLLATE_MASK | LC_CTYPE_MASK, collcollate, NULL); + #else loc = _create_locale(LC_ALL, collcollate); #endif @@ -1533,11 +1536,11 @@ pg_newlocale_from_collation(Oid collid) locale_t loc1; errno = 0; - loc1 = newlocale(LC_COLLATE_MASK, collcollate, NULL); + loc1 = NEWLOCALE(LC_COLLATE_MASK, collcollate, NULL); if (!loc1) report_newlocale_failure(collcollate); errno = 0; - loc = newlocale(LC_CTYPE_MASK, collctype, loc1); + loc = NEWLOCALE(LC_CTYPE_MASK, collctype, loc1); if (!loc) report_newlocale_failure(collctype); #else @@ -1680,12 +1683,16 @@ get_collation_actual_version(char collprovider, const char *collcollate) { #if defined(__GLIBC__) /* Use the glibc version because we don't 
have anything better. */ +#ifdef USE_MDBLOCALES + collversion = pstrdup(mdb_localesversion()); +#else collversion = pstrdup(gnu_get_libc_version()); +#endif #elif defined(LC_VERSION_MASK) locale_t loc; /* Look up FreeBSD collation version. */ - loc = newlocale(LC_COLLATE, collcollate, NULL); + loc = NEWLOCALE(LC_COLLATE, collcollate, NULL); if (loc) { collversion = diff --git a/src/backend/utils/adt/rangetypes_typanalyze.c b/src/backend/utils/adt/rangetypes_typanalyze.c index 0d01252cd7c..9d5cf897c45 100644 --- a/src/backend/utils/adt/rangetypes_typanalyze.c +++ b/src/backend/utils/adt/rangetypes_typanalyze.c @@ -32,7 +32,7 @@ #include "utils/rangetypes.h" #include "utils/multirangetypes.h" -static int float8_qsort_cmp(const void *a1, const void *a2); +static int float8_qsort_cmp(const void *a1, const void *a2, void *arg); static int range_bound_qsort_cmp(const void *a1, const void *a2, void *arg); static void compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, int samplerows, @@ -93,7 +93,7 @@ multirange_typanalyze(PG_FUNCTION_ARGS) * Comparison function for sorting float8s, used for range lengths. 
*/ static int -float8_qsort_cmp(const void *a1, const void *a2) +float8_qsort_cmp(const void *a1, const void *a2, void *arg) { const float8 *f1 = (const float8 *) a1; const float8 *f2 = (const float8 *) a2; @@ -280,10 +280,10 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, if (non_empty_cnt >= 2) { /* Sort bound values */ - qsort_arg(lowers, non_empty_cnt, sizeof(RangeBound), - range_bound_qsort_cmp, typcache); - qsort_arg(uppers, non_empty_cnt, sizeof(RangeBound), - range_bound_qsort_cmp, typcache); + qsort_interruptible(lowers, non_empty_cnt, sizeof(RangeBound), + range_bound_qsort_cmp, typcache); + qsort_interruptible(uppers, non_empty_cnt, sizeof(RangeBound), + range_bound_qsort_cmp, typcache); num_hist = non_empty_cnt; if (num_hist > num_bins) @@ -345,7 +345,8 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, * Ascending sort of range lengths for further filling of * histogram */ - qsort(lengths, non_empty_cnt, sizeof(float8), float8_qsort_cmp); + qsort_interruptible(lengths, non_empty_cnt, sizeof(float8), + float8_qsort_cmp, NULL); num_hist = non_empty_cnt; if (num_hist > num_bins) diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index cdbd27d4d95..ea8156bebad 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -58,6 +58,7 @@ #include "parser/parse_node.h" #include "parser/parse_oper.h" #include "parser/parse_cte.h" +#include "parser/parse_relation.h" #include "parser/parser.h" #include "parser/parsetree.h" #include "rewrite/rewriteHandler.h" @@ -4241,9 +4242,9 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte, int j; /* - * Extract the RTE's "real" column names. This is comparable to - * get_rte_attribute_name, except that it's important to disregard dropped - * columns. We put NULL into the array for a dropped column. + * Construct an array of the current "real" column names of the RTE. 
+ * real_colnames[] will be indexed by physical column number, with NULL + * entries for dropped columns. */ if (rte->rtekind == RTE_RELATION) { @@ -4270,19 +4271,43 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte, } else { - /* Otherwise use the column names from eref */ + /* Otherwise get the column names from eref or expandRTE() */ + List *colnames; ListCell *lc; - ncolumns = list_length(rte->eref->colnames); + /* + * Functions returning composites have the annoying property that some + * of the composite type's columns might have been dropped since the + * query was parsed. If possible, use expandRTE() to handle that + * case, since it has the tedious logic needed to find out about + * dropped columns. However, if we're explaining a plan, then we + * don't have rte->functions because the planner thinks that won't be + * needed later, and that breaks expandRTE(). So in that case we have + * to rely on rte->eref, which may lead us to report a dropped + * column's old name; that seems close enough for EXPLAIN's purposes. + * + * For non-RELATION, non-FUNCTION RTEs, we can just look at rte->eref, + * which should be sufficiently up-to-date: no other RTE types can + * have columns get dropped from under them after parsing. + */ + if (rte->rtekind == RTE_FUNCTION && rte->functions != NIL) + { + /* Since we're not creating Vars, rtindex etc. don't matter */ + expandRTE(rte, 1, 0, -1, true /* include dropped */ , + &colnames, NULL); + } + else + colnames = rte->eref->colnames; + + ncolumns = list_length(colnames); real_colnames = (char **) palloc(ncolumns * sizeof(char *)); i = 0; - foreach(lc, rte->eref->colnames) + foreach(lc, colnames) { /* - * If the column name shown in eref is an empty string, then it's - * a column that was dropped at the time of parsing the query, so - * treat it as dropped. + * If the column name we find here is an empty string, then it's a + * dropped column, so change to NULL. 
*/ char *cname = strVal(lfirst(lc)); @@ -7296,9 +7321,16 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context) elog(ERROR, "invalid attnum %d for relation \"%s\"", attnum, rte->eref->aliasname); attname = colinfo->colnames[attnum - 1]; - if (attname == NULL) /* dropped column? */ - elog(ERROR, "invalid attnum %d for relation \"%s\"", - attnum, rte->eref->aliasname); + + /* + * If we find a Var referencing a dropped column, it seems better to + * print something (anything) than to fail. In general this should + * not happen, but there are specific cases involving functions + * returning named composite types where we don't sufficiently enforce + * that you can't drop a column that's referenced in some view. + */ + if (attname == NULL) + attname = "?dropped?column?"; } else { diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c index 6001982a6d2..10017cb583a 100644 --- a/src/backend/utils/adt/selfuncs.c +++ b/src/backend/utils/adt/selfuncs.c @@ -4336,6 +4336,7 @@ convert_to_scalar(Datum value, Oid valuetypid, Oid collid, double *scaledvalue, case REGOPERATOROID: case REGCLASSOID: case REGTYPEOID: + case REGCOLLATIONOID: case REGCONFIGOID: case REGDICTIONARYOID: case REGROLEOID: @@ -4467,6 +4468,7 @@ convert_numeric_to_scalar(Datum value, Oid typid, bool *failure) case REGOPERATOROID: case REGCLASSOID: case REGTYPEOID: + case REGCOLLATIONOID: case REGCONFIGOID: case REGDICTIONARYOID: case REGROLEOID: diff --git a/src/backend/utils/adt/xid8funcs.c b/src/backend/utils/adt/xid8funcs.c index 78b0b9b6d68..0c9f14a0c83 100644 --- a/src/backend/utils/adt/xid8funcs.c +++ b/src/backend/utils/adt/xid8funcs.c @@ -36,6 +36,7 @@ #include "miscadmin.h" #include "postmaster/postmaster.h" #include "storage/lwlock.h" +#include "storage/procarray.h" #include "utils/builtins.h" #include "utils/memutils.h" #include "utils/snapmgr.h" @@ -810,29 +811,22 @@ pg_xact_status(PG_FUNCTION_ARGS) { Assert(TransactionIdIsValid(xid)); - if 
(TransactionIdIsCurrentTransactionId(xid)) + /* + * Like when doing visibility checks on a row, check whether the + * transaction is still in progress before looking into the CLOG. + * Otherwise we would incorrectly return "committed" for a transaction + * that is committing and has already updated the CLOG, but hasn't + * removed its XID from the proc array yet. (See comment on that race + * condition at the top of heapam_visibility.c) + */ + if (TransactionIdIsInProgress(xid)) status = "in progress"; else if (TransactionIdDidCommit(xid)) status = "committed"; - else if (TransactionIdDidAbort(xid)) - status = "aborted"; else { - /* - * The xact is not marked as either committed or aborted in clog. - * - * It could be a transaction that ended without updating clog or - * writing an abort record due to a crash. We can safely assume - * it's aborted if it isn't committed and is older than our - * snapshot xmin. - * - * Otherwise it must be in-progress (or have been at the time we - * checked commit/abort status). 
- */ - if (TransactionIdPrecedes(xid, GetActiveSnapshot()->xmin)) - status = "aborted"; - else - status = "in progress"; + /* it must have aborted or crashed */ + status = "aborted"; } } else diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c index 60f643c2d87..5ccb028a1a2 100644 --- a/src/backend/utils/cache/catcache.c +++ b/src/backend/utils/cache/catcache.c @@ -240,6 +240,7 @@ GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEq case REGOPERATOROID: case REGCLASSOID: case REGTYPEOID: + case REGCOLLATIONOID: case REGCONFIGOID: case REGDICTIONARYOID: case REGROLEOID: diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c index 29287088ecf..952d1474870 100644 --- a/src/backend/utils/mb/mbutils.c +++ b/src/backend/utils/mb/mbutils.c @@ -40,6 +40,7 @@ #include "utils/builtins.h" #include "utils/memutils.h" #include "utils/syscache.h" +#include "common/mdb_locale.h" /* * We maintain a simple linked list caching the fmgr lookup info for the @@ -1308,7 +1309,7 @@ pg_bind_textdomain_codeset(const char *domainname) int new_msgenc; #ifndef WIN32 - const char *ctype = setlocale(LC_CTYPE, NULL); + const char *ctype = SETLOCALE(LC_CTYPE, NULL); if (pg_strcasecmp(ctype, "C") == 0 || pg_strcasecmp(ctype, "POSIX") == 0) #endif diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index 154d6e39737..cb3b1a1cbdd 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -51,6 +51,7 @@ #include "catalog/index.h" #include "commands/async.h" #include "commands/prepare.h" +#include "commands/tablespace.h" #include "commands/trigger.h" #include "commands/user.h" #include "commands/vacuum.h" @@ -2045,6 +2046,17 @@ static struct config_bool ConfigureNamesBool[] = NULL, NULL, NULL }, + { + {"allow_in_place_tablespaces", PGC_SUSET, DEVELOPER_OPTIONS, + gettext_noop("Allows tablespaces directly inside pg_tblspc, for testing."), + NULL, + GUC_NOT_IN_SAMPLE + }, + 
&allow_in_place_tablespaces, + false, + NULL, NULL, NULL + }, + { {"lo_compat_privileges", PGC_SUSET, COMPAT_OPTIONS_PREVIOUS, gettext_noop("Enables backward compatibility mode for privilege checks on large objects."), @@ -4928,7 +4940,7 @@ static struct config_enum ConfigureNamesEnum[] = { {"session_replication_role", PGC_SUSET, CLIENT_CONN_STATEMENT, gettext_noop("Sets the session's behavior for triggers and rewrite rules."), - NULL + NULL, 0, 0, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, 0, true, }, &SessionReplicationRole, SESSION_REPLICATION_ROLE_ORIGIN, session_replication_role_options, @@ -7625,6 +7637,7 @@ set_config_option(const char *name, const char *value, void *newextra = NULL; bool prohibitValueChange = false; bool makeDefault; + Oid role; if (elevel == 0) { @@ -7782,10 +7795,13 @@ set_config_option(const char *name, const char *value, case PGC_SUSET: if (context == PGC_USERSET || context == PGC_BACKEND) { - ereport(elevel, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied to set parameter \"%s\"", - name))); + role = get_role_oid("mdb_admin", true /*if nodoby created mdb_admin role in this database*/); + if (!(record->mdb_admin_allowed && is_member_of_role(GetUserId(), role))) { + ereport(elevel, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("permission denied to set parameter \"%s\"", + name))); + } return 0; } break; diff --git a/src/backend/utils/sort/Makefile b/src/backend/utils/sort/Makefile index 26f65fcaf7a..2c31fd453d6 100644 --- a/src/backend/utils/sort/Makefile +++ b/src/backend/utils/sort/Makefile @@ -16,6 +16,7 @@ override CPPFLAGS := -I. 
-I$(srcdir) $(CPPFLAGS) OBJS = \ logtape.o \ + qsort_interruptible.o \ sharedtuplestore.o \ sortsupport.o \ tuplesort.o \ diff --git a/src/backend/utils/sort/qsort_interruptible.c b/src/backend/utils/sort/qsort_interruptible.c new file mode 100644 index 00000000000..66e018b991d --- /dev/null +++ b/src/backend/utils/sort/qsort_interruptible.c @@ -0,0 +1,18 @@ +/* + * qsort_interruptible.c: qsort_arg that includes CHECK_FOR_INTERRUPTS + * + * Portions Copyright (c) 2021-2026, PostgreSQL Global Development Group + */ + +#include "postgres.h" +#include "miscadmin.h" + +#define ST_SORT qsort_interruptible +#define ST_ELEMENT_TYPE_VOID +#define ST_COMPARATOR_TYPE_NAME qsort_arg_comparator +#define ST_COMPARE_RUNTIME_POINTER +#define ST_COMPARE_ARG_TYPE void +#define ST_SCOPE +#define ST_DEFINE +#define ST_CHECK_FOR_INTERRUPTS +#include "lib/sort_template.h" diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c index 53c3a82a45e..f7c34a3e208 100644 --- a/src/bin/initdb/initdb.c +++ b/src/bin/initdb/initdb.c @@ -75,6 +75,7 @@ #include "getopt_long.h" #include "mb/pg_wchar.h" #include "miscadmin.h" +#include "common/mdb_locale.h" #include "catalog/catalog.h" @@ -2274,12 +2275,13 @@ locale_date_order(const char *locale) result = DATEORDER_MDY; /* default */ - save = setlocale(LC_TIME, NULL); + save = SETLOCALE(LC_TIME, NULL); + if (!save) return result; save = pg_strdup(save); - setlocale(LC_TIME, locale); + SETLOCALE(LC_TIME, locale); memset(&testtime, 0, sizeof(testtime)); testtime.tm_mday = 22; @@ -2288,7 +2290,7 @@ locale_date_order(const char *locale) res = my_strftime(buf, sizeof(buf), "%x", &testtime); - setlocale(LC_TIME, save); + SETLOCALE(LC_TIME, save); free(save); if (res == 0) @@ -2332,7 +2334,7 @@ check_locale_name(int category, const char *locale, char **canonname) if (canonname) *canonname = NULL; /* in case of failure */ - save = setlocale(category, NULL); + save = SETLOCALE(category, NULL); if (!save) { pg_log_error("setlocale() failed"); @@ 
-2347,14 +2349,14 @@ check_locale_name(int category, const char *locale, char **canonname) locale = ""; /* set the locale with setlocale, to see if it accepts it. */ - res = setlocale(category, locale); + res = SETLOCALE(category, locale); /* save canonical name if requested. */ if (res && canonname) *canonname = pg_strdup(res); /* restore old value. */ - if (!setlocale(category, save)) + if (!SETLOCALE(category, save)) { pg_log_error("failed to restore old locale \"%s\"", save); exit(1); diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c index d0905f3d588..456d13c28a3 100644 --- a/src/bin/pg_upgrade/check.c +++ b/src/bin/pg_upgrade/check.c @@ -16,6 +16,8 @@ #include "mb/pg_wchar.h" #include "pg_upgrade.h" #include "greenplum/pg_upgrade_greenplum.h" +#include "common/mdb_locale.h" + static void check_new_cluster_is_empty(void); static void check_databases_are_compatible(void); @@ -26,6 +28,7 @@ static void check_proper_datallowconn(ClusterInfo *cluster); static void check_for_prepared_transactions(ClusterInfo *cluster); static void check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster); static void check_for_user_defined_postfix_ops(ClusterInfo *cluster); +static void check_for_incompatible_polymorphics(ClusterInfo *cluster); static void check_for_tables_with_oids(ClusterInfo *cluster); static void check_for_composite_data_type_usage(ClusterInfo *cluster); static void check_for_reg_data_type_usage(ClusterInfo *cluster); @@ -155,6 +158,13 @@ check_and_dump_old_cluster(bool live_check, char **sequence_script_file_name) check_for_removed_data_type_usage(&old_cluster, "12", "tinterval"); } + /* + * PG 14 changed polymorphic functions from anyarray to + * anycompatiblearray. + */ + if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1300) + check_for_incompatible_polymorphics(&old_cluster); + /* * Pre-PG 12 allowed tables to be declared WITH OIDS, which is not * supported anymore. Verify there are none, iff applicable. 
@@ -1176,6 +1186,135 @@ check_for_user_defined_postfix_ops(ClusterInfo *cluster) check_ok(); } +/* + * check_for_incompatible_polymorphics() + * + * Make sure nothing is using old polymorphic functions with + * anyarray/anyelement rather than the new anycompatible variants. + */ +static void +check_for_incompatible_polymorphics(ClusterInfo *cluster) +{ + PGresult *res; + FILE *script = NULL; + char output_path[MAXPGPATH]; + PQExpBufferData old_polymorphics; + + prep_status("Checking for incompatible polymorphic functions"); + + snprintf(output_path, sizeof(output_path), + "incompatible_polymorphics.txt"); + + /* The set of problematic functions varies a bit in different versions */ + initPQExpBuffer(&old_polymorphics); + + appendPQExpBufferStr(&old_polymorphics, + "'array_append(anyarray,anyelement)'" + ", 'array_cat(anyarray,anyarray)'" + ", 'array_prepend(anyelement,anyarray)'"); + + if (GET_MAJOR_VERSION(cluster->major_version) >= 903) + appendPQExpBufferStr(&old_polymorphics, + ", 'array_remove(anyarray,anyelement)'" + ", 'array_replace(anyarray,anyelement,anyelement)'"); + + if (GET_MAJOR_VERSION(cluster->major_version) >= 905) + appendPQExpBufferStr(&old_polymorphics, + ", 'array_position(anyarray,anyelement)'" + ", 'array_position(anyarray,anyelement,integer)'" + ", 'array_positions(anyarray,anyelement)'" + ", 'width_bucket(anyelement,anyarray)'"); + + for (int dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) + { + bool db_used = false; + DbInfo *active_db = &cluster->dbarr.dbs[dbnum]; + PGconn *conn = connectToServer(cluster, active_db->db_name); + int ntups; + int i_objkind, + i_objname; + + /* + * The query below hardcodes FirstNormalObjectId as 16384 rather than + * interpolating that C #define into the query because, if that + * #define is ever changed, the cutoff we want to use is the value + * used by pre-version 14 servers, not that of some future version. 
+ */ + res = executeQueryOrDie(conn, + /* Aggregate transition functions */ + "SELECT 'aggregate' AS objkind, p.oid::regprocedure::text AS objname " + "FROM pg_proc AS p " + "JOIN pg_aggregate AS a ON a.aggfnoid=p.oid " + "JOIN pg_proc AS transfn ON transfn.oid=a.aggtransfn " + "WHERE p.oid >= 16384 " + "AND a.aggtransfn = ANY(ARRAY[%s]::regprocedure[]) " + "AND a.aggtranstype = ANY(ARRAY['anyarray', 'anyelement']::regtype[]) " + + /* Aggregate final functions */ + "UNION ALL " + "SELECT 'aggregate' AS objkind, p.oid::regprocedure::text AS objname " + "FROM pg_proc AS p " + "JOIN pg_aggregate AS a ON a.aggfnoid=p.oid " + "JOIN pg_proc AS finalfn ON finalfn.oid=a.aggfinalfn " + "WHERE p.oid >= 16384 " + "AND a.aggfinalfn = ANY(ARRAY[%s]::regprocedure[]) " + "AND a.aggtranstype = ANY(ARRAY['anyarray', 'anyelement']::regtype[]) " + + /* Operators */ + "UNION ALL " + "SELECT 'operator' AS objkind, op.oid::regoperator::text AS objname " + "FROM pg_operator AS op " + "WHERE op.oid >= 16384 " + "AND oprcode = ANY(ARRAY[%s]::regprocedure[]) " + "AND oprleft = ANY(ARRAY['anyarray', 'anyelement']::regtype[]);", + old_polymorphics.data, + old_polymorphics.data, + old_polymorphics.data); + + ntups = PQntuples(res); + + i_objkind = PQfnumber(res, "objkind"); + i_objname = PQfnumber(res, "objname"); + + for (int rowno = 0; rowno < ntups; rowno++) + { + if (script == NULL && + (script = fopen_priv(output_path, "w")) == NULL) + pg_fatal("could not open file \"%s\": %s\n", + output_path, strerror(errno)); + if (!db_used) + { + fprintf(script, "In database: %s\n", active_db->db_name); + db_used = true; + } + + fprintf(script, " %s: %s\n", + PQgetvalue(res, rowno, i_objkind), + PQgetvalue(res, rowno, i_objname)); + } + + PQclear(res); + PQfinish(conn); + } + + if (script) + { + fclose(script); + pg_log(PG_REPORT, "fatal\n"); + pg_fatal("Your installation contains user-defined objects that refer to internal\n" + "polymorphic functions with arguments of type \"anyarray\" or 
\"anyelement\".\n" + "These user-defined objects must be dropped before upgrading and restored\n" + "afterwards, changing them to refer to the new corresponding functions with\n" + "arguments of type \"anycompatiblearray\" and \"anycompatible\".\n" + "A list of the problematic objects is in the file:\n" + " %s\n\n", output_path); + } + else + check_ok(); + + termPQExpBuffer(&old_polymorphics); +} + /* * Verify that no tables are declared WITH OIDS. */ @@ -1629,7 +1768,8 @@ get_canonical_locale_name(int category, const char *locale) char *res; /* get the current setting, so we can restore it. */ - save = setlocale(category, NULL); + + save = SETLOCALE(category, NULL); if (!save) pg_fatal("failed to get the current locale\n"); @@ -1637,7 +1777,7 @@ get_canonical_locale_name(int category, const char *locale) save = (char *) pg_strdup(save); /* set the locale with setlocale, to see if it accepts it. */ - res = setlocale(category, locale); + res = SETLOCALE(category, locale); if (!res) pg_fatal("failed to get system locale name for \"%s\"\n", locale); @@ -1645,7 +1785,7 @@ get_canonical_locale_name(int category, const char *locale) res = pg_strdup(res); /* restore old value. 
*/ - if (!setlocale(category, save)) + if (!SETLOCALE(category, save)) pg_fatal("failed to restore old locale \"%s\"\n", save); pg_free(save); diff --git a/src/common/exec.c b/src/common/exec.c index 7dd2f8c4942..5159b616a39 100644 --- a/src/common/exec.c +++ b/src/common/exec.c @@ -24,6 +24,8 @@ #include #include #include +#include "common/mdb_locale.h" + /* Inhibit mingw CRT's auto-globbing of command line arguments */ #if defined(WIN32) && !defined(_MSC_VER) @@ -443,7 +445,7 @@ set_pglocale_pgservice(const char *argv0, const char *app) /* don't set LC_ALL in the backend */ if (strcmp(app, PG_TEXTDOMAIN("postgres")) != 0) { - setlocale(LC_ALL, ""); + SETLOCALE(LC_ALL, ""); /* * One could make a case for reproducing here PostmasterMain()'s test diff --git a/src/common/file_utils.c b/src/common/file_utils.c index 40b73bbe1ab..fd2d11375c9 100644 --- a/src/common/file_utils.c +++ b/src/common/file_utils.c @@ -465,5 +465,21 @@ get_dirent_type(const char *path, #endif } +#if defined(WIN32) && !defined(_MSC_VER) + + /* + * If we're on native Windows (not Cygwin, which has its own POSIX + * symlinks), but not using the MSVC compiler, then we're using a + * readdir() emulation provided by the MinGW runtime that has no d_type. + * Since the lstat() fallback code reports junction points as directories, + * we need an extra system call to check if we should report them as + * symlinks instead, following our convention. 
+ */ + if (result == PGFILETYPE_DIR && + !look_through_symlinks && + pgwin32_is_junction(path)) + result = PGFILETYPE_LNK; +#endif + return result; } diff --git a/src/include/access/multixact.h b/src/include/access/multixact.h index 4bbb035eaea..f053a30b009 100644 --- a/src/include/access/multixact.h +++ b/src/include/access/multixact.h @@ -30,8 +30,8 @@ #define MaxMultiXactOffset ((MultiXactOffset) 0xFFFFFFFF) /* Number of SLRU buffers to use for multixact */ -#define NUM_MULTIXACTOFFSET_BUFFERS 8 -#define NUM_MULTIXACTMEMBER_BUFFERS 16 +#define NUM_MULTIXACTOFFSET_BUFFERS 32 +#define NUM_MULTIXACTMEMBER_BUFFERS 64 /* * Possible multixact lock modes ("status"). The first four modes are for diff --git a/src/include/access/subtrans.h b/src/include/access/subtrans.h index 9a54dc0fb3b..73503a26dcc 100644 --- a/src/include/access/subtrans.h +++ b/src/include/access/subtrans.h @@ -12,7 +12,7 @@ #define SUBTRANS_H /* Number of SLRU buffers to use for subtrans */ -#define NUM_SUBTRANS_BUFFERS 32 +#define NUM_SUBTRANS_BUFFERS 64 typedef struct SubTransData { diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat index a47b1ef1615..1093fa948b8 100644 --- a/src/include/catalog/pg_proc.dat +++ b/src/include/catalog/pg_proc.dat @@ -11758,7 +11758,9 @@ # # GPDB ADDITIONS START HERE # - +{ oid => '16383', descr => 'contains', + proname => 'mdb_locale_enabled', prorettype => 'bool', + proargtypes => '', prosrc => 'mdb_locale_enabled' }, { oid => '7178', descr => 'for use by pg_upgrade', proname => 'binary_upgrade_set_preassigned_oids', provolatile => 'v', proparallel => 'u', prorettype => 'void', proargtypes => '_oid', diff --git a/src/include/commands/tablespace.h b/src/include/commands/tablespace.h index 1f41964cf75..fe13c5d75d7 100644 --- a/src/include/commands/tablespace.h +++ b/src/include/commands/tablespace.h @@ -20,6 +20,8 @@ #include "nodes/parsenodes.h" #include "storage/dbdirnode.h" +extern bool allow_in_place_tablespaces; + /* XLOG stuff */ 
#define XLOG_TBLSPC_CREATE 0x00 #define XLOG_TBLSPC_DROP 0x10 diff --git a/src/include/common/mdb_locale.h b/src/include/common/mdb_locale.h new file mode 100644 index 00000000000..91d8656c2c2 --- /dev/null +++ b/src/include/common/mdb_locale.h @@ -0,0 +1,41 @@ +/*------------------------------------------------------------------------- + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + * mdb_locale.h + * Generic headers for custom MDB-locales patch. 
+ * + * IDENTIFICATION + * src/include/common/mdb_locale.h + * + *------------------------------------------------------------------------- + */ + +#ifndef PG_MDB_LOCALE_H +#define PG_MDB_LOCALE_H + +#ifdef USE_MDBLOCALES +#include +#define SETLOCALE(category, locale) mdb_setlocale(category, locale) +#define NEWLOCALE(category, locale, base) mdb_newlocale(category, locale, base) +#else +#define SETLOCALE(category, locale) setlocale(category, locale) +#define NEWLOCALE(category, locale, base) newlocale(category, locale, base) +#endif + +#endif /* PG_MDB_LOCALE_H */ diff --git a/src/include/pg_config.h.in b/src/include/pg_config.h.in index aaa3ea32e8a..54de6844f58 100644 --- a/src/include/pg_config.h.in +++ b/src/include/pg_config.h.in @@ -392,6 +392,9 @@ /* Define to 1 if you have the `m' library (-lm). */ #undef HAVE_LIBM +/* Define to 1 if you have the `mdblocales' library (-lmdblocales). */ +#undef HAVE_LIBMDBLOCALES + /* Define to 1 if you have the `numa' library (-lnuma). */ #undef HAVE_LIBNUMA @@ -1041,6 +1044,9 @@ /* Define to 1 to build with LZ4 support. (--with-lz4) */ #undef USE_LZ4 +/* Define to 1 to build with MDB locales. 
(--with-mdblocales) */ +#undef USE_MDBLOCALES + /* Define to 1 to build with Mapreduce capabilities (--enable-mapreduce) */ #undef USE_MAPREDUCE diff --git a/src/include/port.h b/src/include/port.h index cb34aca03eb..c30e558a362 100644 --- a/src/include/port.h +++ b/src/include/port.h @@ -511,6 +511,9 @@ typedef int (*qsort_arg_comparator) (const void *a, const void *b, void *arg); extern void qsort_arg(void *base, size_t nel, size_t elsize, qsort_arg_comparator cmp, void *arg); +extern void qsort_interruptible(void *base, size_t nel, size_t elsize, + qsort_arg_comparator cmp, void *arg); + extern void *bsearch_arg(const void *key, const void *base, size_t nmemb, size_t size, int (*compar) (const void *, const void *, void *), diff --git a/src/include/replication/logicalrelation.h b/src/include/replication/logicalrelation.h index 3c662d3abcf..10f91490b5c 100644 --- a/src/include/replication/logicalrelation.h +++ b/src/include/replication/logicalrelation.h @@ -38,6 +38,7 @@ typedef struct LogicalRepRelMapEntry } LogicalRepRelMapEntry; extern void logicalrep_relmap_update(LogicalRepRelation *remoterel); +extern void logicalrep_partmap_reset_relmap(LogicalRepRelation *remoterel); extern LogicalRepRelMapEntry *logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode); diff --git a/src/include/utils/acl.h b/src/include/utils/acl.h index 223175099bd..49068f04b2f 100644 --- a/src/include/utils/acl.h +++ b/src/include/utils/acl.h @@ -207,9 +207,17 @@ extern AclMode aclmask(const Acl *acl, Oid roleid, Oid ownerId, extern int aclmembers(const Acl *acl, Oid **roleids); extern bool has_privs_of_role(Oid member, Oid role); +extern bool has_privs_of_role_strict(Oid member, Oid role); extern bool is_member_of_role(Oid member, Oid role); extern bool is_member_of_role_nosuper(Oid member, Oid role); extern bool is_admin_of_role(Oid member, Oid role); + +// -- non-upstream patch begin +extern bool mdb_admin_allow_bypass_owner_checks(Oid userId, Oid ownerId); + +extern void 
check_mdb_admin_is_member_of_role(Oid member, Oid role); +// -- non-upstream patch end + extern void check_is_member_of_role(Oid member, Oid role); extern Oid get_role_oid(const char *rolename, bool missing_ok); extern Oid get_role_oid_or_public(const char *rolename); diff --git a/src/include/utils/backend_status.h b/src/include/utils/backend_status.h index 139b7355d13..139646d4a40 100644 --- a/src/include/utils/backend_status.h +++ b/src/include/utils/backend_status.h @@ -319,6 +319,9 @@ extern uint64 pgstat_get_my_query_id(void); extern int pgstat_fetch_stat_numbackends(void); extern PgBackendStatus *pgstat_fetch_stat_beentry(int beid); extern LocalPgBackendStatus *pgstat_fetch_stat_local_beentry(int beid); +/* -- mdb admin patch -- */ +extern LocalPgBackendStatus *pgstat_fetch_stat_local_beentry_by_pid(int pid); +/* -- mdb admin patch end -- */ extern char *pgstat_clip_activity(const char *raw_activity); diff --git a/src/include/utils/guc_tables.h b/src/include/utils/guc_tables.h index 17d2a166b09..08584e4db54 100644 --- a/src/include/utils/guc_tables.h +++ b/src/include/utils/guc_tables.h @@ -204,6 +204,8 @@ struct config_generic char *sourcefile; /* file current setting is from (NULL if not * set in config file) */ int sourceline; /* line in source file */ + + bool mdb_admin_allowed; /* is mdb admin allowed to change this, makes sence only for superuser/not superuser ctx */ }; /* bit values in status field */ diff --git a/src/include/utils/sync_guc_name.h b/src/include/utils/sync_guc_name.h index 6d09f49155f..3a99016d813 100644 --- a/src/include/utils/sync_guc_name.h +++ b/src/include/utils/sync_guc_name.h @@ -10,6 +10,7 @@ "allow_dml_directory_table", "allow_segment_DML", "allow_system_table_mods", + "allow_in_place_tablespaces", "array_nulls", "backtrace_functions", "bytea_output", diff --git a/src/interfaces/ecpg/ecpglib/connect.c b/src/interfaces/ecpg/ecpglib/connect.c index 056940cb252..f4d2da9173a 100644 --- a/src/interfaces/ecpg/ecpglib/connect.c +++ 
b/src/interfaces/ecpg/ecpglib/connect.c @@ -9,6 +9,7 @@ #include "ecpglib_extern.h" #include "ecpgtype.h" #include "sqlca.h" +#include "common/mdb_locale.h" #ifdef HAVE_USELOCALE locale_t ecpg_clocale = (locale_t) 0; @@ -517,7 +518,7 @@ ECPGconnect(int lineno, int c, const char *name, const char *user, const char *p #ifdef HAVE_USELOCALE if (!ecpg_clocale) { - ecpg_clocale = newlocale(LC_NUMERIC_MASK, "C", (locale_t) 0); + ecpg_clocale = NEWLOCALE(LC_NUMERIC_MASK, "C", (locale_t) 0); if (!ecpg_clocale) { #ifdef ENABLE_THREAD_SAFETY diff --git a/src/interfaces/ecpg/ecpglib/descriptor.c b/src/interfaces/ecpg/ecpglib/descriptor.c index f1898dec6a6..2238febbbdd 100644 --- a/src/interfaces/ecpg/ecpglib/descriptor.c +++ b/src/interfaces/ecpg/ecpglib/descriptor.c @@ -15,6 +15,8 @@ #include "sql3types.h" #include "sqlca.h" #include "sqlda.h" +#include "common/mdb_locale.h" + static void descriptor_free(struct descriptor *desc); @@ -500,8 +502,8 @@ ECPGget_desc(int lineno, const char *desc_name, int index,...) #ifdef HAVE__CONFIGTHREADLOCALE stmt.oldthreadlocale = _configthreadlocale(_ENABLE_PER_THREAD_LOCALE); #endif - stmt.oldlocale = ecpg_strdup(setlocale(LC_NUMERIC, NULL), lineno); - setlocale(LC_NUMERIC, "C"); + stmt.oldlocale = ecpg_strdup(SETLOCALE(LC_NUMERIC, NULL), lineno); + SETLOCALE(LC_NUMERIC, "C"); #endif /* desperate try to guess something sensible */ @@ -514,7 +516,7 @@ ECPGget_desc(int lineno, const char *desc_name, int index,...) 
#else if (stmt.oldlocale) { - setlocale(LC_NUMERIC, stmt.oldlocale); + SETLOCALE(LC_NUMERIC, stmt.oldlocale); ecpg_free(stmt.oldlocale); } #ifdef HAVE__CONFIGTHREADLOCALE diff --git a/src/interfaces/ecpg/ecpglib/execute.c b/src/interfaces/ecpg/ecpglib/execute.c index e8e8fb2b2c3..eafdd8e421a 100644 --- a/src/interfaces/ecpg/ecpglib/execute.c +++ b/src/interfaces/ecpg/ecpglib/execute.c @@ -31,6 +31,7 @@ #include "sqlca.h" #include "sqlda-compat.h" #include "sqlda-native.h" +#include "common/mdb_locale.h" /* * This function returns a newly malloced string that has ' and \ @@ -2002,13 +2003,13 @@ ecpg_do_prologue(int lineno, const int compat, const int force_indicator, #ifdef HAVE__CONFIGTHREADLOCALE stmt->oldthreadlocale = _configthreadlocale(_ENABLE_PER_THREAD_LOCALE); #endif - stmt->oldlocale = ecpg_strdup(setlocale(LC_NUMERIC, NULL), lineno); + stmt->oldlocale = ecpg_strdup(SETLOCALE(LC_NUMERIC, NULL), lineno); if (stmt->oldlocale == NULL) { ecpg_do_epilogue(stmt); return false; } - setlocale(LC_NUMERIC, "C"); + SETLOCALE(LC_NUMERIC, "C"); #endif /* @@ -2222,7 +2223,7 @@ ecpg_do_epilogue(struct statement *stmt) uselocale(stmt->oldlocale); #else if (stmt->oldlocale) - setlocale(LC_NUMERIC, stmt->oldlocale); + SETLOCALE(LC_NUMERIC, stmt->oldlocale); #ifdef HAVE__CONFIGTHREADLOCALE /* diff --git a/src/interfaces/libpq/Makefile b/src/interfaces/libpq/Makefile index 43682574b23..ed3df424ae4 100644 --- a/src/interfaces/libpq/Makefile +++ b/src/interfaces/libpq/Makefile @@ -83,7 +83,7 @@ endif # that are built correctly for use in a shlib. 
SHLIB_LINK_INTERNAL = -lpgcommon_shlib -lpgport_shlib ifneq ($(PORTNAME), win32) -SHLIB_LINK += $(filter -lcrypt -ldes -lcom_err -lcrypto -lk5crypto -lkrb5 -lgssapi_krb5 -lgss -lgssapi -lssl -lsocket -lnsl -lresolv -lintl -lm, $(LIBS)) $(LDAP_LIBS_FE) $(PTHREAD_LIBS) +SHLIB_LINK += $(filter -lcrypt -ldes -lcom_err -lcrypto -lk5crypto -lkrb5 -lgssapi_krb5 -lgss -lgssapi -lssl -lsocket -lnsl -lresolv -lintl -lm -lmdblocales, $(LIBS)) $(LDAP_LIBS_FE) $(PTHREAD_LIBS) else SHLIB_LINK += $(filter -lcrypt -ldes -lcom_err -lcrypto -lk5crypto -lkrb5 -lgssapi32 -lssl -lsocket -lnsl -lresolv -lintl -lm $(PTHREAD_LIBS), $(LIBS)) $(LDAP_LIBS_FE) endif diff --git a/src/interfaces/libpq/fe-exec.c b/src/interfaces/libpq/fe-exec.c index 85696712b38..ea3a78420ca 100644 --- a/src/interfaces/libpq/fe-exec.c +++ b/src/interfaces/libpq/fe-exec.c @@ -1313,7 +1313,8 @@ pqAppendCmdQueueEntry(PGconn *conn, PGcmdQueueEntry *entry) * itself consume commands from the queue; if we're in any other * state, we don't have to do anything. */ - if (conn->asyncStatus == PGASYNC_IDLE) + if (conn->asyncStatus == PGASYNC_IDLE || + conn->asyncStatus == PGASYNC_PIPELINE_IDLE) { resetPQExpBuffer(&conn->errorMessage); pqPipelineProcessQueue(conn); @@ -1372,6 +1373,7 @@ int PQsendQueryInternal(PGconn *conn, const char *query, bool newQuery) { PGcmdQueueEntry *entry = NULL; + PGcmdQueueEntry *entry2 = NULL; if (!PQsendQueryStart(conn, newQuery)) return 0; @@ -1387,6 +1389,12 @@ PQsendQueryInternal(PGconn *conn, const char *query, bool newQuery) entry = pqAllocCmdQueueEntry(conn); if (entry == NULL) return 0; /* error msg already set */ + if (conn->pipelineStatus != PQ_PIPELINE_OFF) + { + entry2 = pqAllocCmdQueueEntry(conn); + if (entry2 == NULL) + goto sendFailed; + } /* Send the query message(s) */ if (conn->pipelineStatus == PQ_PIPELINE_OFF) @@ -1456,10 +1464,25 @@ PQsendQueryInternal(PGconn *conn, const char *query, bool newQuery) /* OK, it's launched! 
*/ pqAppendCmdQueueEntry(conn, entry); + + /* + * When pipeline mode is in use, we need a second entry in the command + * queue to represent Close Portal message. This allows us later to wait + * for the CloseComplete message to be received before getting in IDLE + * state. + */ + if (conn->pipelineStatus != PQ_PIPELINE_OFF) + { + entry2->queryclass = PGQUERY_CLOSE; + entry2->query = NULL; + pqAppendCmdQueueEntry(conn, entry2); + } + return 1; sendFailed: pqRecycleCmdQueueEntry(conn, entry); + pqRecycleCmdQueueEntry(conn, entry2); /* error message should be set up already */ return 0; } @@ -1702,11 +1725,13 @@ PQsendQueryStart(PGconn *conn, bool newQuery) switch (conn->asyncStatus) { case PGASYNC_IDLE: + case PGASYNC_PIPELINE_IDLE: case PGASYNC_READY: case PGASYNC_READY_MORE: case PGASYNC_BUSY: /* ok to queue */ break; + case PGASYNC_COPY_IN: case PGASYNC_COPY_OUT: case PGASYNC_COPY_BOTH: @@ -2082,19 +2107,22 @@ PQgetResult(PGconn *conn) { case PGASYNC_IDLE: res = NULL; /* query is complete */ - if (conn->pipelineStatus != PQ_PIPELINE_OFF) - { - /* - * We're about to return the NULL that terminates the round of - * results from the current query; prepare to send the results - * of the next query when we're called next. Also, since this - * is the start of the results of the next query, clear any - * prior error message. - */ - resetPQExpBuffer(&conn->errorMessage); - pqPipelineProcessQueue(conn); - } break; + case PGASYNC_PIPELINE_IDLE: + Assert(conn->pipelineStatus != PQ_PIPELINE_OFF); + + /* + * We're about to return the NULL that terminates the round of + * results from the current query; prepare to send the results + * of the next query, if any, when we're called next. If there's + * no next element in the command queue, this gets us in IDLE + * state. 
+ */ + resetPQExpBuffer(&conn->errorMessage); + pqPipelineProcessQueue(conn); + res = NULL; /* query is complete */ + break; + case PGASYNC_READY: /* @@ -2115,7 +2143,7 @@ PQgetResult(PGconn *conn) * We're about to send the results of the current query. Set * us idle now, and ... */ - conn->asyncStatus = PGASYNC_IDLE; + conn->asyncStatus = PGASYNC_PIPELINE_IDLE; /* * ... in cases when we're sending a pipeline-sync result, @@ -2159,6 +2187,22 @@ PQgetResult(PGconn *conn) break; } + /* If the next command we expect is CLOSE, read and consume it */ + if (conn->asyncStatus == PGASYNC_PIPELINE_IDLE && + conn->cmd_queue_head && + conn->cmd_queue_head->queryclass == PGQUERY_CLOSE) + { + if (res && res->resultStatus != PGRES_FATAL_ERROR) + { + conn->asyncStatus = PGASYNC_BUSY; + parseInput(conn); + conn->asyncStatus = PGASYNC_PIPELINE_IDLE; + } + else + /* we won't ever see the Close */ + pqCommandQueueAdvance(conn); + } + if (res) { int i; @@ -2967,7 +3011,10 @@ PQexitPipelineMode(PGconn *conn) if (!conn) return 0; - if (conn->pipelineStatus == PQ_PIPELINE_OFF) + if (conn->pipelineStatus == PQ_PIPELINE_OFF && + (conn->asyncStatus == PGASYNC_IDLE || + conn->asyncStatus == PGASYNC_PIPELINE_IDLE) && + conn->cmd_queue_head == NULL) return 1; switch (conn->asyncStatus) @@ -2984,9 +3031,16 @@ PQexitPipelineMode(PGconn *conn) libpq_gettext("cannot exit pipeline mode while busy\n")); return 0; - default: + case PGASYNC_IDLE: + case PGASYNC_PIPELINE_IDLE: /* OK */ break; + + case PGASYNC_COPY_IN: + case PGASYNC_COPY_OUT: + case PGASYNC_COPY_BOTH: + appendPQExpBufferStr(&conn->errorMessage, + libpq_gettext("cannot exit pipeline mode while in COPY\n")); } /* still work to process */ @@ -3023,6 +3077,10 @@ pqCommandQueueAdvance(PGconn *conn) prevquery = conn->cmd_queue_head; conn->cmd_queue_head = conn->cmd_queue_head->next; + /* If the queue is now empty, reset the tail too */ + if (conn->cmd_queue_head == NULL) + conn->cmd_queue_tail = NULL; + /* and make it recyclable */ 
prevquery->next = NULL; pqRecycleCmdQueueEntry(conn, prevquery); @@ -3045,15 +3103,35 @@ pqPipelineProcessQueue(PGconn *conn) case PGASYNC_BUSY: /* client still has to process current query or results */ return; + case PGASYNC_IDLE: + /* + * If we're in IDLE mode and there's some command in the queue, + * get us into PIPELINE_IDLE mode and process normally. Otherwise + * there's nothing for us to do. + */ + if (conn->cmd_queue_head != NULL) + { + conn->asyncStatus = PGASYNC_PIPELINE_IDLE; + break; + } + return; + + case PGASYNC_PIPELINE_IDLE: + Assert(conn->pipelineStatus != PQ_PIPELINE_OFF); /* next query please */ break; } - /* Nothing to do if not in pipeline mode, or queue is empty */ - if (conn->pipelineStatus == PQ_PIPELINE_OFF || - conn->cmd_queue_head == NULL) + /* + * If there are no further commands to process in the queue, get us in + * "real idle" mode now. + */ + if (conn->cmd_queue_head == NULL) + { + conn->asyncStatus = PGASYNC_IDLE; return; + } /* Initialize async result-accumulation state */ pqClearAsyncResult(conn); @@ -3140,6 +3218,7 @@ PQpipelineSync(PGconn *conn) case PGASYNC_READY_MORE: case PGASYNC_BUSY: case PGASYNC_IDLE: + case PGASYNC_PIPELINE_IDLE: /* OK to send sync */ break; } diff --git a/src/interfaces/libpq/fe-protocol3.c b/src/interfaces/libpq/fe-protocol3.c index 9d74dd0e39d..5311a40a147 100644 --- a/src/interfaces/libpq/fe-protocol3.c +++ b/src/interfaces/libpq/fe-protocol3.c @@ -192,18 +192,6 @@ pqParseInput3(PGconn *conn) if (conn->asyncStatus != PGASYNC_IDLE) return; - /* - * We're also notionally not-IDLE when in pipeline mode the state - * says "idle" (so we have completed receiving the results of one - * query from the server and dispatched them to the application) - * but another query is queued; yield back control to caller so - * that they can initiate processing of the next query in the - * queue. 
- */ - if (conn->pipelineStatus != PQ_PIPELINE_OFF && - conn->cmd_queue_head != NULL) - return; - /* * Unexpected message in IDLE state; need to recover somehow. * ERROR messages are handled using the notice processor; @@ -330,8 +318,24 @@ pqParseInput3(PGconn *conn) } break; case '2': /* Bind Complete */ + /* Nothing to do for this message type */ + break; case '3': /* Close Complete */ - /* Nothing to do for these message types */ + /* + * If we get CloseComplete when waiting for it, consume + * the queue element and keep going. A result is not + * expected from this message; it is just there so that + * we know to wait for it when PQsendQuery is used in + * pipeline mode, before going in IDLE state. Failing to + * do this makes us receive CloseComplete when IDLE, which + * creates problems. + */ + if (conn->cmd_queue_head && + conn->cmd_queue_head->queryclass == PGQUERY_CLOSE) + { + pqCommandQueueAdvance(conn); + } + break; case 'S': /* parameter status */ if (getParameterStatus(conn)) diff --git a/src/interfaces/libpq/libpq-int.h b/src/interfaces/libpq/libpq-int.h index 70094e5fb70..0cbd611bd98 100644 --- a/src/interfaces/libpq/libpq-int.h +++ b/src/interfaces/libpq/libpq-int.h @@ -268,7 +268,8 @@ typedef enum * query */ PGASYNC_COPY_IN, /* Copy In data transfer in progress */ PGASYNC_COPY_OUT, /* Copy Out data transfer in progress */ - PGASYNC_COPY_BOTH /* Copy In/Out data transfer in progress */ + PGASYNC_COPY_BOTH, /* Copy In/Out data transfer in progress */ + PGASYNC_PIPELINE_IDLE, /* "Idle" between commands in pipeline mode */ } PGAsyncStatusType; /* Target server type (decoded value of target_session_attrs) */ @@ -354,7 +355,8 @@ typedef enum PGQUERY_EXTENDED, /* full Extended protocol (PQexecParams) */ PGQUERY_PREPARE, /* Parse only (PQprepare) */ PGQUERY_DESCRIBE, /* Describe Statement or Portal */ - PGQUERY_SYNC /* Sync (at end of a pipeline) */ + PGQUERY_SYNC, /* Sync (at end of a pipeline) */ + PGQUERY_CLOSE } PGQueryClass; /* diff --git 
a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c index 48591e48429..3aff8e95450 100644 --- a/src/pl/plperl/plperl.c +++ b/src/pl/plperl/plperl.c @@ -38,6 +38,7 @@ #include "utils/rel.h" #include "utils/syscache.h" #include "utils/typcache.h" +#include "common/mdb_locale.h" /* define our text domain for translations */ #undef TEXTDOMAIN @@ -743,15 +744,15 @@ plperl_init_interp(void) *save_numeric, *save_time; - loc = setlocale(LC_COLLATE, NULL); + loc = SETLOCALE(LC_COLLATE, NULL); save_collate = loc ? pstrdup(loc) : NULL; - loc = setlocale(LC_CTYPE, NULL); + loc = SETLOCALE(LC_CTYPE, NULL); save_ctype = loc ? pstrdup(loc) : NULL; - loc = setlocale(LC_MONETARY, NULL); + loc = SETLOCALE(LC_MONETARY, NULL); save_monetary = loc ? pstrdup(loc) : NULL; - loc = setlocale(LC_NUMERIC, NULL); + loc = SETLOCALE(LC_NUMERIC, NULL); save_numeric = loc ? pstrdup(loc) : NULL; - loc = setlocale(LC_TIME, NULL); + loc = SETLOCALE(LC_TIME, NULL); save_time = loc ? pstrdup(loc) : NULL; #define PLPERL_RESTORE_LOCALE(name, saved) \ @@ -4167,7 +4168,7 @@ static char * setlocale_perl(int category, char *locale) { dTHX; - char *RETVAL = setlocale(category, locale); + char *RETVAL = SETLOCALE(category, locale); if (RETVAL) { @@ -4182,7 +4183,7 @@ setlocale_perl(int category, char *locale) #ifdef LC_ALL if (category == LC_ALL) - newctype = setlocale(LC_CTYPE, NULL); + newctype = SETLOCALE(LC_CTYPE, NULL); else #endif newctype = RETVAL; @@ -4200,7 +4201,7 @@ setlocale_perl(int category, char *locale) #ifdef LC_ALL if (category == LC_ALL) - newcoll = setlocale(LC_COLLATE, NULL); + newcoll = SETLOCALE(LC_COLLATE, NULL); else #endif newcoll = RETVAL; @@ -4219,7 +4220,7 @@ setlocale_perl(int category, char *locale) #ifdef LC_ALL if (category == LC_ALL) - newnum = setlocale(LC_NUMERIC, NULL); + newnum = SETLOCALE(LC_NUMERIC, NULL); else #endif newnum = RETVAL; diff --git a/src/port/chklocale.c b/src/port/chklocale.c index 3d47d37eae4..2dae78e74e9 100644 --- a/src/port/chklocale.c +++ 
b/src/port/chklocale.c @@ -18,6 +18,8 @@ #else #include "postgres_fe.h" #endif +#include "common/mdb_locale.h" + #ifdef HAVE_LANGINFO_H #include @@ -343,7 +345,7 @@ pg_get_encoding_from_locale(const char *ctype, bool write_message) pg_strcasecmp(ctype, "POSIX") == 0) return PG_SQL_ASCII; - save = setlocale(LC_CTYPE, NULL); + save = SETLOCALE(LC_CTYPE, NULL); if (!save) return -1; /* setlocale() broken? */ /* must copy result, or it might change after setlocale */ @@ -351,7 +353,7 @@ pg_get_encoding_from_locale(const char *ctype, bool write_message) if (!save) return -1; /* out of memory; unlikely */ - name = setlocale(LC_CTYPE, ctype); + name = SETLOCALE(LC_CTYPE, ctype); if (!name) { free(save); @@ -366,13 +368,13 @@ pg_get_encoding_from_locale(const char *ctype, bool write_message) sys = win32_langinfo(name); #endif - setlocale(LC_CTYPE, save); + SETLOCALE(LC_CTYPE, save); free(save); } else { /* much easier... */ - ctype = setlocale(LC_CTYPE, NULL); + ctype = SETLOCALE(LC_CTYPE, NULL); if (!ctype) return -1; /* setlocale() broken? */ diff --git a/src/port/dirent.c b/src/port/dirent.c index 77b90e7e302..2cd134495ff 100644 --- a/src/port/dirent.c +++ b/src/port/dirent.c @@ -106,13 +106,17 @@ readdir(DIR *d) } strcpy(d->ret.d_name, fd.cFileName); /* Both strings are MAX_PATH long */ d->ret.d_namlen = strlen(d->ret.d_name); - /* The only identified types are: directory, regular file or symbolic link */ - if ((fd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) - d->ret.d_type = DT_DIR; - /* For reparse points dwReserved0 field will contain the ReparseTag */ - else if ((fd.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) != 0 && - (fd.dwReserved0 == IO_REPARSE_TAG_MOUNT_POINT)) + + /* + * For reparse points dwReserved0 field will contain the ReparseTag. We + * check this first, because reparse points are also reported as + * directories. 
+ */ + if ((fd.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) != 0 && + (fd.dwReserved0 == IO_REPARSE_TAG_MOUNT_POINT)) d->ret.d_type = DT_LNK; + else if ((fd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) + d->ret.d_type = DT_DIR; else d->ret.d_type = DT_REG; diff --git a/src/test/Makefile b/src/test/Makefile index d84edb282df..150c4e97b73 100644 --- a/src/test/Makefile +++ b/src/test/Makefile @@ -18,6 +18,9 @@ SUBDIRS = perl regress isolation modules authentication recovery SUBDIRS += fsync walrep heap_checksum isolation2 fdw singlenode_regress singlenode_isolation2 +# MDB addon +SUBDIRS += mdb_admin + # Test suites that are not safe by default but can be run if selected # by the user via the whitespace-separated list in variable # PG_TEST_EXTRA: diff --git a/src/test/locale/test-ctype.c b/src/test/locale/test-ctype.c index a3f896c5ecb..10c2b49cb92 100644 --- a/src/test/locale/test-ctype.c +++ b/src/test/locale/test-ctype.c @@ -23,6 +23,8 @@ the author shall be liable for any damage, etc. 
#include #include #include +#include "common/mdb_locale.h" + char *flag(int b); void describe_char(int c); @@ -62,7 +64,7 @@ main() short c; char *cur_locale; - cur_locale = setlocale(LC_ALL, ""); + cur_locale = SETLOCALE(LC_ALL, ""); if (cur_locale) fprintf(stderr, "Successfully set locale to \"%s\"\n", cur_locale); else diff --git a/src/test/mdb_admin/.gitignore b/src/test/mdb_admin/.gitignore new file mode 100644 index 00000000000..871e943d50e --- /dev/null +++ b/src/test/mdb_admin/.gitignore @@ -0,0 +1,2 @@ +# Generated by test suite +/tmp_check/ diff --git a/src/test/mdb_admin/Makefile b/src/test/mdb_admin/Makefile new file mode 100644 index 00000000000..e4e82367da9 --- /dev/null +++ b/src/test/mdb_admin/Makefile @@ -0,0 +1,23 @@ +#------------------------------------------------------------------------- +# +# Makefile for src/test/mdb_admin +# +# Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/test/mdb_admin/Makefile +# +#------------------------------------------------------------------------- + +subdir = src/test/mdb_admin +top_builddir = ../../.. 
+include $(top_builddir)/src/Makefile.global + +check: + $(prove_check) + +installcheck: + $(prove_installcheck) + +clean distclean maintainer-clean: + rm -rf tmp_check diff --git a/src/test/mdb_admin/t/signals.pl b/src/test/mdb_admin/t/signals.pl new file mode 100644 index 00000000000..a11db27a527 --- /dev/null +++ b/src/test/mdb_admin/t/signals.pl @@ -0,0 +1,74 @@ + +# Copyright (c) 2024-2024, MDB, Mother Russia + +# Minimal test of backend-signalling privileges for mdb_admin roles +use strict; +use warnings; +use PostgreSQL::Test::Cluster; +use PostgreSQL::Test::Utils; +use Test::More; + +# Initialize primary node +my $node_primary = PostgreSQL::Test::Cluster->new('primary'); +$node_primary->init(); +$node_primary->start; + +# Create a test database and roles with pg_signal_backend/mdb_admin grants +$node_primary->safe_psql('postgres', + " + CREATE DATABASE regress; + CREATE ROLE mdb_admin; + CREATE ROLE mdb_reg_lh_1; + CREATE ROLE mdb_reg_lh_2; + GRANT pg_signal_backend TO mdb_admin; + GRANT pg_signal_backend TO mdb_reg_lh_1; + GRANT mdb_admin TO mdb_reg_lh_2; +"); + +# Create a large table and tune autovacuum so a worker stays busy long enough +$node_primary->safe_psql('regress', + " + CREATE TABLE tab_int(i int); + INSERT INTO tab_int SELECT * FROM generate_series(1, 1000000); + ALTER SYSTEM SET autovacuum_vacuum_cost_limit TO 1; + ALTER SYSTEM SET autovacuum_vacuum_cost_delay TO 100; + ALTER SYSTEM SET autovacuum_naptime TO 1; +"); + +$node_primary->restart; + +sleep 1; + +my $res_pid = $node_primary->safe_psql('regress', + " + SELECT pid FROM pg_stat_activity WHERE backend_type = 'autovacuum worker' and datname = 'regress'; +"); + + +print "pid is $res_pid\n"; + +ok(1); + + +my ($res_reg_lh_1, $stdout_reg_lh_1, $stderr_reg_lh_1) = $node_primary->psql('regress', + " + SET ROLE mdb_reg_lh_1; + SELECT pg_terminate_backend($res_pid); +"); + +# print ($res_reg_lh_1, $stdout_reg_lh_1, $stderr_reg_lh_1, "\n"); + +ok($res_reg_lh_1 != 0, "should fail for non-mdb_admin"); +like($stderr_reg_lh_1,
qr/ERROR: must be a superuser to terminate superuser process/, "matches"); + +my ($res_reg_lh_2, $stdout_reg_lh_2, $stderr_reg_lh_2) = $node_primary->psql('regress', + " + SET ROLE mdb_reg_lh_2; + SELECT pg_terminate_backend($res_pid); +"); + +ok($res_reg_lh_2 == 0, "should succeed for mdb_admin"); + +# print ($res_reg_lh_2, $stdout_reg_lh_2, $stderr_reg_lh_2, "\n"); + +done_testing(); \ No newline at end of file diff --git a/src/test/modules/libpq_pipeline/libpq_pipeline.c b/src/test/modules/libpq_pipeline/libpq_pipeline.c index c27c4e0adaf..dfab924965d 100644 --- a/src/test/modules/libpq_pipeline/libpq_pipeline.c +++ b/src/test/modules/libpq_pipeline/libpq_pipeline.c @@ -581,8 +581,6 @@ test_pipeline_abort(PGconn *conn) if (PQpipelineStatus(conn) != PQ_PIPELINE_OFF) pg_fatal("exiting pipeline mode didn't seem to work"); - fprintf(stderr, "ok\n"); - /*- * Since we fired the pipelines off without a surrounding xact, the results * should be: @@ -614,6 +612,8 @@ test_pipeline_abort(PGconn *conn) } PQclear(res); + + fprintf(stderr, "ok\n"); } /* State machine enum for test_pipelined_insert */ @@ -968,6 +968,207 @@ test_prepared(PGconn *conn) fprintf(stderr, "ok\n"); } +/* Notice processor: print notices, and count how many we got */ +static void +notice_processor(void *arg, const char *message) +{ + int *n_notices = (int *) arg; + + (*n_notices)++; + fprintf(stderr, "NOTICE %d: %s", *n_notices, message); +} + +/* Verify behavior in "idle" state */ +static void +test_pipeline_idle(PGconn *conn) +{ + PGresult *res; + int n_notices = 0; + + fprintf(stderr, "\npipeline idle...\n"); + + PQsetNoticeProcessor(conn, notice_processor, &n_notices); + + /* + * Cause a Close message to be sent to the server, and watch libpq's + * reaction to the resulting CloseComplete. libpq must not get in IDLE + * state until that has been received.
+ */ + if (PQenterPipelineMode(conn) != 1) + pg_fatal("failed to enter pipeline mode: %s", PQerrorMessage(conn)); + + if (PQsendQuery(conn, "SELECT 1") != 1) + pg_fatal("failed to send query: %s", PQerrorMessage(conn)); + PQsendFlushRequest(conn); + res = PQgetResult(conn); + if (res == NULL) + pg_fatal("PQgetResult returned null when there's a pipeline item: %s", + PQerrorMessage(conn)); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + pg_fatal("Unexpected result code %s from first pipeline item", + PQresStatus(PQresultStatus(res))); + PQclear(res); + + res = PQgetResult(conn); + if (res != NULL) + pg_fatal("expected NULL result"); + + if (PQpipelineSync(conn) != 1) + pg_fatal("pipeline sync failed: %s", PQerrorMessage(conn)); + res = PQgetResult(conn); + if (res == NULL) + pg_fatal("PQgetResult returned null when there's a pipeline item: %s", + PQerrorMessage(conn)); + if (PQresultStatus(res) != PGRES_PIPELINE_SYNC) + pg_fatal("Unexpected result code %s instead of PGRES_PIPELINE_SYNC, error: %s", + PQresStatus(PQresultStatus(res)), PQerrorMessage(conn)); + PQclear(res); + res = NULL; + + if (PQexitPipelineMode(conn) != 1) + pg_fatal("attempt to exit pipeline mode failed when it should've succeeded: %s", + PQerrorMessage(conn)); + + /* + * Must not have got any notices here; note bug as described in + * https://postgr.es/m/CA+mi_8bvD0_CW3sumgwPvWdNzXY32itoG_16tDYRu_1S2gV2iw@mail.gmail.com + */ + if (n_notices > 0) + pg_fatal("got %d notice(s)", n_notices); + fprintf(stderr, "ok - 1\n"); + + /* + * Verify that we can send a query using simple query protocol after one + * in pipeline mode. 
+ */ + if (PQenterPipelineMode(conn) != 1) + pg_fatal("failed to enter pipeline mode: %s", PQerrorMessage(conn)); + if (PQsendQuery(conn, "SELECT 1") != 1) + pg_fatal("failed to send query: %s", PQerrorMessage(conn)); + PQsendFlushRequest(conn); + res = PQgetResult(conn); + if (res == NULL) + pg_fatal("PQgetResult returned null when there's a pipeline item: %s", + PQerrorMessage(conn)); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + pg_fatal("unexpected result code %s from first pipeline item", + PQresStatus(PQresultStatus(res))); + res = PQgetResult(conn); + if (res != NULL) + pg_fatal("got unexpected non-null result"); + /* We can exit pipeline mode now */ + if (PQexitPipelineMode(conn) != 1) + pg_fatal("attempt to exit pipeline mode failed when it should've succeeded: %s", + PQerrorMessage(conn)); + res = PQexec(conn, "SELECT 2"); + if (n_notices > 0) + pg_fatal("got %d notice(s)", n_notices); + if (res == NULL) + pg_fatal("PQexec returned NULL"); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + pg_fatal("unexpected result code %s from non-pipeline query", + PQresStatus(PQresultStatus(res))); + res = PQgetResult(conn); + if (res != NULL) + pg_fatal("did not receive terminating NULL"); + if (n_notices > 0) + pg_fatal("got %d notice(s)", n_notices); + fprintf(stderr, "ok - 2\n"); + + /* + * Case 2: exiting pipeline mode is not OK if a second command is sent. 
+ */ + + if (PQenterPipelineMode(conn) != 1) + pg_fatal("failed to enter pipeline mode: %s", PQerrorMessage(conn)); + if (PQsendQuery(conn, "SELECT 1") != 1) + pg_fatal("failed to send query: %s", PQerrorMessage(conn)); + PQsendFlushRequest(conn); + res = PQgetResult(conn); + if (res == NULL) + pg_fatal("PQgetResult returned null when there's a pipeline item: %s", + PQerrorMessage(conn)); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + pg_fatal("unexpected result code %s from first pipeline item", + PQresStatus(PQresultStatus(res))); + if (PQsendQuery(conn, "SELECT 2") != 1) + pg_fatal("failed to send query: %s", PQerrorMessage(conn)); + PQsendFlushRequest(conn); + /* read terminating null from first query */ + res = PQgetResult(conn); + if (res != NULL) + pg_fatal("did not receive terminating NULL"); + res = PQgetResult(conn); + if (res == NULL) + pg_fatal("PQgetResult returned null when there's a pipeline item: %s", + PQerrorMessage(conn)); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + pg_fatal("unexpected result code %s from first pipeline item", + PQresStatus(PQresultStatus(res))); + res = PQgetResult(conn); + if (res != NULL) + pg_fatal("did not receive terminating NULL"); + if (PQexitPipelineMode(conn) != 1) + pg_fatal("attempt to exit pipeline mode failed when it should've succeeded: %s", + PQerrorMessage(conn)); + + /* Try to exit pipeline mode in pipeline-idle state */ + if (PQenterPipelineMode(conn) != 1) + pg_fatal("failed to enter pipeline mode: %s", PQerrorMessage(conn)); + if (PQsendQuery(conn, "SELECT 1") != 1) + pg_fatal("failed to send query: %s", PQerrorMessage(conn)); + PQsendFlushRequest(conn); + res = PQgetResult(conn); + if (res == NULL) + pg_fatal("PQgetResult returned null when there's a pipeline item: %s", + PQerrorMessage(conn)); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + pg_fatal("unexpected result code %s from first pipeline item", + PQresStatus(PQresultStatus(res))); + PQclear(res); + res = PQgetResult(conn); + if (res != NULL) + 
pg_fatal("did not receive terminating NULL"); + if (PQsendQuery(conn, "SELECT 2") != 1) + pg_fatal("failed to send query: %s", PQerrorMessage(conn)); + if (PQexitPipelineMode(conn) == 1) + pg_fatal("exiting pipeline succeeded when it shouldn't"); + if (strncmp(PQerrorMessage(conn), "cannot exit pipeline mode", + strlen("cannot exit pipeline mode")) != 0) + pg_fatal("did not get expected error; got: %s", + PQerrorMessage(conn)); + PQsendFlushRequest(conn); + res = PQgetResult(conn); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + pg_fatal("unexpected result code %s from second pipeline item", + PQresStatus(PQresultStatus(res))); + PQclear(res); + res = PQgetResult(conn); + if (res != NULL) + pg_fatal("did not receive terminating NULL"); + if (PQexitPipelineMode(conn) != 1) + pg_fatal("exiting pipeline failed: %s", PQerrorMessage(conn)); + + if (n_notices > 0) + pg_fatal("got %d notice(s)", n_notices); + fprintf(stderr, "ok - 3\n"); + + /* Have a WARNING in the middle of a resultset */ + if (PQenterPipelineMode(conn) != 1) + pg_fatal("entering pipeline mode failed: %s", PQerrorMessage(conn)); + if (PQsendQuery(conn, "SELECT pg_catalog.pg_advisory_unlock(1,1)") != 1) + pg_fatal("failed to send query: %s", PQerrorMessage(conn)); + PQsendFlushRequest(conn); + res = PQgetResult(conn); + if (res == NULL) + pg_fatal("unexpected NULL result received"); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + pg_fatal("unexpected result code %s", PQresStatus(PQresultStatus(res))); + if (PQexitPipelineMode(conn) != 1) + pg_fatal("failed to exit pipeline mode: %s", PQerrorMessage(conn)); + fprintf(stderr, "ok - 4\n"); +} + static void test_simple_pipeline(PGconn *conn) { @@ -1160,6 +1361,8 @@ test_singlerowmode(PGconn *conn) if (PQexitPipelineMode(conn) != 1) pg_fatal("failed to end pipeline mode: %s", PQerrorMessage(conn)); + + fprintf(stderr, "ok\n"); } /* @@ -1549,6 +1752,7 @@ print_test_list(void) printf("multi_pipelines\n"); printf("nosync\n"); printf("pipeline_abort\n"); + 
printf("pipeline_idle\n"); printf("pipelined_insert\n"); printf("prepared\n"); printf("simple_pipeline\n"); @@ -1630,7 +1834,10 @@ main(int argc, char **argv) /* Set the trace file, if requested */ if (tracefile != NULL) { - trace = fopen(tracefile, "w"); + if (strcmp(tracefile, "-") == 0) + trace = stdout; + else + trace = fopen(tracefile, "w"); if (trace == NULL) pg_fatal("could not open file \"%s\": %m", tracefile); @@ -1650,6 +1857,8 @@ main(int argc, char **argv) test_nosync(conn); else if (strcmp(testname, "pipeline_abort") == 0) test_pipeline_abort(conn); + else if (strcmp(testname, "pipeline_idle") == 0) + test_pipeline_idle(conn); else if (strcmp(testname, "pipelined_insert") == 0) test_pipelined_insert(conn, numrows); else if (strcmp(testname, "prepared") == 0) diff --git a/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl b/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl index d8d496c995a..b02928cad29 100644 --- a/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl +++ b/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl @@ -26,7 +26,8 @@ my @extraargs = ('-r', $numrows); my $cmptrace = grep(/^$testname$/, qw(simple_pipeline nosync multi_pipelines prepared singlerow - pipeline_abort transaction disallowed_in_pipeline)) > 0; + pipeline_abort pipeline_idle transaction + disallowed_in_pipeline)) > 0; # For a bunch of tests, generate a libpq trace file too. my $traceout = "$TestLib::tmp_check/traces/$testname.trace"; diff --git a/src/test/modules/libpq_pipeline/traces/pipeline_idle.trace b/src/test/modules/libpq_pipeline/traces/pipeline_idle.trace new file mode 100644 index 00000000000..3957ee4dfe1 --- /dev/null +++ b/src/test/modules/libpq_pipeline/traces/pipeline_idle.trace @@ -0,0 +1,93 @@ +F 16 Parse "" "SELECT 1" 0 +F 12 Bind "" "" 0 0 0 +F 6 Describe P "" +F 9 Execute "" 0 +F 6 Close P "" +F 4 Flush +B 4 ParseComplete +B 4 BindComplete +B 33 RowDescription 1 "?column?" 
NNNN 0 NNNN 4 -1 0 +B 11 DataRow 1 1 '1' +B 13 CommandComplete "SELECT 1" +B 4 CloseComplete +F 4 Sync +B 5 ReadyForQuery I +F 16 Parse "" "SELECT 1" 0 +F 12 Bind "" "" 0 0 0 +F 6 Describe P "" +F 9 Execute "" 0 +F 6 Close P "" +F 4 Flush +B 4 ParseComplete +B 4 BindComplete +B 33 RowDescription 1 "?column?" NNNN 0 NNNN 4 -1 0 +B 11 DataRow 1 1 '1' +B 13 CommandComplete "SELECT 1" +B 4 CloseComplete +F 13 Query "SELECT 2" +B 33 RowDescription 1 "?column?" NNNN 0 NNNN 4 -1 0 +B 11 DataRow 1 1 '2' +B 13 CommandComplete "SELECT 1" +B 5 ReadyForQuery I +F 16 Parse "" "SELECT 1" 0 +F 12 Bind "" "" 0 0 0 +F 6 Describe P "" +F 9 Execute "" 0 +F 6 Close P "" +F 4 Flush +B 4 ParseComplete +B 4 BindComplete +B 33 RowDescription 1 "?column?" NNNN 0 NNNN 4 -1 0 +B 11 DataRow 1 1 '1' +B 13 CommandComplete "SELECT 1" +B 4 CloseComplete +F 16 Parse "" "SELECT 2" 0 +F 12 Bind "" "" 0 0 0 +F 6 Describe P "" +F 9 Execute "" 0 +F 6 Close P "" +F 4 Flush +B 4 ParseComplete +B 4 BindComplete +B 33 RowDescription 1 "?column?" NNNN 0 NNNN 4 -1 0 +B 11 DataRow 1 1 '2' +B 13 CommandComplete "SELECT 1" +B 4 CloseComplete +F 16 Parse "" "SELECT 1" 0 +F 12 Bind "" "" 0 0 0 +F 6 Describe P "" +F 9 Execute "" 0 +F 6 Close P "" +F 4 Flush +B 4 ParseComplete +B 4 BindComplete +B 33 RowDescription 1 "?column?" NNNN 0 NNNN 4 -1 0 +B 11 DataRow 1 1 '1' +B 13 CommandComplete "SELECT 1" +B 4 CloseComplete +F 16 Parse "" "SELECT 2" 0 +F 12 Bind "" "" 0 0 0 +F 6 Describe P "" +F 9 Execute "" 0 +F 6 Close P "" +F 4 Flush +B 4 ParseComplete +B 4 BindComplete +B 33 RowDescription 1 "?column?" 
NNNN 0 NNNN 4 -1 0 +B 11 DataRow 1 1 '2' +B 13 CommandComplete "SELECT 1" +B 4 CloseComplete +F 49 Parse "" "SELECT pg_catalog.pg_advisory_unlock(1,1)" 0 +F 12 Bind "" "" 0 0 0 +F 6 Describe P "" +F 9 Execute "" 0 +F 6 Close P "" +F 4 Flush +B 4 ParseComplete +B 4 BindComplete +B 43 RowDescription 1 "pg_advisory_unlock" NNNN 0 NNNN 1 -1 0 +B NN NoticeResponse S "WARNING" V "WARNING" C "01000" M "you don't own a lock of type ExclusiveLock" F "SSSS" L "SSSS" R "SSSS" \x00 +B 11 DataRow 1 1 'f' +B 13 CommandComplete "SELECT 1" +B 4 CloseComplete +F 4 Terminate diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm index 12339c23de1..14e9138a394 100644 --- a/src/test/perl/PostgreSQL/Test/Cluster.pm +++ b/src/test/perl/PostgreSQL/Test/Cluster.pm @@ -1,15 +1,13 @@ # Copyright (c) 2022, PostgreSQL Global Development Group -# allow use of release 15+ perl namespace in older branches -# just 'use' the older module name. -# See PostgresNode.pm for function implementations - -package PostgreSQL::Test::Cluster; +# Allow use of release 15+ Perl package name in older branches, by giving that +# package the same symbol table as the older package. See PostgresNode::new +# for supporting heuristics. use strict; use warnings; - -use PostgresNode; +BEGIN { *PostgreSQL::Test::Cluster:: = \*PostgresNode::; } +use PostgresNode (); 1; diff --git a/src/test/perl/PostgreSQL/Test/Utils.pm b/src/test/perl/PostgreSQL/Test/Utils.pm index bdbbd6e4706..2d15bbf21d7 100644 --- a/src/test/perl/PostgreSQL/Test/Utils.pm +++ b/src/test/perl/PostgreSQL/Test/Utils.pm @@ -1,48 +1,11 @@ # Copyright (c) 2022, PostgreSQL Global Development Group -# allow use of release 15+ perl namespace in older branches -# just 'use' the older module name. -# We export the same names as the v15 module. -# See TestLib.pm for alias assignment that makes this all work. 
- -package PostgreSQL::Test::Utils; +# Allow use of release 15+ Perl package name in older branches, by giving that +# package the same symbol table as the older package. use strict; use warnings; - -use Exporter 'import'; - -use TestLib; - -our @EXPORT = qw( - generate_ascii_string - slurp_dir - slurp_file - append_to_file - check_mode_recursive - chmod_recursive - check_pg_config - dir_symlink - system_or_bail - system_log - run_log - run_command - pump_until - - command_ok - command_fails - command_exit_is - program_help_ok - program_version_ok - program_options_handling_ok - command_like - command_like_safe - command_fails_like - command_checks_all - - $windows_os - $is_msys2 - $use_unix_sockets -); +BEGIN { *PostgreSQL::Test::Utils:: = \*TestLib::; } +use TestLib (); 1; diff --git a/src/test/perl/PostgresNode.pm b/src/test/perl/PostgresNode.pm index 9e6d4c653b9..241ed8d49e8 100644 --- a/src/test/perl/PostgresNode.pm +++ b/src/test/perl/PostgresNode.pm @@ -162,6 +162,17 @@ of finding port numbers, registering instances for cleanup, etc. sub new { my ($class, $name, $pghost, $pgport) = @_; + + # Use release 15+ semantics when the arguments look like (node_name, + # %params). We can't use $class to decide, because get_new_node() passes + # a v14- argument list regardless of the class. $class might be an + # out-of-core subclass. $class->isa('PostgresNode') returns true even for + # descendants of PostgreSQL::Test::Cluster, so it doesn't help. + return $class->get_new_node(@_[ 1 .. 
$#_ ]) + if !$pghost + or !$pgport + or $pghost =~ /^[a-zA-Z0-9_]$/; + my $testname = basename($0); $testname =~ s/\.[^.]+$//; @@ -3068,18 +3079,4 @@ sub corrupt_page_checksum =cut -# support release 15+ perl module namespace - -package PostgreSQL::Test::Cluster; ## no critic (ProhibitMultiplePackages) - -sub new -{ - shift; # remove class param from args - return PostgresNode->get_new_node(@_); -} - -no warnings 'once'; - -*get_free_port = *PostgresNode::get_free_port; - 1; diff --git a/src/test/perl/TestLib.pm b/src/test/perl/TestLib.pm index f3ee20af41c..610050e1c4b 100644 --- a/src/test/perl/TestLib.pm +++ b/src/test/perl/TestLib.pm @@ -979,46 +979,4 @@ sub command_checks_all =cut -# support release 15+ perl module namespace - -package PostgreSQL::Test::Utils; ## no critic (ProhibitMultiplePackages) - -# we don't want to export anything here, but we want to support things called -# via this package name explicitly. - -# use typeglobs to alias these functions and variables - -no warnings qw(once); - -*generate_ascii_string = *TestLib::generate_ascii_string; -*slurp_dir = *TestLib::slurp_dir; -*slurp_file = *TestLib::slurp_file; -*append_to_file = *TestLib::append_to_file; -*check_mode_recursive = *TestLib::check_mode_recursive; -*chmod_recursive = *TestLib::chmod_recursive; -*check_pg_config = *TestLib::check_pg_config; -*dir_symlink = *TestLib::dir_symlink; -*system_or_bail = *TestLib::system_or_bail; -*system_log = *TestLib::system_log; -*run_log = *TestLib::run_log; -*run_command = *TestLib::run_command; -*command_ok = *TestLib::command_ok; -*command_fails = *TestLib::command_fails; -*command_exit_is = *TestLib::command_exit_is; -*program_help_ok = *TestLib::program_help_ok; -*program_version_ok = *TestLib::program_version_ok; -*program_options_handling_ok = *TestLib::program_options_handling_ok; -*command_like = *TestLib::command_like; -*command_like_safe = *TestLib::command_like_safe; -*command_fails_like = *TestLib::command_fails_like; -*command_checks_all 
= *TestLib::command_checks_all; - -*windows_os = *TestLib::windows_os; -*is_msys2 = *TestLib::is_msys2; -*use_unix_sockets = *TestLib::use_unix_sockets; -*timeout_default = *TestLib::timeout_default; -*tmp_check = *TestLib::tmp_check; -*log_path = *TestLib::log_path; -*test_logfile = *TestLib::test_log_file; - 1; diff --git a/src/test/regress/expected/create_function_3.out b/src/test/regress/expected/create_function_3.out index 8380df1591f..7842a3c1c82 100644 --- a/src/test/regress/expected/create_function_3.out +++ b/src/test/regress/expected/create_function_3.out @@ -166,10 +166,10 @@ SET SESSION AUTHORIZATION regress_unpriv_user; SET search_path TO temp_func_test, public; ALTER FUNCTION functest_E_1(int) NOT LEAKPROOF; ALTER FUNCTION functest_E_2(int) LEAKPROOF; -ERROR: only superuser can define a leakproof function +ERROR: only superuser or mdb_admin can define a leakproof function CREATE FUNCTION functest_E_3(int) RETURNS bool LANGUAGE 'sql' LEAKPROOF AS 'SELECT $1 < 200'; -- fail -ERROR: only superuser can define a leakproof function +ERROR: only superuser or mdb_admin can define a leakproof function RESET SESSION AUTHORIZATION; -- -- CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT diff --git a/src/test/regress/expected/create_function_3_optimizer.out b/src/test/regress/expected/create_function_3_optimizer.out index 3ae669d518a..3256709e1aa 100644 --- a/src/test/regress/expected/create_function_3_optimizer.out +++ b/src/test/regress/expected/create_function_3_optimizer.out @@ -166,10 +166,10 @@ SET SESSION AUTHORIZATION regress_unpriv_user; SET search_path TO temp_func_test, public; ALTER FUNCTION functest_E_1(int) NOT LEAKPROOF; ALTER FUNCTION functest_E_2(int) LEAKPROOF; -ERROR: only superuser can define a leakproof function +ERROR: only superuser or mdb_admin can define a leakproof function CREATE FUNCTION functest_E_3(int) RETURNS bool LANGUAGE 'sql' LEAKPROOF AS 'SELECT $1 < 200'; -- fail -ERROR: only superuser can define a leakproof function 
+ERROR: only superuser or mdb_admin can define a leakproof function RESET SESSION AUTHORIZATION; -- -- CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT diff --git a/src/test/regress/expected/create_view.out b/src/test/regress/expected/create_view.out index 82332a47c11..fdb0657bb72 100644 --- a/src/test/regress/expected/create_view.out +++ b/src/test/regress/expected/create_view.out @@ -1551,17 +1551,26 @@ select * from tt14v; begin; -- this perhaps should be rejected, but it isn't: alter table tt14t drop column f3; --- f3 is still in the view ... +-- column f3 is still in the view, sort of ... select pg_get_viewdef('tt14v', true); - pg_get_viewdef --------------------------------- - SELECT t.f1, + - t.f3, + - t.f4 + - FROM tt14f() t(f1, f3, f4); + pg_get_viewdef +--------------------------------- + SELECT t.f1, + + t."?dropped?column?" AS f3,+ + t.f4 + + FROM tt14f() t(f1, f4); (1 row) --- but will fail at execution +-- ... and you can even EXPLAIN it ... +explain (verbose, costs off) select * from tt14v; + QUERY PLAN +---------------------------------------- + Function Scan on testviewschm2.tt14f t + Output: t.f1, t.f3, t.f4 + Function Call: tt14f() +(3 rows) + +-- but it will fail at execution select f1, f4 from tt14v; f1 | f4 -----+---- diff --git a/src/test/regress/expected/create_view_optimizer.out b/src/test/regress/expected/create_view_optimizer.out index 2123c0150c1..5719aea410c 100755 --- a/src/test/regress/expected/create_view_optimizer.out +++ b/src/test/regress/expected/create_view_optimizer.out @@ -1550,17 +1550,26 @@ select * from tt14v; begin; -- this perhaps should be rejected, but it isn't: alter table tt14t drop column f3; --- f3 is still in the view ... +-- column f3 is still in the view, sort of ... 
select pg_get_viewdef('tt14v', true); - pg_get_viewdef --------------------------------- - SELECT t.f1, + - t.f3, + - t.f4 + - FROM tt14f() t(f1, f3, f4); + pg_get_viewdef +--------------------------------- + SELECT t.f1, + + t."?dropped?column?" AS f3,+ + t.f4 + + FROM tt14f() t(f1, f4); (1 row) --- but will fail at execution +-- ... and you can even EXPLAIN it ... +explain (verbose, costs off) select * from tt14v; + QUERY PLAN +---------------------------------------- + Function Scan on testviewschm2.tt14f t + Output: t.f1, t.f3, t.f4 + Function Call: tt14f() +(3 rows) + +-- but it will fail at execution select f1, f4 from tt14v; f1 | f4 -----+---- diff --git a/src/test/regress/expected/cte_prune_optimizer.out b/src/test/regress/expected/cte_prune_optimizer.out index e7f2339a0eb..1b0833673e8 100644 --- a/src/test/regress/expected/cte_prune_optimizer.out +++ b/src/test/regress/expected/cte_prune_optimizer.out @@ -1306,16 +1306,16 @@ LIMIT 10; Output: t4.d -> Redistribute Motion 1:3 (slice4) (cost=0.00..1356696139.21 rows=4 width=24) Output: t4_1.c, t4_1.d, (avg(share0_ref3.b) OVER (?)), (sum(share0_ref2.d) OVER (?)) - -> Limit (cost=0.00..1356696139.20 rows=10 width=24) + -> Limit (cost=0.00..1356696139.21 rows=10 width=24) Output: t4_1.c, t4_1.d, (avg(share0_ref3.b) OVER (?)), (sum(share0_ref2.d) OVER (?)) - -> Gather Motion 3:1 (slice5; segments: 3) (cost=0.00..1356696139.20 rows=10 width=24) + -> Gather Motion 3:1 (slice5; segments: 3) (cost=0.00..1356696139.21 rows=10 width=24) Output: t4_1.c, t4_1.d, (avg(share0_ref3.b) OVER (?)), (sum(share0_ref2.d) OVER (?)) Merge Key: t4_1.c, t4_1.d, (avg(share0_ref3.b) OVER (?)), (sum(share0_ref2.d) OVER (?)) - -> Limit (cost=0.00..1356696139.20 rows=4 width=24) + -> Limit (cost=0.00..1356696139.21 rows=4 width=24) Output: t4_1.c, t4_1.d, (avg(share0_ref3.b) OVER (?)), (sum(share0_ref2.d) OVER (?)) - -> Result (cost=0.00..1356696139.20 rows=134 width=24) + -> Result (cost=0.00..1356696139.21 rows=134 width=24) Output: 
t4_1.c, t4_1.d, (avg(share0_ref3.b) OVER (?)), (sum(share0_ref2.d) OVER (?)) - -> Sort (cost=0.00..1356696139.20 rows=134 width=24) + -> Sort (cost=0.00..1356696139.21 rows=134 width=24) Output: t4_1.c, t4_1.d, (avg(share0_ref3.b) OVER (?)), (sum(share0_ref2.d) OVER (?)), share0_ref2.b Sort Key: t4_1.c, t4_1.d, (avg(share0_ref3.b) OVER (?)), (sum(share0_ref2.d) OVER (?)) -> WindowAgg (cost=0.00..1356696139.08 rows=134 width=24) diff --git a/src/test/regress/expected/mdb_admin.out b/src/test/regress/expected/mdb_admin.out new file mode 100644 index 00000000000..e4dfc436802 --- /dev/null +++ b/src/test/regress/expected/mdb_admin.out @@ -0,0 +1,100 @@ +CREATE ROLE regress_mdb_admin_user1; +CREATE ROLE regress_mdb_admin_user2; +CREATE ROLE regress_mdb_admin_user3; +CREATE ROLE regress_superuser WITH SUPERUSER; +GRANT mdb_admin TO regress_mdb_admin_user1; +GRANT CREATE ON DATABASE regression TO regress_mdb_admin_user2; +GRANT CREATE ON DATABASE regression TO regress_mdb_admin_user3; +-- mdb admin trasfers ownership to another role +SET ROLE regress_mdb_admin_user2; +CREATE FUNCTION regress_mdb_admin_add(integer, integer) RETURNS integer + AS 'SELECT $1 + $2;' + LANGUAGE SQL + IMMUTABLE + RETURNS NULL ON NULL INPUT; +CREATE SCHEMA regress_mdb_admin_schema; +GRANT CREATE ON SCHEMA regress_mdb_admin_schema TO regress_mdb_admin_user3; +CREATE TABLE regress_mdb_admin_schema.regress_mdb_admin_table(); +CREATE TABLE regress_mdb_admin_table(); +CREATE VIEW regress_mdb_admin_view as SELECT 1; +SET ROLE regress_mdb_admin_user1; +ALTER FUNCTION regress_mdb_admin_add (integer, integer) OWNER TO regress_mdb_admin_user3; +ALTER VIEW regress_mdb_admin_view OWNER TO regress_mdb_admin_user3; +ALTER TABLE regress_mdb_admin_schema.regress_mdb_admin_table OWNER TO regress_mdb_admin_user3; +ALTER TABLE regress_mdb_admin_table OWNER TO regress_mdb_admin_user3; +ALTER SCHEMA regress_mdb_admin_schema OWNER TO regress_mdb_admin_user3; +-- mdb admin fails to transfer ownership to superusers and 
particular system roles +ALTER FUNCTION regress_mdb_admin_add (integer, integer) OWNER TO regress_superuser; +ERROR: cannot transfer ownership to superuser "regress_superuser" +ALTER VIEW regress_mdb_admin_view OWNER TO regress_superuser; +ERROR: cannot transfer ownership to superuser "regress_superuser" +ALTER TABLE regress_mdb_admin_schema.regress_mdb_admin_table OWNER TO regress_superuser; +ERROR: cannot transfer ownership to superuser "regress_superuser" +ALTER TABLE regress_mdb_admin_table OWNER TO regress_superuser; +ERROR: cannot transfer ownership to superuser "regress_superuser" +ALTER SCHEMA regress_mdb_admin_schema OWNER TO regress_superuser; +ERROR: cannot transfer ownership to superuser "regress_superuser" +ALTER FUNCTION regress_mdb_admin_add (integer, integer) OWNER TO pg_execute_server_program; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER VIEW regress_mdb_admin_view OWNER TO pg_execute_server_program; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER TABLE regress_mdb_admin_schema.regress_mdb_admin_table OWNER TO pg_execute_server_program; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER TABLE regress_mdb_admin_table OWNER TO pg_execute_server_program; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER SCHEMA regress_mdb_admin_schema OWNER TO pg_execute_server_program; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER FUNCTION regress_mdb_admin_add (integer, integer) OWNER TO pg_write_server_files; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER VIEW regress_mdb_admin_view OWNER TO pg_write_server_files; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER TABLE regress_mdb_admin_schema.regress_mdb_admin_table OWNER TO pg_write_server_files; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER TABLE regress_mdb_admin_table OWNER TO 
pg_write_server_files; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER SCHEMA regress_mdb_admin_schema OWNER TO pg_write_server_files; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER FUNCTION regress_mdb_admin_add (integer, integer) OWNER TO pg_read_server_files; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER VIEW regress_mdb_admin_view OWNER TO pg_read_server_files; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER TABLE regress_mdb_admin_schema.regress_mdb_admin_table OWNER TO pg_read_server_files; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER TABLE regress_mdb_admin_table OWNER TO pg_read_server_files; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER SCHEMA regress_mdb_admin_schema OWNER TO pg_read_server_files; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER FUNCTION regress_mdb_admin_add (integer, integer) OWNER TO pg_write_all_data; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER VIEW regress_mdb_admin_view OWNER TO pg_write_all_data; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER TABLE regress_mdb_admin_schema.regress_mdb_admin_table OWNER TO pg_write_all_data; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER TABLE regress_mdb_admin_table OWNER TO pg_write_all_data; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER SCHEMA regress_mdb_admin_schema OWNER TO pg_write_all_data; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER FUNCTION regress_mdb_admin_add (integer, integer) OWNER TO pg_read_all_data; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER VIEW regress_mdb_admin_view OWNER TO pg_read_all_data; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER TABLE 
regress_mdb_admin_schema.regress_mdb_admin_table OWNER TO pg_read_all_data; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER TABLE regress_mdb_admin_table OWNER TO pg_read_all_data; +ERROR: forbidden to transfer ownership to this system role in Cloud +ALTER SCHEMA regress_mdb_admin_schema OWNER TO pg_read_all_data; +ERROR: forbidden to transfer ownership to this system role in Cloud +-- end tests +RESET SESSION AUTHORIZATION; +-- +REVOKE CREATE ON DATABASE regression FROM regress_mdb_admin_user2; +REVOKE CREATE ON DATABASE regression FROM regress_mdb_admin_user3; +DROP VIEW regress_mdb_admin_view; +DROP FUNCTION regress_mdb_admin_add; +DROP TABLE regress_mdb_admin_schema.regress_mdb_admin_table; +DROP TABLE regress_mdb_admin_table; +DROP SCHEMA regress_mdb_admin_schema; +DROP ROLE regress_mdb_admin_user1; +DROP ROLE regress_mdb_admin_user2; +DROP ROLE regress_mdb_admin_user3; +DROP ROLE regress_superuser; diff --git a/src/test/regress/expected/mdb_superuser.out b/src/test/regress/expected/mdb_superuser.out new file mode 100644 index 00000000000..21bafb1011b --- /dev/null +++ b/src/test/regress/expected/mdb_superuser.out @@ -0,0 +1,115 @@ +CREATE ROLE regress_mdb_superuser_user1; +CREATE ROLE regress_mdb_superuser_user2; +CREATE ROLE regress_mdb_superuser_user3; +GRANT mdb_admin TO mdb_superuser; +CREATE ROLE regress_superuser WITH SUPERUSER; +GRANT mdb_superuser TO regress_mdb_superuser_user1; +GRANT CREATE ON DATABASE regression TO regress_mdb_superuser_user2; +GRANT CREATE ON DATABASE regression TO regress_mdb_superuser_user3; +SET ROLE regress_mdb_superuser_user2; +CREATE FUNCTION regress_mdb_superuser_add(integer, integer) RETURNS integer + AS 'SELECT $1 + $2;' + LANGUAGE SQL + IMMUTABLE + RETURNS NULL ON NULL INPUT; +CREATE SCHEMA regress_mdb_superuser_schema; +CREATE TABLE regress_mdb_superuser_schema.regress_mdb_superuser_table(); +CREATE TABLE regress_mdb_superuser_table(); +CREATE VIEW regress_mdb_superuser_view as SELECT 1; +SET 
ROLE regress_mdb_superuser_user3; +INSERT INTO regress_mdb_superuser_table SELECT * FROM regress_mdb_superuser_table; +ERROR: permission denied for table regress_mdb_superuser_table +SET ROLE regress_mdb_superuser_user1; +-- mdb_superuser can grant to other role +GRANT USAGE, CREATE ON SCHEMA regress_mdb_superuser_schema TO regress_mdb_superuser_user3; +GRANT ALL PRIVILEGES ON TABLE regress_mdb_superuser_table TO regress_mdb_superuser_user3; +REVOKE ALL PRIVILEGES ON TABLE regress_mdb_superuser_table FROM regress_mdb_superuser_user3; +GRANT INSERT, SELECT ON TABLE regress_mdb_superuser_table TO regress_mdb_superuser_user3; +-- grant works +SET ROLE regress_mdb_superuser_user3; +INSERT INTO regress_mdb_superuser_table SELECT * FROM regress_mdb_superuser_table; +SET ROLE mdb_superuser; +-- mdb_superuser drop object of other role +DROP TABLE regress_mdb_superuser_table; +-- mdb admin fails to transfer ownership to superusers and system roles +RESET SESSION AUTHORIZATION; +CREATE TABLE regress_superuser_table(); +SET ROLE pg_read_server_files; +CREATE TABLE regress_pgrsf_table(); +SET ROLE pg_write_server_files; +CREATE TABLE regress_pgwsf_table(); +SET ROLE pg_execute_server_program; +CREATE TABLE regress_pgxsp_table(); +SET ROLE pg_read_all_data; +CREATE TABLE regress_pgrad_table(); +SET ROLE pg_write_all_data; +CREATE TABLE regress_pgrwd_table(); +SET ROLE mdb_superuser; +-- cannot read all data (fail) +SELECT * FROM pg_authid; +ERROR: permission denied for table pg_authid +-- can not drop superuser objects, because does not has_privs_of pg_database_owner +DROP TABLE regress_superuser_table; +ERROR: must be owner of table regress_superuser_table +DROP TABLE regress_pgrsf_table; +ERROR: must be owner of table regress_pgrsf_table +DROP TABLE regress_pgwsf_table; +ERROR: must be owner of table regress_pgwsf_table +DROP TABLE regress_pgxsp_table; +ERROR: must be owner of table regress_pgxsp_table +DROP TABLE regress_pgrad_table; +ERROR: must be owner of table 
regress_pgrad_table +DROP TABLE regress_pgrwd_table; +ERROR: must be owner of table regress_pgrwd_table +-- does allowed to creare database, role or extension +-- or grant such priviledge +CREATE DATABASE regress_db_fail; +ERROR: permission denied to create database +CREATE ROLE regress_role_fail; +ERROR: permission denied to create role +ALTER ROLE mdb_superuser WITH CREATEROLE; +ERROR: permission denied +ALTER ROLE mdb_superuser WITH CREATEDB; +ERROR: permission denied +ALTER ROLE regress_mdb_superuser_user2 WITH CREATEROLE; +ERROR: permission denied +ALTER ROLE regress_mdb_superuser_user2 WITH CREATEDB; +ERROR: permission denied +-- mdb_superuser more powerfull than pg_database_owner +RESET SESSION AUTHORIZATION; +CREATE DATABASE regress_check_owner OWNER regress_mdb_superuser_user2; +\c regress_check_owner; +SET ROLE regress_mdb_superuser_user2; +CREATE SCHEMA regtest; +CREATE TABLE regtest.regtest(); +-- this should fail +SET ROLE regress_mdb_superuser_user3; +GRANT ALL ON TABLE regtest.regtest TO regress_mdb_superuser_user3; +ERROR: permission denied for schema regtest +ALTER TABLE regtest.regtest OWNER TO regress_mdb_superuser_user3; +ERROR: permission denied for schema regtest +SET ROLE regress_mdb_superuser_user1; +GRANT ALL ON TABLE regtest.regtest TO regress_mdb_superuser_user1; +ALTER TABLE regtest.regtest OWNER TO regress_mdb_superuser_user1; +\c regression +DROP DATABASE regress_check_owner; +-- end tests +RESET SESSION AUTHORIZATION; +-- +REVOKE CREATE ON DATABASE regression FROM regress_mdb_superuser_user2; +REVOKE CREATE ON DATABASE regression FROM regress_mdb_superuser_user3; +DROP VIEW regress_mdb_superuser_view; +DROP FUNCTION regress_mdb_superuser_add; +DROP TABLE regress_mdb_superuser_schema.regress_mdb_superuser_table; +DROP TABLE regress_mdb_superuser_table; +ERROR: table "regress_mdb_superuser_table" does not exist +DROP SCHEMA regress_mdb_superuser_schema; +DROP ROLE regress_mdb_superuser_user1; +DROP ROLE regress_mdb_superuser_user2; 
+DROP ROLE regress_mdb_superuser_user3; +DROP TABLE regress_superuser_table; +DROP TABLE regress_pgrsf_table; +DROP TABLE regress_pgwsf_table; +DROP TABLE regress_pgxsp_table; +DROP TABLE regress_pgrad_table; +DROP TABLE regress_pgrwd_table; diff --git a/src/test/regress/expected/motion_socket.out b/src/test/regress/expected/motion_socket.out index 62a70660f59..baca679ecf3 100644 --- a/src/test/regress/expected/motion_socket.out +++ b/src/test/regress/expected/motion_socket.out @@ -48,7 +48,7 @@ for pid in pids_to_check: # We check count of those connections which have not been established. # Use the regex for example: "TCP :\d+ .*" (without '->') lsof_ret = subprocess.run(["lsof", "-i", "-nP", "-a", "-p", str(pid)], - capture_output=True, check=True).stdout + stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True).stdout plpy.info( f'Checking postgres backend {pid}, ' \ f'lsof output:\n{os.linesep.join(map(str, lsof_ret.splitlines()))}') diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out index b9dfc3116ef..aa81504d8f0 100644 --- a/src/test/regress/expected/privileges.out +++ b/src/test/regress/expected/privileges.out @@ -1986,6 +1986,24 @@ TABLE information_schema.enabled_roles; INSERT INTO datdba_only DEFAULT VALUES; ERROR: permission denied for table datdba_only +ROLLBACK; +-- pg_signal_backend can't signal superusers +RESET SESSION AUTHORIZATION; +BEGIN; +CREATE OR REPLACE FUNCTION terminate_nothrow(pid int) RETURNS bool + LANGUAGE plpgsql SECURITY DEFINER SET client_min_messages = error AS $$ +BEGIN + RETURN pg_terminate_backend($1); +EXCEPTION WHEN OTHERS THEN + RETURN false; +END$$; +ALTER FUNCTION terminate_nothrow OWNER TO pg_signal_backend; +SELECT backend_type FROM pg_stat_activity +WHERE CASE WHEN COALESCE(usesysid, 10) = 10 THEN terminate_nothrow(pid) END; + backend_type +-------------- +(0 rows) + ROLLBACK; -- test default ACLs \c - diff --git a/src/test/regress/expected/rules.out 
b/src/test/regress/expected/rules.out index cf2f24a5a67..16a471245a9 100644 --- a/src/test/regress/expected/rules.out +++ b/src/test/regress/expected/rules.out @@ -2439,10 +2439,7 @@ pg_stats_ext| SELECT cn.nspname AS schemaname, array_agg(pg_mcv_list_items.frequency) AS most_common_freqs, array_agg(pg_mcv_list_items.base_frequency) AS most_common_base_freqs FROM pg_mcv_list_items(sd.stxdmcv) pg_mcv_list_items(index, "values", nulls, frequency, base_frequency)) m ON ((sd.stxdmcv IS NOT NULL))) - WHERE ((NOT (EXISTS ( SELECT 1 - FROM (unnest(s.stxkeys) k(k) - JOIN pg_attribute a ON (((a.attrelid = s.stxrelid) AND (a.attnum = k.k)))) - WHERE (NOT has_column_privilege(c.oid, a.attnum, 'select'::text))))) AND ((c.relrowsecurity = false) OR (NOT row_security_active(c.oid)))); + WHERE (pg_has_role(c.relowner, 'USAGE'::text) AND ((c.relrowsecurity = false) OR (NOT row_security_active(c.oid)))); pg_stats_ext_exprs| SELECT cn.nspname AS schemaname, c.relname AS tablename, sn.nspname AS statistics_schemaname, @@ -2514,7 +2511,8 @@ pg_stats_ext_exprs| SELECT cn.nspname AS schemaname, LEFT JOIN pg_namespace cn ON ((cn.oid = c.relnamespace))) LEFT JOIN pg_namespace sn ON ((sn.oid = s.stxnamespace))) JOIN LATERAL ( SELECT unnest(pg_get_statisticsobjdef_expressions(s.oid)) AS expr, - unnest(sd.stxdexpr) AS a) stat ON ((stat.expr IS NOT NULL))); + unnest(sd.stxdexpr) AS a) stat ON ((stat.expr IS NOT NULL))) + WHERE (pg_has_role(c.relowner, 'USAGE'::text) AND ((c.relrowsecurity = false) OR (NOT row_security_active(c.oid)))); pg_tables| SELECT n.nspname AS schemaname, c.relname AS tablename, pg_get_userbyid(c.relowner) AS tableowner, diff --git a/src/test/regress/expected/stats_ext.out b/src/test/regress/expected/stats_ext.out index 3fc90553026..06aff4d5bd0 100644 --- a/src/test/regress/expected/stats_ext.out +++ b/src/test/regress/expected/stats_ext.out @@ -3196,6 +3196,10 @@ GRANT USAGE ON SCHEMA tststats TO regress_stats_user1; SET SESSION AUTHORIZATION regress_stats_user1; 
SELECT * FROM tststats.priv_test_tbl; -- Permission denied ERROR: permission denied for table priv_test_tbl +-- Check individual columns if we don't have table privilege +SELECT * FROM tststats.priv_test_tbl + WHERE a = 1 and tststats.priv_test_tbl.* > (1, 1) is not null; +ERROR: permission denied for table priv_test_tbl -- Attempt to gain access using a leaky operator CREATE FUNCTION op_leak(int, int) RETURNS bool AS 'BEGIN RAISE NOTICE ''op_leak => %, %'', $1, $2; RETURN $1 < $2; END' @@ -3231,10 +3235,53 @@ SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not le (0 rows) DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak +-- privilege checks for pg_stats_ext and pg_stats_ext_exprs +RESET SESSION AUTHORIZATION; +CREATE TABLE stats_ext_tbl (id INT PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY, col TEXT); +INSERT INTO stats_ext_tbl (col) VALUES ('secret'), ('secret'), ('very secret'); +CREATE STATISTICS s_col ON id, col FROM stats_ext_tbl; +CREATE STATISTICS s_expr ON mod(id, 2), lower(col) FROM stats_ext_tbl; +ANALYZE stats_ext_tbl; +-- unprivileged role should not have access +SET SESSION AUTHORIZATION regress_stats_user1; +SELECT statistics_name, most_common_vals FROM pg_stats_ext x + WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*); + statistics_name | most_common_vals +-----------------+------------------ +(0 rows) + +SELECT statistics_name, most_common_vals FROM pg_stats_ext_exprs x + WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*); + statistics_name | most_common_vals +-----------------+------------------ +(0 rows) + +-- give unprivileged role ownership of table +RESET SESSION AUTHORIZATION; +ALTER TABLE stats_ext_tbl OWNER TO regress_stats_user1; +-- unprivileged role should now have access +SET SESSION AUTHORIZATION regress_stats_user1; +SELECT statistics_name, most_common_vals FROM pg_stats_ext x + WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*); + statistics_name | most_common_vals 
+-----------------+------------------------------------------- + s_col | {{1,secret},{2,secret},{3,"very secret"}} + s_expr | {{0,secret},{1,secret},{1,"very secret"}} +(2 rows) + +SELECT statistics_name, most_common_vals FROM pg_stats_ext_exprs x + WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*); + statistics_name | most_common_vals +-----------------+------------------ + s_expr | {secret} + s_expr | {1} +(2 rows) + -- Tidy up DROP OPERATOR <<< (int, int); DROP FUNCTION op_leak(int, int); RESET SESSION AUTHORIZATION; +DROP TABLE stats_ext_tbl; DROP SCHEMA tststats CASCADE; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to table tststats.priv_test_tbl diff --git a/src/test/regress/expected/stats_ext_optimizer.out b/src/test/regress/expected/stats_ext_optimizer.out index d19caa775d1..dafbf0a28b4 100644 --- a/src/test/regress/expected/stats_ext_optimizer.out +++ b/src/test/regress/expected/stats_ext_optimizer.out @@ -3231,6 +3231,10 @@ GRANT USAGE ON SCHEMA tststats TO regress_stats_user1; SET SESSION AUTHORIZATION regress_stats_user1; SELECT * FROM tststats.priv_test_tbl; -- Permission denied ERROR: permission denied for table priv_test_tbl +-- Check individual columns if we don't have table privilege +SELECT * FROM tststats.priv_test_tbl + WHERE a = 1 and tststats.priv_test_tbl.* > (1, 1) is not null; +ERROR: permission denied for table priv_test_tbl -- Attempt to gain access using a leaky operator CREATE FUNCTION op_leak(int, int) RETURNS bool AS 'BEGIN RAISE NOTICE ''op_leak => %, %'', $1, $2; RETURN $1 < $2; END' @@ -3266,10 +3270,53 @@ SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not le (0 rows) DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak +-- privilege checks for pg_stats_ext and pg_stats_ext_exprs +RESET SESSION AUTHORIZATION; +CREATE TABLE stats_ext_tbl (id INT PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY, col TEXT); +INSERT INTO stats_ext_tbl (col) VALUES ('secret'), 
('secret'), ('very secret'); +CREATE STATISTICS s_col ON id, col FROM stats_ext_tbl; +CREATE STATISTICS s_expr ON mod(id, 2), lower(col) FROM stats_ext_tbl; +ANALYZE stats_ext_tbl; +-- unprivileged role should not have access +SET SESSION AUTHORIZATION regress_stats_user1; +SELECT statistics_name, most_common_vals FROM pg_stats_ext x + WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*); + statistics_name | most_common_vals +-----------------+------------------ +(0 rows) + +SELECT statistics_name, most_common_vals FROM pg_stats_ext_exprs x + WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*); + statistics_name | most_common_vals +-----------------+------------------ +(0 rows) + +-- give unprivileged role ownership of table +RESET SESSION AUTHORIZATION; +ALTER TABLE stats_ext_tbl OWNER TO regress_stats_user1; +-- unprivileged role should now have access +SET SESSION AUTHORIZATION regress_stats_user1; +SELECT statistics_name, most_common_vals FROM pg_stats_ext x + WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*); + statistics_name | most_common_vals +-----------------+------------------------------------------- + s_col | {{1,secret},{2,secret},{3,"very secret"}} + s_expr | {{0,secret},{1,secret},{1,"very secret"}} +(2 rows) + +SELECT statistics_name, most_common_vals FROM pg_stats_ext_exprs x + WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*); + statistics_name | most_common_vals +-----------------+------------------ + s_expr | {secret} + s_expr | {1} +(2 rows) + -- Tidy up DROP OPERATOR <<< (int, int); DROP FUNCTION op_leak(int, int); RESET SESSION AUTHORIZATION; +DROP TABLE stats_ext_tbl; DROP SCHEMA tststats CASCADE; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to table tststats.priv_test_tbl diff --git a/src/test/regress/expected/test_setup.out b/src/test/regress/expected/test_setup.out new file mode 100644 index 00000000000..c1cb724ef37 --- /dev/null +++ b/src/test/regress/expected/test_setup.out @@ -0,0 +1,5 @@ +-- +-- TEST_SETUP --- 
prepare environment expected by regression test scripts +-- +CREATE ROLE mdb_admin; +CREATE ROLE mdb_superuser; diff --git a/src/test/regress/init_file b/src/test/regress/init_file index f63ba979c0b..31ec253f592 100644 --- a/src/test/regress/init_file +++ b/src/test/regress/init_file @@ -134,8 +134,14 @@ s/ERROR: FIPS enabled OpenSSL is required for strict FIPS mode .*/ERROR: FIPS # Mask out OpenSSL behavior change in different version m/ERROR: Cannot use "md5": No such hash algorithm/ s/ERROR: Cannot use "md5": No such hash algorithm/ERROR: Cannot use "md5": / -m/ERROR: Cannot use "md5": Some PX error \(not specified\)/ -s/ERROR: Cannot use "md5": Some PX error \(not specified\)/ERROR: Cannot use "md5": / +m/ERROR: Cannot use "md5": Some PX error \(not specified\)/ +s/ERROR: Cannot use "md5": Some PX error \(not specified\)/ERROR: Cannot use "md5": / + +# Mask out FIPS error line numbers +m/ERROR: requested functionality not allowed in FIPS mode \(pgcrypto.c:\d+\)/ +s/ERROR: requested functionality not allowed in FIPS mode \(pgcrypto.c:\d+\)/ERROR: requested functionality not allowed in FIPS mode (pgcrypto.c:XXX)/ +m/ERROR: requested functionality not allowed in FIPS mode \(openssl.c:\d+\)/ +s/ERROR: requested functionality not allowed in FIPS mode \(openssl.c:\d+\)/ERROR: requested functionality not allowed in FIPS mode (openssl.c:XXX)/ # Mask out gp_execution_segment() m/One-Time Filter: \(gp_execution_segment\(\) = \d+/ diff --git a/src/test/regress/input/misc.source b/src/test/regress/input/misc.source index 331499a2aba..2abe2c82eb8 100644 --- a/src/test/regress/input/misc.source +++ b/src/test/regress/input/misc.source @@ -264,3 +264,8 @@ SELECT *, (equipment(CAST((h.*) AS hobbies_r))).name FROM hobbies_r h; -- -- rewrite rules -- + + +--- mdb-related + +SELECT mdb_locale_enabled(); diff --git a/src/test/regress/output/misc.source b/src/test/regress/output/misc.source index 18bcc227f0a..f2f7c0dee32 100644 --- a/src/test/regress/output/misc.source +++ 
b/src/test/regress/output/misc.source @@ -609,3 +609,10 @@ CONTEXT: SQL function "equipment" during startup -- -- rewrite rules -- +--- mdb-related +SELECT mdb_locale_enabled(); + mdb_locale_enabled +-------------------- + f +(1 row) + diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule index e2df0208627..b2ed818f677 100644 --- a/src/test/regress/parallel_schedule +++ b/src/test/regress/parallel_schedule @@ -5,10 +5,18 @@ # this limits the number of connections needed to run the tests. # ---------- +# mdb admin simple checks +test: test_setup + # run tablespace by itself, and first, because it forces a checkpoint; # we'd prefer not to have checkpoints later in the tests because that # interferes with crash-recovery testing. test: tablespace + +test: mdb_admin + +test: mdb_superuser + # ---------- # The first group of parallel tests # ---------- diff --git a/src/test/regress/sql/create_view.sql b/src/test/regress/sql/create_view.sql index d8f44923945..2e7452ac9ea 100644 --- a/src/test/regress/sql/create_view.sql +++ b/src/test/regress/sql/create_view.sql @@ -533,9 +533,11 @@ begin; -- this perhaps should be rejected, but it isn't: alter table tt14t drop column f3; --- f3 is still in the view ... +-- column f3 is still in the view, sort of ... select pg_get_viewdef('tt14v', true); --- but will fail at execution +-- ... and you can even EXPLAIN it ... 
+explain (verbose, costs off) select * from tt14v; +-- but it will fail at execution select f1, f4 from tt14v; select * from tt14v; diff --git a/src/test/regress/sql/mdb_admin.sql b/src/test/regress/sql/mdb_admin.sql new file mode 100644 index 00000000000..b6b048e5692 --- /dev/null +++ b/src/test/regress/sql/mdb_admin.sql @@ -0,0 +1,87 @@ +CREATE ROLE regress_mdb_admin_user1; +CREATE ROLE regress_mdb_admin_user2; +CREATE ROLE regress_mdb_admin_user3; + +CREATE ROLE regress_superuser WITH SUPERUSER; + +GRANT mdb_admin TO regress_mdb_admin_user1; +GRANT CREATE ON DATABASE regression TO regress_mdb_admin_user2; +GRANT CREATE ON DATABASE regression TO regress_mdb_admin_user3; + +-- mdb admin trasfers ownership to another role + +SET ROLE regress_mdb_admin_user2; +CREATE FUNCTION regress_mdb_admin_add(integer, integer) RETURNS integer + AS 'SELECT $1 + $2;' + LANGUAGE SQL + IMMUTABLE + RETURNS NULL ON NULL INPUT; + +CREATE SCHEMA regress_mdb_admin_schema; +GRANT CREATE ON SCHEMA regress_mdb_admin_schema TO regress_mdb_admin_user3; +CREATE TABLE regress_mdb_admin_schema.regress_mdb_admin_table(); +CREATE TABLE regress_mdb_admin_table(); +CREATE VIEW regress_mdb_admin_view as SELECT 1; +SET ROLE regress_mdb_admin_user1; + +ALTER FUNCTION regress_mdb_admin_add (integer, integer) OWNER TO regress_mdb_admin_user3; +ALTER VIEW regress_mdb_admin_view OWNER TO regress_mdb_admin_user3; +ALTER TABLE regress_mdb_admin_schema.regress_mdb_admin_table OWNER TO regress_mdb_admin_user3; +ALTER TABLE regress_mdb_admin_table OWNER TO regress_mdb_admin_user3; +ALTER SCHEMA regress_mdb_admin_schema OWNER TO regress_mdb_admin_user3; + + +-- mdb admin fails to transfer ownership to superusers and particular system roles + +ALTER FUNCTION regress_mdb_admin_add (integer, integer) OWNER TO regress_superuser; +ALTER VIEW regress_mdb_admin_view OWNER TO regress_superuser; +ALTER TABLE regress_mdb_admin_schema.regress_mdb_admin_table OWNER TO regress_superuser; +ALTER TABLE regress_mdb_admin_table 
OWNER TO regress_superuser; +ALTER SCHEMA regress_mdb_admin_schema OWNER TO regress_superuser; + +ALTER FUNCTION regress_mdb_admin_add (integer, integer) OWNER TO pg_execute_server_program; +ALTER VIEW regress_mdb_admin_view OWNER TO pg_execute_server_program; +ALTER TABLE regress_mdb_admin_schema.regress_mdb_admin_table OWNER TO pg_execute_server_program; +ALTER TABLE regress_mdb_admin_table OWNER TO pg_execute_server_program; +ALTER SCHEMA regress_mdb_admin_schema OWNER TO pg_execute_server_program; + +ALTER FUNCTION regress_mdb_admin_add (integer, integer) OWNER TO pg_write_server_files; +ALTER VIEW regress_mdb_admin_view OWNER TO pg_write_server_files; +ALTER TABLE regress_mdb_admin_schema.regress_mdb_admin_table OWNER TO pg_write_server_files; +ALTER TABLE regress_mdb_admin_table OWNER TO pg_write_server_files; +ALTER SCHEMA regress_mdb_admin_schema OWNER TO pg_write_server_files; + +ALTER FUNCTION regress_mdb_admin_add (integer, integer) OWNER TO pg_read_server_files; +ALTER VIEW regress_mdb_admin_view OWNER TO pg_read_server_files; +ALTER TABLE regress_mdb_admin_schema.regress_mdb_admin_table OWNER TO pg_read_server_files; +ALTER TABLE regress_mdb_admin_table OWNER TO pg_read_server_files; +ALTER SCHEMA regress_mdb_admin_schema OWNER TO pg_read_server_files; + +ALTER FUNCTION regress_mdb_admin_add (integer, integer) OWNER TO pg_write_all_data; +ALTER VIEW regress_mdb_admin_view OWNER TO pg_write_all_data; +ALTER TABLE regress_mdb_admin_schema.regress_mdb_admin_table OWNER TO pg_write_all_data; +ALTER TABLE regress_mdb_admin_table OWNER TO pg_write_all_data; +ALTER SCHEMA regress_mdb_admin_schema OWNER TO pg_write_all_data; + +ALTER FUNCTION regress_mdb_admin_add (integer, integer) OWNER TO pg_read_all_data; +ALTER VIEW regress_mdb_admin_view OWNER TO pg_read_all_data; +ALTER TABLE regress_mdb_admin_schema.regress_mdb_admin_table OWNER TO pg_read_all_data; +ALTER TABLE regress_mdb_admin_table OWNER TO pg_read_all_data; +ALTER SCHEMA regress_mdb_admin_schema 
OWNER TO pg_read_all_data; + +-- end tests + +RESET SESSION AUTHORIZATION; +-- +REVOKE CREATE ON DATABASE regression FROM regress_mdb_admin_user2; +REVOKE CREATE ON DATABASE regression FROM regress_mdb_admin_user3; + +DROP VIEW regress_mdb_admin_view; +DROP FUNCTION regress_mdb_admin_add; +DROP TABLE regress_mdb_admin_schema.regress_mdb_admin_table; +DROP TABLE regress_mdb_admin_table; +DROP SCHEMA regress_mdb_admin_schema; +DROP ROLE regress_mdb_admin_user1; +DROP ROLE regress_mdb_admin_user2; +DROP ROLE regress_mdb_admin_user3; +DROP ROLE regress_superuser; diff --git a/src/test/regress/sql/mdb_superuser.sql b/src/test/regress/sql/mdb_superuser.sql new file mode 100644 index 00000000000..f96338f3aec --- /dev/null +++ b/src/test/regress/sql/mdb_superuser.sql @@ -0,0 +1,144 @@ +CREATE ROLE regress_mdb_superuser_user1; +CREATE ROLE regress_mdb_superuser_user2; +CREATE ROLE regress_mdb_superuser_user3; + +GRANT mdb_admin TO mdb_superuser; + +CREATE ROLE regress_superuser WITH SUPERUSER; + +GRANT mdb_superuser TO regress_mdb_superuser_user1; + +GRANT CREATE ON DATABASE regression TO regress_mdb_superuser_user2; +GRANT CREATE ON DATABASE regression TO regress_mdb_superuser_user3; + + +SET ROLE regress_mdb_superuser_user2; + +CREATE FUNCTION regress_mdb_superuser_add(integer, integer) RETURNS integer + AS 'SELECT $1 + $2;' + LANGUAGE SQL + IMMUTABLE + RETURNS NULL ON NULL INPUT; + +CREATE SCHEMA regress_mdb_superuser_schema; +CREATE TABLE regress_mdb_superuser_schema.regress_mdb_superuser_table(); +CREATE TABLE regress_mdb_superuser_table(); +CREATE VIEW regress_mdb_superuser_view as SELECT 1; + +SET ROLE regress_mdb_superuser_user3; +INSERT INTO regress_mdb_superuser_table SELECT * FROM regress_mdb_superuser_table; + +SET ROLE regress_mdb_superuser_user1; + +-- mdb_superuser can grant to other role +GRANT USAGE, CREATE ON SCHEMA regress_mdb_superuser_schema TO regress_mdb_superuser_user3; +GRANT ALL PRIVILEGES ON TABLE regress_mdb_superuser_table TO 
regress_mdb_superuser_user3; +REVOKE ALL PRIVILEGES ON TABLE regress_mdb_superuser_table FROM regress_mdb_superuser_user3; + +GRANT INSERT, SELECT ON TABLE regress_mdb_superuser_table TO regress_mdb_superuser_user3; + +-- grant works +SET ROLE regress_mdb_superuser_user3; +INSERT INTO regress_mdb_superuser_table SELECT * FROM regress_mdb_superuser_table; + +SET ROLE mdb_superuser; + +-- mdb_superuser drop object of other role +DROP TABLE regress_mdb_superuser_table; +-- mdb admin fails to transfer ownership to superusers and system roles + +RESET SESSION AUTHORIZATION; + +CREATE TABLE regress_superuser_table(); + +SET ROLE pg_read_server_files; + +CREATE TABLE regress_pgrsf_table(); + +SET ROLE pg_write_server_files; + +CREATE TABLE regress_pgwsf_table(); + +SET ROLE pg_execute_server_program; + +CREATE TABLE regress_pgxsp_table(); + +SET ROLE pg_read_all_data; + +CREATE TABLE regress_pgrad_table(); + +SET ROLE pg_write_all_data; + +CREATE TABLE regress_pgrwd_table(); + +SET ROLE mdb_superuser; + +-- cannot read all data (fail) +SELECT * FROM pg_authid; + +-- can not drop superuser objects, because does not has_privs_of pg_database_owner +DROP TABLE regress_superuser_table; +DROP TABLE regress_pgrsf_table; +DROP TABLE regress_pgwsf_table; +DROP TABLE regress_pgxsp_table; +DROP TABLE regress_pgrad_table; +DROP TABLE regress_pgrwd_table; + + +-- does allowed to creare database, role or extension +-- or grant such priviledge + +CREATE DATABASE regress_db_fail; +CREATE ROLE regress_role_fail; + +ALTER ROLE mdb_superuser WITH CREATEROLE; +ALTER ROLE mdb_superuser WITH CREATEDB; + +ALTER ROLE regress_mdb_superuser_user2 WITH CREATEROLE; +ALTER ROLE regress_mdb_superuser_user2 WITH CREATEDB; + +-- mdb_superuser more powerfull than pg_database_owner + +RESET SESSION AUTHORIZATION; +CREATE DATABASE regress_check_owner OWNER regress_mdb_superuser_user2; + +\c regress_check_owner; + +SET ROLE regress_mdb_superuser_user2; +CREATE SCHEMA regtest; +CREATE TABLE 
regtest.regtest(); + +-- this should fail + +SET ROLE regress_mdb_superuser_user3; +GRANT ALL ON TABLE regtest.regtest TO regress_mdb_superuser_user3; +ALTER TABLE regtest.regtest OWNER TO regress_mdb_superuser_user3; + +SET ROLE regress_mdb_superuser_user1; +GRANT ALL ON TABLE regtest.regtest TO regress_mdb_superuser_user1; +ALTER TABLE regtest.regtest OWNER TO regress_mdb_superuser_user1; + +\c regression +DROP DATABASE regress_check_owner; + +-- end tests + +RESET SESSION AUTHORIZATION; +-- +REVOKE CREATE ON DATABASE regression FROM regress_mdb_superuser_user2; +REVOKE CREATE ON DATABASE regression FROM regress_mdb_superuser_user3; + +DROP VIEW regress_mdb_superuser_view; +DROP FUNCTION regress_mdb_superuser_add; +DROP TABLE regress_mdb_superuser_schema.regress_mdb_superuser_table; +DROP TABLE regress_mdb_superuser_table; +DROP SCHEMA regress_mdb_superuser_schema; +DROP ROLE regress_mdb_superuser_user1; +DROP ROLE regress_mdb_superuser_user2; +DROP ROLE regress_mdb_superuser_user3; +DROP TABLE regress_superuser_table; +DROP TABLE regress_pgrsf_table; +DROP TABLE regress_pgwsf_table; +DROP TABLE regress_pgxsp_table; +DROP TABLE regress_pgrad_table; +DROP TABLE regress_pgrwd_table; diff --git a/src/test/regress/sql/misc.sql b/src/test/regress/sql/misc.sql new file mode 100644 index 00000000000..2abe2c82eb8 --- /dev/null +++ b/src/test/regress/sql/misc.sql @@ -0,0 +1,271 @@ +-- +-- MISC +-- + +-- +-- BTREE +-- +--UPDATE onek +-- SET unique1 = onek.unique1 + 1; + +--UPDATE onek +-- SET unique1 = onek.unique1 - 1; + +-- +-- BTREE partial +-- +-- UPDATE onek2 +-- SET unique1 = onek2.unique1 + 1; + +--UPDATE onek2 +-- SET unique1 = onek2.unique1 - 1; + +-- +-- BTREE shutting out non-functional updates +-- +-- the following two tests seem to take a long time on some +-- systems. This non-func update stuff needs to be examined +-- more closely. 
- jolly (2/22/96) +-- +/* GPDB TODO: This test is disabled for now, because when running with ORCA, + you get an error: + ERROR: multiple updates to a row by the same query is not allowed +UPDATE tmp + SET stringu1 = reverse_name(onek.stringu1) + FROM onek + WHERE onek.stringu1 = 'JBAAAA' and + onek.stringu1 = tmp.stringu1; + +UPDATE tmp + SET stringu1 = reverse_name(onek2.stringu1) + FROM onek2 + WHERE onek2.stringu1 = 'JCAAAA' and + onek2.stringu1 = tmp.stringu1; +*/ + +DROP TABLE tmp; + +--UPDATE person* +-- SET age = age + 1; + +--UPDATE person* +-- SET age = age + 3 +-- WHERE name = 'linda'; + +-- +-- copy +-- +COPY onek TO '@abs_builddir@/results/onek.data'; + +DELETE FROM onek; + +COPY onek FROM '@abs_builddir@/results/onek.data'; + +SELECT unique1 FROM onek WHERE unique1 < 2 ORDER BY unique1; + +DELETE FROM onek2; + +COPY onek2 FROM '@abs_builddir@/results/onek.data'; + +SELECT unique1 FROM onek2 WHERE unique1 < 2 ORDER BY unique1; + +COPY BINARY stud_emp TO '@abs_builddir@/results/stud_emp.data'; + +DELETE FROM stud_emp; + +COPY BINARY stud_emp FROM '@abs_builddir@/results/stud_emp.data'; + +SELECT * FROM stud_emp; + +-- COPY aggtest FROM stdin; +-- 56 7.8 +-- 100 99.097 +-- 0 0.09561 +-- 42 324.78 +-- . +-- COPY aggtest TO stdout; + + +-- +-- inheritance stress test +-- +SELECT * FROM a_star*; + +SELECT * + FROM b_star* x + WHERE x.b = text 'bumble' or x.a < 3; + +SELECT class, a + FROM c_star* x + WHERE x.c ~ text 'hi'; + +SELECT class, b, c + FROM d_star* x + WHERE x.a < 100; + +SELECT class, c FROM e_star* x WHERE x.c NOTNULL; + +SELECT * FROM f_star* x WHERE x.c ISNULL; + +-- grouping and aggregation on inherited sets have been busted in the past... 
+ +SELECT sum(a) FROM a_star*; + +SELECT class, sum(a) FROM a_star* GROUP BY class ORDER BY class; + + +ALTER TABLE f_star RENAME COLUMN f TO ff; + +ALTER TABLE e_star* RENAME COLUMN e TO ee; + +ALTER TABLE d_star* RENAME COLUMN d TO dd; + +ALTER TABLE c_star* RENAME COLUMN c TO cc; + +ALTER TABLE b_star* RENAME COLUMN b TO bb; + +ALTER TABLE a_star* RENAME COLUMN a TO aa; + +SELECT class, aa + FROM a_star* x + WHERE aa ISNULL; + +-- As of Postgres 7.1, ALTER implicitly recurses, +-- so this should be same as ALTER a_star* + +ALTER TABLE a_star RENAME COLUMN aa TO foo; + +SELECT class, foo + FROM a_star* x + WHERE x.foo >= 2; + +ALTER TABLE a_star RENAME COLUMN foo TO aa; + +SELECT * + from a_star* + WHERE aa < 1000; + +ALTER TABLE f_star ADD COLUMN f int4; + +UPDATE f_star SET f = 10; + +ALTER TABLE e_star* ADD COLUMN e int4; + +--UPDATE e_star* SET e = 42; + +SELECT * FROM e_star*; + +ALTER TABLE a_star* ADD COLUMN a text; + +-- That ALTER TABLE should have added TOAST tables. +SELECT relname, reltoastrelid <> 0 AS has_toast_table + FROM pg_class + WHERE oid::regclass IN ('a_star', 'c_star') + ORDER BY 1; + +--UPDATE b_star* +-- SET a = text 'gazpacho' +-- WHERE aa > 4; + +SELECT class, aa, a FROM a_star*; + + +-- +-- versions +-- + +-- +-- postquel functions +-- +-- +-- mike does post_hacking, +-- joe and sally play basketball, and +-- everyone else does nothing. +-- +SELECT p.name, name(p.hobbies) FROM ONLY person p; + +-- +-- as above, but jeff also does post_hacking. +-- +SELECT p.name, name(p.hobbies) FROM person* p; + +-- +-- the next two queries demonstrate how functions generate bogus duplicates. +-- this is a "feature" .. +-- +SELECT DISTINCT hobbies_r.name, name(hobbies_r.equipment) FROM hobbies_r + ORDER BY 1,2; + +SELECT hobbies_r.name, (hobbies_r.equipment).name FROM hobbies_r; + +-- +-- mike needs advil and peet's coffee, +-- joe and sally need hightops, and +-- everyone else is fine. 
+-- +SELECT p.name, name(p.hobbies), name(equipment(p.hobbies)) FROM ONLY person p; + +-- +-- as above, but jeff needs advil and peet's coffee as well. +-- +SELECT p.name, name(p.hobbies), name(equipment(p.hobbies)) FROM person* p; + +-- +-- just like the last two, but make sure that the target list fixup and +-- unflattening is being done correctly. +-- +SELECT name(equipment(p.hobbies)), p.name, name(p.hobbies) FROM ONLY person p; + +SELECT (p.hobbies).equipment.name, p.name, name(p.hobbies) FROM person* p; + +SELECT (p.hobbies).equipment.name, name(p.hobbies), p.name FROM ONLY person p; + +SELECT name(equipment(p.hobbies)), name(p.hobbies), p.name FROM person* p; + +SELECT name(equipment(hobby_construct(text 'skywalking', text 'mer'))); + +SELECT name(equipment(hobby_construct_named(text 'skywalking', text 'mer'))); + +SELECT name(equipment_named(hobby_construct_named(text 'skywalking', text 'mer'))); + +SELECT name(equipment_named_ambiguous_1a(hobby_construct_named(text 'skywalking', text 'mer'))); + +SELECT name(equipment_named_ambiguous_1b(hobby_construct_named(text 'skywalking', text 'mer'))); + +SELECT name(equipment_named_ambiguous_1c(hobby_construct_named(text 'skywalking', text 'mer'))); + +SELECT name(equipment_named_ambiguous_2a(text 'skywalking')); + +SELECT name(equipment_named_ambiguous_2b(text 'skywalking')); + +SELECT hobbies_by_name('basketball'); + +SELECT name, overpaid(emp.*) FROM emp; + +-- +-- Try a few cases with SQL-spec row constructor expressions +-- +SELECT * FROM equipment(ROW('skywalking', 'mer')); + +SELECT name(equipment(ROW('skywalking', 'mer'))); + +SELECT *, name(equipment(h.*)) FROM hobbies_r h; + +SELECT *, (equipment(CAST((h.*) AS hobbies_r))).name FROM hobbies_r h; + +-- +-- functional joins +-- + +-- +-- instance rules +-- + +-- +-- rewrite rules +-- + + +--- mdb-related + +SELECT mdb_locale_enabled(); diff --git a/src/test/regress/sql/motion_socket.sql b/src/test/regress/sql/motion_socket.sql index 6d1a08973e0..be5cd21b1cd 
100644 --- a/src/test/regress/sql/motion_socket.sql +++ b/src/test/regress/sql/motion_socket.sql @@ -50,7 +50,7 @@ for pid in pids_to_check: # We check count of those connections which have not been established. # Use the regex for example: "TCP :\d+ .*" (without '->') lsof_ret = subprocess.run(["lsof", "-i", "-nP", "-a", "-p", str(pid)], - capture_output=True, check=True).stdout + stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True).stdout plpy.info( f'Checking postgres backend {pid}, ' \ f'lsof output:\n{os.linesep.join(map(str, lsof_ret.splitlines()))}') diff --git a/src/test/regress/sql/privileges.sql b/src/test/regress/sql/privileges.sql index 02300dd5b7f..2e6242a379a 100644 --- a/src/test/regress/sql/privileges.sql +++ b/src/test/regress/sql/privileges.sql @@ -1208,6 +1208,21 @@ TABLE information_schema.enabled_roles; INSERT INTO datdba_only DEFAULT VALUES; ROLLBACK; +-- pg_signal_backend can't signal superusers +RESET SESSION AUTHORIZATION; +BEGIN; +CREATE OR REPLACE FUNCTION terminate_nothrow(pid int) RETURNS bool + LANGUAGE plpgsql SECURITY DEFINER SET client_min_messages = error AS $$ +BEGIN + RETURN pg_terminate_backend($1); +EXCEPTION WHEN OTHERS THEN + RETURN false; +END$$; +ALTER FUNCTION terminate_nothrow OWNER TO pg_signal_backend; +SELECT backend_type FROM pg_stat_activity +WHERE CASE WHEN COALESCE(usesysid, 10) = 10 THEN terminate_nothrow(pid) END; +ROLLBACK; + -- test default ACLs \c - diff --git a/src/test/regress/sql/stats_ext.sql b/src/test/regress/sql/stats_ext.sql index 91edd3a5bba..744bb00c161 100644 --- a/src/test/regress/sql/stats_ext.sql +++ b/src/test/regress/sql/stats_ext.sql @@ -1615,6 +1615,10 @@ GRANT USAGE ON SCHEMA tststats TO regress_stats_user1; SET SESSION AUTHORIZATION regress_stats_user1; SELECT * FROM tststats.priv_test_tbl; -- Permission denied +-- Check individual columns if we don't have table privilege +SELECT * FROM tststats.priv_test_tbl + WHERE a = 1 and tststats.priv_test_tbl.* > (1, 1) is not null; + -- 
Attempt to gain access using a leaky operator CREATE FUNCTION op_leak(int, int) RETURNS bool AS 'BEGIN RAISE NOTICE ''op_leak => %, %'', $1, $2; RETURN $1 < $2; END' @@ -1645,10 +1649,37 @@ SET SESSION AUTHORIZATION regress_stats_user1; SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak +-- privilege checks for pg_stats_ext and pg_stats_ext_exprs +RESET SESSION AUTHORIZATION; +CREATE TABLE stats_ext_tbl (id INT PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY, col TEXT); +INSERT INTO stats_ext_tbl (col) VALUES ('secret'), ('secret'), ('very secret'); +CREATE STATISTICS s_col ON id, col FROM stats_ext_tbl; +CREATE STATISTICS s_expr ON mod(id, 2), lower(col) FROM stats_ext_tbl; +ANALYZE stats_ext_tbl; + +-- unprivileged role should not have access +SET SESSION AUTHORIZATION regress_stats_user1; +SELECT statistics_name, most_common_vals FROM pg_stats_ext x + WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*); +SELECT statistics_name, most_common_vals FROM pg_stats_ext_exprs x + WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*); + +-- give unprivileged role ownership of table +RESET SESSION AUTHORIZATION; +ALTER TABLE stats_ext_tbl OWNER TO regress_stats_user1; + +-- unprivileged role should now have access +SET SESSION AUTHORIZATION regress_stats_user1; +SELECT statistics_name, most_common_vals FROM pg_stats_ext x + WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*); +SELECT statistics_name, most_common_vals FROM pg_stats_ext_exprs x + WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*); + -- Tidy up DROP OPERATOR <<< (int, int); DROP FUNCTION op_leak(int, int); RESET SESSION AUTHORIZATION; +DROP TABLE stats_ext_tbl; DROP SCHEMA tststats CASCADE; DROP USER regress_stats_user1; diff --git a/src/test/regress/sql/test_setup.sql b/src/test/regress/sql/test_setup.sql new file mode 100644 index 00000000000..7ec5ccc7471 --- /dev/null +++ 
b/src/test/regress/sql/test_setup.sql @@ -0,0 +1,6 @@ +-- +-- TEST_SETUP --- prepare environment expected by regression test scripts +-- + +CREATE ROLE mdb_admin; +CREATE ROLE mdb_superuser; diff --git a/src/test/singlenode_regress/expected/create_function_3.out b/src/test/singlenode_regress/expected/create_function_3.out index 3a4fd451471..6423fdb7965 100644 --- a/src/test/singlenode_regress/expected/create_function_3.out +++ b/src/test/singlenode_regress/expected/create_function_3.out @@ -166,10 +166,10 @@ SET SESSION AUTHORIZATION regress_unpriv_user; SET search_path TO temp_func_test, public; ALTER FUNCTION functest_E_1(int) NOT LEAKPROOF; ALTER FUNCTION functest_E_2(int) LEAKPROOF; -ERROR: only superuser can define a leakproof function +ERROR: only superuser or mdb_admin can define a leakproof function CREATE FUNCTION functest_E_3(int) RETURNS bool LANGUAGE 'sql' LEAKPROOF AS 'SELECT $1 < 200'; -- fail -ERROR: only superuser can define a leakproof function +ERROR: only superuser or mdb_admin can define a leakproof function RESET SESSION AUTHORIZATION; -- -- CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT diff --git a/src/test/singlenode_regress/expected/create_view.out b/src/test/singlenode_regress/expected/create_view.out index fdc9294f1f2..e70f8e788f3 100644 --- a/src/test/singlenode_regress/expected/create_view.out +++ b/src/test/singlenode_regress/expected/create_view.out @@ -1551,17 +1551,26 @@ select * from tt14v; begin; -- this perhaps should be rejected, but it isn't: alter table tt14t drop column f3; --- f3 is still in the view ... +-- column f3 is still in the view, sort of ... select pg_get_viewdef('tt14v', true); - pg_get_viewdef --------------------------------- - SELECT t.f1, + - t.f3, + - t.f4 + - FROM tt14f() t(f1, f3, f4); + pg_get_viewdef +--------------------------------- + SELECT t.f1, + + t."?dropped?column?" AS f3,+ + t.f4 + + FROM tt14f() t(f1, f4); (1 row) --- but will fail at execution +-- ... and you can even EXPLAIN it ... 
+explain (verbose, costs off) select * from tt14v; + QUERY PLAN +---------------------------------------- + Function Scan on testviewschm2.tt14f t + Output: t.f1, t.f3, t.f4 + Function Call: tt14f() +(3 rows) + +-- but it will fail at execution select f1, f4 from tt14v; f1 | f4 -----+---- diff --git a/src/test/singlenode_regress/sql/create_view.sql b/src/test/singlenode_regress/sql/create_view.sql index e1b013fe7a5..a47f81613c7 100644 --- a/src/test/singlenode_regress/sql/create_view.sql +++ b/src/test/singlenode_regress/sql/create_view.sql @@ -533,9 +533,11 @@ begin; -- this perhaps should be rejected, but it isn't: alter table tt14t drop column f3; --- f3 is still in the view ... +-- column f3 is still in the view, sort of ... select pg_get_viewdef('tt14v', true); --- but will fail at execution +-- ... and you can even EXPLAIN it ... +explain (verbose, costs off) select * from tt14v; +-- but it will fail at execution select f1, f4 from tt14v; select * from tt14v; diff --git a/src/test/subscription/t/013_partition.pl b/src/test/subscription/t/013_partition.pl index e53bc5b568f..dfe2cb6deae 100644 --- a/src/test/subscription/t/013_partition.pl +++ b/src/test/subscription/t/013_partition.pl @@ -6,7 +6,7 @@ use warnings; use PostgresNode; use TestLib; -use Test::More tests => 69; +use Test::More tests => 71; # setup @@ -841,3 +841,32 @@ BEGIN $result = $node_subscriber2->safe_psql('postgres', "SELECT a, b, c FROM tab5 ORDER BY 1"); is($result, qq(3|1|), 'updates of tab5 replicated correctly after altering table on subscriber'); + +# Test that replication into the partitioned target table continues to +# work correctly when the published table is altered. 
+$node_publisher->safe_psql( + 'postgres', q{ + ALTER TABLE tab5 DROP COLUMN b, ADD COLUMN c INT; + ALTER TABLE tab5 ADD COLUMN b INT;}); + +$node_publisher->safe_psql('postgres', "UPDATE tab5 SET c = 1 WHERE a = 3"); + +$node_publisher->wait_for_catchup('sub2'); + +$result = $node_subscriber2->safe_psql('postgres', + "SELECT a, b, c FROM tab5 ORDER BY 1"); +is($result, qq(3||1), 'updates of tab5 replicated correctly after altering table on publisher'); + +# Test that replication works correctly as long as the leaf partition +# has the necessary REPLICA IDENTITY, even though the actual target +# partitioned table does not. +$node_subscriber2->safe_psql('postgres', + "ALTER TABLE tab5 REPLICA IDENTITY NOTHING"); + +$node_publisher->safe_psql('postgres', "UPDATE tab5 SET a = 4 WHERE a = 3"); + +$node_publisher->wait_for_catchup('sub2'); + +$result = $node_subscriber2->safe_psql('postgres', + "SELECT a, b, c FROM tab5_1 ORDER BY 1"); +is($result, qq(4||1), 'updates of tab5 replicated correctly');