diff --git a/.github/workflows/CI-legacy-g2-genai.yml b/.github/workflows/CI-legacy-g2-genai.yml new file mode 100644 index 0000000000..e39ac6221d --- /dev/null +++ b/.github/workflows/CI-legacy-g2-genai.yml @@ -0,0 +1,19 @@ +name: CI-legacy-g2-genai +run-name: '${{ github.event.workflow_run && github.event.workflow_run.head_branch || github.ref_name }} ${{ github.workflow }} ${{ github.event.workflow_run && github.event.workflow_run.head_sha || github.sha }}' + +on: + workflow_dispatch: + workflow_run: + workflows: [ CI-trigger ] + types: [ in_progress ] + +concurrency: + group: ${{ github.workflow }}-${{ github.event.workflow_run && github.event.workflow_run.head_branch || github.ref_name }} + cancel-in-progress: true + +jobs: + run: + uses: sysown/proxysql/.github/workflows/ci-legacy-g2-genai.yml@GH-Actions + secrets: inherit + with: + trigger: ${{ toJson(github) }} diff --git a/.github/workflows/CI-legacy-g2.yml b/.github/workflows/CI-legacy-g2.yml new file mode 100644 index 0000000000..c452dacb08 --- /dev/null +++ b/.github/workflows/CI-legacy-g2.yml @@ -0,0 +1,19 @@ +name: CI-legacy-g2 +run-name: '${{ github.event.workflow_run && github.event.workflow_run.head_branch || github.ref_name }} ${{ github.workflow }} ${{ github.event.workflow_run && github.event.workflow_run.head_sha || github.sha }}' + +on: + workflow_dispatch: + workflow_run: + workflows: [ CI-trigger ] + types: [ in_progress ] + +concurrency: + group: ${{ github.workflow }}-${{ github.event.workflow_run && github.event.workflow_run.head_branch || github.ref_name }} + cancel-in-progress: true + +jobs: + run: + uses: sysown/proxysql/.github/workflows/ci-legacy-g2.yml@GH-Actions + secrets: inherit + with: + trigger: ${{ toJson(github) }} diff --git a/doc/GH-Actions/README.md b/doc/GH-Actions/README.md new file mode 100644 index 0000000000..fc8a48efcb --- /dev/null +++ b/doc/GH-Actions/README.md @@ -0,0 +1,380 @@ +# GitHub Actions CI/CD Documentation + +## Architecture + +ProxySQL CI uses a 
**two-tier workflow architecture**: + +1. **Caller workflows** live on the main branches (e.g. `v3.0`). They are thin wrappers that trigger on events (push, PR, manual dispatch) and delegate to reusable workflows. +2. **Reusable workflows** live on the `GH-Actions` branch. They contain the actual build/test logic, matrix definitions, caching strategy, and infrastructure orchestration. + +This separation allows updating CI logic on the `GH-Actions` branch without touching the main codebase. + +### Execution Flow + +``` +Push/PR + └─► CI-trigger (on: push/pull_request) + │ + ├─[in_progress]─► CI-builds (3 matrix builds, caches artifacts) + │ ├── debian12-dbg + │ ├── ubuntu22-tap + │ └── ubuntu24-tap-genai-gcov + │ + ├─[in_progress]─► CI-legacy-g2 (ubuntu22-tap cache) + ├─[in_progress]─► CI-legacy-g2-genai (ubuntu24-tap-genai-gcov cache) + │ + ├─[completed]──► CI-selftests (ubuntu22-tap cache) + ├─[completed]──► CI-basictests (ubuntu22-tap cache) + ├─[completed]──► CI-maketest (ubuntu22-tap cache) + ├─[completed]──► CI-taptests-groups (ubuntu22-tap cache) [BROKEN: #5521] + ├─[completed]──► CI-taptests (ubuntu22-tap cache) [BROKEN: #5521] + ├─[completed]──► CI-taptests-ssl (ubuntu22-tap cache) [BROKEN: #5521] + ├─[completed]──► CI-taptests-asan (ubuntu22-tap cache) [BROKEN: #5521] + ├─[completed]──► CI-repltests (ubuntu22-tap cache) [BROKEN: #5521] + ├─[completed]──► CI-shuntest (ubuntu22-tap cache) [BROKEN: #5521] + ├─[completed]──► CI-codeql + ├─[completed]──► CI-package-build + │ + └─[completed]──► CI-3p-* (10 third-party integration workflows) +``` + +**Trigger types:** +- `in_progress` — fires as soon as CI-trigger starts. Used by CI-builds (which must run first) and workflows that wait on build caches. +- `completed` — fires after CI-trigger finishes. Used by test workflows that depend on CI-builds having already populated the cache. 
+ +### Concurrency + +Every workflow uses concurrency groups to prevent resource waste: + +```yaml +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name }} + cancel-in-progress: true +``` + +Only one run per workflow per branch is active at any time. A new push cancels the previous run. + +--- + +## CI-trigger + +| | | +|---|---| +| **File** | `CI-trigger.yml` → `ci-trigger.yml@GH-Actions` | +| **Triggers** | Push to version branches (`v[0-9].[0-9x]+...`), pull requests, manual dispatch | +| **Paths ignored** | `.github/**`, `**.md` | +| **Purpose** | Entry point for the entire CI pipeline. Does not do any work itself — its `in_progress` and `completed` events trigger all downstream workflows. | + +--- + +## CI-builds + +| | | +|---|---| +| **File** | `CI-builds.yml` → `ci-builds.yml@GH-Actions` | +| **Triggers** | `CI-trigger` `in_progress`, manual dispatch | +| **Runs on** | `ubuntu-24.04` | +| **Purpose** | Compiles ProxySQL in Docker containers and caches the artifacts for downstream test workflows. 
| + +### Build Matrix + +| Matrix Entry | Make Target | Build Flags | Use Case | +|---|---|---|---| +| `debian12-dbg` | `make debian12-dbg` | Debug (`-O0 -ggdb`) | 3rd-party integration testing | +| `ubuntu22-tap` | `make ubuntu22-dbg` | Debug + TAP test binaries | Standard TAP testing | +| `ubuntu24-tap-genai-gcov` | `make ubuntu24-dbg` | Debug + `PROXYSQLGENAI=1` + `WITHGCOV=1` | GenAI feature testing with code coverage | + +### Build Flag Injection + +Build flags are injected into `docker-compose.yml` environment before the build runs, using the same pattern for each: + +```yaml +# In the Build step: +if [[ "${{ matrix.type }}" =~ "-asan" ]]; then + sed -i "/command/i \ - WITHASAN=1" docker-compose.yml +fi +if [[ "${{ matrix.type }}" =~ "-genai" ]]; then + sed -i "/command/i \ - PROXYSQLGENAI=1" docker-compose.yml +fi +if [[ "${{ matrix.type }}" =~ "-gcov" ]]; then + sed -i "/command/i \ - WITHGCOV=1" docker-compose.yml +fi +``` + +### Cache Strategy + +Each build produces four cache entries keyed by `{SHA}_{dist}{type}`: + +| Cache Suffix | Contents | Used By | +|---|---|---| +| `_bin` | `.git/` + `binaries/` | Package builds | +| `_src` | `src/` (includes proxysql binary) | All test workflows | +| `_test` | `test/` (includes TAP test binaries) | TAP test workflows | +| `_matrix` | `tap-matrix*.json` files | TAP matrix workflows | + +Caches expire after 7 days of inactivity. + +--- + +## Test Workflows — In-Repo Infrastructure + +These workflows use the `test/infra/` orchestration system (no external dependencies). 
+ +### CI-selftests + +| | | +|---|---| +| **File** | `CI-selftests.yml` → `ci-selftests.yml@GH-Actions` | +| **Cache** | `ubuntu22-tap_src` | +| **Status** | Working | +| **Infrastructure** | None (runs ProxySQL directly on the runner) | +| **What it tests** | ProxySQL self-test commands (`PROXYSQLTEST 1-6`), core dumps, query digest stats | + +### CI-basictests + +| | | +|---|---| +| **File** | `CI-basictests.yml` → `ci-basictests.yml@GH-Actions` | +| **Cache** | `ubuntu22-tap_src` | +| **Status** | Working (migrated from jenkins-build-scripts, PR #5525) | +| **Infrastructure** | MySQL 5.7 (via `test/infra/infra-mysql57/`) | +| **TAP Group** | `basictests` | +| **What it tests** | Sysbench benchmark, COM_CHANGE_USER, orchestrator failover | +| **Env flags** | `SKIP_CLUSTER_START=1` | + +Uses `ensure-infras.bash` → `start-proxysql-isolated.bash` → `run-tests-isolated.bash` pipeline with the `basictests` TAP group. The `setup-infras.bash` hook aliases hostgroup pairs (e.g. 1300/1301 → 0/1) for backwards compatibility with `proxysql-tester.py`. + +### CI-legacy-g2 + +| | | +|---|---| +| **File** | `CI-legacy-g2.yml` → `ci-legacy-g2.yml@GH-Actions` | +| **Cache** | `ubuntu22-tap_src` + `ubuntu22-tap_test` | +| **Status** | New | +| **Infrastructure** | MySQL 5.7, MariaDB 10, PostgreSQL 16, ClickHouse 23 | +| **TAP Group** | `legacy-g2` (44 TAP tests) | +| **Env flags** | `SKIP_CLUSTER_START=1`, `TAP_USE_NOISE=1` | + +Runs the second batch of legacy TAP tests using the standard ubuntu22 build. Noise injection is enabled to help detect race conditions. 
+ +### CI-legacy-g2-genai + +| | | +|---|---| +| **File** | `CI-legacy-g2-genai.yml` → `ci-legacy-g2-genai.yml@GH-Actions` | +| **Cache** | `ubuntu24-tap-genai-gcov_src` + `ubuntu24-tap-genai-gcov_test` | +| **Status** | New | +| **Infrastructure** | MySQL 5.7, MariaDB 10, PostgreSQL 16, ClickHouse 23 | +| **TAP Group** | `legacy-g2` (44 TAP tests) | +| **Env flags** | `SKIP_CLUSTER_START=1`, `TAP_USE_NOISE=1`, `COVERAGE=1` | +| **Artifacts** | Coverage report uploaded as workflow artifact | + +Same tests as CI-legacy-g2 but runs against the GenAI build (`PROXYSQLGENAI=1`) with code coverage enabled (`WITHGCOV=1`). This validates that GenAI features don't break existing functionality. + +### CI-maketest + +| | | +|---|---| +| **File** | `CI-maketest.yml` → `ci-maketest.yml@GH-Actions` | +| **Cache** | `ubuntu22-tap_src` | +| **Status** | Working | +| **Infrastructure** | MySQL 5.7 (Docker) | +| **What it tests** | Runs `make test` inside a Docker build container | + +--- + +## Test Workflows — Broken (Depend on jenkins-build-scripts) + +These workflows still reference the private `proxysql/jenkins-build-scripts` repository and fail with `fatal: repository not found`. See issue #5521 for migration tracking. + +### CI-taptests-groups + +| | | +|---|---| +| **Status** | Broken (#5521) | +| **Purpose** | Runs TAP tests in parallel groups. Dynamically builds a test matrix from `tap-matrix.json`. | +| **Migration** | Medium effort. Most infrastructure exists in-repo (`test/tap/groups/`). | + +### CI-repltests + +| | | +|---|---| +| **Status** | Broken (#5521, #5523) | +| **Purpose** | Replication chain tests with sysbench load and data consistency verification. Tests MySQL 5.6/5.7/8.0 with/without SSL and debezium. | +| **Migration** | Requires porting `proxysql_repl_tests/` from jenkins-build-scripts. | + +### CI-shuntest + +| | | +|---|---| +| **Status** | Broken (#5521) | +| **Purpose** | Backend shunning/failover algorithm tests. 
Uses the same `proxysql_repl_tests` infrastructure as CI-repltests. | +| **Migration** | Unblocked by CI-repltests migration. | + +### CI-taptests, CI-taptests-ssl, CI-taptests-asan + +| | | +|---|---| +| **Status** | Broken (#5521) | +| **Purpose** | Various TAP test execution modes: standard, SSL-focused, and AddressSanitizer. | +| **Migration** | Similar to CI-taptests-groups. | + +--- + +## CI-package-build + +| | | +|---|---| +| **File** | `CI-package-build.yml` → `ci-package-build.yml@GH-Actions` | +| **Triggers** | Push (not workflow_run) | +| **Status** | Working | +| **Purpose** | Builds `.deb` and `.rpm` packages for distribution. Independent of the CI-trigger cascade. | + +--- + +## CI-codeql + +| | | +|---|---| +| **File** | `CI-codeql.yml` → `ci-codeql.yml@GH-Actions` | +| **Triggers** | `CI-trigger` `completed` | +| **Status** | Working | +| **Purpose** | GitHub CodeQL security analysis. | + +--- + +## Third-Party Integration Workflows (CI-3p-*) + +Ten workflows test ProxySQL against external client libraries and frameworks. 
They all: +- Trigger on `CI-trigger` `completed` +- Use repository variables for matrix configuration (database versions, connector versions) +- Run separate MySQL and MariaDB/PostgreSQL jobs +- Are independent of the build cache (they build ProxySQL inline) + +| Workflow | Client Library | Protocols | +|---|---|---| +| `CI-3p-aiomysql` | Python aiomysql (async) | MySQL | +| `CI-3p-django-framework` | Django ORM | MySQL, PostgreSQL | +| `CI-3p-laravel-framework` | Laravel Eloquent | MySQL, PostgreSQL | +| `CI-3p-mariadb-connector-c` | MariaDB C connector | MySQL | +| `CI-3p-mysql-connector-j` | MySQL Connector/J (Java) | MySQL | +| `CI-3p-pgjdbc` | PostgreSQL JDBC | PostgreSQL | +| `CI-3p-php-pdo-mysql` | PHP PDO MySQL | MySQL | +| `CI-3p-php-pdo-pgsql` | PHP PDO PostgreSQL | PostgreSQL | +| `CI-3p-postgresql` | libpq (native) | PostgreSQL | +| `CI-3p-sqlalchemy` | SQLAlchemy ORM | MySQL, PostgreSQL | + +### Matrix Variables + +Each 3p workflow reads its test matrix from GitHub repository variables: + +``` +MATRIX_3P_AIOMYSQL_infradb_mysql # e.g. ["mysql57", "mysql84", "mysql91"] +MATRIX_3P_AIOMYSQL_connector_mysql # e.g. ["0.1.1", "0.2.0"] +MATRIX_3P_AIOMYSQL_infradb_mariadb # e.g. ["mariadb105", "mariadb115"] +MATRIX_3P_AIOMYSQL_connector_mariadb # e.g. ["0.1.1", "0.2.0"] +``` + +--- + +## Other Workflows + +### claude.yml + +Claude Code automation for AI-assisted development. Triggers on issue comments, PR comments, and issue assignments. + +### claude-code-review.yml + +Automated code review using Claude. Triggers on pull request events. + +--- + +## Test Infrastructure (test/infra/) + +Test workflows that use the in-repo infrastructure follow this pipeline: + +``` +ensure-infras.bash # Start ProxySQL + backends based on TAP group's infras.lst + ├── start-proxysql-isolated.bash # ProxySQL in Docker container + ├── infra-mysql57/docker-compose-init.bash # MySQL 5.7 (3 nodes + 3 orchestrators) + ├── docker-pgsql16-single/... 
# PostgreSQL 16 + └── ...other backends from infras.lst + +run-tests-isolated.bash # Launch test runner container + ├── Source group env.sh (TEST_PY_* flags) + ├── Source env-isolated.bash (TAP_* connection vars) + └── Run proxysql-tester.py + +stop-proxysql-isolated.bash # Cleanup ProxySQL +destroy-infras.bash # Cleanup backends +``` + +### TAP Groups + +Tests are organized into groups under `test/tap/groups/`. Each group has: + +| File | Purpose | +|---|---| +| `infras.lst` | Required backend infrastructure (one per line) | +| `env.sh` | Environment overrides (test flags, hostgroup defaults) | +| `pre-proxysql.bash` | Hook: runs before tests (cluster setup) | +| `setup-infras.bash` | Hook: runs after infra is up (hostgroup aliasing, etc.) | +| `pre-cleanup.bash` | Hook: runs before teardown | + +Group assignment is defined in `test/tap/groups/groups.json` which maps test binary names to group names (e.g. `legacy-g1`, `legacy-g2`, `mysql84-g1`, etc.). + +### Key Environment Variables + +| Variable | Default | Purpose | +|---|---|---| +| `INFRA_ID` | `dev-$USER` | Namespace for Docker containers (enables parallel runs) | +| `TAP_GROUP` | — | Which test group to run | +| `SKIP_CLUSTER_START` | `0` | Skip ProxySQL cluster setup | +| `TAP_USE_NOISE` | `0` | Enable random delay injection for race condition testing | +| `COVERAGE` | `0` | Enable code coverage collection | +| `WITHGCOV` | `0` | Tell `proxysql-tester.py` binary was built with gcov | + +--- + +## Adding a New Test Workflow + +To add a new workflow for TAP group `` with build variant ``: + +1. **Reusable workflow** — Create `.github/workflows/ci-.yml` on the `GH-Actions` branch. Use `ci-legacy-g2.yml` as a template. Set the `BLDCACHE` env to match the build variant cache key (e.g. `ubuntu22-tap_src`). + +2. 
**Caller workflow** — Create `.github/workflows/CI-.yml` on the main branch: + ```yaml + name: CI- + on: + workflow_dispatch: + workflow_run: + workflows: [ CI-trigger ] + types: [ in_progress ] # or completed + jobs: + run: + uses: sysown/proxysql/.github/workflows/ci-.yml@GH-Actions + secrets: inherit + with: + trigger: ${{ toJson(github) }} + ``` + +3. **TAP group** (if new) — Create `test/tap/groups//` with `infras.lst` and `env.sh`. Add test assignments to `groups.json`. + +Use `in_progress` trigger type if the workflow should start building immediately (parallel with CI-builds). Use `completed` if it must wait for CI-builds to finish populating the cache. + +--- + +## File Locations + +| What | Where | +|---|---| +| Caller workflows | `.github/workflows/CI-*.yml` (main branch) | +| Reusable workflows | `.github/workflows/ci-*.yml` (`GH-Actions` branch) | +| Test infrastructure | `test/infra/` | +| Control scripts | `test/infra/control/` | +| TAP groups | `test/tap/groups/` | +| Group-to-test mapping | `test/tap/groups/groups.json` | +| Python test runner | `test/scripts/bin/proxysql-tester.py` | +| Tester config | `test/scripts/etc/proxysql-tester.yml` | +| Docker base image | `test/infra/docker-base/Dockerfile` | +| Migration tracking | Issue #5521 | diff --git a/test/infra/control/check_all_nodes.bash b/test/infra/control/check_all_nodes.bash index 1f49adbcba..f5b3689207 100755 --- a/test/infra/control/check_all_nodes.bash +++ b/test/infra/control/check_all_nodes.bash @@ -1,22 +1,28 @@ #!/usr/bin/env bash -set -e -set -o pipefail +# +# Scheduler script: polls all ProxySQL cluster nodes to keep them active. +# Runs inside the ProxySQL container where all nodes are on 127.0.0.1. +# +# Port scheme: +# Primary: 6032 +# proxy-node1: 6042 +# proxy-node2: 6052 +# ... 
+# proxy-node9: 6122 TABLES=(mysql_servers mysql_users mysql_query_rules mysql_query_rules_fast_routing global_variables proxysql_servers mysql_galera_hostgroups mysql_group_replication_hostgroups mysql_replication_hostgroups mysql_hostgroup_attributes) ALL_TABLES=() - -for i in ; do - ALL_TABLES+=() - ALL_TABLES+=("runtime_") +for i in ${!TABLES[@]}; do + ALL_TABLES+=(${TABLES[$i]}) + ALL_TABLES+=("runtime_"${TABLES[$i]}) done -# The nodes in our containerized cluster -NODES="proxysql proxy-node1 proxy-node2 proxy-node3 proxy-node4 proxy-node5 proxy-node6 proxy-node7 proxy-node8 proxy-node9" +# Primary + up to 9 nodes, all on 127.0.0.1 with different ports +PORTS="6032 6042 6052 6062 6072 6082 6092 6102 6112 6122" -for host in ; do - # Use radmin/radmin for remote access between containers - for i in ; do - echo "SELECT COUNT(*) FROM ;" - done | mysql -u radmin -pradmin -h -P 6032 > /dev/null & +for port in ${PORTS}; do + for i in ${!ALL_TABLES[@]}; do + echo "SELECT COUNT(*) FROM ${ALL_TABLES[$i]};" + done | mysql -u admin -padmin -h 127.0.0.1 -P ${port} > /dev/null 2>&1 & done diff --git a/test/infra/control/ensure-infras.bash b/test/infra/control/ensure-infras.bash index b02ec5ee38..6dd07896b4 100755 --- a/test/infra/control/ensure-infras.bash +++ b/test/infra/control/ensure-infras.bash @@ -74,11 +74,29 @@ else echo ">>> ProxySQL is already running." fi -# 3. Ensure Docker Compose helper is available +# 3. Execute pre-proxysql hook (cluster setup) — BEFORE starting backends +# Backends need ProxySQL (and optionally the cluster) to be fully ready +# because their docker-proxy-post.bash configures ProxySQL. +PRE_PROXYSQL_HOOK="${WORKSPACE}/test/tap/groups/${TAP_GROUP}/pre-proxysql.bash" +if [ ! -f "${PRE_PROXYSQL_HOOK}" ]; then + PRE_PROXYSQL_HOOK="${WORKSPACE}/test/tap/groups/${BASE_GROUP}/pre-proxysql.bash" +fi +if [ ! 
-f "${PRE_PROXYSQL_HOOK}" ]; then + PRE_PROXYSQL_HOOK="${WORKSPACE}/test/tap/groups/default/pre-proxysql.bash" +fi + +if [ -f "${PRE_PROXYSQL_HOOK}" ]; then + echo ">>> Executing pre-proxysql hook: ${PRE_PROXYSQL_HOOK}" + "${PRE_PROXYSQL_HOOK}" +fi + +# 4. Ensure Docker Compose helper is available COMPOSE_CMD="docker compose" if ! $COMPOSE_CMD version &>/dev/null; then COMPOSE_CMD="docker-compose"; fi -# 4. Start Required Backends +# 5. Start Required Backends — one by one, sequentially +# Each backend's docker-compose-init.bash starts containers, waits for +# health, provisions users, and configures ProxySQL via docker-proxy-post.bash. for INFRA_NAME in ${INFRAS}; do INFRA_DIR="${WORKSPACE}/test/infra/${INFRA_NAME}" if [ ! -d "${INFRA_DIR}" ]; then @@ -88,8 +106,7 @@ for INFRA_NAME in ${INFRAS}; do COMPOSE_PROJECT="${INFRA_NAME}-${INFRA_ID}" echo ">>> Checking if backend '${INFRA_NAME}' (Project: ${COMPOSE_PROJECT}) is running..." - - # Check if ANY container for this project is running + if [ -z "$($COMPOSE_CMD -p "${COMPOSE_PROJECT}" ps -q 2>/dev/null)" ]; then echo ">>> '${INFRA_NAME}' is NOT running. Starting it now..." cd "${INFRA_DIR}" @@ -98,11 +115,10 @@ for INFRA_NAME in ${INFRAS}; do echo ">>> '${INFRA_NAME}' started successfully." else echo ">>> '${INFRA_NAME}' is already running." - # Run proxy post-configuration to ensure query rules are loaded if [ -f "${INFRA_DIR}/bin/docker-proxy-post.bash" ]; then echo ">>> Ensuring ProxySQL configuration for '${INFRA_NAME}'..." cd "${INFRA_DIR}" - ./bin/docker-proxy-post.bash || true + ./bin/docker-proxy-post.bash cd - >/dev/null fi fi @@ -110,8 +126,7 @@ done echo ">>> All required infrastructures for '${TAP_GROUP}' are READY (INFRA_ID: ${INFRA_ID})." -# 5. Derive DEFAULT_MYSQL_INFRA and DEFAULT_PGSQL_INFRA for hooks -# These are used by group-specific hooks to connect to backends +# 6. 
Derive DEFAULT_MYSQL_INFRA and DEFAULT_PGSQL_INFRA for hooks for INFRA_NAME in ${INFRAS}; do if [[ "${INFRA_NAME}" == *mysql* ]] || [[ "${INFRA_NAME}" == *mariadb* ]]; then export DEFAULT_MYSQL_INFRA="${DEFAULT_MYSQL_INFRA:-${INFRA_NAME}}" @@ -121,28 +136,10 @@ for INFRA_NAME in ${INFRAS}; do fi done -# 6. Execute pre-proxysql hook if it exists (for cluster setup) -# This starts additional ProxySQL cluster nodes if needed -PRE_PROXYSQL_HOOK="${WORKSPACE}/test/tap/groups/${TAP_GROUP}/pre-proxysql.bash" -if [ ! -f "${PRE_PROXYSQL_HOOK}" ]; then - # Try base group if subgroup doesn't have the hook - PRE_PROXYSQL_HOOK="${WORKSPACE}/test/tap/groups/${BASE_GROUP}/pre-proxysql.bash" -fi -# Fall back to default group if still not found -if [ ! -f "${PRE_PROXYSQL_HOOK}" ]; then - PRE_PROXYSQL_HOOK="${WORKSPACE}/test/tap/groups/default/pre-proxysql.bash" -fi - -if [ -f "${PRE_PROXYSQL_HOOK}" ]; then - echo ">>> Executing pre-proxysql hook: ${PRE_PROXYSQL_HOOK}" - "${PRE_PROXYSQL_HOOK}" -fi - # 7. Execute group-specific setup hook if it exists # This allows TAP groups to perform additional setup after all backends are running SETUP_HOOK="${WORKSPACE}/test/tap/groups/${TAP_GROUP}/setup-infras.bash" if [ ! 
-f "${SETUP_HOOK}" ]; then - # Try base group if subgroup doesn't have the hook SETUP_HOOK="${WORKSPACE}/test/tap/groups/${BASE_GROUP}/setup-infras.bash" fi diff --git a/test/infra/control/env-isolated.bash b/test/infra/control/env-isolated.bash index 096da63883..33d2b241d3 100755 --- a/test/infra/control/env-isolated.bash +++ b/test/infra/control/env-isolated.bash @@ -54,15 +54,22 @@ export TESTS_LOGS_PATH="${WORKSPACE}/ci_infra_logs/${INFRA_ID}/tests" # Test directories and paths export TAP_WORKDIR="${WORKSPACE}/test/tap/tests/" -export TAP_WORKDIRS="${WORKSPACE}/test/tap/tests/ ${WORKSPACE}/test/tap/tests_with_deps/deprecate_eof_support/" +export TAP_WORKDIRS="${WORKSPACE}/test/tap/tests/ ${WORKSPACE}/test/tap/tests_with_deps/deprecate_eof_support/ ${WORKSPACE}/test/tap/tests/unit/" export TAP_DEPS="${WORKSPACE}/test/tap/tap" export TEST_DEPS_PATH="${WORKSPACE}/test-scripts/deps" export TEST_DEPS="${TEST_DEPS_PATH}" -# Cluster Nodes +# Cluster Nodes — all run inside the ProxySQL container on different ports +# Port scheme: proxy-node1=6042, proxy-node2=6052, ..., proxy-node9=6122 +# From the test-runner container, reach them via the proxysql hostname +NUM_CLUSTER_NODES=${PROXYSQL_CLUSTER_NODES:-9} +if [[ "${SKIP_CLUSTER_START}" == "1" ]] || [[ "${SKIP_CLUSTER_START}" == "true" ]]; then + NUM_CLUSTER_NODES=0 +fi CLUSTER_NODES="" -for i in $(seq 1 9); do - CLUSTER_NODES="${CLUSTER_NODES}proxy-node${i}:6032," +for i in $(seq 1 ${NUM_CLUSTER_NODES}); do + PORT=$((6032 + i * 10)) + CLUSTER_NODES="${CLUSTER_NODES}proxysql:${PORT}," done export TAP_CLUSTER_NODES=${CLUSTER_NODES%,} @@ -73,7 +80,7 @@ export MALLOC_CONF="retain:false" export PROXYSQL_LAYOUT="flat" # Test execution defaults -export WITHGCOV="${WITHGCOV:-1}" +export WITHGCOV="${WITHGCOV:-0}" export WITHASAN="${WITHASAN:-0}" export TEST_EXIT_ON_FAIL="${TEST_EXIT_ON_FAIL:-0}" export TEST_JDBC="${TEST_JDBC:-1}" diff --git a/test/infra/control/run-multi-group.bash b/test/infra/control/run-multi-group.bash index 
b11a844f20..e7f41ffce0 100755 --- a/test/infra/control/run-multi-group.bash +++ b/test/infra/control/run-multi-group.bash @@ -11,7 +11,7 @@ set -euo pipefail # PARALLEL_JOBS=4 # Max parallel groups (default: unlimited) # TIMEOUT_MINUTES=60 # Hard timeout per group (default: 60) # EXIT_ON_FIRST_FAIL=0 # Stop on first failure (default: 0) -# AUTO_CLEANUP=0 # Auto cleanup successful groups (default: 0) +# AUTO_CLEANUP=1 # Auto cleanup successful groups (default: 1) # SKIP_CLUSTER_START=1 # Skip ProxySQL cluster initialization (default: 0) # COVERAGE=1 # Enable code coverage collection (default: 0) # TAP_USE_NOISE=1 # Enable noise injection for race condition testing (default: 0) @@ -35,7 +35,7 @@ TAP_GROUPS="${TAP_GROUPS:-}" PARALLEL_JOBS="${PARALLEL_JOBS:-2}" # Default: 2 parallel groups TIMEOUT_MINUTES="${TIMEOUT_MINUTES:-60}" EXIT_ON_FIRST_FAIL="${EXIT_ON_FIRST_FAIL:-0}" -AUTO_CLEANUP="${AUTO_CLEANUP:-0}" +AUTO_CLEANUP="${AUTO_CLEANUP:-1}" SKIP_CLUSTER_START="${SKIP_CLUSTER_START:-0}" COVERAGE="${COVERAGE:-0}" TAP_USE_NOISE="${TAP_USE_NOISE:-0}" @@ -354,19 +354,22 @@ if [ "${COVERAGE}" -eq 1 ]; then if [ -n "${COVERAGE_FILES}" ]; then COMBINED_INFO="${COMBINED_COVERAGE_DIR}/combined-coverage.info" + COVERAGE_LOG="${COMBINED_COVERAGE_DIR}/coverage-generation.log" echo ">>> Combining coverage reports into: ${COMBINED_INFO}" + echo ">>> Coverage generation log: ${COVERAGE_LOG}" # Run coverage combination in container (tools may not be on host) docker run --rm \ -v "${WORKSPACE}:${WORKSPACE}" \ -e COVERAGE_FILES="${COVERAGE_FILES}" \ -e COMBINED_INFO="${COMBINED_INFO}" \ + -e COVERAGE_LOG="${COVERAGE_LOG}" \ proxysql-ci-base:latest \ bash -c ' set -e if command -v fastcov >/dev/null 2>&1; then - fastcov -b -l -C ${COVERAGE_FILES} -o "${COMBINED_INFO}" 2>&1 || { - echo ">>> WARNING: fastcov combine failed, trying lcov..." + fastcov -b -l -C ${COVERAGE_FILES} -o "${COMBINED_INFO}" >> "${COVERAGE_LOG}" 2>&1 || { + echo ">>> WARNING: fastcov combine failed, trying lcov..." 
>> "${COVERAGE_LOG}" if command -v lcov >/dev/null 2>&1; then FIRST_FILE=true for info_file in ${COVERAGE_FILES}; do @@ -374,7 +377,7 @@ if [ "${COVERAGE}" -eq 1 ]; then cp "${info_file}" "${COMBINED_INFO}" FIRST_FILE=false else - lcov -a "${COMBINED_INFO}" -a "${info_file}" -o "${COMBINED_INFO}".tmp 2>/dev/null && \ + lcov -a "${COMBINED_INFO}" -a "${info_file}" -o "${COMBINED_INFO}".tmp >> "${COVERAGE_LOG}" 2>&1 && \ mv "${COMBINED_INFO}".tmp "${COMBINED_INFO}" fi done @@ -387,7 +390,7 @@ if [ "${COVERAGE}" -eq 1 ]; then cp "${info_file}" "${COMBINED_INFO}" FIRST_FILE=false else - lcov -a "${COMBINED_INFO}" -a "${info_file}" -o "${COMBINED_INFO}".tmp 2>/dev/null && \ + lcov -a "${COMBINED_INFO}" -a "${info_file}" -o "${COMBINED_INFO}".tmp >> "${COVERAGE_LOG}" 2>&1 && \ mv "${COMBINED_INFO}".tmp "${COMBINED_INFO}" fi done @@ -395,7 +398,7 @@ if [ "${COVERAGE}" -eq 1 ]; then echo ">>> ERROR: Neither fastcov nor lcov available" exit 1 fi - ' || echo ">>> WARNING: Coverage combination failed" + ' || echo ">>> WARNING: Coverage combination failed (see ${COVERAGE_LOG})" if [ -f "${COMBINED_INFO}" ]; then echo ">>> Combined coverage report: ${COMBINED_INFO}" @@ -409,11 +412,12 @@ if [ "${COVERAGE}" -eq 1 ]; then -v "${WORKSPACE}:${WORKSPACE}" \ -e COMBINED_INFO="${COMBINED_INFO}" \ -e COMBINED_HTML="${COMBINED_HTML}" \ + -e COVERAGE_LOG="${COVERAGE_LOG}" \ proxysql-ci-base:latest \ bash -c ' if command -v genhtml >/dev/null 2>&1; then - genhtml --branch-coverage --ignore-errors negative,source --synthesize-missing "${COMBINED_INFO}" --output-directory "${COMBINED_HTML}" 2>&1 || \ - echo ">>> WARNING: HTML generation failed" + genhtml --branch-coverage --ignore-errors negative,source --synthesize-missing "${COMBINED_INFO}" --output-directory "${COMBINED_HTML}" >> "${COVERAGE_LOG}" 2>&1 || \ + echo ">>> WARNING: HTML generation failed (see ${COVERAGE_LOG})" else echo ">>> WARNING: genhtml not available" fi diff --git a/test/infra/control/run-tests-isolated.bash 
b/test/infra/control/run-tests-isolated.bash index dec518800e..39666fc3fb 100755 --- a/test/infra/control/run-tests-isolated.bash +++ b/test/infra/control/run-tests-isolated.bash @@ -264,13 +264,15 @@ docker run \ if command -v fastcov >/dev/null 2>&1; then mkdir -p \"\${COVERAGE_REPORT_DIR}\" local coverage_file=\"\${COVERAGE_REPORT_DIR}/\${INFRA_ID}.info\" + local coverage_log=\"\${COVERAGE_REPORT_DIR}/coverage-generation.log\" echo \">>> Generating coverage report: \${coverage_file}\" + echo \">>> Coverage generation log: \${coverage_log}\" local nproc_val=\$(nproc) # Copy .gcno files to /gcov so fastcov can find both .gcno and .gcda together # This avoids race conditions when multiple groups run in parallel if [ -d \"/gcov\" ] && [ \"\$(ls -A /gcov 2>/dev/null)\" ]; then - echo \">>> Preparing coverage data directory...\" + echo \">>> Preparing coverage data directory...\" >> \"\${coverage_log}\" 2>&1 cd \"\${WORKSPACE}\" && find . -path './ci_infra_logs' -prune -o -name '*.gcno' -type f -print | while read gcno; do target=\"/gcov/\${gcno#./}\" target_dir=\"\$(dirname \"\$target\")\" @@ -279,7 +281,9 @@ docker run \ done echo \">>> Running fastcov on /gcov...\" cd /gcov - fastcov -b -j\"\${nproc_val}\" --process-gcno -l -e /usr/include/ -e \"\${WORKSPACE}/test/tap/tests\" -e \"\${WORKSPACE}/deps/\" -d . -o \"\${coverage_file}\" 2>&1 || echo \">>> WARNING: Coverage generation failed\" + fastcov -b -j\"\${nproc_val}\" --process-gcno -l \ + --include \"\${WORKSPACE}/include/\" \"\${WORKSPACE}/lib/\" \"\${WORKSPACE}/src/\" \"\${WORKSPACE}/test/\" \ + -d . 
-o \"\${coverage_file}\" >> \"\${coverage_log}\" 2>&1 || echo \">>> WARNING: Coverage generation failed (see \${coverage_log})\" else echo \">>> WARNING: /gcov directory is empty or missing, skipping coverage\" fi @@ -290,11 +294,12 @@ docker run \ if command -v genhtml >/dev/null 2>&1; then local html_dir=\"\${COVERAGE_REPORT_DIR}/html\" mkdir -p \"\${html_dir}\" - genhtml --branch-coverage --ignore-errors negative,source --synthesize-missing \"\${coverage_file}\" --output-directory \"\${html_dir}\" 2>&1 || echo \">>> WARNING: HTML generation failed\" + echo \">>> Generating HTML coverage report...\" + genhtml --branch-coverage --ignore-errors negative,source --synthesize-missing \"\${coverage_file}\" --output-directory \"\${html_dir}\" >> \"\${coverage_log}\" 2>&1 || echo \">>> WARNING: HTML generation failed (see \${coverage_log})\" [ -f \"\${html_dir}/index.html\" ] && echo \">>> HTML coverage report: \${html_dir}/index.html\" fi else - echo \">>> WARNING: Coverage info file not generated\" + echo \">>> WARNING: Coverage info file not generated (see \${coverage_log})\" fi else echo \">>> WARNING: fastcov not found in container, skipping coverage collection\" @@ -319,9 +324,41 @@ docker run \ [ -n \"${MYSQL_BINLOG_BIN}\" ] && ln -sf \"${MYSQL_BINLOG_BIN}\" \"${WORKSPACE}/test-scripts/deps/mysqlbinlog\" [ -n \"${BINLOG_READER_BIN}\" ] && ln -sf \"${BINLOG_READER_BIN}\" \"${WORKSPACE}/test-scripts/deps/test_binlog_reader-t\" - # Source the local isolated environment + # Source group environment first (sets TEST_PY_* flags etc.) 
+ if [ -n \"${TAP_GROUP}\" ]; then + BASE_GROUP=\$(echo \"${TAP_GROUP}\" | sed -E 's/[-_]g[0-9]+.*//') + if [ -f \"${WORKSPACE}/test/tap/groups/${TAP_GROUP}/env.sh\" ]; then + source \"${WORKSPACE}/test/tap/groups/${TAP_GROUP}/env.sh\" + elif [ -f \"${WORKSPACE}/test/tap/groups/\${BASE_GROUP}/env.sh\" ]; then + source \"${WORKSPACE}/test/tap/groups/\${BASE_GROUP}/env.sh\" + fi + fi + + # Source the local isolated environment (defaults for unset vars) source ${SCRIPT_DIR}/env-isolated.bash + # Wait for ProxySQL to be reachable from this container + # Docker DNS resolution can take a few seconds on newly created containers + echo -n '>>> Waiting for ProxySQL admin (proxysql:6032) ...' + WAIT_COUNT=0 + WAIT_MAX=30 + while [ \$WAIT_COUNT -lt \$WAIT_MAX ]; do + if mysql -uradmin -pradmin -hproxysql -P6032 -e 'SELECT 1' >/dev/null 2>&1; then + echo ' OK.' + break + fi + echo -n '.' + sleep 1 + WAIT_COUNT=\$((WAIT_COUNT + 1)) + done + if [ \$WAIT_COUNT -ge \$WAIT_MAX ]; then + echo ' FAILED after \${WAIT_MAX}s' + echo 'ERROR: Cannot reach ProxySQL admin from test-runner container.' 
+ echo 'DNS resolution test:' + getent hosts proxysql || echo 'DNS lookup failed for proxysql' + exit 1 + fi + # Dump ProxySQL configuration before running tests echo '================================================================================' echo 'ProxySQL Configuration Dump (BEFORE TESTS)' @@ -329,32 +366,32 @@ docker run \ # MySQL configuration echo '--- mysql_servers ---' - mysql -uradmin -pradmin -hproxysql -P6032 -e 'SELECT hostgroup_id, hostname, port, status, comment FROM mysql_servers ORDER BY hostgroup_id, hostname' 2>/dev/null || echo 'ERROR: Failed to query mysql_servers' + mysql -uradmin -pradmin -hproxysql -P6032 -e 'SELECT hostgroup_id, hostname, port, status, comment FROM mysql_servers ORDER BY hostgroup_id, hostname' echo '--- mysql_users ---' - mysql -uradmin -pradmin -hproxysql -P6032 -e 'SELECT username, password, active, default_hostgroup, transaction_persistent FROM mysql_users ORDER BY username' 2>/dev/null || echo 'ERROR: Failed to query mysql_users' + mysql -uradmin -pradmin -hproxysql -P6032 -e 'SELECT username, password, active, default_hostgroup, transaction_persistent FROM mysql_users ORDER BY username' echo '--- mysql_replication_hostgroups ---' - mysql -uradmin -pradmin -hproxysql -P6032 -e 'SELECT writer_hostgroup, reader_hostgroup, comment FROM mysql_replication_hostgroups' 2>/dev/null || echo 'ERROR: Failed to query mysql_replication_hostgroups' + mysql -uradmin -pradmin -hproxysql -P6032 -e 'SELECT writer_hostgroup, reader_hostgroup, comment FROM mysql_replication_hostgroups' echo '--- mysql_query_rules ---' - mysql -uradmin -pradmin -hproxysql -P6032 -e 'SELECT rule_id, active, username, match_pattern, destination_hostgroup, apply, comment FROM mysql_query_rules ORDER BY rule_id' 2>/dev/null || echo 'ERROR: Failed to query mysql_query_rules (or empty)' + mysql -uradmin -pradmin -hproxysql -P6032 -e 'SELECT rule_id, active, username, match_pattern, destination_hostgroup, apply, comment FROM mysql_query_rules ORDER BY 
rule_id' echo '--- runtime_mysql_query_rules ---' - mysql -uradmin -pradmin -hproxysql -P6032 -e 'SELECT rule_id, active, username, match_pattern, destination_hostgroup, apply, comment FROM runtime_mysql_query_rules ORDER BY rule_id' 2>/dev/null || echo 'ERROR: Failed to query runtime_mysql_query_rules (or empty)' + mysql -uradmin -pradmin -hproxysql -P6032 -e 'SELECT rule_id, active, username, match_pattern, destination_hostgroup, apply, comment FROM runtime_mysql_query_rules ORDER BY rule_id' # PgSQL configuration echo '--- pgsql_servers ---' - mysql -uradmin -pradmin -hproxysql -P6032 -e 'SELECT hostgroup_id, hostname, port, status, comment FROM pgsql_servers ORDER BY hostgroup_id, hostname' 2>/dev/null || echo 'INFO: pgsql_servers not configured (or error)' + mysql -uradmin -pradmin -hproxysql -P6032 -e 'SELECT hostgroup_id, hostname, port, status, comment FROM pgsql_servers ORDER BY hostgroup_id, hostname' echo '--- pgsql_users ---' - mysql -uradmin -pradmin -hproxysql -P6032 -e 'SELECT username, password, active, default_hostgroup FROM pgsql_users ORDER BY username' 2>/dev/null || echo 'INFO: pgsql_users not configured (or error)' + mysql -uradmin -pradmin -hproxysql -P6032 -e 'SELECT username, password, active, default_hostgroup FROM pgsql_users ORDER BY username' echo '--- pgsql_replication_hostgroups ---' - mysql -uradmin -pradmin -hproxysql -P6032 -e 'SELECT writer_hostgroup, reader_hostgroup, comment FROM pgsql_replication_hostgroups' 2>/dev/null || echo 'INFO: pgsql_replication_hostgroups not configured (or error)' + mysql -uradmin -pradmin -hproxysql -P6032 -e 'SELECT writer_hostgroup, reader_hostgroup, comment FROM pgsql_replication_hostgroups' echo '--- pgsql_query_rules ---' - mysql -uradmin -pradmin -hproxysql -P6032 -e 'SELECT rule_id, active, username, match_pattern, destination_hostgroup, apply, comment FROM pgsql_query_rules ORDER BY rule_id' 2>/dev/null || echo 'INFO: pgsql_query_rules not configured (or empty)' + mysql -uradmin -pradmin 
-hproxysql -P6032 -e 'SELECT rule_id, active, username, match_pattern, destination_hostgroup, apply, comment FROM pgsql_query_rules ORDER BY rule_id' echo '================================================================================' diff --git a/test/infra/control/start-proxysql-isolated.bash b/test/infra/control/start-proxysql-isolated.bash index 357284dd3f..a0fa8923f6 100755 --- a/test/infra/control/start-proxysql-isolated.bash +++ b/test/infra/control/start-proxysql-isolated.bash @@ -21,8 +21,13 @@ INFRA_LOGS_PATH="${WORKSPACE}/ci_infra_logs" PROXY_DATA_DIR="${INFRA_LOGS_PATH}/${INFRA_ID}/proxysql" GENERIC_CONFIG="${SCRIPT_DIR}/proxysql-ci.cnf" +# Cluster configuration +NUM_NODES=${PROXYSQL_CLUSTER_NODES:-9} +if [[ "${SKIP_CLUSTER_START}" == "1" ]] || [[ "${SKIP_CLUSTER_START}" == "true" ]]; then + NUM_NODES=0 +fi + # Coverage data directory (separate per INFRA_ID to avoid parallel write conflicts) -# GCOV_PREFIX redirects .gcda files to a separate directory for each ProxySQL instance COVERAGE_DATA_DIR="${INFRA_LOGS_PATH}/${INFRA_ID}/gcov" mkdir -p "${COVERAGE_DATA_DIR}" @@ -36,20 +41,91 @@ $SUDO rm -f "${PROXY_DATA_DIR}/proxysql.db" "${PROXY_DATA_DIR}"/*.pem docker rm -f "${PROXY_CONTAINER}" >/dev/null 2>&1 || true -echo ">>> Starting ProxySQL container: ${PROXY_CONTAINER}" -docker run -d --name "${PROXY_CONTAINER}" --hostname "proxysql" --network "${NETWORK_NAME}" --network-alias "proxysql" -v "${WORKSPACE}/src/proxysql:/usr/bin/proxysql" -v "${GENERIC_CONFIG}:/etc/proxysql.cnf" -v "${PROXY_DATA_DIR}:/var/lib/proxysql" -v "${COVERAGE_DATA_DIR}:/gcov" -e GCOV_PREFIX="/gcov" -e GCOV_PREFIX_STRIP="3" proxysql-ci-base:latest /bin/bash -c "/usr/bin/proxysql --idle-threads --clickhouse-server --sqlite3-server -f -c /etc/proxysql.cnf -D /var/lib/proxysql 2>&1 | tee /var/lib/proxysql/proxysql.log" +# Build the startup command for the container. +# Primary ProxySQL runs in foreground. 
Cluster nodes (if any) run as +# background processes inside the same container, each with its own +# data directory and port pair. +# +# Port scheme: +# Primary: admin=6032, mysql=6033, pgsql=6133 +# proxy-node1: admin=6042, mysql=6043 +# proxy-node2: admin=6052, mysql=6053 +# ... +# proxy-nodeN: admin=6032+(N*10), mysql=6033+(N*10) + +STARTUP_CMD=" +# Disable gcov for background cluster nodes to avoid concurrent .gcda writes +# (re-enabled for the foreground primary via 'env' on the exec line below) +unset GCOV_PREFIX GCOV_PREFIX_STRIP + +# Start cluster nodes as background processes +for i in \$(seq 1 ${NUM_NODES}); do + ADMIN_PORT=\$((6032 + i * 10)) + MYSQL_PORT=\$((6033 + i * 10)) + NODE_DIR=/var/lib/proxysql-node\${i} + mkdir -p \${NODE_DIR} + + PGSQL_PORT=\$((7133 + i * 10)) + PGSQL_ADMIN_PORT=\$((7132 + i * 10)) + SQLITE_PORT=\$((7030 + i * 10)) + cat > \${NODE_DIR}/proxysql-node.cnf <> \${NODE_DIR}/proxysql.log 2>&1 & + echo \"Started proxy-node\${i} (admin=\${ADMIN_PORT}, mysql=\${MYSQL_PORT})\" +done + +# Start primary ProxySQL in foreground +exec env GCOV_PREFIX=/gcov GCOV_PREFIX_STRIP=3 /usr/bin/proxysql --idle-threads --clickhouse-server --sqlite3-server -f -c /etc/proxysql.cnf -D /var/lib/proxysql 2>&1 | tee /var/lib/proxysql/proxysql.log +" + +echo ">>> Starting ProxySQL container: ${PROXY_CONTAINER} (cluster nodes: ${NUM_NODES})" +docker run -d \ + --name "${PROXY_CONTAINER}" \ + --hostname "proxysql" \ + --network "${NETWORK_NAME}" \ + --network-alias "proxysql" \ + -v "${WORKSPACE}/src/proxysql:/usr/bin/proxysql" \ + -v "${GENERIC_CONFIG}:/etc/proxysql.cnf" \ + -v "${PROXY_DATA_DIR}:/var/lib/proxysql" \ + -v "${COVERAGE_DATA_DIR}:/gcov" \ + -e GCOV_PREFIX="/gcov" \ + -e GCOV_PREFIX_STRIP="3" \ + proxysql-ci-base:latest \ + /bin/bash -c "${STARTUP_CMD}" if [ -f /.dockerenv ]; then RUNNER_ID=$(hostname) docker network connect "${NETWORK_NAME}" "${RUNNER_ID}" || true fi +# Wait for primary echo -n "Waiting for ${PROXY_CONTAINER}:6032 " MAX_WAIT=30 COUNT=0 while [ $COUNT -lt $MAX_WAIT ]; do if docker exec "${PROXY_CONTAINER}" mysql -uadmin -padmin -h127.0.0.1 
-P6032 -e 'SELECT 1' >/dev/null 2>&1; then - # Provision clickhouse interface if needed docker exec "${PROXY_CONTAINER}" mysql -uadmin -padmin -h127.0.0.1 -P6032 -e " SET clickhouse-mysql_ifaces='0.0.0.0:8000'; LOAD CLICKHOUSE VARIABLES TO RUNTIME; @@ -61,8 +137,99 @@ while [ $COUNT -lt $MAX_WAIT ]; do sleep 1 COUNT=$((COUNT+1)) done +if [ $COUNT -ge $MAX_WAIT ]; then echo " TIMEOUT"; exit 1; fi + +# Wait for cluster nodes +for i in $(seq 1 "${NUM_NODES}"); do + ADMIN_PORT=$((6032 + i * 10)) + echo -n "Waiting for proxy-node${i} (port ${ADMIN_PORT}) " + COUNT=0 + while [ $COUNT -lt $MAX_WAIT ]; do + if docker exec "${PROXY_CONTAINER}" mysql -uadmin -padmin -h127.0.0.1 -P${ADMIN_PORT} -e 'SELECT 1' >/dev/null 2>&1; then + echo " OK." + break + fi + echo -n "." + sleep 1 + COUNT=$((COUNT+1)) + done + if [ $COUNT -ge $MAX_WAIT ]; then echo " TIMEOUT (node ${i})"; exit 1; fi +done -if [ $COUNT -ge $MAX_WAIT ]; then - echo " TIMEOUT" - exit 1 +# Initialize cluster if nodes were started +if [ "${NUM_NODES}" -gt 0 ]; then + echo ">>> Initializing ProxySQL Cluster (${NUM_NODES} nodes)..." + + MYSQL_CMD="docker exec -i ${PROXY_CONTAINER} mysql -uadmin -padmin -h127.0.0.1" + + # Build proxysql_servers entries: primary + up to first 3 nodes as core + CORE_NODES=3 + if [ "${NUM_NODES}" -lt 3 ]; then CORE_NODES="${NUM_NODES}"; fi + PROXYSQL_SERVERS_SQL="DELETE FROM proxysql_servers;" + for i in $(seq 1 "${CORE_NODES}"); do + PORT=$((6032 + i * 10)) + PROXYSQL_SERVERS_SQL="${PROXYSQL_SERVERS_SQL} INSERT INTO proxysql_servers (hostname,port,weight,comment) VALUES ('proxysql',${PORT},0,'core-node${i}');" + done + + # Configure primary + ${MYSQL_CMD} -P6032 <>> Configuring proxy-node${i} (port ${ADMIN_PORT})" + + ${MYSQL_CMD} -P${ADMIN_PORT} <>> Installing scheduler on cluster nodes..." 
+ SCHEDULER_SCRIPT="${SCRIPT_DIR}/check_all_nodes.bash" + + # Install on primary + docker cp "${SCHEDULER_SCRIPT}" "${PROXY_CONTAINER}:/tmp/check_all_nodes.bash" + docker exec "${PROXY_CONTAINER}" chmod +x /tmp/check_all_nodes.bash + + ${MYSQL_CMD} -P6032 <>> ProxySQL Cluster initialized (${NUM_NODES} nodes in single container)." fi + +echo ">>> ProxySQL is UP." diff --git a/test/infra/docker-base/Dockerfile b/test/infra/docker-base/Dockerfile index cc77f62d65..eb7fb434a7 100755 --- a/test/infra/docker-base/Dockerfile +++ b/test/infra/docker-base/Dockerfile @@ -24,6 +24,7 @@ RUN apt-get update -qq && \ python3-yaml \ python3-packaging \ python3-structlog \ + sysbench \ php-cli \ php-mysql \ lcov \ diff --git a/test/infra/infra-mysql57/bin/local-docker-benchmark.bash b/test/infra/infra-mysql57/bin/local-docker-benchmark.bash index 45b2f558d1..e4e575d09a 100755 --- a/test/infra/infra-mysql57/bin/local-docker-benchmark.bash +++ b/test/infra/infra-mysql57/bin/local-docker-benchmark.bash @@ -9,11 +9,9 @@ SIZE_TABLES=10 REPORT_INTERVAL=1 TIME=10 SCRIPT=oltp_read_write.lua -MYSQL_HOST=127.0.0.1 -#MYSQL_HOST=mysql1.infra-mysql57 -MYSQL_PORT=6033 -#MYSQL_PORT=3306 -MYSQL_PWD=root +MYSQL_HOST=${TAP_ROOTHOST:-127.0.0.1} +MYSQL_PORT=${TAP_ROOTPORT:-6033} +MYSQL_PWD=${TAP_ROOTPASSWORD:-root} printf "[$(date)] Dropping 'sysbench' schema if present and preparing test dataset:\n" mysql -h$MYSQL_HOST -P$MYSQL_PORT -uroot -p$MYSQL_PWD -e"DROP DATABASE IF EXISTS sysbench; CREATE DATABASE IF NOT EXISTS sysbench;" 2>&1 | grep -vP 'mysql: .?Warning' diff --git a/test/infra/infra-mysql57/conf/proxysql/infra-config.sql b/test/infra/infra-mysql57/conf/proxysql/infra-config.sql index d6397bf216..884d34e899 100644 --- a/test/infra/infra-mysql57/conf/proxysql/infra-config.sql +++ b/test/infra/infra-mysql57/conf/proxysql/infra-config.sql @@ -89,6 +89,28 @@ REPLACE INTO debug_filters VALUES ('MySQL_Session.cpp',0,'handler_again___verify REPLACE INTO debug_filters VALUES 
('MySQL_Thread.cpp',0,'tune_timeout_for_myds_needs_pause'); REPLACE INTO debug_filters VALUES ('mysql_connection.cpp',0,'handler'); REPLACE INTO debug_filters VALUES ('mysql_connection.cpp',0,'real_query_cont'); +REPLACE INTO debug_filters VALUES ('MySQL_Session.cpp',0,'handler_again___verify_multiple_variables'); +REPLACE INTO debug_filters VALUES ('PgSQL_Session.cpp',0,'handler'); +REPLACE INTO debug_filters VALUES ('PgSQL_Connection.cpp',0,'handler'); +REPLACE INTO debug_filters VALUES ('Base_Thread.cpp',0,'tune_timeout_for_myds_needs_pause'); +REPLACE INTO debug_filters VALUES ('Query_Processor.cpp',0,'process_query'); +REPLACE INTO debug_filters VALUES ('MySQL_Session.cpp',0,'handler___client_DSS_QUERY_SENT___server_DSS_NOT_INITIALIZED__get_connection'); +REPLACE INTO debug_filters VALUES ('PgSQL_Session.cpp',0,'handler___client_DSS_QUERY_SENT___server_DSS_NOT_INITIALIZED__get_connection'); +REPLACE INTO debug_filters VALUES ('PgSQL_Session.cpp',0,'handler_again___verify_backend_user_db'); +REPLACE INTO debug_filters VALUES ('PgSQL_Session.cpp',0,'get_pkts_from_client'); +REPLACE INTO debug_filters VALUES ('MySQL_HostGroups_Manager.cpp',0,'push_MyConn_to_pool'); +REPLACE INTO debug_filters VALUES ('MySQL_HostGroups_Manager.cpp',0,'get_MyConn_from_pool'); +REPLACE INTO debug_filters VALUES ('MySrvConnList.cpp',0,'get_random_MyConn'); +REPLACE INTO debug_filters VALUES ('MyHGC.cpp',0,'get_random_MySrvC'); +REPLACE INTO debug_filters VALUES ('PgSQL_Session.cpp',0,'handler___status_WAITING_CLIENT_DATA___STATE_SLEEP___PGSQL_S'); +REPLACE INTO debug_filters VALUES ('PgSQL_HostGroups_Manager.cpp',0,'get_random_MySrvC'); +REPLACE INTO debug_filters VALUES ('mysql_connection.cpp',0,'stmt_prepare_cont'); +REPLACE INTO debug_filters VALUES ('mysql_connection.cpp',0,'stmt_execute_cont'); +REPLACE INTO debug_filters VALUES ('MySQL_Monitor.cpp',0,'event_loop'); +REPLACE INTO debug_filters VALUES ('MySQL_Monitor.cpp',0,'get_connection'); +REPLACE INTO debug_filters VALUES 
('MySQL_Monitor.cpp',0,'put_connection'); +REPLACE INTO debug_filters VALUES ('PgSQL_Monitor.cpp',0,'worker_thread'); +REPLACE INTO debug_filters VALUES ('PgSQL_Data_Stream.cpp',0,'assign_fd_from_pgsql_conn'); LOAD DEBUG TO RUNTIME; SAVE DEBUG TO DISK; diff --git a/test/scripts/bin/proxysql-tester.py b/test/scripts/bin/proxysql-tester.py index 311e31f52d..c6315f221b 100755 --- a/test/scripts/bin/proxysql-tester.py +++ b/test/scripts/bin/proxysql-tester.py @@ -806,7 +806,7 @@ def disk_usage(): if test_file in zero_sec_level_tap_tests: tap_env["OPENSSL_CONF"] = ( - os.environ["JENKINS_SCRIPTS_PATH"] + "/test-scripts/datadir/openssl_level_zero.cnf" + os.environ.get("WORKSPACE", ".") + "/test-scripts/datadir/openssl_level_zero.cnf" ) try: @@ -1129,18 +1129,21 @@ def run_failover(self): summary = [] self.pre_failover_tests() + mysql_infra = os.environ.get('DEFAULT_MYSQL_INFRA', 'infra-mysql57') if os.environ['DOCKER_MODE'].endswith('dns'): - orc_prefix = 'ORCHESTRATOR_API="http://orc1.infra-mysql57:3000/api http://orc2.infra-mysql57:3000/api http://orc3.infra-mysql57:3000/api"' + orc_prefix = 'ORCHESTRATOR_API="http://orc1.{infra}:3000/api http://orc2.{infra}:3000/api http://orc3.{infra}:3000/api"'.format(infra=mysql_infra) + mysql1_alias = 'mysql1.{}'.format(mysql_infra) else: orc_prefix = 'ORCHESTRATOR_API="http://localhost:23101/api http://localhost:23102/api http://localhost:23103/api"' - fo_cmd = '{} orchestrator-client -c graceful-master-takeover-auto -a mysql1'.format(orc_prefix) + mysql1_alias = 'mysql1' + fo_cmd = '{} orchestrator-client -c graceful-master-takeover-auto -a {}'.format(orc_prefix, mysql1_alias) fop = subprocess.Popen(fo_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) fo_stdout, fo_stderr = fop.communicate() log.debug('Failover output is - {} / {}'.format(fo_stdout, fo_stderr)) - cf_cmd = '{} orchestrator-client -c topology -i mysql1'.format(orc_prefix) + cf_cmd = '{} orchestrator-client -c topology -i {}:3306'.format(orc_prefix, 
mysql1_alias) cfp = subprocess.Popen(cf_cmd, shell=True, stdout=subprocess.PIPE, @@ -1148,7 +1151,7 @@ def run_failover(self): cf_stdout, cf_stderr = cfp.communicate() log.debug('Topology verification - {} / {}'.format(cf_stdout, cf_stderr)) self.post_failover_tests() - if b"mysql2:3306" in fo_stdout or b"mysql3:3306" in fo_stdout: + if b"mysql2" in fo_stdout or b"mysql3" in fo_stdout: rc = 0 return rc, logs, summary @@ -1551,7 +1554,7 @@ def start_proxysql(conn_args, timeout): subprocess.call( args="./test-scripts/proxysql_cluster_init.sh", shell=True, - cwd=os.environ["JENKINS_SCRIPTS_PATH"], + cwd=os.environ.get("WORKSPACE", "."), env=os.environ.copy() ) @@ -1633,7 +1636,7 @@ def main(argv): log.debug(f"TEST_PY_TAP_SHUFFLE_LIMIT is disabled (current value: {shuffle_limit})") # Options - coverage = int(os.environ['WITHGCOV']) + coverage = int(os.environ.get('WITHGCOV', 0)) or int(os.environ.get('COVERAGE_MODE', 0)) for opt, arg in opts: if opt in ('-h', "--help"): diff --git a/test/scripts/etc/proxysql-tester.yml b/test/scripts/etc/proxysql-tester.yml index 45ef575f15..bbde1ec6ca 100644 --- a/test/scripts/etc/proxysql-tester.yml +++ b/test/scripts/etc/proxysql-tester.yml @@ -1,3 +1,3 @@ GLOBAL: OG_API_KEY: c4a8af4e-ac7c-4641-81b4-bd4935b46af4 - BENCHMARK_SCRIPT: $JENKINS_SCRIPTS_PATH/docker-mysql-proxysql/bin/local-docker-benchmark.bash + BENCHMARK_SCRIPT: $WORKSPACE/test/infra/infra-mysql57/bin/local-docker-benchmark.bash diff --git a/test/tap/Makefile b/test/tap/Makefile index 2c50397a30..6591ebdf04 100644 --- a/test/tap/Makefile +++ b/test/tap/Makefile @@ -31,7 +31,7 @@ tests_with_deps: tap test_deps .PHONY: unit_tests unit_tests: - cd tests/unit && CC=${CC} CXX=${CXX} ${MAKE} + cd tests/unit && CC=${CC} CXX=${CXX} ${MAKE} $(MAKECMDGOALS) .PHONY: clean_utils diff --git a/test/tap/groups/basictests/env.sh b/test/tap/groups/basictests/env.sh new file mode 100644 index 0000000000..8dfb7538df --- /dev/null +++ b/test/tap/groups/basictests/env.sh @@ -0,0 +1,21 @@ 
+# Basictests Group Environment +# Runs Python-based functional tests: sysbench benchmark, change-user, failover. +# No ProxySQL cluster needed. + +export DEFAULT_MYSQL_INFRA="infra-mysql57" + +# No cluster +export SKIP_CLUSTER_START=1 + +# Test selection +export TEST_PY_BENCHMARK=1 +export TEST_PY_CHUSER=1 +export TEST_PY_FAILOVER=1 + +# Disable everything else +export TEST_PY_INTERNAL=0 +export TEST_PY_STATS=0 +export TEST_PY_TAP=0 +export TEST_PY_TAPINT=0 +export TEST_PY_WARMING=0 +export TEST_PY_READONLY=0 diff --git a/test/tap/groups/basictests/infras.lst b/test/tap/groups/basictests/infras.lst new file mode 100644 index 0000000000..51f5b953a5 --- /dev/null +++ b/test/tap/groups/basictests/infras.lst @@ -0,0 +1 @@ +infra-mysql57 diff --git a/test/tap/groups/basictests/setup-infras.bash b/test/tap/groups/basictests/setup-infras.bash new file mode 100755 index 0000000000..2285825958 --- /dev/null +++ b/test/tap/groups/basictests/setup-infras.bash @@ -0,0 +1,50 @@ +#!/bin/bash +set -e +set -o pipefail +# +# Basictests setup hook +# +# The in-repo infra configures servers in hostgroup pairs like 1300/1301. +# The legacy test scripts (proxysql-tester.py benchmark) expect servers +# in hostgroups 0 and 1. This hook moves everything from the original +# pair to 0/1 and removes the originals to avoid monitor conflicts. +# + +export INFRA_ID="${INFRA_ID:-dev-$USER}" +PROXY_CONTAINER="proxysql.${INFRA_ID}" + +MYSQL_CMD="docker exec ${PROXY_CONTAINER} mysql -uradmin -pradmin -h127.0.0.1 -P6032 -NB" + +# Find the first writer/reader hostgroup pair +PAIR=$(${MYSQL_CMD} -e "SELECT writer_hostgroup, reader_hostgroup FROM mysql_replication_hostgroups WHERE writer_hostgroup != 0 LIMIT 1;" 2>/dev/null) +if [ -z "${PAIR}" ]; then + echo ">>> No non-zero replication hostgroups found. Skipping." 
+ exit 0 +fi + +WRITER_HG=$(echo "${PAIR}" | awk '{print $1}') +READER_HG=$(echo "${PAIR}" | awk '{print $2}') + +echo ">>> Remapping hostgroup pair ${WRITER_HG}/${READER_HG} -> 0/1" + +# Move servers: update hostgroup IDs in-place +${MYSQL_CMD} -e "UPDATE mysql_servers SET hostgroup_id = 0 WHERE hostgroup_id = ${WRITER_HG};" +${MYSQL_CMD} -e "UPDATE mysql_servers SET hostgroup_id = 1 WHERE hostgroup_id = ${READER_HG};" + +# Replace replication hostgroup mapping +${MYSQL_CMD} -e "DELETE FROM mysql_replication_hostgroups WHERE writer_hostgroup = ${WRITER_HG};" +${MYSQL_CMD} -e "INSERT OR REPLACE INTO mysql_replication_hostgroups (writer_hostgroup, reader_hostgroup, comment) VALUES (0, 1, 'basictests');" + +# Update users +${MYSQL_CMD} -e "UPDATE mysql_users SET default_hostgroup = 0 WHERE default_hostgroup = ${WRITER_HG};" + +# Update query rules +${MYSQL_CMD} -e "UPDATE mysql_query_rules SET destination_hostgroup = 0 WHERE destination_hostgroup = ${WRITER_HG};" +${MYSQL_CMD} -e "UPDATE mysql_query_rules SET destination_hostgroup = 1 WHERE destination_hostgroup = ${READER_HG};" + +# Load all to runtime +${MYSQL_CMD} -e "LOAD MYSQL SERVERS TO RUNTIME;" +${MYSQL_CMD} -e "LOAD MYSQL USERS TO RUNTIME;" +${MYSQL_CMD} -e "LOAD MYSQL QUERY RULES TO RUNTIME;" + +echo ">>> Hostgroup remapping done." diff --git a/test/tap/groups/default/pre-proxysql.bash b/test/tap/groups/default/pre-proxysql.bash index d961fad9b6..d992bbe705 100755 --- a/test/tap/groups/default/pre-proxysql.bash +++ b/test/tap/groups/default/pre-proxysql.bash @@ -2,14 +2,18 @@ set -e set -o pipefail # -# change infra config -# inherits env from tester script +# Default pre-proxysql hook. +# +# Cluster startup is now handled by start-proxysql-isolated.bash +# (controlled by SKIP_CLUSTER_START and PROXYSQL_CLUSTER_NODES). +# This hook only needs to wait for the cluster to stabilize. 
# +NUM_NODES=${PROXYSQL_CLUSTER_NODES:-9} +if [[ "${SKIP_CLUSTER_START}" == "1" ]] || [[ "${SKIP_CLUSTER_START}" == "true" ]] || [[ "${NUM_NODES}" == "0" ]]; then + echo "[$(date '+%Y-%m-%d %H:%M:%S')] >>> Pre-proxysql: no cluster, nothing to do." + exit 0 +fi -# Start ProxySQL Cluster if available -/home/rene/proxysql/test/infra/control/cluster_start.bash -/home/rene/proxysql/test/infra/control/cluster_init.bash - -# wait for cluster to stabilize +echo "[$(date '+%Y-%m-%d %H:%M:%S')] >>> Pre-proxysql: waiting for cluster to stabilize..." sleep 10 diff --git a/test/tap/groups/groups.json b/test/tap/groups/groups.json index 528b03cce8..fa931f8270 100644 --- a/test/tap/groups/groups.json +++ b/test/tap/groups/groups.json @@ -17,7 +17,7 @@ "backend_sync_unit-t" : [ "unit-tests-g1" ], "basic-t" : [ "legacy-g1","mysql84-g1","mysql-auto_increment_delay_multiplex=0-g1","mysql-multiplexing=false-g1","mysql-query_digests=0-g1","mysql-query_digests_keep_comment=1-g1","mysql84-gr-g1","mysql90-g1","mysql90-gr-g1","mysql91-g1","mysql91-gr-g1","mysql92-g1","mysql92-gr-g1","mysql93-g1","mysql93-gr-g1" ], "charset_unsigned_int-t" : [ "legacy-g1","mysql84-g1","mysql-auto_increment_delay_multiplex=0-g1","mysql-multiplexing=false-g1","mysql-query_digests=0-g1","mysql-query_digests_keep_comment=1-g1" ], - "clickhouse_php_conn-t" : [ "legacy-g1","mysql-auto_increment_delay_multiplex=0-g1","mysql-multiplexing=false-g1","mysql-query_digests=0-g1","mysql-query_digests_keep_comment=1-g1" ], + "clickhouse_php_conn-t" : [ "legacy-clickhouse-g1","mysql-auto_increment_delay_multiplex=0-g1","mysql-multiplexing=false-g1","mysql-query_digests=0-g1","mysql-query_digests_keep_comment=1-g1" ], "connection_pool_unit-t" : [ "unit-tests-g1" ], "deprecate_eof_cache-t" : [ "legacy-g4","mysql84-g4","mysql-auto_increment_delay_multiplex=0-g4","mysql-multiplexing=false-g4","mysql-query_digests=0-g4","mysql-query_digests_keep_comment=1-g4" ], "envvars-t" : [ 
"legacy-g1","mysql84-g1","mysql-auto_increment_delay_multiplex=0-g1","mysql-multiplexing=false-g1","mysql-query_digests=0-g1","mysql-query_digests_keep_comment=1-g1","mysql84-gr-g1","mysql90-g1","mysql90-gr-g1","mysql91-g1","mysql91-gr-g1","mysql92-g1","mysql92-gr-g1","mysql93-g1","mysql93-gr-g1" ], @@ -166,7 +166,7 @@ "reg_test_3606-mysql_warnings-t" : [ "legacy-g1","mysql84-g1","mysql-auto_increment_delay_multiplex=0-g1","mysql-multiplexing=false-g1","mysql-query_digests=0-g1","mysql-query_digests_keep_comment=1-g1" ], "reg_test_3625-sqlite3_session_client_error_limit-t" : [ "legacy-g1","mysql84-g1","mysql-auto_increment_delay_multiplex=0-g1","mysql-multiplexing=false-g1","mysql-query_digests=0-g1","mysql-query_digests_keep_comment=1-g1" ], "reg_test_3690-admin_large_pkts-t" : [ "legacy-g1","mysql84-g1","mysql-auto_increment_delay_multiplex=0-g1","mysql-multiplexing=false-g1","mysql-query_digests=0-g1","mysql-query_digests_keep_comment=1-g1" ], - "reg_test_3765_ssl_pollout-t" : [ "legacy-g2","mysql84-g2","mysql-auto_increment_delay_multiplex=0-g2","mysql-multiplexing=false-g2","mysql-query_digests=0-g2","mysql-query_digests_keep_comment=1-g2" ], + "reg_test_3765_ssl_pollout-t" : [ "legacy-g5","mysql84-g2","mysql-auto_increment_delay_multiplex=0-g2","mysql-multiplexing=false-g2","mysql-query_digests=0-g2","mysql-query_digests_keep_comment=1-g2" ], "reg_test_3838-restapi_eintr-t" : [ "legacy-g2","mysql84-g2","mysql-auto_increment_delay_multiplex=0-g2","mysql-multiplexing=false-g2","mysql-query_digests=0-g2","mysql-query_digests_keep_comment=1-g2" ], "reg_test_3847_admin_lock-t" : [ "legacy-g2","mysql84-g2","mysql-auto_increment_delay_multiplex=0-g2","mysql-multiplexing=false-g2","mysql-query_digests=0-g2","mysql-query_digests_keep_comment=1-g2" ], "reg_test_3992_fast_forward_malformed_packet-mysqlsh-t" : [ 
"legacy-g2","mysql84-g2","mysql-auto_increment_delay_multiplex=0-g2","mysql-multiplexing=false-g2","mysql-query_digests=0-g2","mysql-query_digests_keep_comment=1-g2" ], @@ -225,8 +225,8 @@ "test_binlog_reader_uses_previous_hostgroup-t" : [ "legacy-binlog-g1" ], "test_cacert_load_and_verify_duration-t" : [ "legacy-g3","mysql84-g3","mysql-auto_increment_delay_multiplex=0-g3","mysql-multiplexing=false-g3","mysql-query_digests=0-g3","mysql-query_digests_keep_comment=1-g3" ], "test_change_user-t" : [ "legacy-g3","mysql84-g3","mysql-auto_increment_delay_multiplex=0-g3","mysql-multiplexing=false-g3","mysql-query_digests=0-g3","mysql-query_digests_keep_comment=1-g3" ], - "test_clickhouse_server-t" : [ "legacy-g3","mysql84-g3","mysql-auto_increment_delay_multiplex=0-g3","mysql-multiplexing=false-g3","mysql-query_digests=0-g3","mysql-query_digests_keep_comment=1-g3" ], - "test_clickhouse_server_libmysql-t" : [ "legacy-g3","mysql84-g3","mysql-auto_increment_delay_multiplex=0-g3","mysql-multiplexing=false-g3","mysql-query_digests=0-g3","mysql-query_digests_keep_comment=1-g3" ], + "test_clickhouse_server-t" : [ "legacy-clickhouse-g1","mysql84-g3","mysql-auto_increment_delay_multiplex=0-g3","mysql-multiplexing=false-g3","mysql-query_digests=0-g3","mysql-query_digests_keep_comment=1-g3" ], + "test_clickhouse_server_libmysql-t" : [ "legacy-clickhouse-g1","mysql84-g3","mysql-auto_increment_delay_multiplex=0-g3","mysql-multiplexing=false-g3","mysql-query_digests=0-g3","mysql-query_digests_keep_comment=1-g3" ], "test_client_limit_error-t" : [ "todo-g1" ], "test_cluster1-t" : [ "legacy-g3","mysql-auto_increment_delay_multiplex=0-g3","mysql-multiplexing=false-g3","mysql-query_digests=0-g3","mysql-query_digests_keep_comment=1-g3" ], "test_cluster_sync-t" : [ "legacy-g3","mysql-auto_increment_delay_multiplex=0-g3","mysql-multiplexing=false-g3","mysql-query_digests=0-g3","mysql-query_digests_keep_comment=1-g3" ], @@ -313,8 +313,8 @@ "test_ssl_large_query-2-t" : [ 
"legacy-g4","mysql84-g4","mysql-auto_increment_delay_multiplex=0-g4","mysql-multiplexing=false-g4","mysql-query_digests=0-g4","mysql-query_digests_keep_comment=1-g4" ], "test_stats_proxysql_message_metrics-t" : [ "legacy-g4","mysql84-g4","mysql-auto_increment_delay_multiplex=0-g4","mysql-multiplexing=false-g4","mysql-query_digests=0-g4","mysql-query_digests_keep_comment=1-g4" ], "test_thread_conn_dist-t" : [ "legacy-g4","mysql84-g4","mysql-auto_increment_delay_multiplex=0-g4","mysql-multiplexing=false-g4","mysql-query_digests=0-g4","mysql-query_digests_keep_comment=1-g4" ], - "test_tls_stats-t" : [ "legacy-g4","mysql84-g4","mysql-auto_increment_delay_multiplex=0-g4","mysql-multiplexing=false-g4","mysql-query_digests=0-g4","mysql-query_digests_keep_comment=1-g4" ], "test_throttle_max_bytes_per_second_to_client-t" : [ "legacy-g4","mysql84-g4","mysql-auto_increment_delay_multiplex=0-g4","mysql-multiplexing=false-g4","mysql-query_digests=0-g4","mysql-query_digests_keep_comment=1-g4" ], + "test_tls_stats-t" : [ "legacy-g4","mysql84-g4","mysql-auto_increment_delay_multiplex=0-g4","mysql-multiplexing=false-g4","mysql-query_digests=0-g4","mysql-query_digests_keep_comment=1-g4" ], "test_tsdb_api-t" : [ "ai-g1" ], "test_tsdb_variables-t" : [ "ai-g1" ], "test_unshun_algorithm-t" : [ "legacy-g4","mysql84-g4","mysql-auto_increment_delay_multiplex=0-g4","mysql-multiplexing=false-g4","mysql-query_digests=0-g4","mysql-query_digests_keep_comment=1-g4" ], diff --git a/test/tap/groups/legacy-clickhouse/env.sh b/test/tap/groups/legacy-clickhouse/env.sh new file mode 100644 index 0000000000..f39fbca8a3 --- /dev/null +++ b/test/tap/groups/legacy-clickhouse/env.sh @@ -0,0 +1,3 @@ +# Legacy ClickHouse Test Group Environment + +export REGULAR_INFRA_DATADIR="/var/lib/proxysql" diff --git a/test/tap/groups/legacy-clickhouse/infras.lst b/test/tap/groups/legacy-clickhouse/infras.lst new file mode 100644 index 0000000000..a3cc15d751 --- /dev/null +++ 
b/test/tap/groups/legacy-clickhouse/infras.lst @@ -0,0 +1 @@ +infra-clickhouse23 diff --git a/test/tap/groups/legacy-clickhouse/pre-proxysql.bash b/test/tap/groups/legacy-clickhouse/pre-proxysql.bash new file mode 100755 index 0000000000..136b97795f --- /dev/null +++ b/test/tap/groups/legacy-clickhouse/pre-proxysql.bash @@ -0,0 +1,3 @@ +#!/usr/bin/env bash +# No-op: cluster startup is handled by start-proxysql-isolated.bash +exit 0 diff --git a/test/tap/groups/legacy/infras.lst b/test/tap/groups/legacy/infras.lst index 4ed5a5156e..741c9ca165 100644 --- a/test/tap/groups/legacy/infras.lst +++ b/test/tap/groups/legacy/infras.lst @@ -1,4 +1,3 @@ infra-mysql57 infra-mariadb10 docker-pgsql16-single -infra-clickhouse23 diff --git a/test/tap/groups/legacy/pre-proxysql.bash b/test/tap/groups/legacy/pre-proxysql.bash index 84e5c186c1..136b97795f 100755 --- a/test/tap/groups/legacy/pre-proxysql.bash +++ b/test/tap/groups/legacy/pre-proxysql.bash @@ -1,13 +1,3 @@ #!/usr/bin/env bash -set -e -set -o pipefail -# -# Start ProxySQL Cluster if available -# inherits env from tester script -# - -/home/rene/proxysql/test/infra/control/cluster_start.bash -/home/rene/proxysql/test/infra/control/cluster_init.bash - -# wait for cluster to stabilize -sleep 10 +# No-op: cluster startup is handled by start-proxysql-isolated.bash +exit 0 diff --git a/test/tap/groups/mysql84-gr/pre-proxysql.bash b/test/tap/groups/mysql84-gr/pre-proxysql.bash index c1e7e33c11..9c5593549c 100755 --- a/test/tap/groups/mysql84-gr/pre-proxysql.bash +++ b/test/tap/groups/mysql84-gr/pre-proxysql.bash @@ -1,6 +1,7 @@ #!/usr/bin/env bash set -e set -o pipefail +REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../../.." 
&& pwd)" # # change infra config # inherits env from tester script @@ -10,8 +11,6 @@ set -o pipefail INFRA=infra-$(basename $(dirname "$0") | sed 's/-g[0-9]//' | sed 's/_.*//') -# destroy running infras -/home/rene/proxysql/test/infra/control/infra-default/docker-compose-destroy.bash # cleanup mysql ${SSLOPT} -h127.0.0.1 -P6032 -uadmin -padmin -e " \ DELETE FROM mysql_users; \ @@ -38,11 +37,11 @@ SAVE PGSQL SERVERS TO DISK; \ " 2>&1 | grep -vP 'mysql: .?Warning' # load environment for infra -source /home/rene/proxysql/test/infra/control/${INFRA}/.env +source ${REPO_ROOT}/test/infra/${INFRA}/.env # Start infra -# /home/rene/proxysql/test/infra/control/infra-docker-hoster/docker-compose-init.bash -/home/rene/proxysql/test/infra/control/${INFRA}/docker-compose-init.bash +# ${REPO_ROOT}/test/infra/control/infra-docker-hoster/docker-compose-init.bash +${REPO_ROOT}/test/infra/${INFRA}/docker-compose-init.bash # create default users for MYUSER in root user testuser sbtest1 sbtest2 sbtest3 sbtest4 ssluser ; do diff --git a/test/tap/groups/mysql84/pre-proxysql.bash b/test/tap/groups/mysql84/pre-proxysql.bash index b407e30c7e..380fea5921 100755 --- a/test/tap/groups/mysql84/pre-proxysql.bash +++ b/test/tap/groups/mysql84/pre-proxysql.bash @@ -1,21 +1,20 @@ #!/usr/bin/env bash set -e set -o pipefail +REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../../.." 
&& pwd)" # # change infra config # inherits env from tester script # # Start ProxySQL Cluster if available -/home/rene/proxysql/test/infra/control/cluster_start.bash -/home/rene/proxysql/test/infra/control/cluster_init.bash +${REPO_ROOT}/test/infra/control/cluster_start.bash +${REPO_ROOT}/test/infra/control/cluster_init.bash [[ $(mysql --skip-ssl-verify-server-cert -h 2>&1) =~ skip-ssl-verify-server-cert ]] || export SSLOPT=--skip-ssl-verify-server-cert INFRA=infra-$(basename $(dirname "$0") | sed 's/-g[0-9]//' | sed 's/_.*//') -# destroy running infras -/home/rene/proxysql/test/infra/control/infra-default/docker-compose-destroy.bash # cleanup mysql ${SSLOPT} -h127.0.0.1 -P6032 -uadmin -padmin -e " \ DELETE FROM mysql_users; \ @@ -42,11 +41,11 @@ SAVE PGSQL SERVERS TO DISK; \ " 2>&1 | grep -vP 'mysql: .?Warning' # load environment for infra -source /home/rene/proxysql/test/infra/control/${INFRA}/.env +source ${REPO_ROOT}/test/infra/${INFRA}/.env # Start infra -# /home/rene/proxysql/test/infra/control/infra-docker-hoster/docker-compose-init.bash -/home/rene/proxysql/test/infra/control/${INFRA}/docker-compose-init.bash +# ${REPO_ROOT}/test/infra/control/infra-docker-hoster/docker-compose-init.bash +${REPO_ROOT}/test/infra/${INFRA}/docker-compose-init.bash # create default users for MYUSER in root user testuser sbtest1 sbtest2 sbtest3 sbtest4 ssluser ; do diff --git a/test/tap/groups/mysql90-gr/pre-proxysql.bash b/test/tap/groups/mysql90-gr/pre-proxysql.bash index c1e7e33c11..9c5593549c 100755 --- a/test/tap/groups/mysql90-gr/pre-proxysql.bash +++ b/test/tap/groups/mysql90-gr/pre-proxysql.bash @@ -1,6 +1,7 @@ #!/usr/bin/env bash set -e set -o pipefail +REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../../.." 
&& pwd)" # # change infra config # inherits env from tester script @@ -10,8 +11,6 @@ set -o pipefail INFRA=infra-$(basename $(dirname "$0") | sed 's/-g[0-9]//' | sed 's/_.*//') -# destroy running infras -/home/rene/proxysql/test/infra/control/infra-default/docker-compose-destroy.bash # cleanup mysql ${SSLOPT} -h127.0.0.1 -P6032 -uadmin -padmin -e " \ DELETE FROM mysql_users; \ @@ -38,11 +37,11 @@ SAVE PGSQL SERVERS TO DISK; \ " 2>&1 | grep -vP 'mysql: .?Warning' # load environment for infra -source /home/rene/proxysql/test/infra/control/${INFRA}/.env +source ${REPO_ROOT}/test/infra/${INFRA}/.env # Start infra -# /home/rene/proxysql/test/infra/control/infra-docker-hoster/docker-compose-init.bash -/home/rene/proxysql/test/infra/control/${INFRA}/docker-compose-init.bash +# ${REPO_ROOT}/test/infra/control/infra-docker-hoster/docker-compose-init.bash +${REPO_ROOT}/test/infra/${INFRA}/docker-compose-init.bash # create default users for MYUSER in root user testuser sbtest1 sbtest2 sbtest3 sbtest4 ssluser ; do diff --git a/test/tap/groups/mysql90/pre-proxysql.bash b/test/tap/groups/mysql90/pre-proxysql.bash index c1e7e33c11..9c5593549c 100755 --- a/test/tap/groups/mysql90/pre-proxysql.bash +++ b/test/tap/groups/mysql90/pre-proxysql.bash @@ -1,6 +1,7 @@ #!/usr/bin/env bash set -e set -o pipefail +REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../../.." 
&& pwd)" # # change infra config # inherits env from tester script @@ -10,8 +11,6 @@ set -o pipefail INFRA=infra-$(basename $(dirname "$0") | sed 's/-g[0-9]//' | sed 's/_.*//') -# destroy running infras -/home/rene/proxysql/test/infra/control/infra-default/docker-compose-destroy.bash # cleanup mysql ${SSLOPT} -h127.0.0.1 -P6032 -uadmin -padmin -e " \ DELETE FROM mysql_users; \ @@ -38,11 +37,11 @@ SAVE PGSQL SERVERS TO DISK; \ " 2>&1 | grep -vP 'mysql: .?Warning' # load environment for infra -source /home/rene/proxysql/test/infra/control/${INFRA}/.env +source ${REPO_ROOT}/test/infra/${INFRA}/.env # Start infra -# /home/rene/proxysql/test/infra/control/infra-docker-hoster/docker-compose-init.bash -/home/rene/proxysql/test/infra/control/${INFRA}/docker-compose-init.bash +# ${REPO_ROOT}/test/infra/control/infra-docker-hoster/docker-compose-init.bash +${REPO_ROOT}/test/infra/${INFRA}/docker-compose-init.bash # create default users for MYUSER in root user testuser sbtest1 sbtest2 sbtest3 sbtest4 ssluser ; do diff --git a/test/tap/groups/mysql91-gr/pre-proxysql.bash b/test/tap/groups/mysql91-gr/pre-proxysql.bash index c1e7e33c11..9c5593549c 100755 --- a/test/tap/groups/mysql91-gr/pre-proxysql.bash +++ b/test/tap/groups/mysql91-gr/pre-proxysql.bash @@ -1,6 +1,7 @@ #!/usr/bin/env bash set -e set -o pipefail +REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../../.." 
&& pwd)" # # change infra config # inherits env from tester script @@ -10,8 +11,6 @@ set -o pipefail INFRA=infra-$(basename $(dirname "$0") | sed 's/-g[0-9]//' | sed 's/_.*//') -# destroy running infras -/home/rene/proxysql/test/infra/control/infra-default/docker-compose-destroy.bash # cleanup mysql ${SSLOPT} -h127.0.0.1 -P6032 -uadmin -padmin -e " \ DELETE FROM mysql_users; \ @@ -38,11 +37,11 @@ SAVE PGSQL SERVERS TO DISK; \ " 2>&1 | grep -vP 'mysql: .?Warning' # load environment for infra -source /home/rene/proxysql/test/infra/control/${INFRA}/.env +source ${REPO_ROOT}/test/infra/${INFRA}/.env # Start infra -# /home/rene/proxysql/test/infra/control/infra-docker-hoster/docker-compose-init.bash -/home/rene/proxysql/test/infra/control/${INFRA}/docker-compose-init.bash +# ${REPO_ROOT}/test/infra/control/infra-docker-hoster/docker-compose-init.bash +${REPO_ROOT}/test/infra/${INFRA}/docker-compose-init.bash # create default users for MYUSER in root user testuser sbtest1 sbtest2 sbtest3 sbtest4 ssluser ; do diff --git a/test/tap/groups/mysql91/pre-proxysql.bash b/test/tap/groups/mysql91/pre-proxysql.bash index c1e7e33c11..9c5593549c 100755 --- a/test/tap/groups/mysql91/pre-proxysql.bash +++ b/test/tap/groups/mysql91/pre-proxysql.bash @@ -1,6 +1,7 @@ #!/usr/bin/env bash set -e set -o pipefail +REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../../.." 
&& pwd)" # # change infra config # inherits env from tester script @@ -10,8 +11,6 @@ set -o pipefail INFRA=infra-$(basename $(dirname "$0") | sed 's/-g[0-9]//' | sed 's/_.*//') -# destroy running infras -/home/rene/proxysql/test/infra/control/infra-default/docker-compose-destroy.bash # cleanup mysql ${SSLOPT} -h127.0.0.1 -P6032 -uadmin -padmin -e " \ DELETE FROM mysql_users; \ @@ -38,11 +37,11 @@ SAVE PGSQL SERVERS TO DISK; \ " 2>&1 | grep -vP 'mysql: .?Warning' # load environment for infra -source /home/rene/proxysql/test/infra/control/${INFRA}/.env +source ${REPO_ROOT}/test/infra/${INFRA}/.env # Start infra -# /home/rene/proxysql/test/infra/control/infra-docker-hoster/docker-compose-init.bash -/home/rene/proxysql/test/infra/control/${INFRA}/docker-compose-init.bash +# ${REPO_ROOT}/test/infra/control/infra-docker-hoster/docker-compose-init.bash +${REPO_ROOT}/test/infra/${INFRA}/docker-compose-init.bash # create default users for MYUSER in root user testuser sbtest1 sbtest2 sbtest3 sbtest4 ssluser ; do diff --git a/test/tap/groups/mysql92-gr/pre-proxysql.bash b/test/tap/groups/mysql92-gr/pre-proxysql.bash index c1e7e33c11..9c5593549c 100755 --- a/test/tap/groups/mysql92-gr/pre-proxysql.bash +++ b/test/tap/groups/mysql92-gr/pre-proxysql.bash @@ -1,6 +1,7 @@ #!/usr/bin/env bash set -e set -o pipefail +REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../../.." 
&& pwd)" # # change infra config # inherits env from tester script @@ -10,8 +11,6 @@ set -o pipefail INFRA=infra-$(basename $(dirname "$0") | sed 's/-g[0-9]//' | sed 's/_.*//') -# destroy running infras -/home/rene/proxysql/test/infra/control/infra-default/docker-compose-destroy.bash # cleanup mysql ${SSLOPT} -h127.0.0.1 -P6032 -uadmin -padmin -e " \ DELETE FROM mysql_users; \ @@ -38,11 +37,11 @@ SAVE PGSQL SERVERS TO DISK; \ " 2>&1 | grep -vP 'mysql: .?Warning' # load environment for infra -source /home/rene/proxysql/test/infra/control/${INFRA}/.env +source ${REPO_ROOT}/test/infra/${INFRA}/.env # Start infra -# /home/rene/proxysql/test/infra/control/infra-docker-hoster/docker-compose-init.bash -/home/rene/proxysql/test/infra/control/${INFRA}/docker-compose-init.bash +# ${REPO_ROOT}/test/infra/control/infra-docker-hoster/docker-compose-init.bash +${REPO_ROOT}/test/infra/${INFRA}/docker-compose-init.bash # create default users for MYUSER in root user testuser sbtest1 sbtest2 sbtest3 sbtest4 ssluser ; do diff --git a/test/tap/groups/mysql92/pre-proxysql.bash b/test/tap/groups/mysql92/pre-proxysql.bash index c1e7e33c11..9c5593549c 100755 --- a/test/tap/groups/mysql92/pre-proxysql.bash +++ b/test/tap/groups/mysql92/pre-proxysql.bash @@ -1,6 +1,7 @@ #!/usr/bin/env bash set -e set -o pipefail +REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../../.." 
&& pwd)" # # change infra config # inherits env from tester script @@ -10,8 +11,6 @@ set -o pipefail INFRA=infra-$(basename $(dirname "$0") | sed 's/-g[0-9]//' | sed 's/_.*//') -# destroy running infras -/home/rene/proxysql/test/infra/control/infra-default/docker-compose-destroy.bash # cleanup mysql ${SSLOPT} -h127.0.0.1 -P6032 -uadmin -padmin -e " \ DELETE FROM mysql_users; \ @@ -38,11 +37,11 @@ SAVE PGSQL SERVERS TO DISK; \ " 2>&1 | grep -vP 'mysql: .?Warning' # load environment for infra -source /home/rene/proxysql/test/infra/control/${INFRA}/.env +source ${REPO_ROOT}/test/infra/${INFRA}/.env # Start infra -# /home/rene/proxysql/test/infra/control/infra-docker-hoster/docker-compose-init.bash -/home/rene/proxysql/test/infra/control/${INFRA}/docker-compose-init.bash +# ${REPO_ROOT}/test/infra/control/infra-docker-hoster/docker-compose-init.bash +${REPO_ROOT}/test/infra/${INFRA}/docker-compose-init.bash # create default users for MYUSER in root user testuser sbtest1 sbtest2 sbtest3 sbtest4 ssluser ; do diff --git a/test/tap/groups/mysql93-gr/pre-proxysql.bash b/test/tap/groups/mysql93-gr/pre-proxysql.bash index c1e7e33c11..9c5593549c 100755 --- a/test/tap/groups/mysql93-gr/pre-proxysql.bash +++ b/test/tap/groups/mysql93-gr/pre-proxysql.bash @@ -1,6 +1,7 @@ #!/usr/bin/env bash set -e set -o pipefail +REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../../.." 
&& pwd)" # # change infra config # inherits env from tester script @@ -10,8 +11,6 @@ set -o pipefail INFRA=infra-$(basename $(dirname "$0") | sed 's/-g[0-9]//' | sed 's/_.*//') -# destroy running infras -/home/rene/proxysql/test/infra/control/infra-default/docker-compose-destroy.bash # cleanup mysql ${SSLOPT} -h127.0.0.1 -P6032 -uadmin -padmin -e " \ DELETE FROM mysql_users; \ @@ -38,11 +37,11 @@ SAVE PGSQL SERVERS TO DISK; \ " 2>&1 | grep -vP 'mysql: .?Warning' # load environment for infra -source /home/rene/proxysql/test/infra/control/${INFRA}/.env +source ${REPO_ROOT}/test/infra/${INFRA}/.env # Start infra -# /home/rene/proxysql/test/infra/control/infra-docker-hoster/docker-compose-init.bash -/home/rene/proxysql/test/infra/control/${INFRA}/docker-compose-init.bash +# ${REPO_ROOT}/test/infra/control/infra-docker-hoster/docker-compose-init.bash +${REPO_ROOT}/test/infra/${INFRA}/docker-compose-init.bash # create default users for MYUSER in root user testuser sbtest1 sbtest2 sbtest3 sbtest4 ssluser ; do diff --git a/test/tap/groups/mysql93/pre-proxysql.bash b/test/tap/groups/mysql93/pre-proxysql.bash index c1e7e33c11..9c5593549c 100755 --- a/test/tap/groups/mysql93/pre-proxysql.bash +++ b/test/tap/groups/mysql93/pre-proxysql.bash @@ -1,6 +1,7 @@ #!/usr/bin/env bash set -e set -o pipefail +REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../../.." 
&& pwd)" # # change infra config # inherits env from tester script @@ -10,8 +11,6 @@ set -o pipefail INFRA=infra-$(basename $(dirname "$0") | sed 's/-g[0-9]//' | sed 's/_.*//') -# destroy running infras -/home/rene/proxysql/test/infra/control/infra-default/docker-compose-destroy.bash # cleanup mysql ${SSLOPT} -h127.0.0.1 -P6032 -uadmin -padmin -e " \ DELETE FROM mysql_users; \ @@ -38,11 +37,11 @@ SAVE PGSQL SERVERS TO DISK; \ " 2>&1 | grep -vP 'mysql: .?Warning' # load environment for infra -source /home/rene/proxysql/test/infra/control/${INFRA}/.env +source ${REPO_ROOT}/test/infra/${INFRA}/.env # Start infra -# /home/rene/proxysql/test/infra/control/infra-docker-hoster/docker-compose-init.bash -/home/rene/proxysql/test/infra/control/${INFRA}/docker-compose-init.bash +# ${REPO_ROOT}/test/infra/control/infra-docker-hoster/docker-compose-init.bash +${REPO_ROOT}/test/infra/${INFRA}/docker-compose-init.bash # create default users for MYUSER in root user testuser sbtest1 sbtest2 sbtest3 sbtest4 ssluser ; do diff --git a/test/tap/tap/noise_utils.cpp b/test/tap/tap/noise_utils.cpp index 0a84bcdc79..0f9103bfd7 100644 --- a/test/tap/tap/noise_utils.cpp +++ b/test/tap/tap/noise_utils.cpp @@ -296,14 +296,23 @@ void internal_noise_mysql_traffic_v2(const CommandLine& cl, const NoiseOptions& const char* my_user = cl.root_username[0] ? cl.root_username : "root"; const char* my_pass = cl.root_password[0] ? 
cl.root_password : ""; + noise_log("[NOISE] MySQL Traffic v2: Connecting with host=" + std::string(cl.host) + + " port=" + std::to_string(cl.port) + + " user=" + std::string(my_user) + "\n"); + // --- Phase A & B: Ensure tables exist and are populated --- MYSQL* setup_conn = mysql_init(NULL); if (!mysql_real_connect(setup_conn, cl.host, my_user, my_pass, NULL, cl.port, NULL, 0)) { - noise_log("[NOISE] MySQL Traffic v2: Setup connection failure: " + std::string(mysql_error(setup_conn)) + "\n"); + noise_log("[NOISE] MySQL Traffic v2: Setup connection FAILED:" + " host=" + std::string(cl.host) + + " port=" + std::to_string(cl.port) + + " user=" + std::string(my_user) + + " error=" + std::string(mysql_error(setup_conn)) + "\n"); mysql_close(setup_conn); register_noise_failure("MySQL Traffic v2 (Setup)"); return; } + noise_log("[NOISE] MySQL Traffic v2: Setup connection OK\n"); mysql_query(setup_conn, "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'test'"); MYSQL_RES* db_res = mysql_store_result(setup_conn); @@ -644,7 +653,11 @@ void internal_noise_mysql_traffic(const CommandLine& cl, const NoiseOptions& opt conn = mysql_init(NULL); if (!conn || !mysql_real_connect(conn, cl.host, cl.username, cl.password, NULL, cl.port, NULL, 0)) { retries++; - noise_log("[NOISE] MySQL Traffic: Connection failure (retry " + std::to_string(retries) + "/" + std::to_string(max_retries) + ")\n"); + noise_log("[NOISE] MySQL Traffic: Connection FAILED (retry " + std::to_string(retries) + "/" + std::to_string(max_retries) + "):" + " host=" + std::string(cl.host) + + " port=" + std::to_string(cl.port) + + " user=" + std::string(cl.username) + + " error=" + std::string(conn ? 
mysql_error(conn) : "mysql_init failed") + "\n"); if (retries >= max_retries) { register_noise_failure("MySQL Traffic"); break; @@ -686,7 +699,11 @@ void internal_noise_pgsql_traffic(const CommandLine& cl, const NoiseOptions& opt conn = PQconnectdb(conninfo.c_str()); if (PQstatus(conn) != CONNECTION_OK) { retries++; - noise_log("[NOISE] PgSQL Traffic: Connection failure (retry " + std::to_string(retries) + "/" + std::to_string(max_retries) + ")\n"); + noise_log("[NOISE] PgSQL Traffic: Connection FAILED (retry " + std::to_string(retries) + "/" + std::to_string(max_retries) + "):" + " host=" + std::string(cl.host) + + " port=" + std::to_string(cl.pgsql_port) + + " user=" + std::string(cl.pgsql_username) + + " error=" + std::string(PQerrorMessage(conn)) + "\n"); if (retries >= max_retries) { register_noise_failure("PgSQL Traffic"); break; @@ -723,14 +740,24 @@ void internal_noise_pgsql_traffic_v2(const CommandLine& cl, const NoiseOptions& " user=" + std::string(pg_user) + " password=" + std::string(pg_pass) + " dbname=postgres connect_timeout=5"; + noise_log("[NOISE] PgSQL Traffic v2: Connecting with host=" + std::string(cl.host) + + " port=" + std::to_string(cl.pgsql_port) + + " user=" + std::string(pg_user) + + " dbname=postgres\n"); + // --- Phase A & B: Ensure tables exist and are populated --- PGconn* setup_conn = PQconnectdb(conninfo.c_str()); if (PQstatus(setup_conn) != CONNECTION_OK) { - noise_log("[NOISE] PgSQL Traffic v2: Setup connection failure: " + std::string(PQerrorMessage(setup_conn)) + "\n"); + noise_log("[NOISE] PgSQL Traffic v2: Setup connection FAILED:" + " host=" + std::string(cl.host) + + " port=" + std::to_string(cl.pgsql_port) + + " user=" + std::string(pg_user) + + " error=" + std::string(PQerrorMessage(setup_conn)) + "\n"); PQfinish(setup_conn); register_noise_failure("PgSQL Traffic v2 (Setup)"); return; } + noise_log("[NOISE] PgSQL Traffic v2: Setup connection OK\n"); pg_noise_query(setup_conn, "SET search_path TO public"); diff --git 
a/test/tap/tests/reg_test__ssl_client_busy_wait-t.cpp b/test/tap/tests/reg_test__ssl_client_busy_wait-t.cpp index e012985345..2b904d532c 100644 --- a/test/tap/tests/reg_test__ssl_client_busy_wait-t.cpp +++ b/test/tap/tests/reg_test__ssl_client_busy_wait-t.cpp @@ -266,19 +266,27 @@ int main(int argc, char** argv) { return -1; } + diag("Test: SSL client busy/infinite loop regression"); + diag(" Verifies that ProxySQL CPU usage stays low when SSL clients"); + diag(" disconnect unexpectedly (both busy-loop and infinite-loop scenarios)."); + diag(" Config: BUSY_THREADS=%d, BUSY_WAIT_SECS=%d, MAX_IDLE_CPU=%d%%, MAX_BUSY_CPU=%d%%", + BUSY_THREADS, BUSY_WAIT_SECS, MAX_IDLE_CPU, MAX_BUSY_CPU); + plan(4); + diag("Step 1: Connect to ProxySQL admin"); MYSQL* admin = mysql_init(NULL); if (!mysql_real_connect(admin, cl.host, cl.admin_username, cl.admin_password, NULL, cl.admin_port, NULL, 0)) { fprintf(stderr, "File %s, line %d, Error: %s\n", __FILE__, __LINE__, mysql_error(admin)); return EXIT_FAILURE; } + diag("Step 2: Disable cluster scheduler to reduce CPU noise during measurement"); pair> p_err_nodes_conns { disable_core_nodes_scheduler(cl, admin) }; if (p_err_nodes_conns.first) { return EXIT_FAILURE; } vector& nodes_conns { p_err_nodes_conns.second }; - diag("Checking ProxySQL idle CPU usage"); + diag("Step 3: Measure idle CPU baseline (sampling for %d seconds)", SAMPLE_INTV_SECS); double idle_cpu = 0; int ret_i_cpu = get_proxysql_cpu_usage(cl, idle_cpu, SAMPLE_INTV_SECS); if (ret_i_cpu) { @@ -293,10 +301,11 @@ int main(int argc, char** argv) { MAX_IDLE_CPU, idle_cpu ); - diag("Trigger BUSY_LOOP regression BUSY_THREADS=%d BUSY_WAIT_SECS=%d", BUSY_THREADS, BUSY_WAIT_SECS); + diag("Step 4: Trigger BUSY_LOOP regression (SSL clients disconnect during query)"); + diag(" Creating %d threads that connect with SSL and close socket mid-query", BUSY_THREADS); create_busy_loops(argc, argv, cl, BUSY_LOOP_T::BUSY_LOOP); - diag("Checking ProxySQL final CPU usage for 'BUSY_LOOP'"); + 
diag("Step 5: Measure CPU after BUSY_LOOP (should stay below %d%%)", MAX_BUSY_CPU); double final_cpu_usage = 0; int ret_f_cpu = get_proxysql_cpu_usage(cl, final_cpu_usage, SAMPLE_INTV_SECS); @@ -308,10 +317,10 @@ int main(int argc, char** argv) { // Extra wait to ensure cleanup of faulty client conns. See 'BUSY_WAIT_SECS' NOTE in def. int BUSY_WAIT_CLEANUP = BUSY_WAIT_SECS < 5 ? 5 : BUSY_WAIT_SECS / 2; - diag("Sleeping for %d secs for BUSY_LOOP client cleanup", BUSY_WAIT_CLEANUP); + diag("Step 6: Wait %d seconds for BUSY_LOOP client cleanup", BUSY_WAIT_CLEANUP); sleep(BUSY_WAIT_CLEANUP); - diag("Checking ProxySQL idle CPU usage"); + diag("Step 7: Verify CPU returned to idle after BUSY_LOOP cleanup"); ret_i_cpu = get_proxysql_cpu_usage(cl, idle_cpu, SAMPLE_INTV_SECS); if (ret_i_cpu) { diag("Getting initial CPU usage failed with error - %d", ret_i_cpu); @@ -325,10 +334,11 @@ int main(int argc, char** argv) { MAX_IDLE_CPU, idle_cpu ); - diag("Trigger INF_LOOP regression BUSY_THREADS=%d BUSY_WAIT_SECS=%d", BUSY_THREADS, BUSY_WAIT_SECS); + diag("Step 8: Trigger INF_LOOP regression (SSL clients disconnect after data written)"); + diag(" Creating %d threads that connect with SSL and close socket after response", BUSY_THREADS); create_busy_loops(argc, argv, cl, BUSY_LOOP_T::INF_LOOP); - diag("Checking ProxySQL final CPU usage for 'BUSY_LOOP'"); + diag("Step 9: Measure CPU after INF_LOOP (should stay below %d%%)", MAX_BUSY_CPU); final_cpu_usage = 0; ret_f_cpu = get_proxysql_cpu_usage(cl, final_cpu_usage, SAMPLE_INTV_SECS); @@ -338,6 +348,7 @@ int main(int argc, char** argv) { MAX_BUSY_CPU, final_cpu_usage ); + diag("Step 10: Recover cluster scheduler"); // Recover cluster scheduler for (MYSQL* myconn : nodes_conns) { MYSQL_QUERY_T(myconn, "LOAD SCHEDULER FROM DISK"); diff --git a/test/tap/tests/unit/Makefile b/test/tap/tests/unit/Makefile index 04a5e77778..022c9c7be1 100644 --- a/test/tap/tests/unit/Makefile +++ b/test/tap/tests/unit/Makefile @@ -101,8 +101,11 @@ 
STATIC_LIBS := $(CITYHASH_LDIR)/libcityhash.a \ $(LZ4_LDIR)/liblz4.a \ $(ZSTD_LDIR)/libzstd.a -ifeq ($(PROXYSQLCLICKHOUSE),1) - STATIC_LIBS += $(CLICKHOUSE_CPP_LDIR)/libclickhouse-cpp-lib.a +# ClickHouse library: auto-detect if built (always built in current builds) +# Append LZ4/ZSTD again after ClickHouse to resolve its dependencies +ifneq ($(wildcard $(CLICKHOUSE_CPP_LDIR)/libclickhouse-cpp-lib.a),) + STATIC_LIBS += $(CLICKHOUSE_CPP_LDIR)/libclickhouse-cpp-lib.a \ + $(LZ4_LDIR)/liblz4.a $(ZSTD_LDIR)/libzstd.a endif ifeq ($(UNAME_S),Linux) @@ -170,24 +173,33 @@ endif # Compiler flags # =========================================================================== +# Auto-detect feature flags from libproxysql.a symbols. +# This ensures unit tests compile with the same flags the library was built with, +# regardless of whether the environment variables are set during `make`. PSQLCH := -ifeq ($(PROXYSQLCLICKHOUSE),1) +ifneq ($(shell nm $(LIBPROXYSQLAR) 2>/dev/null | grep -c ClickHouse_Server),0) + PROXYSQLCLICKHOUSE := 1 PSQLCH := -DPROXYSQLCLICKHOUSE endif PSQLGA := -ifeq ($(PROXYSQLGENAI),1) +ifneq ($(shell nm $(LIBPROXYSQLAR) 2>/dev/null | grep -c GenAI_Thread),0) + PROXYSQLGENAI := 1 PSQLGA := -DPROXYSQLGENAI endif -PSQL31 := -ifeq ($(PROXYSQL31),1) - PSQL31 := -DPROXYSQL31 -endif PSQLFFTO := -ifeq ($(PROXYSQLFFTO),1) +ifneq ($(shell nm $(LIBPROXYSQLAR) 2>/dev/null | grep -c MySQLFFTO),0) + PROXYSQLFFTO := 1 PSQLFFTO := -DPROXYSQLFFTO endif +PSQL31 := +# NOTE(review): this probes the same MySQLFFTO symbol as the PSQLFFTO check above -- confirm PROXYSQL31 is really implied by FFTO builds and this is not a copy-paste of the FFTO detection +ifneq ($(shell nm $(LIBPROXYSQLAR) 2>/dev/null | grep -c MySQLFFTO),0) + PROXYSQL31 := 1 + PSQL31 := -DPROXYSQL31 +endif PSQLTSDB := -ifeq ($(PROXYSQLTSDB),1) +ifneq ($(shell nm $(LIBPROXYSQLAR) 2>/dev/null | grep -c ProxySQL_TSDB),0) + PROXYSQLTSDB := 1 PSQLTSDB := -DPROXYSQLTSDB endif