diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 00000000000..612550b68fe
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,185 @@
+name: CI
+
+on:
+  push:
+    branches: [master, edge]
+  pull_request:
+
+env:
+  GO_VERSION: "1.24"
+
+jobs:
+  check-go-generate:
+    runs-on: ubuntu-latest
+    container: owncloudci/golang:1.24
+    steps:
+      - uses: actions/checkout@v4
+      - run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
+      - run: make go-generate && git diff --exit-code
+
+  unit-tests:
+    runs-on: ubuntu-latest
+    container: owncloudci/golang:1.24
+    steps:
+      - uses: actions/checkout@v4
+      - run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
+      - run: apk update && apk add --no-cache inotify-tools
+      - run: make test
+      - uses: actions/upload-artifact@v4
+        if: always()
+        with:
+          name: coverage
+          path: coverage.out
+
+  build:
+    needs: [unit-tests]
+    runs-on: ubuntu-latest
+    container: owncloudci/golang:1.24
+    steps:
+      - uses: actions/checkout@v4
+      - run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
+      - run: make dist
+
+  integration-tests:
+    needs: [unit-tests]
+    runs-on: ubuntu-latest
+    container: owncloudci/golang:1.24
+    services:
+      redis:
+        image: redis:6-alpine
+        env:
+          REDIS_DATABASES: 1
+    steps:
+      - uses: actions/checkout@v4
+      - run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
+      - run: make test-integration
+        env:
+          REDIS_ADDRESS: redis:6379
+
+  litmus:
+    needs: [unit-tests]
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        endpoint: [old-webdav, new-webdav, spaces-dav]
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version: ${{ env.GO_VERSION }}
+      - run: python3 tests/acceptance/run-litmus.py --endpoint ${{ matrix.endpoint }}
+
+  cs3api-validator:
+    needs: [unit-tests]
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        storage: [ocis, s3ng]
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version: ${{ env.GO_VERSION }}
+      - run: python3 tests/acceptance/run-cs3api.py --storage ${{ matrix.storage }}
+
+  acceptance-tests-ocis:
+    needs: [unit-tests]
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        part: [1, 2, 3, 4]
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version: ${{ env.GO_VERSION }}
+      - uses: shivammathur/setup-php@v2
+        with:
+          php-version: "8.4"
+      - run: python3 tests/acceptance/run-acceptance.py --storage ocis --total-parts 4 --run-part ${{ matrix.part }}
+      - uses: actions/upload-artifact@v4
+        if: failure()
+        with:
+          name: acceptance-ocis-part-${{ matrix.part }}
+          path: tmp/testrunner/tests/acceptance/output/
+
+  acceptance-tests-s3ng:
+    needs: [unit-tests]
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        part: [1, 2, 3, 4, 5, 6]
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version: ${{ env.GO_VERSION }}
+      - uses: shivammathur/setup-php@v2
+        with:
+          php-version: "8.4"
+      - run: python3 tests/acceptance/run-acceptance.py --storage s3ng --total-parts 6 --run-part ${{ matrix.part }}
+      - uses: actions/upload-artifact@v4
+        if: failure()
+        with:
+          name: acceptance-s3ng-part-${{ matrix.part }}
+          path: tmp/testrunner/tests/acceptance/output/
+
+  acceptance-tests-posixfs:
+    needs: [unit-tests]
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        part: [1, 2, 3, 4]
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version: ${{ env.GO_VERSION }}
+      - uses: shivammathur/setup-php@v2
+        with:
+          php-version: "8.4"
+      - run: sudo apt-get update && sudo apt-get install -y inotify-tools
+      - run: python3 tests/acceptance/run-acceptance.py --storage posixfs --total-parts 4 --run-part ${{ matrix.part }}
+      - uses: actions/upload-artifact@v4
+        if: failure()
+        with:
+          name: acceptance-posixfs-part-${{ matrix.part }}
+          path: tmp/testrunner/tests/acceptance/output/
+
+  ci-ok:
+    if: always()
+    needs:
+      - check-go-generate
+      - unit-tests
+      - build
+      - integration-tests
+      - litmus
+      - cs3api-validator
+      - acceptance-tests-ocis
+      - acceptance-tests-s3ng
+      - acceptance-tests-posixfs
+    runs-on: ubuntu-latest
+    steps:
+      - run: |
+          results=(
+            "${{ needs.check-go-generate.result }}"
+            "${{ needs.unit-tests.result }}"
+            "${{ needs.build.result }}"
+            "${{ needs.integration-tests.result }}"
+            "${{ needs.litmus.result }}"
+            "${{ needs.cs3api-validator.result }}"
+            "${{ needs.acceptance-tests-ocis.result }}"
+            "${{ needs.acceptance-tests-s3ng.result }}"
+            "${{ needs.acceptance-tests-posixfs.result }}"
+          )
+          for r in "${results[@]}"; do
+            if [[ "$r" != "success" && "$r" != "skipped" ]]; then
+              echo "FAILED: $r"
+              exit 1
+            fi
+          done
diff --git a/tests/acceptance/run-acceptance.py b/tests/acceptance/run-acceptance.py
new file mode 100755
index 00000000000..40287c8650e
--- /dev/null
+++ b/tests/acceptance/run-acceptance.py
@@ -0,0 +1,363 @@
+#!/usr/bin/env python3
+"""
+Run PHP Behat acceptance tests against reva with different storage backends.
+
+Replaces the ocis-integration-tests, s3ng-integration-tests, and
+posixfs-integration-tests Drone pipelines.
+
+Usage:
+    python3 tests/acceptance/run-acceptance.py --storage ocis --total-parts 4 --run-part 1
+    python3 tests/acceptance/run-acceptance.py --storage s3ng --total-parts 6 --run-part 3
+    python3 tests/acceptance/run-acceptance.py --storage posixfs --total-parts 4 --run-part 2
+"""
+
+import argparse
+import os
+import signal
+import socket
+import subprocess
+import sys
+import time
+from pathlib import Path
+
+REPO_ROOT = Path(__file__).resolve().parents[2]
+DRONE_CONFIG_DIR = REPO_ROOT / "tests" / "oc-integration-tests" / "drone"
+DRONE_ENV_FILE = REPO_ROOT / ".drone.env"
+REVAD_BIN = REPO_ROOT / "cmd" / "revad" / "revad"
+
+FRONTEND_PORT = 20080
+GATEWAY_PORT = 19000
+LDAP_PORT = 636
+REDIS_PORT = 6379
+CEPH_PORT = 8080
+
+# Container images
+LDAP_IMAGE = "osixia/openldap:1.3.0"
+REDIS_IMAGE = "redis:6-alpine"
+CEPH_IMAGE = "ceph/daemon"
+PHP_IMAGE = "owncloudci/php:8.4"
+
+# Revad configs per storage backend
+# Matches the exact configs from .drone.star for each storage type
+REVAD_CONFIGS = {
+    "ocis": [
+        "frontend.toml",
+        "gateway.toml",
+        "shares.toml",
+        "storage-shares.toml",
+        "machine-auth.toml",
+        "storage-users-ocis.toml",
+        "storage-publiclink.toml",
+        "permissions-ocis-ci.toml",
+        "ldap-users.toml",
+    ],
+    "s3ng": [
+        "frontend.toml",
+        "gateway.toml",
+        "shares.toml",
+        "storage-users-s3ng.toml",
+        "storage-publiclink.toml",
+        "storage-shares.toml",
+        "ldap-users.toml",
+        "permissions-ocis-ci.toml",
+        "machine-auth.toml",
+    ],
+    "posixfs": [
+        "frontend.toml",
+        "gateway.toml",
+        "shares.toml",
+        "storage-shares.toml",
+        "machine-auth.toml",
+        "storage-users-posixfs.toml",
+        "storage-publiclink.toml",
+        "permissions-ocis-ci.toml",
+        "ldap-users.toml",
+    ],
+}
+
+# Storage driver names as expected by the test framework
+STORAGE_DRIVER_MAP = {
+    "ocis": "OCIS",
+    "s3ng": "S3NG",
+    "posixfs": "POSIX",
+}
+
+# Expected failures files per storage backend
+EXPECTED_FAILURES_MAP = {
+    "ocis": "tests/acceptance/expected-failures-on-OCIS-storage.md",
+    "s3ng": "tests/acceptance/expected-failures-on-S3NG-storage.md",
+    "posixfs": "tests/acceptance/expected-failures-on-POSIX-storage.md",
+}
+
+# DELETE_USER_DATA_CMD per storage backend (paths rewritten at runtime)
+DELETE_CMD_MAP = {
+    "ocis": "rm -rf {data}/spaces/* {data}/blobs/* {data}/indexes/by-type/*",
+    "s3ng": "rm -rf {data}/spaces/* {data}/blobs/* {data}/indexes/by-type/*",
+    "posixfs": "bash -cx 'rm -rf {data}/users/* {data}/indexes/by-type/*'",
+}
+
+# Services required per storage backend
+SERVICES_NEEDED = {
+    "ocis": ["ldap"],
+    "s3ng": ["ldap", "ceph"],
+    "posixfs": ["redis", "ldap"],
+}
+
+
+def wait_for_port(port, timeout=60, label="service"):
+    start = time.time()
+    while time.time() - start < timeout:
+        try:
+            with socket.create_connection(("localhost", port), timeout=1):
+                return
+        except OSError:
+            time.sleep(0.5)
+    sys.exit(f"Timeout waiting for {label} on port {port} after {timeout}s")
+
+
+
+def parse_drone_env():
+    """Parse .drone.env file into a dict."""
+    env = {}
+    if DRONE_ENV_FILE.exists():
+        for line in DRONE_ENV_FILE.read_text().splitlines():
+            line = line.strip()
+            if line and not line.startswith("#") and "=" in line:
+                key, value = line.split("=", 1)
+                env[key.strip()] = value.strip()
+    return env
+
+
+def prepare_configs(storage):
+    """Copy drone configs to a temp dir, rewriting hostnames and paths for GHA."""
+    import tempfile
+
+    config_dir = Path(tempfile.mkdtemp(prefix="reva-ci-config-"))
+    data_dir = REPO_ROOT / "tmp" / "reva" / "data"
+    data_dir.mkdir(parents=True, exist_ok=True)
+
+    # Hostname/path replacements: drone container DNS -> localhost
+    replacements = {
+        "/drone/src": str(REPO_ROOT),
+        "revad-services": "localhost",
+        "ldaps://ldap:": "ldaps://localhost:",
+        "redis://redis:": "redis://localhost:",
+        "http://ceph:": "http://localhost:",
+    }
+
+    for name in os.listdir(DRONE_CONFIG_DIR):
+        src = DRONE_CONFIG_DIR / name
+        dst = config_dir / name
+        if src.is_file():
+            content = src.read_text()
+            for old, new in replacements.items():
+                content = content.replace(old, new)
+            dst.write_text(content)
+
+    return config_dir
+
+
+def start_ldap():
+    """Start LDAP service container."""
+    print("Starting LDAP...")
+    subprocess.run(
+        ["docker", "run", "-d", "--name", "ldap", "--network", "host",
+         "-e", "LDAP_DOMAIN=owncloud.com",
+         "-e", "LDAP_ORGANISATION=ownCloud",
+         "-e", "LDAP_ADMIN_PASSWORD=admin",
+         "-e", "LDAP_TLS_VERIFY_CLIENT=never",
+         "-e", "HOSTNAME=ldap",
+         LDAP_IMAGE],
+        check=True,
+    )
+    wait_for_port(LDAP_PORT, timeout=60, label="LDAP")
+
+
+def start_redis():
+    """Start Redis service container."""
+    print("Starting Redis...")
+    subprocess.run(
+        ["docker", "run", "-d", "--name", "redis", "--network", "host",
+         "-e", "REDIS_DATABASES=1",
+         REDIS_IMAGE],
+        check=True,
+    )
+    wait_for_port(REDIS_PORT, timeout=30, label="Redis")
+
+
+def start_ceph():
+    print("Starting Ceph...")
+    subprocess.run(
+        ["docker", "run", "-d", "--name", "ceph",
+         "-p", f"{CEPH_PORT}:{CEPH_PORT}",
+         "-e", "CEPH_DAEMON=demo",
+         "-e", "NETWORK_AUTO_DETECT=1",
+         "-e", "MON_IP=0.0.0.0",
+         "-e", "CEPH_PUBLIC_NETWORK=0.0.0.0/0",
+         "-e", f"RGW_CIVETWEB_PORT={CEPH_PORT}",
+         "-e", "RGW_NAME=ceph",
+         "-e", "CEPH_DEMO_UID=test-user",
+         "-e", "CEPH_DEMO_ACCESS_KEY=test",
+         "-e", "CEPH_DEMO_SECRET_KEY=test",
+         "-e", "CEPH_DEMO_BUCKET=test",
+         CEPH_IMAGE],
+        check=True,
+    )
+    wait_for_port(CEPH_PORT, timeout=180, label="Ceph RGW")
+    # Wait for demo bucket creation after RGW starts accepting connections
+    time.sleep(15)
+
+
+SERVICE_STARTERS = {
+    "ldap": start_ldap,
+    "redis": start_redis,
+    "ceph": start_ceph,
+}
+
+
+def start_revad_services(config_dir, storage):
+    """Start all revad processes, return list of Popen objects."""
+    procs = []
+    for config_name in REVAD_CONFIGS[storage]:
+        config_path = config_dir / config_name
+        p = subprocess.Popen(
+            [str(REVAD_BIN), "-c", str(config_path)],
+            cwd=str(config_dir),
+        )
+        procs.append(p)
+    return procs
+
+
+def clone_test_repos():
+    """Clone the ocis testing repo at the pinned commit (mirrors cloneApiTestReposStep)."""
+    drone_env = parse_drone_env()
+    commit_id = drone_env.get("APITESTS_COMMITID", "")
+    branch = drone_env.get("APITESTS_BRANCH", "master")
+    repo_url = drone_env.get("APITESTS_REPO_GIT_URL", "https://github.com/owncloud/ocis.git")
+
+    testing_dir = REPO_ROOT / "tmp" / "testing"
+    testrunner_dir = REPO_ROOT / "tmp" / "testrunner"
+
+    if not testing_dir.exists():
+        print("Cloning testing repo...")
+        subprocess.run(
+            ["git", "clone", "-b", "master", "--depth=1",
+             "https://github.com/owncloud/testing.git", str(testing_dir)],
+            check=True,
+        )
+
+    if not testrunner_dir.exists():
+        print(f"Cloning test runner (branch={branch}, commit={commit_id})...")
+        subprocess.run(
+            ["git", "clone", "-b", branch, "--single-branch", "--no-tags",
+             repo_url, str(testrunner_dir)],
+            check=True,
+        )
+        if commit_id:
+            subprocess.run(
+                ["git", "checkout", commit_id],
+                cwd=str(testrunner_dir),
+                check=True,
+            )
+
+
+def run_acceptance_tests(storage, total_parts, run_part):
+    """Run PHP Behat acceptance tests via make test-acceptance-api."""
+    data_dir = str(REPO_ROOT / "tmp" / "reva" / "data")
+    testrunner_dir = REPO_ROOT / "tmp" / "testrunner"
+    expected_failures = str(REPO_ROOT / EXPECTED_FAILURES_MAP[storage])
+
+    delete_cmd = DELETE_CMD_MAP[storage].format(data=data_dir)
+
+    env = {
+        **os.environ,
+        "TEST_SERVER_URL": f"http://localhost:{FRONTEND_PORT}",
+        "OCIS_REVA_DATA_ROOT": f"{data_dir}/",
+        "DELETE_USER_DATA_CMD": delete_cmd,
+        "STORAGE_DRIVER": STORAGE_DRIVER_MAP[storage],
+        "TEST_WITH_LDAP": "true",
+        "REVA_LDAP_HOSTNAME": "localhost",
+        "TEST_REVA": "true",
+        "BEHAT_FILTER_TAGS": "~@skip&&~@skipOnReva&&~@env-config",
+        "DIVIDE_INTO_NUM_PARTS": str(total_parts),
+        "RUN_PART": str(run_part),
+        "EXPECTED_FAILURES_FILE": expected_failures,
+        "REGULAR_USER_PASSWORD": "relativity",
+        "ACCEPTANCE_TEST_TYPE": "core-api",
+    }
+
+    print(f"Running acceptance tests: storage={storage}, part={run_part}/{total_parts}")
+    result = subprocess.run(
+        ["make", "test-acceptance-api"],
+        cwd=str(testrunner_dir),
+        env=env,
+    )
+    return result.returncode
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Run reva acceptance tests")
+    parser.add_argument("--storage", required=True,
+                        choices=["ocis", "s3ng", "posixfs"],
+                        help="Storage backend to test")
+    parser.add_argument("--total-parts", type=int, required=True,
+                        help="Total number of parallel shards")
+    parser.add_argument("--run-part", type=int, required=True,
+                        help="Which shard to run (1-based)")
+    args = parser.parse_args()
+
+    procs = []
+    docker_containers = []
+
+    def cleanup(*_):
+        for p in procs:
+            try:
+                p.terminate()
+            except Exception:
+                pass
+        for p in procs:
+            try:
+                p.wait(timeout=5)
+            except Exception:
+                p.kill()
+        for name in docker_containers:
+            subprocess.run(["docker", "rm", "-f", name], capture_output=True)
+
+    signal.signal(signal.SIGTERM, cleanup)
+    signal.signal(signal.SIGINT, cleanup)
+
+    try:
+        # Build revad
+        print("Building revad...")
+        subprocess.run(["make", "build-ci"], cwd=str(REPO_ROOT), check=True)
+
+        # Start required services
+        for service in SERVICES_NEEDED[args.storage]:
+            SERVICE_STARTERS[service]()
+            docker_containers.append(service)
+
+        # Prepare configs
+        config_dir = prepare_configs(args.storage)
+        print(f"Config dir: {config_dir}")
+
+        # Start revad services
+        print(f"Starting revad services ({args.storage})...")
+        procs = start_revad_services(config_dir, args.storage)
+
+        # Wait for frontend and gateway to be ready
+        wait_for_port(FRONTEND_PORT, timeout=60, label="frontend")
+        wait_for_port(GATEWAY_PORT, timeout=60, label="gateway")
+
+        # Clone test repos
+        clone_test_repos()
+
+        # Run acceptance tests
+        rc = run_acceptance_tests(args.storage, args.total_parts, args.run_part)
+        sys.exit(rc)
+
+    finally:
+        cleanup()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tests/acceptance/run-cs3api.py b/tests/acceptance/run-cs3api.py
new file mode 100755
index 00000000000..21f5a6f06ca
--- /dev/null
+++ b/tests/acceptance/run-cs3api.py
@@ -0,0 +1,214 @@
+#!/usr/bin/env python3
+"""
+Run CS3 API validator against reva.
+
+Replaces the cs3api-validator-ocis and cs3api-validator-S3NG Drone pipelines.
+
+Usage:
+    python3 tests/acceptance/run-cs3api.py --storage ocis
+    python3 tests/acceptance/run-cs3api.py --storage s3ng
+"""
+
+import argparse
+import os
+import signal
+import socket
+import subprocess
+import sys
+import tempfile
+import time
+from pathlib import Path
+
+REPO_ROOT = Path(__file__).resolve().parents[2]
+DRONE_CONFIG_DIR = REPO_ROOT / "tests" / "oc-integration-tests" / "drone"
+REVAD_BIN = REPO_ROOT / "cmd" / "revad" / "revad"
+
+CS3API_VALIDATOR_IMAGE = "owncloud/cs3api-validator:0.2.1"
+
+GATEWAY_PORT = 19000
+FRONTEND_PORT = 20080
+
+# Revad configs per storage backend
+REVAD_CONFIGS = {
+    "ocis": [
+        "frontend.toml",
+        "gateway.toml",
+        "storage-users-ocis.toml",
+        "storage-shares.toml",
+        "storage-publiclink.toml",
+        "shares.toml",
+        "permissions-ocis-ci.toml",
+        "users.toml",
+    ],
+    "s3ng": [
+        "frontend.toml",
+        "gateway.toml",
+        "storage-users-s3ng.toml",
+        "storage-shares.toml",
+        "storage-publiclink.toml",
+        "shares.toml",
+        "permissions-ocis-ci.toml",
+        "users.toml",
+    ],
+}
+
+# Ceph service config (for S3NG)
+CEPH_IMAGE = "ceph/daemon"
+CEPH_PORT = 8080
+CEPH_ENV = {
+    "CEPH_DAEMON": "demo",
+    "NETWORK_AUTO_DETECT": "1",
+    "MON_IP": "0.0.0.0",
+    "CEPH_PUBLIC_NETWORK": "0.0.0.0/0",
+    "RGW_CIVETWEB_PORT": str(CEPH_PORT),
+    "RGW_NAME": "ceph",
+    "CEPH_DEMO_UID": "test-user",
+    "CEPH_DEMO_ACCESS_KEY": "test",
+    "CEPH_DEMO_SECRET_KEY": "test",
+    "CEPH_DEMO_BUCKET": "test",
+}
+
+
+def wait_for_port(port, timeout=60, label="service"):
+    start = time.time()
+    while time.time() - start < timeout:
+        try:
+            with socket.create_connection(("localhost", port), timeout=1):
+                return
+        except OSError:
+            time.sleep(0.5)
+    sys.exit(f"Timeout waiting for {label} on port {port} after {timeout}s")
+
+
+
+def get_docker_bridge_ip():
+    """Get the Docker bridge gateway IP for container-to-host communication."""
+    r = subprocess.run(
+        ["docker", "network", "inspect", "bridge",
+         "--format", "{{range .IPAM.Config}}{{.Gateway}}{{end}}"],
+        capture_output=True, text=True, check=True,
+    )
+    return r.stdout.strip()
+
+
+def prepare_configs(storage):
+    config_dir = Path(tempfile.mkdtemp(prefix="reva-ci-config-"))
+    (REPO_ROOT / "tmp" / "reva" / "data").mkdir(parents=True, exist_ok=True)
+    replacements = {
+        "/drone/src": str(REPO_ROOT),
+        "revad-services": "localhost",
+        "ldaps://ldap:": "ldaps://localhost:",
+        "redis://redis:": "redis://localhost:",
+        "http://ceph:": "http://localhost:",
+    }
+
+    for name in os.listdir(DRONE_CONFIG_DIR):
+        src = DRONE_CONFIG_DIR / name
+        dst = config_dir / name
+        if src.is_file():
+            content = src.read_text()
+            for old, new in replacements.items():
+                content = content.replace(old, new)
+            dst.write_text(content)
+
+    return config_dir
+
+
+def start_ceph():
+    print("Starting Ceph...")
+    env_args = []
+    for k, v in CEPH_ENV.items():
+        env_args.extend(["-e", f"{k}={v}"])
+
+    subprocess.run(
+        ["docker", "run", "-d", "--name", "ceph",
+         "-p", f"{CEPH_PORT}:{CEPH_PORT}"]
+        + env_args + [CEPH_IMAGE],
+        check=True,
+    )
+    wait_for_port(CEPH_PORT, timeout=180, label="Ceph RGW")
+    # Wait for demo bucket creation after RGW starts accepting connections
+    time.sleep(15)
+
+
+def start_revad_services(config_dir, storage):
+    """Start all revad processes, return list of Popen objects."""
+    procs = []
+    for config_name in REVAD_CONFIGS[storage]:
+        config_path = config_dir / config_name
+        p = subprocess.Popen(
+            [str(REVAD_BIN), "-c", str(config_path)],
+            cwd=str(config_dir),
+        )
+        procs.append(p)
+    return procs
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Run CS3 API validator against reva")
+    parser.add_argument("--storage", required=True, choices=["ocis", "s3ng"],
+                        help="Storage backend to test")
+    args = parser.parse_args()
+
+    procs = []
+    docker_containers = []
+
+    def cleanup(*_):
+        for p in procs:
+            try:
+                p.terminate()
+            except Exception:
+                pass
+        for p in procs:
+            try:
+                p.wait(timeout=5)
+            except Exception:
+                p.kill()
+        for name in docker_containers:
+            subprocess.run(["docker", "rm", "-f", name], capture_output=True)
+
+    signal.signal(signal.SIGTERM, cleanup)
+    signal.signal(signal.SIGINT, cleanup)
+
+    try:
+        # Build revad
+        print("Building revad...")
+        subprocess.run(["make", "build-ci"], cwd=str(REPO_ROOT), check=True)
+
+        # Start Ceph if S3NG
+        if args.storage == "s3ng":
+            start_ceph()
+            docker_containers.append("ceph")
+
+        # Prepare configs
+        config_dir = prepare_configs(args.storage)
+        print(f"Config dir: {config_dir}")
+
+        # Start revad services
+        print(f"Starting revad services ({args.storage})...")
+        procs = start_revad_services(config_dir, args.storage)
+
+        # Wait for gateway to be ready
+        wait_for_port(GATEWAY_PORT, timeout=60, label="gateway")
+        wait_for_port(FRONTEND_PORT, timeout=60, label="frontend")
+
+        # Get bridge IP for validator container to reach host revad
+        bridge_ip = get_docker_bridge_ip()
+
+        # Run CS3 API validator
+        print(f"Running CS3 API validator (storage={args.storage}, endpoint={bridge_ip}:{GATEWAY_PORT})...")
+        result = subprocess.run(
+            ["docker", "run", "--rm", "--network", "host",
+             "--entrypoint", "/usr/bin/cs3api-validator",
+             CS3API_VALIDATOR_IMAGE,
+             "/var/lib/cs3api-validator",
+             f"--endpoint={bridge_ip}:{GATEWAY_PORT}"],
+        )
+        sys.exit(result.returncode)
+
+    finally:
+        cleanup()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tests/acceptance/run-litmus.py b/tests/acceptance/run-litmus.py
new file mode 100755
index 00000000000..827b4c3f9cf
--- /dev/null
+++ b/tests/acceptance/run-litmus.py
@@ -0,0 +1,221 @@
+#!/usr/bin/env python3
+"""
+Run litmus WebDAV compliance tests against reva.
+
+Replaces the litmus-ocis-old-webdav, litmus-ocis-new-webdav, and
+litmus-owncloud-spaces-dav Drone pipelines.
+
+Usage:
+    python3 tests/acceptance/run-litmus.py --endpoint old-webdav
+    python3 tests/acceptance/run-litmus.py --endpoint new-webdav
+    python3 tests/acceptance/run-litmus.py --endpoint spaces-dav
+"""
+
+import argparse
+import os
+import signal
+import socket
+import subprocess
+import sys
+import tempfile
+import time
+from pathlib import Path
+
+REPO_ROOT = Path(__file__).resolve().parents[2]
+DRONE_CONFIG_DIR = REPO_ROOT / "tests" / "oc-integration-tests" / "drone"
+REVAD_BIN = REPO_ROOT / "cmd" / "revad" / "revad"
+
+LITMUS_IMAGE = "owncloud/litmus:latest"
+LITMUS_USERNAME = "einstein"
+LITMUS_PASSWORD = "relativity"
+LITMUS_TESTS = "basic http copymove props"
+
+# einstein user UUID (see https://github.com/owncloud/ocis-accounts/blob/8de0530f31ed5ffb0bbb7f7f3471f87f429cb2ea/pkg/service/v0/service.go#L45)
+EINSTEIN_UUID = "4c510ada-c86b-4815-8820-42cdf82c3d51"
+
+# Revad configs for litmus tests (same set used by all three endpoints)
+REVAD_CONFIGS = [
+    "frontend.toml",
+    "gateway.toml",
+    "storage-users-ocis.toml",
+    "storage-shares.toml",
+    "storage-publiclink.toml",
+    "shares.toml",
+    "permissions-ocis-ci.toml",
+    "users.toml",
+]
+
+FRONTEND_PORT = 20080
+GATEWAY_PORT = 19000
+
+
+def wait_for_port(port, timeout=60, label="service"):
+    """Poll until a TCP port is accepting connections."""
+    start = time.time()
+    while time.time() - start < timeout:
+        try:
+            with socket.create_connection(("localhost", port), timeout=1):
+                print(f" {label} ready on port {port}")
+                return
+        except OSError:
+            time.sleep(0.5)
+    print(f"Timeout waiting for {label} on port {port} after {timeout}s", file=sys.stderr)
+    sys.exit(1)
+
+
+def get_docker_bridge_ip():
+    """Get the Docker bridge gateway IP for container-to-host communication."""
+    r = subprocess.run(
+        ["docker", "network", "inspect", "bridge",
+         "--format", "{{range .IPAM.Config}}{{.Gateway}}{{end}}"],
+        capture_output=True, text=True, check=True,
+    )
+    return r.stdout.strip()
+
+
+def prepare_configs():
+    config_dir = Path(tempfile.mkdtemp(prefix="reva-ci-config-"))
+    (REPO_ROOT / "tmp" / "reva" / "data").mkdir(parents=True, exist_ok=True)
+    replacements = {
+        "/drone/src": str(REPO_ROOT),
+        "revad-services": "localhost",
+    }
+    for name in os.listdir(DRONE_CONFIG_DIR):
+        src = DRONE_CONFIG_DIR / name
+        dst = config_dir / name
+        if src.is_file():
+            content = src.read_text()
+            for old, new in replacements.items():
+                content = content.replace(old, new)
+            dst.write_text(content)
+    return config_dir
+
+
+def start_revad_services(config_dir):
+    """Start all revad processes, return list of Popen objects."""
+    procs = []
+    for config_name in REVAD_CONFIGS:
+        config_path = config_dir / config_name
+        p = subprocess.Popen(
+            [str(REVAD_BIN), "-c", str(config_path)],
+            cwd=str(config_dir),
+        )
+        procs.append(p)
+    return procs
+
+
+def get_space_id(bridge_ip):
+    """Get the personal space ID for einstein user via PROPFIND."""
+    base = f"http://{bridge_ip}:{FRONTEND_PORT}"
+
+    # Trigger home creation by accessing the files endpoint
+    subprocess.run(
+        ["curl", "-s", "-k", "-u", f"{LITMUS_USERNAME}:{LITMUS_PASSWORD}",
+         "-I", f"{base}/remote.php/dav/files/{LITMUS_USERNAME}"],
+        capture_output=True,
+    )
+    time.sleep(1)
+
+    # Get space ID via PROPFIND
+    r = subprocess.run(
+        ["curl", "-XPROPFIND", "-s", "-k",
+         "-u", f"{LITMUS_USERNAME}:{LITMUS_PASSWORD}",
+         f"{base}/dav/files/{LITMUS_USERNAME}"],
+        capture_output=True, text=True,
+    )
+    # Parse space ID from oc:spaceid element
+    import re
+    match = re.search(r"<oc:spaceid>([^<]+)</oc:spaceid>", r.stdout)
+    if not match:
+        # Fallback: try awk-style extraction
+        for line in r.stdout.split("\n"):
+            if "oc:spaceid" in line:
+                parts = line.split("<")
+                for part in parts:
+                    if part.startswith("oc:spaceid>"):
+                        return part.split(">")[1].split("<")[0]
+        print(f"Could not extract space ID from PROPFIND response", file=sys.stderr)
+        print(f"Response: {r.stdout[:500]}", file=sys.stderr)
+        sys.exit(1)
+    return match.group(1)
+
+
+def run_litmus(endpoint_url, bridge_ip):
+    """Run litmus Docker container against the given endpoint."""
+    result = subprocess.run(
+        ["docker", "run", "--rm", "--network", "host",
+         "-e", f"LITMUS_URL={endpoint_url}",
+         "-e", f"LITMUS_USERNAME={LITMUS_USERNAME}",
+         "-e", f"LITMUS_PASSWORD={LITMUS_PASSWORD}",
+         "-e", f"TESTS={LITMUS_TESTS}",
+         LITMUS_IMAGE],
+    )
+    return result.returncode
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Run litmus WebDAV tests against reva")
+    parser.add_argument("--endpoint", required=True,
+                        choices=["old-webdav", "new-webdav", "spaces-dav"],
+                        help="Which litmus endpoint variant to test")
+    args = parser.parse_args()
+
+    procs = []
+
+    def cleanup(*_):
+        for p in procs:
+            try:
+                p.terminate()
+            except Exception:
+                pass
+        # Wait briefly for processes to exit
+        for p in procs:
+            try:
+                p.wait(timeout=5)
+            except Exception:
+                p.kill()
+
+    signal.signal(signal.SIGTERM, cleanup)
+    signal.signal(signal.SIGINT, cleanup)
+
+    try:
+        # Build revad
+        print("Building revad...")
+        subprocess.run(["make", "build-ci"], cwd=str(REPO_ROOT), check=True)
+
+        # Prepare configs (rewrite /drone/src paths)
+        config_dir = prepare_configs()
+        print(f"Config dir: {config_dir}")
+
+        # Start revad services
+        print("Starting revad services...")
+        procs = start_revad_services(config_dir)
+
+        # Wait for frontend and gateway to be ready
+        wait_for_port(FRONTEND_PORT, timeout=60, label="frontend")
+        wait_for_port(GATEWAY_PORT, timeout=60, label="gateway")
+
+        # Get Docker bridge IP for container-to-host communication
+        bridge_ip = get_docker_bridge_ip()
+        base_url = f"http://{bridge_ip}:{FRONTEND_PORT}"
+
+        # Determine litmus URL based on endpoint type
+        if args.endpoint == "old-webdav":
+            litmus_url = f"{base_url}/remote.php/webdav"
+        elif args.endpoint == "new-webdav":
+            litmus_url = f"{base_url}/remote.php/dav/files/{EINSTEIN_UUID}"
+        elif args.endpoint == "spaces-dav":
+            space_id = get_space_id(bridge_ip)
+            litmus_url = f"{base_url}/remote.php/dav/spaces/{space_id}"
+            print(f"Space ID: {space_id}")
+
+        print(f"Running litmus: {args.endpoint} -> {litmus_url}")
+        rc = run_litmus(litmus_url, bridge_ip)
+        sys.exit(rc)
+
+    finally:
+        cleanup()
+
+
+if __name__ == "__main__":
+    main()