diff --git a/.gitignore b/.gitignore index a0dcf03..9a27ff7 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,5 @@ ipns vendor Cassandra transaction table.cql composer.json +.docker-cache +.coverage-html diff --git a/README.md b/README.md index fb01c9c..7502435 100644 --- a/README.md +++ b/README.md @@ -67,6 +67,29 @@ People are the Priority: People are the most important. - If it can be hacked, it will be hacked: Never save, store, or transmit secret info, like passwords or keys. Open source & auditable. +## Test + +Run tests in Docker (no local PHP needed): +- `./bin/test` — defaults to PHP 8.3 +- `./bin/test -V 7.4` — choose a PHP version (matrix: >= 7, ie: + `7.0.33`, `7.4`, `8`) +- `./bin/test -V 7.4,8.3` — run multiple versions in one call (comma-separated) +- `./bin/test -V all` — run every known `php:-cli-alpine` tag (discovered + from Docker Hub and cached in `.docker-cache/php-cli-tag-cache.txt`; cache + refreshes weekly) + +Coverage (HTML in `./.coverage-html`): +- `./bin/test --coverage` +- `./bin/test -V 7.0 --coverage` (works across the matrix) +- After a coverage run, open `.coverage-html/index.html` (a clickable + link is printed) + +Notes +- The runner builds a versioned image and caches by tag + (`comchain-phpunit:`). +- No repo phpunit.xml required; the script generates one per run. +- Officially exercised versions: 7.0.33, 7.4.33, 8.3.29. + ## Contact If you can think of any other features or run into bugs, let us know. You can drop a line at it {at} monnaie {-} leman dot org. diff --git a/api.php b/api.php index f0a2114..e68638c 100644 --- a/api.php +++ b/api.php @@ -337,9 +337,10 @@ function storeTransaction($is_valid_shop, $transaction_ash, $web_hook_status, $a $val[]='?'; */ + $now = time(); // build the query $query = "INSERT INTO testtransactions (".join(', ',array_keys($fields)); - $query = $query.',time,receivedAt) VALUES ('.join(', ',$val).','.time().','.time().')'; + $query = $query.',time,receivedAt) VALUES ('.join(', ',$val).','.$now.','.$now.')'; $keyspace = 'comchain'; // for pledge only the other direction is inserted diff --git a/bin/test b/bin/test new file mode 100755 index 0000000..ac9369a --- /dev/null +++ b/bin/test @@ -0,0 +1,470 @@ +#!/usr/bin/env bash +# Build and run tests inside Docker without installing PHP locally. +# Usage: bin/test [-V PHP_VERSION] [--coverage] [-- ] +# Example: bin/test -V 8.2 --coverage -- --testsuite trnslist + +set -euo pipefail + +COVERAGE=false +DOCKER_USER="$(id -u):$(id -g)" + +PCOV_VERSION="1.0.12" +VERSIONS=() +PHPUNIT_ARGS=() +COVERAGE_REPORTS=() +ALL_VERSIONS=() +DEBUG="${DEBUG:-}" + +WHITE=$'\e[37;1m' +RED=$'\e[31m' +GREEN=$'\e[32;1m' +RESET=$'\e[0m' + +add_version() { + local version="$1" + + if [[ ! "$version" =~ ^[0-9]+(\.[0-9]+){0,2}$ ]]; then + echo "Warning: ignoring invalid PHP version specified: '$version'" >&2 + return 1 + fi + + local existing="" + for existing in "${VERSIONS[@]}"; do + if [ "$existing" = "$version" ]; then + return 1 + fi + done + VERSIONS+=("$version") +} + +version:gt() { [ "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1" ]; } +version:le() { ! version:gt "$@"; } + +# Avoid writing Docker metadata (buildx activity store) into $HOME when it may +# be read-only inside the sandbox; default to a writable repo-local directory. 
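+# Override example (hypothetical path), since DOCKER_CONFIG is only defaulted when unset:
+#   DOCKER_CONFIG=/tmp/comchain-docker ./bin/test -V 8.3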
+if [ -z "${DOCKER_CONFIG:-}" ]; then + export DOCKER_CONFIG="$PWD/.docker-cache" +fi +mkdir -p "${DOCKER_CONFIG}" + +TAG_CACHE_FILE="${DOCKER_CONFIG}/php-cli-tag-cache.txt" +# Refresh cache every 7 days unless overridden via PHP_TAG_CACHE_MAX_AGE_SECONDS +TAG_CACHE_MAX_AGE_SECONDS="${PHP_TAG_CACHE_MAX_AGE_SECONDS:-604800}" +DIGEST_CACHE_DIR="${DOCKER_CONFIG}/php-cli-digest-cache" + +refresh_php_tag_cache() { + # Requires curl and python3; silently fall back to defaults if unavailable. + if ! command -v curl >/dev/null 2>&1 || ! command -v python3 >/dev/null 2>&1; then + return 1 + fi + + + local tmp_cache="${TAG_CACHE_FILE}.tmp" + echo "Fetching all latest valid tags..." >&2 + if ! python3 - <<'PY' > "${tmp_cache}"; then +import json, re, sys, urllib.request + +url = "https://hub.docker.com/v2/repositories/library/php/tags?page_size=100&name=cli-alpine" +pattern = re.compile(r"^\d+\.\d+-cli-alpine$") +seen = set() + +def version_key(tag: str): + base = tag.split("-")[0] + return [int(p) for p in base.split(".")] + +while url: + sys.stderr.write(f" Fetching URL: {url}\n") + with urllib.request.urlopen(url) as resp: + data = json.load(resp) + for result in data.get("results", []): + name = result.get("name") or "" + if pattern.match(name): + version = name.split("-")[0] + if int(version.split(".")[0]) >= 7: + seen.add(version) # keep "N.M" + sys.stderr.write(f" Found version: {version}\n") + url = data.get("next") + +for tag in sorted(seen, key=version_key): + print(tag) +PY + return 1 + fi + echo " ... fetched $(wc -l < "${tmp_cache}") tags." >&2 + mv "${tmp_cache}" "${TAG_CACHE_FILE}" +} + +load_all_versions() { + local now cache_mtime cache_age + + if [ -f "${TAG_CACHE_FILE}" ]; then + cache_mtime=$(stat -c %Y "${TAG_CACHE_FILE}" 2>/dev/null || stat -f %m "${TAG_CACHE_FILE}" 2>/dev/null || echo 0) + now=$(date +%s) + cache_age=$((now - cache_mtime)) + else + cache_age=$((TAG_CACHE_MAX_AGE_SECONDS + 1)) + fi + + if [ ! -f "${TAG_CACHE_FILE}" ] || [ "${cache_age}" -ge "${TAG_CACHE_MAX_AGE_SECONDS}" ]; then + if ! refresh_php_tag_cache; then + echo "Error: unable to retrieve PHP tags." >&2 + return 1 + fi + fi + + if [ -f "${TAG_CACHE_FILE}" ]; then + mapfile -t ALL_VERSIONS < "${TAG_CACHE_FILE}" + fi +} + + +# Known supported PHP versions (matching official php:-cli-alpine tags) +while [ $# -gt 0 ]; do + case "$1" in + -V|--version) + IFS=',' read -r -a version_list <<< "$2" + for version in "${version_list[@]}"; do + version="${version//[[:space:]]/}" + if [ -n "$version" ]; then + if [ "$version" = "all" ]; then + load_all_versions || exit 1 + + for all_version in "${ALL_VERSIONS[@]}"; do + add_version "$all_version" || exit 1 + done + else + add_version "$version" || exit 1 + fi + fi + done + shift 2 + ;; + --coverage) + COVERAGE=true + shift + ;; + --) + shift + break + ;; + *) + echo "Unknown option: $1" >&2 + echo "Usage: $0 [-V PHP_VERSION] [-- ]" >&2 + exit 1 + ;; + esac +done + +if [ $# -gt 0 ]; then + PHPUNIT_ARGS=("$@") +fi + +run_for_version() { + local php_version="$1" + shift + local phpunit_args=("$@") + local tmp_cfg="" + local base_tag="" + local run_status=0 + + # Resolve PHP version tag via docker; accept things like "" (latest) + # "8" (latest 8) or "8.5" or even "7.0.33". + if [ -z "$php_version" ]; then + base_tag="php:cli-alpine" + else + base_tag="php:${php_version}-cli-alpine" + fi + if ! docker image inspect "$base_tag" >/dev/null 2>&1; then + echo "Pulling docker image ${base_tag} as base PHP image..." >&2 + if ! 
docker pull "$base_tag" >/dev/null 2>&1; then + echo "Error: unable to pull Docker image $base_tag. Please use an existing php:-cli-alpine tag (e.g., 7.4, 8.3, 8, 8.5)." >&2 + return 1 + fi + echo " ... pulled." >&2 + else + [ -n "$DEBUG" ] && echo "Using cached docker image ${base_tag} as base PHP image" >&2 + fi + + # Discover the actual PHP version inside the base image + FULL_VERSION=$(docker run --rm "$base_tag" php -r 'echo PHP_VERSION;' 2>/dev/null || true) + if [ -z "$FULL_VERSION" ]; then + echo "Could not determine PHP version from $base_tag" >&2 + return 1 + fi + base_tag="php:${FULL_VERSION}-cli-alpine" + + ## to avoid buildx querying registry + mkdir -p "${DIGEST_CACHE_DIR}" + if [ -f "${DIGEST_CACHE_DIR}/$base_tag" ]; then + base_image_by_digest="$(cat "${DIGEST_CACHE_DIR}/$base_tag")" + else + if ! inspect_base_image=$(docker buildx imagetools inspect "$base_tag"); then + echo "Could not inspect base image '$base_tag'" >&2 + return 1 + fi + base_image_by_digest=php@$(printf "%s" "$inspect_base_image" | grep 'Digest:' | head -n1 | awk '{print $2}') + echo "$base_image_by_digest" > "${DIGEST_CACHE_DIR}/$base_tag" + fi + + major="${FULL_VERSION%%.*}" + minor_part="${FULL_VERSION#*.}" + minor="${minor_part%%.*}" + PHPUNIT_URL="" + COVERAGE_DRIVER="pcov" + XDEBUG_VERSION="" + PHPUNIT_MAJOR=12 + + if [ "$major" -eq 7 ] && [ "$minor" -eq 0 ]; then + # PHP 7.0 - no branch coverage (xdebug 2.x) + PHPUNIT_URL="https://phar.phpunit.de/phpunit-6.5.14.phar" + COVERAGE_DRIVER="xdebug" + XDEBUG_VERSION="2.6.1" + PHPUNIT_MAJOR=6 + elif [ "$major" -eq 7 ] && [ "$minor" -eq 1 ]; then + # PHP 7.1 - no branch coverage (xdebug 2.x) + PHPUNIT_URL="https://phar.phpunit.de/phpunit-7.5.20.phar" + COVERAGE_DRIVER="xdebug" + XDEBUG_VERSION="2.6.1" + PHPUNIT_MAJOR=7 + elif [ "$major" -eq 7 ] && [ "$minor" -eq 2 ]; then + # PHP 7.2 - no branch coverage (phpunit < 9.3) + # cf: https://github.com/sebastianbergmann/phpunit/issues/4260 + # phpunit 9.3 requires php 7.3+ + # cf: https://phpunit.de/announcements/phpunit-9.html + PHPUNIT_URL="https://phar.phpunit.de/phpunit-8.5.38.phar" + COVERAGE_DRIVER="xdebug" + XDEBUG_VERSION="3.0.4" + PHPUNIT_MAJOR=8 + elif [ "$major" -eq 7 ]; then + # PHP 7.3–7.4 - branch coverage supported (xdebug 3.1.x) + PHPUNIT_URL="https://phar.phpunit.de/phpunit-9.6.19.phar" + COVERAGE_DRIVER="xdebug" + XDEBUG_VERSION="3.1.6" + PHPUNIT_MAJOR=9 + elif [ "$major" -eq 8 ] && [ "$minor" -eq 0 ]; then + # PHP 8.0 - branch coverage supported (xdebug 3.2.x) + # Note: xdebug 3.1.x has a path coverage segfault bug, fixed in 3.2.x + PHPUNIT_URL="https://phar.phpunit.de/phpunit-9.6.19.phar" + COVERAGE_DRIVER="xdebug" + XDEBUG_VERSION="3.2.2" + PHPUNIT_MAJOR=9 + elif [ "$major" -eq 8 ] && [ "$minor" -eq 1 ]; then + # PHP 8.1 - branch coverage supported (xdebug 3.2.x) + PHPUNIT_URL="https://phar.phpunit.de/phpunit-10.phar" + COVERAGE_DRIVER="xdebug" + XDEBUG_VERSION="3.2.2" + PHPUNIT_MAJOR=10 + elif [ "$major" -eq 8 ] && [ "$minor" -eq 2 ]; then + # PHP 8.2 - branch coverage supported (xdebug 3.2.x) + PHPUNIT_URL="https://phar.phpunit.de/phpunit-11.phar" + COVERAGE_DRIVER="xdebug" + XDEBUG_VERSION="3.2.2" + PHPUNIT_MAJOR=11 + elif [ "$major" -eq 8 ] && [ "$minor" -le 4 ]; then + # PHP 8.3-8.4 - branch coverage supported (xdebug 3.4.x) + # Note: xdebug 3.3.x has compilation errors with PHP 8.3.29+ + PHPUNIT_URL="https://phar.phpunit.de/phpunit-12.phar" + COVERAGE_DRIVER="xdebug" + XDEBUG_VERSION="3.4.0" + PHPUNIT_MAJOR=12 + elif [ "$major" -eq 8 ]; then + # PHP 8.5+ - branch coverage supported 
(xdebug 3.5.x) + PHPUNIT_URL="https://phar.phpunit.de/phpunit-12.phar" + COVERAGE_DRIVER="xdebug" + XDEBUG_VERSION="3.5.0" + PHPUNIT_MAJOR=12 + else + # PHP 9+ (assume latest PHPUnit and xdebug) + PHPUNIT_URL="https://phar.phpunit.de/phpunit-12.phar" + COVERAGE_DRIVER="xdebug" + XDEBUG_VERSION="" + PHPUNIT_MAJOR=12 + fi + + IMAGE="comchain-phpunit:${FULL_VERSION}" + if ! docker image inspect "$IMAGE" >/dev/null 2>&1; then + + DOCKERFILE_CONTENT=$(cat <<'EOF' +ARG BASE_TAG=php:cli-alpine +FROM ${BASE_TAG} + +ARG PHPUNIT_URL=https://phar.phpunit.de/phpunit-12.5.0.phar +ARG COVERAGE_DRIVER=pcov +ARG PCOV_VERSION=1.0.12 +ARG XDEBUG_VERSION=2.6.1 + +RUN set -e; \ + for i in 1 2 3; do apk update && break || sleep 2; done; \ + apk add --no-cache curl git linux-headers $PHPIZE_DEPS; \ + if [ "$COVERAGE_DRIVER" = "pcov" ]; then \ + if pecl install pcov-${PCOV_VERSION}; then \ + docker-php-ext-enable pcov; \ + { echo "pcov.enabled=1"; echo "pcov.directory=/app"; echo "pcov.exclude=#/app/vendor#"; } > /usr/local/etc/php/conf.d/coverage.ini; \ + else \ + echo "pcov install failed, falling back to xdebug" >&2; \ + if [ -n "${XDEBUG_VERSION}" ]; then pecl install xdebug-${XDEBUG_VERSION}; else pecl install xdebug; fi; \ + docker-php-ext-enable xdebug; \ + { echo "xdebug.mode=coverage"; echo "xdebug.start_with_request=no"; } > /usr/local/etc/php/conf.d/coverage.ini; \ + fi; \ + else \ + if [ -n "${XDEBUG_VERSION}" ]; then pecl install xdebug-${XDEBUG_VERSION}; else pecl install xdebug; fi; \ + docker-php-ext-enable xdebug; \ + { echo "xdebug.mode=coverage"; echo "xdebug.start_with_request=no"; } > /usr/local/etc/php/conf.d/coverage.ini; \ + fi; \ + apk del $PHPIZE_DEPS; \ + curl -Ls ${PHPUNIT_URL} -o /usr/local/bin/phpunit; \ + chmod +x /usr/local/bin/phpunit + +WORKDIR /app +CMD ["phpunit"] +EOF + ) + + echo "Building image ${IMAGE} (PHP ${php_version})..." >&2 + if ! out=$(echo "${DOCKERFILE_CONTENT}" | docker build \ + --build-arg BASE_TAG="${base_image_by_digest}" \ + --build-arg PHPUNIT_URL="${PHPUNIT_URL}" \ + --build-arg COVERAGE_DRIVER="${COVERAGE_DRIVER}" \ + --build-arg PCOV_VERSION="${PCOV_VERSION}" \ + --build-arg XDEBUG_VERSION="${XDEBUG_VERSION}" \ + -t "${IMAGE}" -f - . ); then + echo "Error: unable to build Docker image ${IMAGE}" >&2 + return 1 + fi + echo " Built image: ${out}" >&2 + else + [ -n "$DEBUG" ] && echo "Using cached image ${IMAGE}" >&2 + fi + # Generate phpunit configuration tailored to version (written to a temp file) + tmp_cfg="$(mktemp /tmp/phpunit.XXXXXX.xml)" + + COVERAGE_XML="" + if $COVERAGE; then + if [ "${PHPUNIT_MAJOR}" -lt 9 ]; then + COVERAGE_XML=" + + /app/trnslist.php + + " + elif [ "${PHPUNIT_MAJOR}" -lt 10 ]; then + COVERAGE_XML=" + + /app/trnslist.php + + " + else + COVERAGE_XML=" + + /app/trnslist.php + + " + fi + fi + + cat > "$tmp_cfg" < + + + + /app/tests + + +${COVERAGE_XML} + +CFG + + [ -n "$DEBUG" ] && echo "Running phpunit in ${IMAGE}..." 
>&2 + PHPUNIT_OPTS=() + if $COVERAGE; then + COVERAGE_DIR="$(pwd)/.coverage-html/${FULL_VERSION}" + rm -rf "${COVERAGE_DIR}" + mkdir -p "${COVERAGE_DIR}" + chmod 777 "${COVERAGE_DIR}" + PHPUNIT_OPTS+=(--coverage-html "${COVERAGE_DIR}" --coverage-text) + # Branch coverage requires Xdebug 3.0+ and PHPUnit 9.3+ + if [ "${PHPUNIT_MAJOR}" -ge 9 ] && [ "${COVERAGE_DRIVER}" = "xdebug" ]; then + PHPUNIT_OPTS+=(--path-coverage) + fi + if [ "${PHPUNIT_MAJOR}" -ge 10 ]; then + PHPUNIT_OPTS+=(--coverage-filter /app/trnslist.php) + fi + fi + + DOCKER_COVERAGE_MOUNT=() + if $COVERAGE; then + DOCKER_COVERAGE_MOUNT=(-v "${COVERAGE_DIR}:${COVERAGE_DIR}") + fi + + # Memory limit for coverage (path coverage is memory intensive) + MEMORY_LIMIT="512M" + if $COVERAGE; then + MEMORY_LIMIT="1G" + fi + + docker_run_opts=( + --rm + --user "${DOCKER_USER}" + -v "$PWD":/app + -v "$tmp_cfg":/tmp/phpunit.xml + "${DOCKER_COVERAGE_MOUNT[@]}" "${IMAGE}" + ) + + php_run_opts=( + -d memory_limit="${MEMORY_LIMIT}" + ) + if [ "$major" -ge 8 ]; then + php_run_opts+=(-d error_reporting=8191) + fi + + docker_run_cmd=( + docker run "${docker_run_opts[@]}" + php "${php_run_opts[@]}" + /usr/local/bin/phpunit -c /tmp/phpunit.xml "${PHPUNIT_OPTS[@]}" "${phpunit_args[@]}" + ) + + [ -n "$DEBUG" ] && echo "Running command: ${docker_run_cmd[*]}" >&2 + + "${docker_run_cmd[@]}" + run_status=$? + + rm -f "$tmp_cfg" + if [ "$run_status" -ne 0 ]; then + echo "Tests ${RED}failed${RESET} for PHP ${FULL_VERSION}" >&2 + return "$run_status" + fi + echo "Tests ${GREEN}passed${RESET} for PHP ${FULL_VERSION}" >&2 + + if $COVERAGE; then + LINK="file://${COVERAGE_DIR}/index.html" + COVERAGE_REPORTS+=("$FULL_VERSION|$LINK") + fi +} + +print_coverage_reports() { + if $COVERAGE && [ "${#COVERAGE_REPORTS[@]}" -gt 0 ]; then + echo + echo "${WHITE}HTML Coverage reports${RESET}:" + for report in "${COVERAGE_REPORTS[@]}"; do + version="${report%%|*}" + link="${report#*|}" + printf ' - \e]8;;%s\e\\./coverage-html/%s\e]8;;\e\\\n' "$link" "$version" + done + fi +} + +status=0 +if [ "${#VERSIONS[@]}" -eq 0 ]; then + if ! run_for_version "" "${PHPUNIT_ARGS[@]}"; then + status=$? + fi +else + for version in "${VERSIONS[@]}"; do + if ! run_for_version "$version" "${PHPUNIT_ARGS[@]}"; then + status=$? + break + fi + done +fi + +print_coverage_reports +exit "$status" diff --git a/includes/cassandra.inc b/includes/cassandra.inc new file mode 100644 index 0000000..210b710 --- /dev/null +++ b/includes/cassandra.inc @@ -0,0 +1,14 @@ +isLastPage()) break; + $page = $page->nextPage(); + } +} diff --git a/includes/features.inc b/includes/features.inc new file mode 100644 index 0000000..95323c2 --- /dev/null +++ b/includes/features.inc @@ -0,0 +1,69 @@ + 'Missing required header: X-Client-Features', + 'supported' => $supported_features + ]); + return null; + } + + $client_features = parse_features($client_header); + $common = array_values(array_intersect($supported_features, $client_features)); + + if (empty($common)) { + http_response_code(406); + echo json_encode([ + 'error' => 'No common feature found', + 'supported' => $supported_features + ]); + return null; + } + + header('X-Selected-Features: ' . 
implode(' ', $common)); + return $common; +} diff --git a/tests/Mocks.php b/tests/Mocks.php new file mode 100644 index 0000000..1cf3f30 --- /dev/null +++ b/tests/Mocks.php @@ -0,0 +1,141 @@ +val = $val; } + public function value() { return $this->val; } +} + +class MockRow implements \ArrayAccess, \JsonSerializable { + private $data; + public function __construct(array $data) { + $this->data = $data; + if (isset($data['time'])) { + $this->data['time'] = new MockCassandraValue($data['time']); + } + if (isset($data['receivedat'])) { + $this->data['receivedat'] = new MockCassandraValue($data['receivedat']); + } + } + public function offsetExists($offset) { return isset($this->data[$offset]); } + public function offsetGet($offset) { return $this->data[$offset] ?? null; } + public function offsetSet($offset, $value) { $this->data[$offset] = $value; } + public function offsetUnset($offset) { unset($this->data[$offset]); } + public function jsonSerialize() { return $this->data; } +} + +// Micro DSL helpers +function tx(string $hash, int $time, int $status = 0, int $direction = 1): array { + return [ + 'hash' => $hash, + 'time' => $time, + 'status' => $status, + 'direction' => $direction, + 'add1' => '0xaddr1', + 'add2' => '0xaddr2', + 'receivedat' => $time, + ]; +} + +function page(array $rows, $next = null): \Iterator { + return new class($rows, $next) implements \Iterator { + private $rows; + private $i = 0; + private $next; + public function __construct(array $rows, $next) { + $this->rows = array_map(function ($r) { return new \MockRow($r); }, $rows); + $this->next = $next; + } + public function isLastPage() { return $this->next === null; } + public function nextPage() { return $this->next; } + public function current() { return $this->rows[$this->i]; } + public function key() { return $this->i; } + public function next() { ++$this->i; } + public function rewind() { $this->i = 0; } + public function valid() { return isset($this->rows[$this->i]); } + }; +} + +/** + * Create a mock session + * + * @param array $queryMap Map of query patterns to pages + * @param array $cursors Optional list of valid cursors as [[time, hash], ...] + */ +function session(array $queryMap, array $cursors = null) { + // If cursors not explicitly provided, extract from queryMap pages + if ($cursors === null) { + $cursors = []; + foreach ($queryMap as $page) { + if ($page instanceof \Iterator) { + // Clone and iterate to extract cursors + $page->rewind(); + while ($page->valid()) { + $row = $page->current(); + $time = $row['time'] instanceof MockCassandraValue ? $row['time']->value() : $row['time']; + $cursors[] = [$time, $row['hash']]; + $page->next(); + } + $page->rewind(); + } + } + } + + return new class($queryMap, $cursors) { + private $map; + private $cursors; + public function __construct(array $map, array $cursors) { + $this->map = $map; + $this->cursors = $cursors; + } + public function execute($statement, $options) { + $query = (string)$statement; + + // Handle cursor validation queries + if (strpos($query, 'time = ?') !== false && strpos($query, 'hash = ?') !== false) { + $args = $options['arguments'] ?? 
[]; + if (count($args) >= 3) { + $time = $args[1]; + $hash = $args[2]; + foreach ($this->cursors as $cursor) { + if ($cursor[0] === $time && $cursor[1] === $hash) { + return page([['hash' => $hash]]); + } + } + } + return page([]); + } + + // Handle regular queries + foreach ($this->map as $needle => $page) { + if (strpos($query, $needle) !== false) { + // Reset iterator for reuse + if ($page instanceof \Iterator) { + $page->rewind(); + } + return $page; + } + } + return page([]); + } + }; +} + +class MockStatement { + private $query; + public function __construct($query) { $this->query = $query; } + public function __toString(): string { return $this->query; } +} + +} // end global namespace + +// Namespace shim so Cassandra\SimpleStatement resolves without extension +namespace Cassandra { + class SimpleStatement extends \MockStatement { + public function __construct(string $query) { parent::__construct($query); } + } +} diff --git a/tests/bootstrap.php b/tests/bootstrap.php new file mode 100644 index 0000000..3d080a6 --- /dev/null +++ b/tests/bootstrap.php @@ -0,0 +1,32 @@ += 80000) { + error_reporting(error_reporting() & ~E_DEPRECATED & ~E_USER_DEPRECATED); + set_error_handler(function ($errno, $errstr, $errfile, $errline) { + if ($errno === E_DEPRECATED || $errno === E_USER_DEPRECATED) { + return true; // swallow deprecations from legacy test doubles + } + return false; // let PHPUnit handle others + }); +} + +// Define test constants - lower values for faster tests +if (!defined('TXS_MAX_QUERY_LIMIT')) { + define('TXS_MAX_QUERY_LIMIT', 5); +} +if (!defined('TXS_RECENT_MAX_BUFFER_TX_COUNT')) { + define('TXS_RECENT_MAX_BUFFER_TX_COUNT', 10); +} +if (!defined('TXS_CASSANDRA_QUERY_PAGE_SIZE')) { + define('TXS_CASSANDRA_QUERY_PAGE_SIZE', 2); +} +if (!defined('TXS_PENDING_CUTOFF_AGE')) { + define('TXS_PENDING_CUTOFF_AGE', 1000); +} +if (!defined('TXS_PENDING_CLOSURE_PAST_LOOKUP_LIMIT')) { + define('TXS_PENDING_CLOSURE_PAST_LOOKUP_LIMIT', 500); +} + +require_once __DIR__ . '/Mocks.php'; // provides Cassandra\\SimpleStatement shim +require_once __DIR__ . '/../trnslist.php'; diff --git a/tests/trnslistTest.php b/tests/trnslistTest.php new file mode 100644 index 0000000..ba4d7e2 --- /dev/null +++ b/tests/trnslistTest.php @@ -0,0 +1,275 @@ + page([ + tx('0xhash1', 1000), + tx('0xhash1', 950), // duplicate, older + tx('0xhash2', 900), + ]), + 'status = 1' => page([]), + ]); + + $result = get_transactions($session, '0xaddr1', 10, 0); + + $this->assertCount(2, $result); + $hashes = array_map(function ($r) { return json_decode($r, true)['hash']; }, $result); + $this->assertSame(['0xhash1', '0xhash2'], $hashes); + } + + public function testPendingCutoff() + { + $now = time(); + $session = session([ + 'status = 0' => page([]), + 'status = 1' => page([ + // Only include rows that would pass the real query time >= cutoff + tx('0xpending1', $now - 1800, 1), // within 1h + ]), + ]); + + $result = get_transactions($session, '0xaddr1', 10, 0); + + $this->assertCount(1, $result); + $tx = json_decode($result[0], true); + $this->assertSame('0xpending1', $tx['hash']); + } + + public function testDuplicateHashWithDifferentStatusShouldDedup() + { + // Status=1 (pending) arrives first, followed by status=0 (confirmed) of same hash. + $session = session([ + 'status = 1' => page([ + tx('0xdup', 1000, 1), + ]), + 'status = 0' => page([ + tx('0xdup', 900, 0), + ]), + ]); + + $result = get_transactions($session, '0xaddr1', 10, 0); + + // Expected behavior: only one tx per hash. 
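+        // (Which of the two versions survives, confirmed or pending, is asserted by the next test.)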
+ $this->assertCount(1, $result, 'Should not emit the same hash twice even if statuses differ'); + } + + public function testPendingReplacedByConfirmedWhenLimitIsOne() + { + // Status=1 (pending) arrives first, followed by status=0 (confirmed) of same hash. + // With limit=1, the code should still find and use the confirmed version. + $session = session([ + 'status = 1' => page([ + tx('0xdup', 1000, 1), + ]), + 'status = 0' => page([ + tx('0xdup', 900, 0), + ]), + ]); + + $result = get_transactions($session, '0xaddr1', 1, 0); + + $this->assertCount(1, $result); + $tx = json_decode($result[0], true); + $this->assertSame(0, $tx['status'], 'Pending should be replaced by confirmed version'); + } + + public function testPendingReplacedWhenConfirmedWithinLookupLimit() + { + // TXS_PENDING_CLOSURE_PAST_LOOKUP_LIMIT = 500 in tests + // Pending at 1000, confirmed at 501 (499s diff - within limit) + $session = session([ + 'status = 1' => page([ + tx('0xdup', 1000, 1), + ]), + 'status = 0' => page([ + tx('0xdup', 501, 0), // 1000 - 501 = 499 < 500, within limit + ]), + ]); + + $result = get_transactions($session, '0xaddr1', 1, 0); + + $this->assertCount(1, $result); + $tx = json_decode($result[0], true); + $this->assertSame(0, $tx['status'], 'Pending should be replaced when confirmed is within lookup limit'); + } + + public function testPendingStaysPendingWhenConfirmedBeyondLookupLimit() + { + // TXS_PENDING_CLOSURE_PAST_LOOKUP_LIMIT = 500 in tests + // Pending at 1000, confirmed at 500 (500s diff - at/beyond limit) + $session = session([ + 'status = 1' => page([ + tx('0xdup', 1000, 1), + ]), + 'status = 0' => page([ + tx('0xdup', 500, 0), // 1000 - 500 = 500, not < 500, beyond limit + ]), + ]); + + $result = get_transactions($session, '0xaddr1', 1, 0); + + $this->assertCount(1, $result); + $tx = json_decode($result[0], true); + $this->assertSame(1, $tx['status'], 'Pending should stay pending when confirmed is beyond lookup limit'); + } + + public function testMixedPendingConfirmedWithLookupLimitBoundaries() + { + // TXS_PENDING_CLOSURE_PAST_LOOKUP_LIMIT = 500 in tests + // TXS_PENDING_CUTOFF_AGE = 1000 in tests + // + // Request limit=3 to trigger closure mode after collecting 3 transactions. + // The closure mode will then look for confirmed versions of pending txs. 
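+        // Confirmed rows more than TXS_PENDING_CLOSURE_PAST_LOOKUP_LIMIT (500s here) behind their pending counterpart are ignored.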
+ // + // Timeline (time DESC): + // - 0xreplaced: pending@1000, confirmed@501 (499s diff, within limit) -> expect status=0 + // - 0xkept: pending@950, confirmed@450 (500s diff, beyond limit) -> expect status=1 + // - 0xpending_only: pending@900, no confirmed -> expect status=1 + // - 0xconfirmed_only: confirmed@850, no pending -> not in result (limit=3) + // - 0xold_confirmed: confirmed@200, no pending -> not in result + // + $session = session([ + 'status = 1' => page([ + tx('0xreplaced', 1000, 1), + tx('0xkept', 950, 1), + tx('0xpending_only', 900, 1), + ]), + 'status = 0' => page([ + tx('0xconfirmed_only', 850, 0), + tx('0xreplaced', 501, 0), // 1000 - 501 = 499 < 500, within limit + tx('0xkept', 450, 0), // 950 - 450 = 500, not < 500, beyond limit + tx('0xold_confirmed', 200, 0), + ]), + ]); + + // Request only 3 to trigger closure mode + $result = get_transactions($session, '0xaddr1', 3, 0); + + $this->assertCount(3, $result); + + // Parse results into a hash -> status map + $byHash = []; + foreach ($result as $r) { + $tx = json_decode($r, true); + $byHash[$tx['hash']] = $tx['status']; + } + + // Verify each transaction's expected status + $this->assertSame(0, $byHash['0xreplaced'], '0xreplaced: pending should be replaced (499s < 500 limit)'); + $this->assertSame(1, $byHash['0xkept'], '0xkept: pending should stay pending (500s >= 500 limit)'); + $this->assertSame(1, $byHash['0xpending_only'], '0xpending_only: pending with no confirmed stays pending'); + + // Verify order (by time DESC) + $hashes = array_map(function ($r) { return json_decode($r, true)['hash']; }, $result); + $this->assertSame(['0xreplaced', '0xkept', '0xpending_only'], $hashes); + } + + public function testPaginationOffsetAndLimit() + { + // Four transactions, request 2 with offset 1 -> expect hash2, hash3 + $session = session([ + 'status = 0' => page([ + tx('0xhash1', 1000), + tx('0xhash2', 900), + tx('0xhash3', 800), + tx('0xhash4', 700), + ]), + 'status = 1' => page([]), + ]); + + $result = get_transactions($session, '0xaddr1', 2, 1); + + $hashes = array_map(function ($r) { return json_decode($r, true)['hash']; }, $result); + $this->assertSame(['0xhash2', '0xhash3'], $hashes); + } + + public function testPagingAcrossPagesMaintainsOrder() + { + // Simulate Cassandra paging: page1 then page2 + $page2 = page([ + tx('0xhash3', 800), + tx('0xhash4', 700), + ]); + $page1 = page([ + tx('0xhash1', 1000), + tx('0xhash2', 900), + ], $page2); + + $session = session([ + 'status = 0' => $page1, + 'status = 1' => page([]), + ]); + + $result = get_transactions($session, '0xaddr1', 10, 0); + $hashes = array_map(function ($r) { return json_decode($r, true)['hash']; }, $result); + + $this->assertSame(['0xhash1', '0xhash2', '0xhash3', '0xhash4'], $hashes); + } + + public function testDirectionAndReceivedAtFormatting() + { + // direction=0 should flip add1/add2, and null receivedat should fallback to time + $session = session([ + 'status = 0' => page([ + [ + 'hash' => '0xdir', + 'time' => 1234, + 'status' => 0, + 'direction' => 0, + 'add1' => 'A', + 'add2' => 'B', + 'receivedat' => null, + ], + ]), + 'status = 1' => page([]), + ]); + + $result = get_transactions($session, '0xaddr1', 1, 0); + $tx = json_decode($result[0], true); + + $this->assertSame('B', $tx['addr_from']); + $this->assertSame('A', $tx['addr_to']); + $this->assertSame(1234, $tx['receivedat'], 'receivedat should default to time when null'); + } + + public function testOnlyPendingTransactionReturned() + { + $session = session([ + 'status = 0' => page([]), + 'status = 1' 
=> page([ + tx('0xpending', 1000, 1), + ]), + ]); + + $result = get_transactions($session, '0xaddr1', 5, 0); + $this->assertCount(1, $result); + $tx = json_decode($result[0], true); + $this->assertSame(1, $tx['status']); + $this->assertSame('0xpending', $tx['hash']); + } + + public function testPendingAndConfirmedDifferentHashes() + { + $session = session([ + 'status = 0' => page([ + tx('0xconfirmed', 900, 0), + ]), + 'status = 1' => page([ + tx('0xpending', 1000, 1), + ]), + ]); + + $result = get_transactions($session, '0xaddr1', 5, 0); + $this->assertCount(2, $result); + $hashes = array_map(function ($r) { return json_decode($r, true)['hash']; }, $result); + $this->assertSame(['0xpending', '0xconfirmed'], $hashes); + } + +} diff --git a/tests/txsV1Test.php b/tests/txsV1Test.php new file mode 100644 index 0000000..75a9f53 --- /dev/null +++ b/tests/txsV1Test.php @@ -0,0 +1,620 @@ + '0x1234567890123456789012345678901234567890', + 'n' => '5', + ]); + $this->assertArrayNotHasKey('error', $result); + $this->assertSame('0x1234567890123456789012345678901234567890', $result['addr']); + $this->assertSame(5, $result['n']); + } + + public function testEntrypointInvalidAddressTooShort() + { + $result = txs_entrypoint([ + 'addr' => '0x1234', + 'n' => '5', + ]); + $this->assertSame(['error' => 'Invalid address'], $result); + } + + public function testEntrypointMissingAddress() + { + $result = txs_entrypoint(['n' => '5']); + $this->assertSame(['error' => 'Invalid address'], $result); + } + + public function testEntrypointMissingN() + { + $result = txs_entrypoint([ + 'addr' => '0x1234567890123456789012345678901234567890', + ]); + $this->assertSame(['error' => 'n is required and must be a non-zero integer'], $result); + } + + public function testEntrypointZeroN() + { + $result = txs_entrypoint([ + 'addr' => '0x1234567890123456789012345678901234567890', + 'n' => '0', + ]); + $this->assertSame(['error' => 'n is required and must be a non-zero integer'], $result); + } + + public function testEntrypointNonNumericN() + { + $result = txs_entrypoint([ + 'addr' => '0x1234567890123456789012345678901234567890', + 'n' => 'abc', + ]); + $this->assertSame(['error' => 'n is required and must be a non-zero integer'], $result); + } + + public function testEntrypointNegativeNWithoutCursor() + { + $result = txs_entrypoint([ + 'addr' => '0x1234567890123456789012345678901234567890', + 'n' => '-5', + ]); + $this->assertSame(['error' => 'n must be positive when no cursor is provided'], $result); + } + + public function testEntrypointNegativeNWithCursor() + { + $result = txs_entrypoint([ + 'addr' => '0x1234567890123456789012345678901234567890', + 'n' => '-5', + 'cursor_time' => '1000', + 'cursor_hash' => '0xabc', + ]); + $this->assertArrayNotHasKey('error', $result); + $this->assertSame(-5, $result['n']); + $this->assertSame(1000, $result['cursor_time']); + $this->assertSame('0xabc', $result['cursor_hash']); + } + + public function testEntrypointCursorTimeWithoutHash() + { + $result = txs_entrypoint([ + 'addr' => '0x1234567890123456789012345678901234567890', + 'n' => '5', + 'cursor_time' => '1000', + ]); + $this->assertSame(['error' => 'Both cursor_time and cursor_hash must be provided together'], $result); + } + + public function testEntrypointCursorHashWithoutTime() + { + $result = txs_entrypoint([ + 'addr' => '0x1234567890123456789012345678901234567890', + 'n' => '5', + 'cursor_hash' => '0xabc', + ]); + $this->assertSame(['error' => 'Both cursor_time and cursor_hash must be provided together'], $result); + } + + public 
function testEntrypointNonNumericCursorTime() + { + $result = txs_entrypoint([ + 'addr' => '0x1234567890123456789012345678901234567890', + 'n' => '5', + 'cursor_time' => 'abc', + 'cursor_hash' => '0xabc', + ]); + $this->assertSame(['error' => 'cursor_time must be numeric'], $result); + } + + // ========== Transaction retrieval tests ========== + + public function testBasicNReturnsLastNTransactions() + { + $session = session([ + 'status = 0' => page([ + tx('0xhash1', 1000), + tx('0xhash2', 900), + tx('0xhash3', 800), + tx('0xhash4', 700), + ]), + 'status = 1' => page([]), + ]); + + $result = txs_get($session, '0xaddr1', 3); + + $this->assertCount(3, $result); + $hashes = array_column($result, 'hash'); + $this->assertSame(['0xhash1', '0xhash2', '0xhash3'], $hashes); + } + + public function testNLargerThanAvailable() + { + $session = session([ + 'status = 0' => page([ + tx('0xhash1', 1000), + tx('0xhash2', 900), + ]), + 'status = 1' => page([]), + ]); + + $result = txs_get($session, '0xaddr1', 5); + + $this->assertCount(2, $result); + $hashes = array_column($result, 'hash'); + $this->assertSame(['0xhash1', '0xhash2'], $hashes); + } + + public function testCursorWithNegativeNReturnsTransactionsBeforeCursor() + { + $session = session([ + 'status = 0' => page([ + tx('0xhash1', 1000), + tx('0xhash2', 900), + tx('0xhash3', 800), + tx('0xhash4', 700), + ]), + 'status = 1' => page([]), + ]); + + $result = txs_get($session, '0xaddr1', -5, 800, '0xhash3'); + + $hashes = array_column($result, 'hash'); + $this->assertSame(['0xhash1', '0xhash2'], $hashes); + } + + public function testCursorWithPositiveNReturnsTransactionsAfterCursor() + { + $session = session([ + 'status = 0' => page([ + tx('0xhash1', 1000), + tx('0xhash2', 900), + tx('0xhash3', 800), + tx('0xhash4', 700), + ]), + 'status = 1' => page([]), + ]); + + $result = txs_get($session, '0xaddr1', 5, 900, '0xhash2'); + + $hashes = array_column($result, 'hash'); + $this->assertSame(['0xhash3', '0xhash4'], $hashes); + } + + public function testCursorWithPositiveNRespectsCount() + { + $session = session([ + 'status = 0' => page([ + tx('0xhash1', 1000), + tx('0xhash2', 900), + tx('0xhash3', 800), + tx('0xhash4', 700), + tx('0xhash5', 600), + ]), + 'status = 1' => page([]), + ]); + + $result = txs_get($session, '0xaddr1', 2, 900, '0xhash2'); + + $hashes = array_column($result, 'hash'); + $this->assertSame(['0xhash3', '0xhash4'], $hashes); + } + + public function testCursorWithNegativeNRespectsCount() + { + $session = session([ + 'status = 0' => page([ + tx('0xhash1', 1000), + tx('0xhash2', 900), + tx('0xhash3', 800), + tx('0xhash4', 700), + ]), + 'status = 1' => page([]), + ]); + + $result = txs_get($session, '0xaddr1', -2, 700, '0xhash4'); + + $this->assertCount(2, $result); + $hashes = array_column($result, 'hash'); + $this->assertSame(['0xhash2', '0xhash3'], $hashes); + } + + public function testCursorAtFirstTransactionWithNegativeNReturnsEmpty() + { + $session = session([ + 'status = 0' => page([ + tx('0xhash1', 1000), + tx('0xhash2', 900), + ]), + 'status = 1' => page([]), + ]); + + $result = txs_get($session, '0xaddr1', -5, 1000, '0xhash1'); + + $this->assertCount(0, $result); + } + + public function testCursorAtLastTransactionWithPositiveNReturnsEmpty() + { + $session = session([ + 'status = 0' => page([ + tx('0xhash1', 1000), + tx('0xhash2', 900), + ]), + 'status = 1' => page([]), + ]); + + $result = txs_get($session, '0xaddr1', 5, 900, '0xhash2'); + + $this->assertCount(0, $result); + } + + public function testDeduplicationWithCursor() + { + 
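+        // Duplicate rows for 0xhash1 should collapse to a single entry while paging back from the cursor.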
$session = session([ + 'status = 0' => page([ + tx('0xhash1', 1000), + tx('0xhash1', 950), // duplicate, older + tx('0xhash2', 900), + tx('0xhash3', 800), + ]), + 'status = 1' => page([]), + ]); + + $result = txs_get($session, '0xaddr1', -5, 800, '0xhash3'); + + $this->assertCount(2, $result); + $hashes = array_column($result, 'hash'); + $this->assertSame(['0xhash1', '0xhash2'], $hashes); + } + + public function testPendingTransactionsIncluded() + { + $now = time(); + $session = session([ + 'status = 0' => page([ + tx('0xconfirmed', $now - 500, 0), + ]), + 'status = 1' => page([ + tx('0xpending', $now - 100, 1), + ]), + ]); + + $result = txs_get($session, '0xaddr1', 5); + + $this->assertCount(2, $result); + $hashes = array_column($result, 'hash'); + $this->assertSame(['0xpending', '0xconfirmed'], $hashes); + } + + public function testPendingReplacedByConfirmed() + { + $session = session([ + 'status = 1' => page([ + tx('0xdup', 1000, 1), + ]), + 'status = 0' => page([ + tx('0xdup', 900, 0), + ]), + ]); + + $result = txs_get($session, '0xaddr1', 5); + + $this->assertCount(1, $result); + $this->assertSame(0, $result[0]['status']); + } + + /** + * Test realistic scenario: pending (status=1) arrives first with later timestamp, + * confirmed (status=0) arrives later but has earlier timestamp. + * When confirmed is within TXS_PENDING_CLOSURE_PAST_LOOKUP_LIMIT, pending should be deduplicated. + */ + public function testPendingDeduplicatedWhenConfirmedWithinLookupLimit() + { + // Request n=2 transactions + // Pending tx at time 1000, confirmed version at time 700 (300s diff, within 500s limit) + // Other tx at time 900 to fill the page + $session = session([ + 'status = 1' => page([ + tx('0xdup', 1000, 1), // pending - will be in page + tx('0xother', 900, 1), // another pending + ]), + 'status = 0' => page([ + tx('0xdup', 700, 0), // confirmed - 300s earlier, within lookup limit + ]), + ]); + + $result = txs_get($session, '0xaddr1', 2); + + // The pending 0xdup should be replaced by its confirmed version + $hashes = array_column($result, 'hash'); + $this->assertCount(2, $result); + $this->assertContains('0xdup', $hashes); + $this->assertContains('0xother', $hashes); + + // Find the 0xdup transaction and verify it's the confirmed version + $dupTx = array_filter($result, function($tx) { return $tx['hash'] === '0xdup'; }); + $dupTx = array_values($dupTx)[0]; + $this->assertSame(0, $dupTx['status'], 'Pending should be replaced by confirmed when within lookup limit'); + } + + /** + * Test that pending stays pending when confirmed version is beyond the lookup limit. 
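+     * (The 500s limit is TXS_PENDING_CLOSURE_PAST_LOOKUP_LIMIT, defined in tests/bootstrap.php.)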
+ */ + public function testPendingStaysPendingWhenConfirmedBeyondLookupLimit() + { + // Pending tx at time 1000, confirmed version at time 400 (600s diff, beyond 500s limit) + $session = session([ + 'status = 1' => page([ + tx('0xdup', 1000, 1), // pending + tx('0xother', 900, 1), // another pending + ]), + 'status = 0' => page([ + tx('0xdup', 500, 0), // confirmed - 500s earlier, beyond lookup limit + tx('0xother', 401, 0), // confirmed - 499s earlier, in lookup limit + ]), + ]); + + $result = txs_get($session, '0xaddr1', 2); + + $hashes = array_column($result, 'hash'); + $this->assertCount(2, $result); + $this->assertContains('0xdup', $hashes); + $this->assertContains('0xother', $hashes); + + // Find the 0xdup transaction - should still be pending since confirmed is too far back + $dupTx = array_filter($result, function($tx) { return $tx['hash'] === '0xdup'; }); + $dupTx = array_values($dupTx)[0]; + $this->assertSame(1, $dupTx['status'], 'Pending should stay pending when confirmed is beyond lookup limit'); + // Find the 0xother transaction - should not be pending since confirmed is not too far back + $otherTx = array_filter($result, function($tx) { return $tx['hash'] === '0xother'; }); + $otherTx = array_values($otherTx)[0]; + $this->assertSame(0, $otherTx['status'], 'Pending should not be pending when confirmed is before lookup limit'); + } + + /** + * Test that in "before" direction, pending stays pending when confirmed version is beyond the lookup limit. + * Same as testPendingStaysPendingWhenConfirmedBeyondLookupLimit but with cursor and negative n. + */ + public function testPendingStaysPendingWhenConfirmedBeyondLookupLimitInBeforeDirection() + { + // Cursor at time 600 + // Pending tx at time 1000, confirmed at time 500 (500s diff, beyond 500s limit) + // Pending tx at time 900, confirmed at time 401 (499s diff, within 500s limit) + // Both confirmed versions are older than cursor, so code must look past cursor + $session = session([ + 'status = 1' => page([ + tx('0xdup', 1000, 1), // pending + tx('0xother', 900, 1), // another pending + ]), + 'status = 0' => page([ + tx('0xcursor', 600, 0), // cursor transaction + tx('0xdup', 500, 0), // confirmed - 500s earlier (1000-500), beyond lookup limit + tx('0xother', 401, 0), // confirmed - 499s earlier (900-401), within lookup limit + ]), + ]); + + // Request transactions before cursor (negative n) + $result = txs_get($session, '0xaddr1', -2, 600, '0xcursor'); + + $hashes = array_column($result, 'hash'); + $this->assertCount(2, $result); + $this->assertContains('0xdup', $hashes); + $this->assertContains('0xother', $hashes); + + // Find the 0xdup transaction - should still be pending since confirmed is beyond lookup limit + $dupTx = array_filter($result, function($tx) { return $tx['hash'] === '0xdup'; }); + $dupTx = array_values($dupTx)[0]; + $this->assertSame(1, $dupTx['status'], 'Pending should stay pending when confirmed is beyond lookup limit'); + + // Find the 0xother transaction - should be confirmed since confirmed is within lookup limit + $otherTx = array_filter($result, function($tx) { return $tx['hash'] === '0xother'; }); + $otherTx = array_values($otherTx)[0]; + $this->assertSame(0, $otherTx['status'], 'Pending should be replaced by confirmed when within lookup limit'); + } + + public function testCursorWithSameTimeUsesHashAsTiebreaker() + { + $session = session([ + 'status = 0' => page([ + tx('0xhashB', 1000), + tx('0xhashA', 1000), + tx('0xhash2', 900), + ]), + 'status = 1' => page([]), + ]); + + $result = txs_get($session, 
'0xaddr1', 5, 1000, '0xhashA'); + + $hashes = array_column($result, 'hash'); + $this->assertSame(['0xhash2'], $hashes); + } + + public function testDirectionFormatting() + { + $session = session([ + 'status = 0' => page([ + [ + 'hash' => '0xdir', + 'time' => 1234, + 'status' => 0, + 'direction' => 0, + 'add1' => 'A', + 'add2' => 'B', + 'receivedat' => null, + ], + ]), + 'status = 1' => page([]), + ]); + + $result = txs_get($session, '0xaddr1', 1); + $tx = $result[0]; + + $this->assertSame('B', $tx['addr_from']); + $this->assertSame('A', $tx['addr_to']); + $this->assertSame(1234, $tx['receivedat'], 'receivedat should default to time when null'); + } + + public function testPagingAcrossMultiplePages() + { + $page2 = page([ + tx('0xhash3', 800), + tx('0xhash4', 700), + ]); + $page1 = page([ + tx('0xhash1', 1000), + tx('0xhash2', 900), + ], $page2); + + $session = session([ + 'status = 0' => $page1, + 'status = 1' => page([]), + ]); + + $result = txs_get($session, '0xaddr1', 5); + $hashes = array_column($result, 'hash'); + + $this->assertSame(['0xhash1', '0xhash2', '0xhash3', '0xhash4'], $hashes); + } + + public function testCursorNavigationWithMultiplePages() + { + $page2 = page([ + tx('0xhash3', 800), + tx('0xhash4', 700), + ]); + $page1 = page([ + tx('0xhash1', 1000), + tx('0xhash2', 900), + ], $page2); + + $session = session([ + 'status = 0' => $page1, + 'status = 1' => page([]), + ]); + + $result = txs_get($session, '0xaddr1', 5, 900, '0xhash2'); + $hashes = array_column($result, 'hash'); + + $this->assertSame(['0xhash3', '0xhash4'], $hashes); + } + + public function testEmptyResult() + { + $session = session([ + 'status = 0' => page([]), + 'status = 1' => page([]), + ]); + + $result = txs_get($session, '0xaddr1', 5); + + $this->assertCount(0, $result); + } + + public function testCursorOnNonExistentTransactionReturnsError() + { + $session = session([ + 'status = 0' => page([ + tx('0xhash1', 1000), + tx('0xhash2', 900), + tx('0xhash3', 800), + ]), + 'status = 1' => page([]), + ]); + + $result = txs_get($session, '0xaddr1', 5, 850, '0xnonexistent'); + + $this->assertSame(['error' => 'Cursor not found'], $result); + } + + public function testCursorExistsFunction() + { + $session = session([ + 'status = 0' => page([ + tx('0xhash1', 1000), + tx('0xhash2', 900), + ]), + 'status = 1' => page([]), + ]); + + $this->assertTrue(txs_cursor_exists($session, '0xaddr1', 1000, '0xhash1')); + $this->assertTrue(txs_cursor_exists($session, '0xaddr1', 900, '0xhash2')); + $this->assertFalse(txs_cursor_exists($session, '0xaddr1', 999, '0xhash1')); + $this->assertFalse(txs_cursor_exists($session, '0xaddr1', 1000, '0xnonexistent')); + } + + // ========== Query limit tests ========== + + public function testNExceedsMaxLimitReturnsError() + { + $session = session([ + 'status = 0' => page([]), + 'status = 1' => page([]), + ]); + + $result = txs_get($session, '0xaddr1', TXS_MAX_QUERY_LIMIT + 1); + $this->assertSame(['error' => '|n| exceeds maximum limit of ' . TXS_MAX_QUERY_LIMIT], $result); + } + + public function testNegativeNExceedsMaxLimitReturnsError() + { + $session = session([ + 'status = 0' => page([tx('0xhash1', 1000)]), + 'status = 1' => page([]), + ]); + + $result = txs_get($session, '0xaddr1', -(TXS_MAX_QUERY_LIMIT + 1), 1000, '0xhash1'); + $this->assertSame(['error' => '|n| exceeds maximum limit of ' . 
TXS_MAX_QUERY_LIMIT], $result); + } + + public function testNAtMaxLimitIsValid() + { + $session = session([ + 'status = 0' => page([tx('0xhash1', 1000)]), + 'status = 1' => page([]), + ]); + + $result = txs_get($session, '0xaddr1', TXS_MAX_QUERY_LIMIT); + $this->assertArrayNotHasKey('error', $result); + } + + // ========== Memory/buffer tests ========== + + public function testBeforeDirectionWithManyTransactionsCropsBuffer() + { + // Create more transactions than TXS_RECENT_MAX_BUFFER_TX_COUNT (10) + $txCount = 20; + $txs = []; + for ($i = 0; $i < $txCount; $i++) { + $txs[] = tx('0xhash' . $i, 10000 - $i); + } + + $session = session([ + 'status = 0' => page($txs), + 'status = 1' => page([]), + ]); + + // Request 3 transactions before the last one (cursor at oldest tx) + $cursorTime = 10000 - ($txCount - 1); + $cursorHash = '0xhash' . ($txCount - 1); + + $result = txs_get($session, '0xaddr1', -3, $cursorTime, $cursorHash); + + // Should return 3 transactions closest to cursor + $this->assertCount(3, $result); + + // Verify correct transactions returned (closest to cursor) + $hashes = array_column($result, 'hash'); + $this->assertSame('0xhash16', $hashes[0]); // 3 before hash19 + $this->assertSame('0xhash18', $hashes[2]); + } +} diff --git a/trnslist.php b/trnslist.php index dd194c3..f17910e 100644 --- a/trnslist.php +++ b/trnslist.php @@ -1,20 +1,11 @@ isLastPage()) break; - $page = $page->nextPage(); - } -} +require_once __DIR__ . '/includes/cassandra.inc'; function get_transactions($session, $addr, $limit, $offset) { $needed = $offset + $limit; - $page_size = 50; - $pending_cutoff = time() - 3600; + $page_size = TXS_CASSANDRA_QUERY_PAGE_SIZE; + $pending_cutoff = time() - TXS_PENDING_CUTOFF_AGE; $iters = [ paged_rows($session->execute( @@ -33,11 +24,21 @@ function get_transactions($session, $addr, $limit, $offset) { }); $seen = []; + $seen_idx = []; $txs = []; $txs_count = 0; + $enough_but_remaining_seen = false; // Merge all streams by time DESC, deduplicating while ($iters) { + // Check if we have enough but still have pending transactions to close + if ($txs_count >= $needed) { + if (empty($seen_idx)) { + break; + } + $enough_but_remaining_seen = true; + } + // Find iterator with highest time (most recent), hash as tiebreaker $best_key = null; $best_rank = null; @@ -58,15 +59,56 @@ function get_transactions($session, $addr, $limit, $offset) { unset($iters[$best_key]); } + if ($enough_but_remaining_seen) { + // Allow to look for more transactions in the past to + // close possible pending transactions + $last_time = $row['time']->value(); + $found = false; + foreach ($seen_idx as $h => $sidx) { + if ($txs[$sidx]['time']->value() - $last_time < TXS_PENDING_CLOSURE_PAST_LOOKUP_LIMIT) { + $found = true; + break; + } + unset($seen_idx[$h]); // too old + } + if (!$found) break; + $enough_but_remaining_seen = false; + } + // Deduplicate by hash $hash = $row['hash']; - if (isset($seen[$hash]) && $seen[$hash] <= $row['status']) { + if (isset($seen[$hash])) { + // Status 0 is final. + if ($seen[$hash] == 0) { + continue; + } + // Replace previously stored row with the lower-status one + if (isset($seen_idx[$hash])) { + $txs[$seen_idx[$hash]] = $row; + // Once we keep a status 0 version, we no longer need an index tracked. 
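+                // (Status 0 means confirmed and is final; status 1 means pending.)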
+ if ($row['status'] == 0) { + unset($seen_idx[$hash]); + } + } + + if ($txs_count >= $needed) + continue; + + $seen[$hash] = $row['status']; continue; } + + if ($txs_count >= $needed) + continue; + $seen[$hash] = $row['status']; + // Only track index when status > 0; status 0 is final and won't be replaced. + if ($row['status'] > 0) { + $seen_idx[$hash] = $txs_count; + } $txs[] = $row; - if (++$txs_count >= $needed) break; + $txs_count++; } // Apply pagination @@ -95,8 +137,24 @@ function get_transactions($session, $addr, $limit, $offset) { return $output; } +// @codeCoverageIgnoreStart // Main entry point - only runs when executed directly if (realpath($_SERVER['SCRIPT_FILENAME']) === realpath(__FILE__)) { + /** + * Page size for Cassandra queries. + */ + define('TXS_CASSANDRA_QUERY_PAGE_SIZE', 50); + + /** + * Maximum age in seconds for pending transactions to be included. + */ + define('TXS_PENDING_CUTOFF_AGE', 3600); + + /** + * How far back in time (seconds) to look for confirmed versions of pending transactions. + */ + define('TXS_PENDING_CLOSURE_PAST_LOOKUP_LIMIT', 24 * 3600); + header('Access-Control-Allow-Origin: *'); // Validate and parse input @@ -116,4 +174,5 @@ function get_transactions($session, $addr, $limit, $offset) { echo json_encode(get_transactions($session, $addr, $limit, $offset)); } +// @codeCoverageIgnoreEnd ?> diff --git a/v1/txs.php b/v1/txs.php new file mode 100644 index 0000000..e04edd5 --- /dev/null +++ b/v1/txs.php @@ -0,0 +1,375 @@ + string] or ['addr' => string, 'n' => int, 'cursor_time' => int|null, 'cursor_hash' => string|null] + */ +function txs_entrypoint($params) { + // Validate address + if (strlen($params['addr'] ?? '') != 42) { + return ['error' => 'Invalid address']; + } + $addr = strtolower(preg_replace("/[^a-zA-Z0-9]+/", "", $params['addr'])); + + // Validate n parameter + if (!isset($params['n']) || !is_numeric($params['n']) || (int)$params['n'] == 0) { + return ['error' => 'n is required and must be a non-zero integer']; + } + $n = (int)$params['n']; + + // Parse optional cursor parameters + $cursor_time = null; + $cursor_hash = null; + + $has_time = isset($params['cursor_time']); + $has_hash = isset($params['cursor_hash']); + + if ($has_time && $has_hash) { + if (!is_numeric($params['cursor_time'])) { + return ['error' => 'cursor_time must be numeric']; + } + $cursor_time = (int)$params['cursor_time']; + $cursor_hash = $params['cursor_hash']; + } elseif ($has_time || $has_hash) { + return ['error' => 'Both cursor_time and cursor_hash must be provided together']; + } else { + // No cursor - n must be positive + if ($n < 0) { + return ['error' => 'n must be positive when no cursor is provided']; + } + } + + return [ + 'addr' => $addr, + 'n' => $n, + 'cursor_time' => $cursor_time, + 'cursor_hash' => $cursor_hash, + ]; +} + +/** + * Check if a cursor (time, hash) exists in the database + * + * @param object $session Cassandra session + * @param string $addr Wallet address + * @param int $time Transaction timestamp + * @param string $hash Transaction hash + * @return bool True if cursor exists + */ +function txs_cursor_exists($session, $addr, $time, $hash) { + // Try status = 0 first (most likely), then status = 1 + foreach ([0, 1] as $status) { + $result = $session->execute( + new Cassandra\SimpleStatement( + "SELECT hash FROM testtransactions WHERE add1 = ? AND time = ? AND status = $status AND hash = ?" 
+ ), + ['arguments' => [$addr, $time, $hash]] + ); + foreach ($result as $row) { + return true; + } + } + return false; +} + +/** + * Get transactions with cursor-based pagination + * + * @param object $session Cassandra session + * @param string $addr Wallet address + * @param int $n Signed integer: count (magnitude) and direction (sign when cursor provided) + * @param int|null $cursor_time Timestamp of cursor transaction (optional) + * @param string|null $cursor_hash Hash of cursor transaction (optional) + * @return array Array of transaction rows, or ['error' => string] on failure + */ +function txs_get($session, $addr, $n, $cursor_time = null, $cursor_hash = null) { + if (abs($n) > TXS_MAX_QUERY_LIMIT) { + return ['error' => '|n| exceeds maximum limit of ' . TXS_MAX_QUERY_LIMIT]; + } + + $pending_cutoff = time() - TXS_PENDING_CUTOFF_AGE; + + // Determine direction and count + $count = abs($n); + $has_cursor = $cursor_time !== null && $cursor_hash !== null; + $is_direction_after = $n >= 0; + + if ($has_cursor) { + // Validate cursor exists + if (!txs_cursor_exists($session, $addr, $cursor_time, $cursor_hash)) { + return ['error' => 'Cursor not found']; + } + } + + $iters = [ + paged_rows($session->execute( + new Cassandra\SimpleStatement("SELECT * FROM testtransactions WHERE add1 = ? AND status = 0 ORDER BY time DESC"), + ['arguments' => [$addr], 'page_size' => TXS_CASSANDRA_QUERY_PAGE_SIZE] + )), + paged_rows($session->execute( + new Cassandra\SimpleStatement("SELECT * FROM testtransactions WHERE add1 = ? AND status = 1 AND time>=". $pending_cutoff ." ORDER BY time DESC"), + ['arguments' => [$addr], 'page_size' => TXS_CASSANDRA_QUERY_PAGE_SIZE] + )), + ]; + + // Remove exhausted iterators + $iters = array_filter($iters, function ($it) { + return $it->valid(); + }); + + $seen = []; // all hash seen yet (required for deduplication) + $seen_idx = []; // keep index of pending txs to close + $txs = []; + $txs_count = 0; + $cursor_rank = $cursor_time !== null ? 
[$cursor_time, $cursor_hash] : null; + $enough_but_remaining_seen = false; + + // For 'before' direction: collect all txs more recent than cursor, then take last N + // For 'after' direction: skip until past cursor (or start if no cursor), then collect N + + // Merge all streams by time DESC, deduplicating + while ($iters) { + // For 'after': stop when we have enough + if ($is_direction_after && $txs_count >= $count) { + if (empty($seen_idx)) { + break; + } + $enough_but_remaining_seen = true; + } + + // Find iterator with highest time (most recent), hash as tiebreaker + $best_key = null; + $best_rank = null; + foreach ($iters as $key => $iter) { + $row = $iter->current(); + $rank = [$row['time']->value(), $row['hash']]; + if ($best_rank === null || $rank > $best_rank) { + $best_key = $key; + $best_rank = $rank; + } + } + + // Get row and advance iterator + $best_iter = $iters[$best_key]; + $row = $best_iter->current(); + $best_iter->next(); + if (!$best_iter->valid()) { + unset($iters[$best_key]); + } + + if ($enough_but_remaining_seen) { + // Allow to look for more transactions in the past to + // close possible pending transactions + $last_time = $row['time']->value(); + $found = false; + foreach($seen_idx as $h => $sidx) { + if ($txs[$sidx]['time']->value() - $last_time < TXS_PENDING_CLOSURE_PAST_LOOKUP_LIMIT) { + $found = true; + break; + } + unset($seen_idx[$h]); // too old + } + if (!$found) break; + $enough_but_remaining_seen = false; + } + + $current_rank = [$row['time']->value(), $row['hash']]; + + // Handle cursor-based filtering + if ($has_cursor) { + if ($is_direction_after) { + // We want txs OLDER than cursor + // Skip until we pass the cursor + if ($current_rank >= $cursor_rank) { + continue; + } + } else { + // We want txs MORE RECENT than cursor + // When we reach the cursor, continue looking for pending closures + if ($current_rank <= $cursor_rank) { + if (empty($seen_idx)) { + break; + } + $enough_but_remaining_seen = true; + } + } + } + + // Deduplicate by hash + $hash = $row['hash']; + if (isset($seen[$hash])) { + // Status 0 is final. + if ($seen[$hash] == 0) { + continue; + } + // Replace previously stored row with the lower-status one + if (isset($seen_idx[$hash])) { + $txs[$seen_idx[$hash]] = $row; + } + + if ($enough_but_remaining_seen) + continue; + + $seen[$hash] = $row['status']; + // Once we keep a status 0 version, we no longer need an index tracked. + if ($row['status'] == 0) { + unset($seen_idx[$hash]); + } + continue; + } + + if ($enough_but_remaining_seen) + continue; + + $seen[$hash] = $row['status']; + // Only track index when status > 0; status 0 is final and won't be replaced. + if ($row['status'] > 0) { + $seen_idx[$hash] = $txs_count; + } + + $txs[] = $row; + $txs_count++; + + // For 'before' direction: crop buffer when it exceeds max to avoid memory issues + if ($has_cursor && !$is_direction_after && $txs_count > TXS_RECENT_MAX_BUFFER_TX_COUNT) { + $txs = array_slice($txs, -$count, null, true); // preserve indexes + $txs_count = count($txs); + } + } + + // Apply count limit + if ($has_cursor && !$is_direction_after) { + // For 'before': take the last N (closest to cursor) + $txs = array_slice($txs, -$count); + } else { + // For 'after' and no-cursor: take first N + $txs = array_slice($txs, 0, $count); + } + + // Format output + $output = []; + foreach ($txs as $row) { + $tx = []; + $tx['hash'] = $row['hash']; + $tx['status'] = $row['status']; + $tx['time'] = $row['time']->value(); + $tx['receivedat'] = !is_null($row['receivedat']) + ? 
$row['receivedat']->value() + : $tx['time']; + + if ($row['direction'] == 1) { + $tx['addr_from'] = $row['add1']; + $tx['addr_to'] = $row['add2']; + } else { + $tx['addr_from'] = $row['add2']; + $tx['addr_to'] = $row['add1']; + } + + $tx['direction'] = $row['direction']; + $tx['add1'] = $row['add1']; + $tx['add2'] = $row['add2']; + + $output[] = $tx; + } + + return $output; +} + +// @codeCoverageIgnoreStart +// Main entry point - only runs when executed directly +if (realpath($_SERVER['SCRIPT_FILENAME']) === realpath(__FILE__)) { + /** + * Maximum number of transactions that can be requested in a single query. + */ + define('TXS_MAX_QUERY_LIMIT', 100); + + /** + * Maximum buffer size for "before" direction queries. + * Must be significantly higher than TXS_MAX_QUERY_LIMIT to avoid + */ + define('TXS_RECENT_MAX_BUFFER_TX_COUNT', 500); + + /** + * Page size for Cassandra queries. + */ + define('TXS_CASSANDRA_QUERY_PAGE_SIZE', 50); + + /** + * Maximum age in seconds for pending transactions to be included. + */ + define('TXS_PENDING_CUTOFF_AGE', 3600); + + /** + * How far back in time (seconds) to look for confirmed versions of pending transactions. + */ + define('TXS_PENDING_CLOSURE_PAST_LOOKUP_LIMIT', 24 * 3600); + + header('Access-Control-Allow-Origin: *'); + header('Content-Type: application/json'); + + // Feature negotiation (required - no legacy support) + $common_features = negotiate_features(TXS_SUPPORTED_FEATURES); + if ($common_features === null) { + exit; + } + + $result = txs_entrypoint($_GET); + + if (isset($result['error'])) { + http_response_code(400); + echo json_encode($result); + exit; + } + + // Connect to Cassandra + $cluster = Cassandra::cluster('127.0.0.1') + ->withCredentials("transactions_ro", "Public_transactions") + ->build(); + $session = $cluster->connect('comchain'); + + $txs = txs_get( + $session, + $result['addr'], + $result['n'], + $result['cursor_time'], + $result['cursor_hash'] + ); + + if (isset($txs['error'])) { + http_response_code(400); + echo json_encode($txs); + exit; + } + + echo json_encode($txs); +} +// @codeCoverageIgnoreEnd +?>