diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..75b4dcd --- /dev/null +++ b/.gitignore @@ -0,0 +1,146 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +Pipfile.lock + +# poetry +poetry.lock + +# pdm +.pdm.toml + +# PEP 582 +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# Editor backup files +*~ +.*.swp +.*.swo + +# IDE +.vscode/ +.idea/ +*.iml diff --git a/CHANGELOG.md b/CHANGELOG.md index 1beaf38..176108b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,42 @@ This file should adhere to [Keep a Changelog](https://keepachangelog.com/en/1.1. 
This project should adhere to [Semantic Versioning](https://semver.org/spec/v2.0.0.html), though some earlier releases may be incompatible with the SemVer standard. +## [Unreleased] + +### Added +- Expanded search feature coverage with new feature flags: + - `search.text` - Basic text/summary search + - `search.text.case-sensitive` - Case-sensitive text matching (default behavior) + - `search.text.case-insensitive` - Case-insensitive text matching via CalDAVSearcher + - `search.text.substring` - Substring matching for text searches + - `search.is-not-defined` - Property filter with is-not-defined operator + - `search.text.category` - Category search support + - `search.text.category.substring` - Substring matching for category searches +- `post_filter=False` parameter to all server behavior tests to ensure testing actual server responses +- New `CheckSyncToken` check class for RFC6578 sync-collection reports: + - Tests for sync token support (full/fragile/unsupported) + - Detects time-based sync tokens (second-precision, requires sleep(1) between operations) + - Detects fragile sync tokens (occasionally returns extra content due to race conditions) + - Tests sync-collection reports after object deletion +- New `CheckAlarmSearch` check class for alarm time-range searches (RFC4791 section 9.9): + - Tests if server supports searching for events based on when their alarms trigger + - Verifies correct filtering of alarm times vs event times +- New `CheckPrincipalSearch` check class for principal search operations: + - Tests basic principal access + - Tests searching for own principal by display name (`principal-search.by-name.self`) + - Tests listing all principals (`principal-search.list-all`) + - Note: Full `principal-search.by-name` testing requires multiple users and is not yet implemented +- New `CheckDuplicateUID` check class for duplicate UID handling: + - Tests if server allows events with same UID in different calendars (`save.duplicate-uid.cross-calendar`) + - 
Detects if duplicates are silently ignored or rejected with errors + - Verifies events are treated as separate entities when allowed + +### Changed +- Improved `search.comp-type-optional` test with additional text search validation + +### Fixed +- `create-calendar` feature detection to not incorrectly mark mkcol method as standard calendar creation + ## [0.1] - [2025-11-08] This release corresponds with the caldav version 2.1.2 diff --git a/TEST_PERFORMANCE.md b/TEST_PERFORMANCE.md new file mode 100644 index 0000000..855214f --- /dev/null +++ b/TEST_PERFORMANCE.md @@ -0,0 +1,205 @@ +# Test Performance Statistics + +This document provides detailed performance statistics for the caldav-server-tester test suite. + +## Quick Summary + +| Test Category | Count | Total Time | Avg Time/Test | Memory Usage | +|--------------|-------|------------|---------------|--------------| +| **Fast Tests** (unit) | 54 | ~1.2s | <5ms | ~76 MB | +| **Slow Tests** (mocked server) | 14 | ~70s | ~5s | ~77 MB | +| **Total** | 68 | ~71s | ~1s | ~77 MB | + +## Running Tests + +### Fast Tests Only (Recommended for Development) +```bash +# Run only fast unit tests +pytest -m "not slow" + +# With verbose output +pytest -m "not slow" -v + +# With duration statistics +pytest -m "not slow" --durations=10 +``` + +**Performance:** 54 tests in ~1.2 seconds + +### All Tests (Including Slow Mocked Server Tests) +```bash +# Run all tests +pytest + +# With detailed timing +pytest --durations=20 +``` + +**Performance:** 68 tests in ~71 seconds + +### Slow Tests Only +```bash +pytest -m "slow" +``` + +**Performance:** 14 tests in ~70 seconds + +## Detailed Test Timing + +### Fast Unit Tests (< 5ms each) + +All 54 fast unit tests complete in under 5 milliseconds each: + +- **test_ai_check_base.py**: 18 tests for Check base class + - set_feature method: 8 tests + - feature_checked method: 3 tests + - run_check dependency resolution: 7 tests + +- **test_ai_checker.py**: 24 tests for ServerQuirkChecker + - 
Initialization: 7 tests + - Properties: 2 tests + - Methods (check_one, report, cleanup): 15 tests + +- **test_ai_filters.py**: 12 tests for _filter_2000 function + - Date range filtering + - Edge cases and boundary conditions + +### Slow Mocked Server Tests + +These tests run actual check logic with mocked server responses: + +| Test | Duration | Category | +|------|----------|----------| +| `test_calendar_auto_creation_detected` | ~60s | CheckMakeDeleteCalendar | +| `test_calendar_creation_with_displayname` | ~10s | CheckMakeDeleteCalendar | +| Other mocked tests | <0.5s each | Various | + +**Why these are slow:** +- They execute the full `_run_check()` logic +- Complex retry/fallback mechanisms +- Multiple calendar creation/deletion cycles +- Extensive feature detection logic + +## Resource Usage + +### CPU Usage +``` +CPU: 99% (single-threaded) +Context switches: ~75 involuntary +Page faults: ~22,000 minor +``` + +### Memory Usage +``` +Maximum resident set size: ~77 MB +Average memory footprint: Stable throughout execution +No memory leaks detected +``` + +### I/O +``` +File system inputs: 0 +File system outputs: 32 (test result files) +No network I/O (all tests are offline) +``` + +## Performance Tips + +### For Development (Fast Feedback) +```bash +# Run only fast tests - get results in ~1 second +pytest -m "not slow" -x + +# Run specific test file +pytest tests/test_ai_filters.py + +# Run specific test +pytest tests/test_ai_filters.py::TestFilter2000::test_filter_includes_dtstart_at_start_boundary +``` + +### For CI/CD +```bash +# Run all tests with coverage +pytest --cov=caldav_server_tester --cov-report=html + +# Run with JUnit XML output for CI +pytest --junitxml=test-results.xml + +# Parallel execution (if pytest-xdist installed) +pytest -n auto +``` + +### Profiling Individual Tests +```bash +# Show detailed timing for all tests +pytest --durations=0 -vv + +# Profile specific test with cProfile +python -m cProfile -o profile.stats -m pytest 
tests/test_ai_filters.py +python -c "import pstats; p=pstats.Stats('profile.stats'); p.sort_stats('cumulative'); p.print_stats(20)" +``` + +## Performance Optimization + +The test suite is optimized for: + +1. **Fast Iteration**: Unit tests run in ~1 second for rapid development +2. **Comprehensive Coverage**: 71 total tests (54 fast + 14 slow + 3 skipped) +3. **Selective Execution**: Use markers to run appropriate test subset +4. **Low Memory**: < 80 MB memory footprint +5. **No Dependencies**: All tests run offline without external services + +## Monitoring Performance Over Time + +To track test performance over time: + +```bash +# Generate timing report +pytest --durations=0 --tb=no > timing_report.txt + +# Compare with previous run +diff timing_report_old.txt timing_report.txt +``` + +For continuous monitoring, consider integrating with CI to track: +- Total test execution time +- Individual slow test trends +- Memory usage patterns +- Test failure rates + +## Troubleshooting Slow Tests + +If tests are slower than expected: + +1. **Check for slow markers**: Some tests are intentionally slow + ```bash + pytest --co -m slow # List slow tests + ``` + +2. **Profile specific test**: + ```bash + pytest tests/test_checks_with_mocks.py::TestCheckMakeDeleteCalendar::test_calendar_auto_creation_detected --durations=0 -vv + ``` + +3. **Check system load**: Ensure system isn't under heavy load + ```bash + top # Check CPU/memory availability + ``` + +4. 
**Reduce test scope**: Run subset of tests + ```bash + pytest tests/test_ai_filters.py # Just one file + ``` + +## Generated Reports + +- **Duration Report**: Use `pytest --durations=N` to see N slowest tests +- **Coverage Report**: Use `pytest --cov` for coverage analysis +- **JUnit XML**: Use `pytest --junitxml` for CI integration +- **HTML Report**: Use `pytest-html` plugin for visual reports + +--- + +**Last Updated**: Generated from test run statistics +**Test Framework**: pytest 8.4.2 +**Python Version**: 3.13.7 diff --git a/conftest.py b/conftest.py new file mode 100644 index 0000000..f1cbaae --- /dev/null +++ b/conftest.py @@ -0,0 +1,9 @@ +"""Pytest configuration to use local caldav library""" + +import sys +from pathlib import Path + +# Add the local caldav library to the path before system-wide caldav +caldav_path = Path(__file__).parent.parent / "caldav-synctokens" +if caldav_path.exists(): + sys.path.insert(0, str(caldav_path)) diff --git a/generate_test_stats.sh b/generate_test_stats.sh new file mode 100755 index 0000000..d20b49b --- /dev/null +++ b/generate_test_stats.sh @@ -0,0 +1,65 @@ +#!/bin/bash +# +# Generate comprehensive test statistics including timing and resource usage +# +# Usage: ./generate_test_stats.sh [--fast-only] + +set -e + +OUTPUT_FILE="test_stats_$(date +%Y%m%d_%H%M%S).txt" + +echo "Generating test statistics..." 
+echo "Output file: $OUTPUT_FILE" +echo "" + +{ + echo "================================================================================" + echo "Test Performance Report" + echo "Generated: $(date)" + echo "================================================================================" + echo "" + + if [ "$1" == "--fast-only" ]; then + echo "Running FAST tests only (excluding 'slow' marker)" + echo "" + + echo "--- Test Execution with Timing ---" + /usr/bin/time -v python -m pytest tests/ -m "not slow" --durations=0 -v --tb=no 2>&1 + + else + echo "Running ALL tests" + echo "" + + echo "--- Fast Tests Only ---" + echo "" + /usr/bin/time -v python -m pytest tests/ -m "not slow" -q 2>&1 + + echo "" + echo "================================================================================" + echo "--- All Tests (including slow mocked server tests) ---" + echo "" + /usr/bin/time -v python -m pytest tests/ --durations=20 -v --tb=no 2>&1 + fi + + echo "" + echo "================================================================================" + echo "Test Statistics Summary" + echo "================================================================================" + + # Count tests by file + echo "" + echo "Tests by file:" + find tests/ -name "test_*.py" -exec sh -c 'echo " $(basename {}): $(grep -c "def test_" {} 2>/dev/null || echo 0) tests"' \; + + echo "" + echo "Total test functions: $(find tests/ -name "test_*.py" -exec cat {} \; | grep -c "def test_")" + +} | tee "$OUTPUT_FILE" + +echo "" +echo "Report saved to: $OUTPUT_FILE" +echo "" +echo "Quick commands:" +echo " View report: cat $OUTPUT_FILE" +echo " View timing only: grep -A 30 'slowest.*duration' $OUTPUT_FILE" +echo " View resource usage: grep -A 20 'Maximum resident' $OUTPUT_FILE" diff --git a/profile_tests.py b/profile_tests.py new file mode 100755 index 0000000..a09caeb --- /dev/null +++ b/profile_tests.py @@ -0,0 +1,147 @@ +#!/usr/bin/env python +""" +Profile pytest tests to get detailed statistics on 
time and resource usage. + +Usage: + python profile_tests.py [--fast-only] +""" + +import subprocess +import sys +import time + + +def run_pytest_with_timing(markers=None): + """Run pytest and capture timing information""" + cmd = [ + sys.executable, "-m", "pytest", + "tests/", + "--durations=0", + "-v", + "--tb=no", + "--quiet" + ] + + if markers: + cmd.extend(["-m", markers]) + + print(f"Running: {' '.join(cmd)}") + print("=" * 80) + + start = time.time() + result = subprocess.run(cmd, capture_output=True, text=True) + elapsed = time.time() - start + + return result, elapsed + + +def parse_durations(output): + """Parse pytest duration output""" + durations = [] + in_durations = False + + for line in output.split('\n'): + if 'slowest' in line and 'durations' in line: + in_durations = True + continue + + if in_durations: + if line.strip() and 's call' in line: + parts = line.split() + if len(parts) >= 3: + duration = parts[0].rstrip('s') + test_name = ' '.join(parts[2:]) + try: + durations.append((float(duration), test_name)) + except ValueError: + pass + elif 'passed' in line or 'failed' in line: + break + + return sorted(durations, reverse=True) + + +def format_duration(seconds): + """Format duration in human-readable form""" + if seconds < 0.001: + return f"{seconds*1000000:.0f}µs" + elif seconds < 1: + return f"{seconds*1000:.1f}ms" + else: + return f"{seconds:.2f}s" + + +def print_statistics(durations, total_time, test_type="All"): + """Print formatted statistics""" + print(f"\n{'='*80}") + print(f"{test_type} Tests Performance Statistics") + print(f"{'='*80}") + + if not durations: + print("No test durations found") + return + + print(f"\nTotal execution time: {format_duration(total_time)}") + print(f"Number of tests: {len(durations)}") + + if durations: + avg_time = sum(d[0] for d in durations) / len(durations) + print(f"Average test time: {format_duration(avg_time)}") + print(f"Slowest test: {format_duration(durations[0][0])}") + print(f"Fastest test: 
{format_duration(durations[-1][0])}") + + print(f"\n{'Test Name':<80} {'Duration':>12}") + print("-" * 93) + + for duration, name in durations[:20]: # Show top 20 + # Truncate long test names + display_name = name if len(name) <= 80 else name[:77] + "..." + print(f"{display_name:<80} {format_duration(duration):>12}") + + if len(durations) > 20: + print(f"\n... and {len(durations) - 20} more tests") + + # Category breakdown + categories = {} + for duration, name in durations: + if '::' in name: + file_name = name.split('::')[0] + categories[file_name] = categories.get(file_name, 0) + duration + + print(f"\n{'File':<60} {'Total Time':>12}") + print("-" * 73) + for file_name in sorted(categories.keys(), key=categories.get, reverse=True): + print(f"{file_name:<60} {format_duration(categories[file_name]):>12}") + + +def main(): + fast_only = "--fast-only" in sys.argv + + if fast_only: + print("Running FAST tests only (excluding 'slow' marker)") + result, total_time = run_pytest_with_timing("not slow") + test_type = "Fast" + else: + print("Running ALL tests") + result, total_time = run_pytest_with_timing() + test_type = "All" + + # Print pytest output + print(result.stdout) + if result.stderr: + print("STDERR:", result.stderr, file=sys.stderr) + + # Parse and display statistics + durations = parse_durations(result.stdout) + print_statistics(durations, total_time, test_type) + + # Summary + print(f"\n{'='*80}") + print(f"Summary: {len(durations)} tests completed in {format_duration(total_time)}") + print(f"{'='*80}\n") + + return result.returncode + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/pyproject.toml b/pyproject.toml index ebf102f..e4a11d8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -29,3 +29,8 @@ caldav-server-tester = "caldav_server_tester:check_server_compatibility" [build-system] requires = ["poetry-core>=2.0.0,<3.0.0"] build-backend = "poetry.core.masonry.api" + +[tool.pytest.ini_options] +markers = [ + "slow: marks tests as slow 
(deselect with '-m \"not slow\"')", +] diff --git a/src/caldav_server_tester/caldav_server_tester.py b/src/caldav_server_tester/caldav_server_tester.py index 4617c8d..1aa5f6d 100755 --- a/src/caldav_server_tester/caldav_server_tester.py +++ b/src/caldav_server_tester/caldav_server_tester.py @@ -30,6 +30,11 @@ help="Password for the caldav server", metavar="URL", ) +@click.option( + "--caldav-features", + help="Server compatibility features preset (e.g., 'bedework', 'zimbra', 'sogo')", + metavar="FEATURES", +) # @click.option("--check-features", help="List of features to test") @click.option("--run-checks", help="List of checks to run", multiple=True) def check_server_compatibility(verbose, json, name, run_checks, **kwargs): diff --git a/src/caldav_server_tester/checker.py b/src/caldav_server_tester/checker.py index 776412a..ed2654e 100644 --- a/src/caldav_server_tester/checker.py +++ b/src/caldav_server_tester/checker.py @@ -21,8 +21,22 @@ def __init__(self, client_obj, debug_mode='logging'): self._default_calendar = None self._checks_run = set() ## checks that has already been running self.expected_features = self._client_obj.features + self.principal = self._client_obj.principal() self.debug_mode = debug_mode + ## Handle search-cache delay if configured + search_cache_config = self._client_obj.features.is_supported("search-cache", return_type=dict) + if search_cache_config.get("behaviour") == "delay": + delay = search_cache_config.get("delay", 1) + ## Wrap Calendar.search with delay decorator + from caldav.objects import Calendar + if not hasattr(Calendar, '_original_search'): + Calendar._original_search = Calendar.search + def delayed_search(self, *args, **kwargs): + time.sleep(delay) + return Calendar._original_search(self, *args, **kwargs) + Calendar.search = delayed_search + def check_all(self): classes = [ obj @@ -80,7 +94,7 @@ def report(self, verbose=False, return_what=str): ret = { "caldav_version": caldav.__version__, "ts": time.time(), - "name": 
getattr(self._client_obj, "server_name"), + "name": getattr(self._client_obj, "server_name", "(noname)"), "url": str(self._client_obj.url), "features": self._features_checked.dotted_feature_set_list(compact=True), "error": "Not fully implemnted yet - TODO", diff --git a/src/caldav_server_tester/checks.py b/src/caldav_server_tester/checks.py index 7ec418e..2bed8dc 100644 --- a/src/caldav_server_tester/checks.py +++ b/src/caldav_server_tester/checks.py @@ -4,16 +4,18 @@ from datetime import timezone from datetime import datetime from datetime import date +from datetime import timedelta +from zoneinfo import ZoneInfo from caldav.compatibility_hints import FeatureSet -from caldav.lib.error import NotFoundError, AuthorizationError, ReportError +from caldav.lib.error import NotFoundError, AuthorizationError, ReportError, DAVError from caldav.calendarobjectresource import Event, Todo, Journal +from caldav.search import CalDAVSearcher from .checks_base import Check utc = timezone.utc - def _filter_2000(objects): """Sometimes the only chance we have to run checks towards some cloud service is to run the checks towards some existing important @@ -105,29 +107,32 @@ def _try_make_calendar(self, cal_id, **kwargs): ## calendar creation must have gone OK. calmade = True self.checker.principal.calendar(cal_id=cal_id).events() - self.set_feature("create-calendar") + ## the caller takes care of setting quirk flag if mkcol + ## (todo - does this make sense? 
Actually the whole _try_make_calendar looks messy to me and should probably be refactored) + if kwargs.get('method', 'mkcalendar') != 'mkcol': + self.set_feature("create-calendar") if kwargs.get("name"): try: name = "A calendar with this name should not exist" self.checker.principal.calendar(name=name).events() breakpoint() ## TODO - do something better here - except: + except DAVError: ## This is not the exception, this is the normal try: cal2 = self.checker.principal.calendar(name=kwargs["name"]) cal2.events() assert cal2.id == cal.id self.set_feature("create-calendar.set-displayname") - except: + except DAVError: self.set_feature("create-calendar.set-displayname", False) - except Exception as e: + except DAVError as e: ## calendar creation created an exception. Maybe the calendar exists? ## in any case, return exception cal = self.checker.principal.calendar(cal_id=cal_id) try: cal.events() - except: + except DAVError: cal = None if not cal: ## cal not made and does not exist, exception thrown. @@ -144,11 +149,7 @@ def _try_make_calendar(self, cal_id, **kwargs): except NotFoundError: cal = None ## Delete throw no exceptions, but was the calendar deleted? - if not cal or ( - self.flags_checked.get( - "non_existing_calendar_found" and len(events) == 0 - ) - ): + if not cal or self.checker.features_checked.is_supported('create-calendar.auto'): self.set_feature("delete-calendar") ## Calendar probably deleted OK. ## (in the case of non_existing_calendar_found, we should add @@ -166,7 +167,7 @@ def _try_make_calendar(self, cal_id, **kwargs): ## Perhaps it's a "move to thrashbin"-regime on the server self.set_feature( "delete-calendar", - {"support": "unknown", "behaviour": "move to trashbin?"}, + {"support": "unsupported", "behaviour": "move to trashbin?"}, ) except NotFoundError as e: ## Calendar was deleted, it just took some time. 
@@ -176,7 +177,7 @@ def _try_make_calendar(self, cal_id, **kwargs): ) return (calmade, e) return (calmade, None) - except Exception as e: + except DAVError as e: time.sleep(10) try: cal.delete() @@ -187,7 +188,7 @@ "behaviour": "deleting a recently created calendar causes exception", }, ) - except Exception as e2: + except DAVError as e2: self.set_feature("delete-calendar", False) return (calmade, None) @@ -250,18 +251,28 @@ class PrepareCalendar(Check): - """ - This "check" doesn't check anything, but ensures the calendar has some known events + """This "check" was not supposed to check anything, only ensure + that the calendar has some known events and tasks. However, as + some calendars don't even support saving and loading all kinds of + component types, checks that it's possible to save/load those have + been thrown in here. + + TODO: can the logic behind save-load.* be consolidated and moved + into the add_if_not_existing? 
""" features_to_be_checked = set() depends_on = {CheckMakeDeleteCalendar} features_to_be_checked = { "save-load.event.recurrences", + "save-load.event.recurrences.count", "save-load.todo.recurrences", + "save-load.todo.recurrences.count", "save-load.event", "save-load.todo", "save-load.todo.mixed-calendar", + "save-load.journal", + "search.comp-type" } def _run_check(self): @@ -306,7 +317,9 @@ def add_if_not_existing(*largs, **kwargs): uid = re.search("UID:(.*)\n", largs[1]).group(1) if uid in object_by_uid: return object_by_uid.pop(uid) - return cal.save_object(*largs, **kwargs) + ret = cal.save_object(*largs, **kwargs) + + return ret try: task_with_dtstart = add_if_not_existing( @@ -321,7 +334,9 @@ def add_if_not_existing(*largs, **kwargs): except: try: tasklist = self.checker.principal.calendar(cal_id=f"{cal_id}_tasks") - tasklist.todos() + ## include_completed=True will disable lots of complex filtering + ## logic + tasklist.todos(include_completed=True) except: tasklist = self.checker.principal.make_calendar( cal_id=f"{cal_id}_tasks", @@ -336,7 +351,7 @@ def add_if_not_existing(*largs, **kwargs): uid="csc_simple_task1", dtstart=date(2000, 1, 7), ) - except Exception as e: ## exception e for debugging purposes + except DAVError as e: ## exception e for debugging purposes self.set_feature("save-load.todo", 'ungraceful') return @@ -344,9 +359,18 @@ def add_if_not_existing(*largs, **kwargs): self.set_feature("save-load.todo") self.set_feature("save-load.todo.mixed-calendar", False) + ## TODO: those three lines are OK for bedework, we will + ## need to investigate more if the assert breaks on other + ## servers. 
+ if not self.checker.tasklist.todos(include_completed=True): + self.set_feature('search.comp-type', "broken") + else: + self.set_feature('search.comp-type') + assert self.checker.tasklist.todos(include_completed=True) + simple_event = add_if_not_existing( Event, - summary="simple event with a start time and an end time", + summary="Simple event with a start time and an end time", uid="csc_simple_event1", dtstart=datetime(2000, 1, 1, 12, 0, 0, tzinfo=utc), dtend=datetime(2000, 1, 1, 13, 0, 0, tzinfo=utc), @@ -354,6 +378,31 @@ def add_if_not_existing(*largs, **kwargs): simple_event.load() self.set_feature("save-load.event") + + if not self.checker.features_checked.is_supported("save-load.todo.mixed-calendar"): + try: + journals = self.checker.principal.make_calendar( + cal_id=f"{cal_id}_journals", + name=f"{name} - journals", + supported_calendar_component_set=["VJOURNAL"]) + except: + journals = self.checker.calendar + else: + journals = self.checker.calendar + self.checker.journals = journals + try: + j = journals.add_journal( + summary="journal test", + dtstart=datetime(2000, 6, 1), + description="This is a journal entry", + uid="csc_journal_1") + j.load() + self.set_feature("save-load.journal") + except NotFoundError as e: + self.set_feature("save-load.journal", 'unsupported') + except DAVError as e: + self.set_feature("save-load.journal", 'ungraceful') + non_duration_event = add_if_not_existing( Event, summary="event with a start time but no end time", @@ -413,6 +462,23 @@ def add_if_not_existing(*largs, **kwargs): ) recurring_event.load() self.set_feature("save-load.event.recurrences") + event_with_rrule_and_count = add_if_not_existing(Event, """BEGIN:VCALENDAR +VERSION:2.0 +PRODID:-//Example Corp.//CalDAV Client//EN +BEGIN:VEVENT +UID:csc_weeklymeeting +DTSTAMP:20001013T151313Z +DTSTART:20001018T140000Z +DTEND:20001018T150000Z +SUMMARY:Weekly meeting for three weeks +RRULE:FREQ=WEEKLY;COUNT=3 +END:VEVENT +END:VCALENDAR""") + event_with_rrule_and_count.load() 
+ component = event_with_rrule_and_count.component + rrule = component.get('RRULE', None) + count = rrule and rrule.get('COUNT') + self.set_feature("save-load.event.recurrences.count", count==[3]) recurring_task = add_if_not_existing( Todo, @@ -425,6 +491,27 @@ def add_if_not_existing(*largs, **kwargs): recurring_task.load() self.set_feature("save-load.todo.recurrences") + task_with_rrule_and_count = add_if_not_existing(Todo, """BEGIN:VCALENDAR +VERSION:2.0 +PRODID:-//Example Corp.//CalDAV Client//EN +BEGIN:VTODO +UID:csc_task_with_count +DTSTAMP:20001013T151313Z +DTSTART:20001016T065500Z +STATUS:NEEDS-ACTION +DURATION:PT10M +SUMMARY:Weekly task to be done three times +RRULE:FREQ=WEEKLY;COUNT=3 +CATEGORIES:CHORE +PRIORITY:3 +END:VTODO +END:VCALENDAR""") + task_with_rrule_and_count.load() + component = task_with_rrule_and_count.component + rrule = component.get('RRULE', None) + count = rrule and rrule.get('COUNT') + self.set_feature("save-load.todo.recurrences.count", count==[3]) + recurring_event_with_exception = add_if_not_existing( Event, """BEGIN:VCALENDAR @@ -449,71 +536,184 @@ def add_if_not_existing(*largs, **kwargs): END:VCALENDAR""", ) + simple_event = add_if_not_existing( + Event, + description="Simple event without a summary", + uid="csc_simple_event_no_summary", + dtstart=datetime(2000, 3, 1, 12, 0, 0, tzinfo=utc), + dtend=datetime(2000, 3, 1, 13, 0, 0, tzinfo=utc), + ) + + ## Event with alarm for alarm search testing + alarm_test_event = add_if_not_existing( + Event, + summary="Alarm test event", + uid="csc_alarm_test_event", + dtstart=datetime(2000, 5, 1, 8, 0, 0, tzinfo=utc), + dtend=datetime(2000, 5, 1, 9, 0, 0, tzinfo=utc), + alarm_trigger=timedelta(minutes=-15), + alarm_action="AUDIO", + ) + ## No more existing IDs in the calendar from 2000 ... 
otherwise, ## more work is needed to ensure those won't pollute the tests nor be ## deleted by accident assert not object_by_uid assert self.checker.calendar.events() - assert self.checker.tasklist.todos() + assert self.checker.tasklist.todos(include_completed=True) -class CheckSearch(Check): +class SearchMixIn: + ## Boilerplate + def search_find_set(self, cal_or_searcher, feature, num_expected=None, not_so_fast=False, min_num_expected=None, max_num_expected=None, **search_args): + try: + if num_expected is not None: + min_num_expected = num_expected + max_num_expected = num_expected + if min_num_expected is None: + min_num_expected=1 + if max_num_expected is None: + max_num_expected=65536 + results = cal_or_searcher.search(**search_args, post_filter=False) + cnt = len(results) + is_good = cnt >= min_num_expected and cnt <= max_num_expected + if not not_so_fast or not is_good: + self.set_feature(feature, is_good) + return is_good + except ReportError: + self.set_feature(feature, "ungraceful") + return False + +class CheckSearch(Check, SearchMixIn): depends_on = {PrepareCalendar} features_to_be_checked = { "search.time-range.event", - "search.category", - "search.category.fullstring", - "search.category.fullstring.smart", "search.time-range.todo", + "search.text", + "search.text.case-sensitive", + "search.text.case-insensitive", + "search.text.substring", + "search.text.by-uid", + "search.is-not-defined", + "search.text.category", + "search.text.category.substring", "search.comp-type-optional", "search.combined-is-logical-and", - } ## TODO: we can do so much better than this + } ## TODO: there are still lots of corner cases to be considered, particularly wrg of time-range searches def _run_check(self): cal = self.checker.calendar tasklist = self.checker.tasklist - events = cal.search( + + self.search_find_set( + cal, "search.time-range.event", 1, start=datetime(2000, 1, 1, tzinfo=utc), end=datetime(2000, 1, 2, tzinfo=utc), event=True, ) - 
self.set_feature("search.time-range.event", len(events) == 1) - tasks = tasklist.search( + self.search_find_set( + tasklist, "search.time-range.todo", 1, start=datetime(2000, 1, 9, tzinfo=utc), end=datetime(2000, 1, 10, tzinfo=utc), todo=True, include_completed=True, ) - self.set_feature("search.time-range.todo", len(tasks) == 1) - ## search.category + ## summary search + self.search_find_set( + cal, "search.text", 1, + summary="Simple event with a start time and an end time", + event=True) + + ## summary search is by default case sensitive + ## As for now, we'll skip this test if text search was + ## already found not to be working. + ## TODO: instead, we should do two searches here, one with correct + ## casing and one without, and ensure the first one returns 1 element. + if self.checker.features_checked.is_supported("search.text"): + self.search_find_set( + cal, "search.text.case-sensitive", 0, + summary="simple event with a start time and an end time", + event=True) + else: + self.set_feature("search.text.case-sensitive", False) + + ## summary search, case insensitive + searcher = CalDAVSearcher(event=True) + searcher.add_property_filter('summary', "simple event with a start time and an end time", case_sensitive=False) + self.search_find_set( + searcher, "search.text.case-insensitive", 1, calendar=cal) + + ## "is not defined"-search + searcher = CalDAVSearcher(event=True) + searcher.add_property_filter('summary', None, operator="undef") + ## bedeworks does not support much - but it supports seaching for events without summary set! 
+ ## The unit tests still breaks, because it doesn't support searching for events without category + no_summary_found = self.search_find_set( + searcher, "search.is-not-defined", 1, calendar=cal, not_so_fast=True) + if no_summary_found: + found = cal.search(no_categories=True) + if len(found) < 3 or any(x.component.categories for x in found): + self.set_feature("search.is-not-defined", "fragile") + else: + self.set_feature("search.is-not-defined") + + ## summary search, substring + ## The RFC says that TextMatch is a subetext search + self.search_find_set( + cal, "search.text.substring", 1, + summary="Simple event with a start time and", + event=True) + + ## TODO - we may be testing the wrong thing here! + ## 1) if search.text is not supported because the server yields nothing, then AS FOR NOW cal.object_by_uid will raise a NotFoundError. This will change when https://github.com/python-caldav/caldav/issues/586 is fixed + ## 2) if search.text is not supported because the server gives everything, then .object_by_uid will find the right thing through client-side filtering + + ## TODO - what we really should do: + + ## 1) Send the XML-query to the server as given in he examples in the RFC, shortcutting all logic in cal.object_by_uid, cal.search etc + ## 2) Unless there exist servers with fragile text searching that supports search for uid, then probably the whole feature and check should be yanked try: - events = cal.search(category="hands", event=True) - self.set_feature("search.category", len(events) == 1) - except ReportError: - self.set_feature("search.category", "ungraceful") - if self.feature_checked("search.category", str) != 'ungraceful': - events = cal.search(category="hands,feet,head", event=True) - self.set_feature("search.category.fullstring", len(events) == 1) - if len(events) == 1: - events = cal.search(category="feet,head,hands", event=True) - self.set_feature("search.category.fullstring.smart", len(events) == 1) + event = 
cal.event_by_uid("csc_simple_event1") + if event and str(event.component['uid']) == "csc_simple_event1": + self.set_feature("search.text.by-uid") + else: + self.set_feature("search.text.by-uid", "broken") + except NotFoundError: + self.set_feature("search.text.by-uid", "unsupported") + except DAVError: + self.set_feature("search.text.by-uid", "ungraceful") + + ## search.text.category + self.search_find_set( + cal, "search.text.category", 1, + category="hands", event=True) ## search.combined - if self.feature_checked("search.category"): - events1 = cal.search(category="hands", event=True, start=datetime(2000, 1, 1, 11, 0, 0), end=datetime(2000, 1, 13, 14, 0, 0)) - events2 = cal.search(category="hands", event=True, start=datetime(2000, 1, 1, 9, 0, 0), end=datetime(2000, 1, 6, 14, 0, 0)) + if self.feature_checked("search.text.category"): + events1 = cal.search(category="hands", event=True, start=datetime(2000, 1, 1, 11, 0, 0), end=datetime(2000, 1, 13, 14, 0, 0), post_filter=False) + events2 = cal.search(category="hands", event=True, start=datetime(2000, 1, 1, 9, 0, 0), end=datetime(2000, 1, 6, 14, 0, 0), post_filter=False) self.set_feature("search.combined-is-logical-and", len(events1) == 1 and len(events2) == 0) - + self.search_find_set( + cal, "search.text.category.substring", 1, + category="eet", + event=True) + ## TODO: the try/except below may be too wide try: + summary = "Simple event with a start time and" + ## Text search with and without comptype + tswc = cal.search(summary=summary, event=True, post_filter=False) + tswoc = cal.search(summary=summary, post_filter=False) + ## Testing if search without comp-type filter returns both events and tasks if self.feature_checked("search.time-range.todo"): objects = cal.search( start=datetime(2000, 1, 1, tzinfo=utc), end=datetime(2001, 1, 1, tzinfo=utc), + post_filter=False, ) else: - objects = _filter_2000(cal.search()) - if len(objects) == 0: + objects = list(_filter_2000(cal.search(post_filter=False))) + if 
len(objects) == 0 and not tswoc: self.set_feature( "search.comp-type-optional", { @@ -533,12 +733,15 @@ def _run_check(self): cal != tasklist and len(objects) + len( + ## Also search tasklist without comp-type to see if we get all objects tasklist.search( start=datetime(2000, 1, 1, tzinfo=utc), end=datetime(2001, 1, 1, tzinfo=utc), + post_filter=False, ) ) - == self.checker.cnt + == self.checker.cnt and + (tswoc or not tswc) ): self.set_feature( "search.comp-type-optional", @@ -547,7 +750,7 @@ def _run_check(self): "description": "comp-filter is redundant in search as a calendar can only hold one kind of components", }, ) - elif len(objects) == self.checker.cnt: + elif len(objects) == self.checker.cnt and (tswoc or not tswc): self.set_feature("search.comp-type-optional") else: ## TODO ... we need to do more testing on search to conclude certainly on this one. But at least we get something out. @@ -558,13 +761,14 @@ def _run_check(self): "description": "unexpected results from date-search without comp-type", }, ) - except: + except DAVError: self.set_feature("search.comp-type-optional", {"support": "ungraceful"}) -class CheckRecurrenceSearch(Check): +class CheckRecurrenceSearch(Check, SearchMixIn): depends_on = {CheckSearch} features_to_be_checked = { + "search.time-range.accurate", "search.recurrences.includes-implicit.todo", "search.recurrences.includes-implicit.todo.pending", "search.recurrences.includes-implicit.event", @@ -574,7 +778,8 @@ class CheckRecurrenceSearch(Check): "search.recurrences.expanded.exception", } - def _run_check(self): + def _run_check(self) -> None: + cal = self.checker.calendar tl = self.checker.tasklist events = cal.search( @@ -583,7 +788,10 @@ def _run_check(self): event=True, post_filter=False, ) - assert len(events) == 1 + ## This is a basic sanity check - there should be at least one event + ## (the monthly recurring event with dtstart 2000-01-12) + ## Some servers may incorrectly return additional events + assert len(events) >= 1 if 
self.checker.features_checked.is_supported("search.time-range.todo"): todos = tl.search( start=datetime(2000, 1, 12, tzinfo=utc), @@ -592,14 +800,22 @@ def _run_check(self): include_completed=True, post_filter=False, ) - assert len(todos) == 1 + ## Basic sanity check - should find at least the recurring task + assert len(todos) >= 1 events = cal.search( start=datetime(2000, 2, 12, tzinfo=utc), end=datetime(2000, 2, 13, tzinfo=utc), event=True, post_filter=False, ) - self.set_feature("search.recurrences.includes-implicit.event", len(events) == 1) + + ## Check if server returns accurate time-range results + ## Some servers return events that fall outside the requested time range + ## Expected: only csc_monthly_recurring_event (recurrence at 2000-02-12 12:00) + ## Buggy behavior: also returns csc_monthly_recurring_with_exception (2000-02-13 12:00, outside range) + ## and possibly csc_weeklymeeting (no February recurrence at all) + self.set_feature("search.time-range.accurate", len(events) <= 1) + self.set_feature("search.recurrences.includes-implicit.event", len(events) >= 1) todos1 = tl.search( start=datetime(2000, 2, 12, tzinfo=utc), end=datetime(2000, 2, 13, tzinfo=utc), @@ -624,7 +840,16 @@ def _run_check(self): event=True, post_filter=False, ) - assert len(exception) == 1 + ## Xandikos version 0.2.12 breaks here for me. + ## It didn't break earlier. + ## Everything is exactly the same here. 
Same data on the server, same query + ## There must be some local state in xandikos causing some bug to happen + ## If the server has accurate time-range searches, we expect exactly 1 result + ## Otherwise, we just check that we got at least one result + if self.feature_checked("search.time-range.accurate"): + assert len(exception) == 1 + else: + assert len(exception) >= 1 far_future_recurrence = cal.search( start=datetime(2045, 3, 12, tzinfo=utc), end=datetime(2045, 3, 13, tzinfo=utc), @@ -678,3 +903,530 @@ def _run_check(self): == "February recurrence with different summary" and getattr(exception[0].component.get('RECURRENCE_ID'), 'dt', None) == datetime(2000, 2, 13, 12, tzinfo=utc) ) + + +class CheckPrincipalSearch(Check): + """Checks support for principal search operations + + Tests those capabilities: + - principal-search.by-name.self: Search for own principal by name + - principal-search.list-all: List all principals without filter + + TODO: principal-search.by-name (general name search) is not tested + as it requires setting up another user with a known name. What + we're really testing is principal-search.by-name.self, and then we + assume principal-search.by-name is the same. + + TODO: if get-current-user-principal is not supported, we cannot + test the rest, and we assume they are broken + + """ + + depends_on = { CheckGetCurrentUserPrincipal } + features_to_be_checked = { + "principal-search.list-all", + "principal-search.by-name", + } + + def _run_check(self) -> None: + client = self.client + + if not self.checker.features_checked.is_supported("get-current-user-principal"): + ## if we cannot get the current user principal, then we cannot perform the + ## search for principals. Assume searching for principals does not work. + ## Arguably, the get-current-user-principal feature + ## could have been renamed to principal-search. 
+ self.set_feature("principal-search", False) + return + + ## Try to get the current principal first + principal = client.principal() + + ## Search for own principal by name + try: + my_name = principal.get_display_name() + if my_name: + my_principals = client.principals(name=my_name) + if isinstance(my_principals, list) and len(my_principals) == 1: + if my_principals[0].url == principal.url: + self.set_feature("principal-search.by-name", True) + else: + self.set_feature("principal-search.by-name", { + "support": "fragile", + "behaviour": "Returns wrong principal" + }) + elif len(my_principals) == 0: + self.set_feature("principal-search.by-name", { + "support": "unsupported", + "behaviour": "Search by own name returns nothing" + }) + else: + self.set_feature("principal-search.by-name", { + "support": "fragile", + "behaviour": f"Returns {len(my_principals)} principals instead of 1" + }) + else: + ## No display name, can't test + self.set_feature("principal-search.by-name", { + "support": "unknown", + "behaviour": "No display name available to test" + }) + except (ReportError, DAVError, AuthorizationError) as e: + self.set_feature("principal-search.by-name", { + "support": "ungraceful", + "behaviour": f"Search by name failed: {e}" + }) + + ## List all principals + try: + all_principals = client.principals() + ## Some servers return empty list, some return principals + ## We know there exists at least one principal (self) + if isinstance(all_principals, list) and len(all_principals)>0: + self.set_feature("principal-search.list-all", True) + else: + self.set_feature("principal-search.list-all", { + "support": "unsupported", + "behaviour": "principals() didn't return a list with at least one element" + }) + except (ReportError, DAVError, AuthorizationError) as e: + self.set_feature("principal-search.list-all", { + "support": "ungraceful", + "behaviour": f"List all principals failed: {e}" + }) + + +class CheckDuplicateUID(Check): + """ + Checks how server handles events 
with duplicate UIDs across calendars. + + Some servers allow the same UID in different calendars (treating them + as separate entities), while others may throw errors or silently ignore + duplicates. + + Tests: + - save.duplicate-uid.cross-calendar: Can events with same UID exist in different calendars? + """ + + depends_on = {PrepareCalendar} + features_to_be_checked = {"save.duplicate-uid.cross-calendar"} + + def _run_check(self) -> None: + cal1 = self.checker.calendar + + + ## Reuse an event from PrepareCalendar instead of creating a new one + test_uid = "csc_simple_event1" + cal2_name = "csc_duplicate_uid_cal2" + + ## Try to find and delete existing cal2 test calendar + try: + for cal in self.client.principal().calendars(): + if cal.name == cal2_name: + cal.delete() + break + except Exception: + pass + + + + try: + ## Get existing event from first calendar (created by PrepareCalendar) + event1 = cal1.event_by_uid(test_uid) + event1.load() + + + ## Get the event data for reuse in cal2 + event_ical = event1.data + + + ## Create second calendar + try: + cal2 = self.client.principal().make_calendar(name=cal2_name) + except DAVError: + self.set_feature("save.duplicate-uid.cross-calendar", { + "support": "unknown", + "behaviour": "cannot test, have access to only one calendar"}) + return + + try: + ## Try to save event with same UID to second calendar + event2 = cal2.save_object(Event, event_ical) + + + ## Check if the event actually exists in cal2 + events_in_cal2 = list(_filter_2000(cal2.events())) + + ## Check if event still exists in cal1 (Zimbra moves it instead of copying) + try: + cal1.event_by_uid(test_uid) + event_was_moved = False + except NotFoundError: + event_was_moved = True + + if len(events_in_cal2) == 0: + ## Server silently ignored the duplicate + self.set_feature("save.duplicate-uid.cross-calendar", { + "support": "unsupported", + "behaviour": "silently-ignored" + }) + elif len(events_in_cal2) == 1 and event_was_moved: + ## Server moved the event 
instead of creating a duplicate (Zimbra behavior) + self.set_feature("save.duplicate-uid.cross-calendar", { + "support": "unsupported", + "behaviour": "moved-instead-of-copied" + }) + ## Move event back to cal1 to avoid breaking other tests + cal1.save_event(event2.data) + elif len(events_in_cal2) == 1: + assert events_in_cal2[0].component['uid'] == test_uid + ## Server accepted the duplicate + ## Verify they are treated as separate entities. + event1 = cal1.event_by_uid(test_uid) + event1.load() + + ## Store original summary to check later + original_summary = str(event1.icalendar_instance.walk('vevent')[0].get('summary', '')) + + ## Modify event in cal2 and verify cal1's event is unchanged + event2.icalendar_instance.walk('vevent')[0]['summary'] = "Modified in Cal2" + event2.save() + + event1.load() + current_summary = str(event1.icalendar_instance.walk('vevent')[0].get('summary', '')) + if current_summary == original_summary: + self.set_feature("save.duplicate-uid.cross-calendar", True) + else: + self.set_feature("save.duplicate-uid.cross-calendar", { + "support": "fragile", + "behaviour": "Modifying duplicate in one calendar affects the other" + }) + else: + self.set_feature("save.duplicate-uid.cross-calendar", { + "support": "fragile", + "behaviour": f"Unexpected: {len(events_in_cal2)} events in cal2" + }) + + except (DAVError, AuthorizationError) as e: + + ## Server rejected the duplicate with an error + self.set_feature("save.duplicate-uid.cross-calendar", { + "support": "ungraceful", + "behaviour": f"Server error: {type(e).__name__}" + }) + finally: + ## Cleanup + try: + cal2.delete() + except Exception: + pass + + finally: + ## No need to cleanup test event - it's owned by PrepareCalendar + pass + + + + +class CheckAlarmSearch(Check): + """ + Checks support for time-range searches on alarms (RFC4791 section 9.9) + """ + + depends_on = {PrepareCalendar} + features_to_be_checked = {"search.time-range.alarm"} + + def _run_check(self) -> None: + cal = 
self.checker.calendar + + ## The alarm test event was created in PrepareCalendar + ## Event at 08:00, alarm at 07:45 (15 minutes before) + test_uid = "csc_alarm_test_event" + + try: + ## Search for alarms after the event start (should find nothing) + events_after = cal.search( + event=True, + alarm_start=datetime(2000, 5, 1, 8, 1, tzinfo=utc), + alarm_end=datetime(2000, 5, 1, 8, 7, tzinfo=utc), + post_filter=False, + ) + + ## Search for alarms around the alarm time (should find the event) + events_alarm_time = cal.search( + event=True, + alarm_start=datetime(2000, 5, 1, 7, 40, tzinfo=utc), + alarm_end=datetime(2000, 5, 1, 7, 55, tzinfo=utc), + post_filter=False, + ) + + ## Check results + if len(events_after) == 0 and len(events_alarm_time) == 1: + self.set_feature("search.time-range.alarm", True) + else: + self.set_feature("search.time-range.alarm", False) + + except (ReportError, DAVError) as e: + ## Some servers don't support alarm searches at all + self.set_feature("search.time-range.alarm", { + "support": "unsupported", + "behaviour": f"alarm search failed: {e}" + }) + + +class CheckSyncToken(Check): + """ + Checks support for RFC6578 sync-collection reports (sync tokens) + + Tests for four known issues: + 1. No sync token support at all + 2. Time-based sync tokens (second-precision, requires sleep between ops) + 3. Fragile sync tokens (returns extra content, race conditions) + 4. 
Sync breaks on delete (server fails after object deletion) + """ + + depends_on = {PrepareCalendar} + features_to_be_checked = { + "sync-token", + "sync-token.delete", + } + + def _run_check(self) -> None: + cal = self.checker.calendar + + ## Test 1: Check if sync tokens are supported at all + ## Use disable_fallback=True to detect true server support + try: + my_objects = cal.objects(disable_fallback=True) + sync_token = my_objects.sync_token + + if not sync_token or sync_token == "": + self.set_feature("sync-token", False) + return + + ## Initially assume full support + sync_support = "full" + sync_behaviour = None + except (ReportError, DAVError, AttributeError) as e: + self.set_feature("sync-token", { + "support": "ungraceful", + "behaviour": f"Server error on sync-collection REPORT: {type(e).__name__}" + }) + return + + ## Clean up any leftover test event from previous failed run + test_uid = "csc_sync_test_event_1" + try: + events = _filter_2000(cal.search( + start=datetime(2000, 4, 1, tzinfo=utc), + end=datetime(2000, 4, 2, tzinfo=utc), + event=True, + post_filter=False, + )) + for evt in events: + if evt.component.get("uid") == test_uid: + evt.delete() + break + except: + pass + + ## Test 2 & 3: Check for time-based and fragile sync tokens + ## Create a new event + test_event = None + try: + test_event = cal.save_object( + Event, + summary="Sync token test event", + uid=test_uid, + dtstart=datetime(2000, 4, 1, 12, 0, 0, tzinfo=utc), + dtend=datetime(2000, 4, 1, 13, 0, 0, tzinfo=utc), + ) + + ## Get objects with new sync token + my_objects = cal.objects(disable_fallback=True) + sync_token1 = my_objects.sync_token + + ## Immediately check for changes (should be none) + my_changed_objects = cal.objects_by_sync_token(sync_token=sync_token1, disable_fallback=True) + immediate_count = len(list(my_changed_objects)) + + if immediate_count > 0: + ## Fragile sync tokens return extra content + sync_support = "fragile" + + ## Test for time-based sync tokens + ## Modify 
the event within the same second + test_event.icalendar_instance.subcomponents[0]["SUMMARY"] = "Modified immediately" + test_event.save() + + ## Check for changes immediately (time-based tokens need sleep(1)) + my_changed_objects = cal.objects_by_sync_token(sync_token=sync_token1, disable_fallback=True) + changed_count_no_sleep = len(list(my_changed_objects)) + + if changed_count_no_sleep == 0: + ## Might be time-based, wait a second and try again + time.sleep(1) + test_event.icalendar_instance.subcomponents[0]["SUMMARY"] = "Modified after sleep" + test_event.save() + time.sleep(1) + + my_changed_objects = cal.objects_by_sync_token(sync_token=sync_token1, disable_fallback=True) + changed_count_with_sleep = len(list(my_changed_objects)) + + if changed_count_with_sleep >= 1: + sync_behaviour = "time-based" + else: + ## Sync tokens might be completely broken + sync_support = "broken" + + ## Set the sync-token feature with support and behaviour + if sync_behaviour: + self.set_feature("sync-token", {"support": sync_support, "behaviour": sync_behaviour}) + else: + self.set_feature("sync-token", {"support": sync_support}) + + ## Test 4: Check if sync breaks on delete + sync_token2 = my_changed_objects.sync_token + + ## Sleep if needed + if sync_behaviour == "time-based": + time.sleep(1) + + ## Delete the test event + test_event.delete() + test_event = None ## Mark as deleted + + if sync_behaviour == "time-based": + time.sleep(1) + + try: + my_changed_objects = cal.objects_by_sync_token(sync_token=sync_token2, disable_fallback=True) + deleted_count = len(list(my_changed_objects)) + + ## If we get here without exception, deletion is supported + self.set_feature("sync-token.delete", True) + except (ReportError, DAVError) as e: + ## Some servers (like sabre-based) return "418 I'm a teapot" or other errors + self.set_feature("sync-token.delete", { + "support": "unsupported", + "behaviour": f"sync fails after deletion: {e}" + }) + finally: + ## Ensure cleanup even if an 
exception occurred + if test_event is not None: + try: + test_event.delete() + except: + pass + + +class CheckFreeBusyQuery(Check): + """ + Checks support for RFC4791 free/busy-query REPORT + + Tests if the server supports free/busy queries as specified in RFC4791 section 7.10. + The free/busy query allows clients to retrieve free/busy information for a time range. + """ + + depends_on = {PrepareCalendar} + features_to_be_checked = { + "freebusy-query.rfc4791", + } + + def _run_check(self) -> None: + cal = self.checker.calendar + + try: + ## Try to perform a simple freebusy query + ## Use a time range in year 2000 to avoid conflicts with real calendar data + start = datetime(2000, 1, 1, 0, 0, 0, tzinfo=utc) + end = datetime(2000, 1, 31, 23, 59, 59, tzinfo=utc) + + freebusy = cal.freebusy_request(start, end) + + ## If we got here without exception, the feature is supported + ## Verify we got a valid freebusy object + if freebusy and hasattr(freebusy, 'vobject_instance'): + self.set_feature("freebusy-query.rfc4791", True) + else: + self.set_feature("freebusy-query.rfc4791", { + "support": "unsupported", + "behaviour": "freebusy query returned invalid or empty response" + }) + except (ReportError, DAVError, NotFoundError) as e: + ## Server doesn't support freebusy queries + ## Common responses: 500 Internal Server Error, 501 Not Implemented + self.set_feature("freebusy-query.rfc4791", { + "support": "ungraceful", + "behaviour": f"freebusy query failed: {e}" + }) + except Exception as e: + ## Unexpected error + self.set_feature("freebusy-query.rfc4791", { + "support": "broken", + "behaviour": f"unexpected error during freebusy query: {e}" + }) + + +class CheckTimezone(Check): + """ + Checks support for non-UTC timezone information in events. + + Tests if the server accepts events with timezone information using zoneinfo. + Some servers reject events with timezone data (returning 403 Forbidden). 
+ Related to GitHub issue https://github.com/python-caldav/caldav/issues/372 + """ + + depends_on = {PrepareCalendar} + features_to_be_checked = { + "save-load.event.timezone", + } + + def _run_check(self) -> None: + cal = self.checker.calendar + + try: + ## Create an event with a non-UTC timezone (America/Los_Angeles) + tz = ZoneInfo("America/Los_Angeles") + event = cal.save_event( + summary="Timezone test event", + dtstart=datetime(2000, 6, 15, 14, 0, 0, tzinfo=tz), + dtend=datetime(2000, 6, 15, 15, 0, 0, tzinfo=tz), + uid="csc_timezone_test_event", + ) + + ## Try to load the event back + event.load() + + ## Verify the event was saved correctly + if event.vobject_instance: + self.set_feature("save-load.event.timezone") + ## Clean up + try: + event.delete() + except: + pass + else: + self.set_feature("save-load.event.timezone", { + "support": "broken", + "behaviour": "Event with timezone was saved but could not be loaded" + }) + except AuthorizationError as e: + ## Server rejected the event with a 403 Forbidden + ## This is the specific issue reported in GitHub #372 + self.set_feature("save-load.event.timezone", { + "support": "unsupported", + "behaviour": f"Server rejected event with timezone (403 Forbidden): {e}" + }) + except DAVError as e: + ## Other DAV error (e.g., 400 Bad Request, 500 Internal Server Error) + self.set_feature("save-load.event.timezone", { + "support": "ungraceful", + "behaviour": f"Server error when saving event with timezone: {e}" + }) + except Exception as e: + ## Unexpected error + self.set_feature("save-load.event.timezone", { + "support": "broken", + "behaviour": f"Unexpected error during timezone test: {e}" + }) diff --git a/src/caldav_server_tester/checks_base.py b/src/caldav_server_tester/checks_base.py index 59799d9..4bc7553 100644 --- a/src/caldav_server_tester/checks_base.py +++ b/src/caldav_server_tester/checks_base.py @@ -28,28 +28,16 @@ def __init__(self, checker): def set_feature(self, feature, value=True): fs = 
self.checker._features_checked - if isinstance(value, dict): - fc = {feature: value} - elif isinstance(value, str): - fc = {feature: {"support": value}} - elif value is True: - fc = {feature: {"support": "full"}} - elif value is False: - fc = {feature: {"support": "unsupported"}} - elif value is None: - fc = {feature: {"support": "unknown"}} - else: - assert False - fs.copyFeatureSet(fc, collapse=False) - feat_def = self.checker._features_checked.find_feature(feature) - feat_type = feat_def.get('type', 'server-feature') - sup = fc[feature].get('support', feat_def.get('default', 'full')) + fs.set_feature(feature, value) - ## The last bit is about verifying that the expectations are met. + ## verifying that the expectations are met. ## We skip this if debug_mode is None if self.checker.debug_mode is None: return + + feat_def = self.checker._features_checked.find_feature(feature) + feat_type = feat_def.get('type', 'server-feature') if feat_type not in ('server-peculiarity', 'server-feature'): ## client-behaviour, tests-behaviour or client-feature @@ -58,13 +46,17 @@ def set_feature(self, feature, value=True): assert(feat_type in ('server-observation',)) return + value_str = fs.is_supported(feature, str) + ## Fragile support is ... 
fragile and should be ignored - if sup == 'fragile' or self.expected_features.is_supported(feature, str) == 'fragile': + ## same with unknown + if value_str in ('fragile', 'unknown') or self.expected_features.is_supported(feature, str) in ('fragile', 'unknown'): return expected_ = self.expected_features.is_supported(feature, dict) expected = copy.deepcopy(expected_) - observed = copy.deepcopy(fc[feature]) + observed_ = fs.is_supported(feature, dict) + observed = copy.deepcopy(observed_) ## Strip all free-text information from both observed and expected for stripdict in observed, expected: @@ -78,9 +70,9 @@ def set_feature(self, feature, value=True): if observed != expected: if self.checker.debug_mode == 'logging': - logging.error(f"Server checker found something unexpected for {feature}. Expected: {expected_}, observed: {fc[feature]}") + logging.error(f"Server checker found something unexpected for {feature}. Expected: {expected_}, observed: {observed_}") elif self.checker.debug_mode == 'pdb': - import pdb; pdb.set_trace() + breakpoint() else: assert(False) diff --git a/tests/README b/tests/README new file mode 100644 index 0000000..91b867f --- /dev/null +++ b/tests/README @@ -0,0 +1,3 @@ +The files with names *_ai_* is AI-generated. I do believe that AI-generated test code is better than no test code. + +If bugfixes, refactorings and feature fixing causes tsts to break and you think it's probable that the code changes are good but the test code is not good, then it's fair to just remove the broken tests. 
diff --git a/tests/test_sync_token_check.py b/tests/test_sync_token_check.py new file mode 100644 index 0000000..304a222 --- /dev/null +++ b/tests/test_sync_token_check.py @@ -0,0 +1,172 @@ +"""Unit tests for CheckSyncToken to catch API usage errors""" + +from unittest.mock import Mock, MagicMock, PropertyMock +from datetime import datetime, timezone +import pytest + +from caldav.compatibility_hints import FeatureSet +from caldav_server_tester.checks import CheckSyncToken + + +class TestCheckSyncTokenAPI: + """Test that CheckSyncToken uses the correct caldav API""" + + def create_mock_calendar(self) -> Mock: + """Helper to create a mock calendar object""" + cal = Mock() + + # Mock objects() to return a sync token + mock_objects = Mock() + mock_objects.sync_token = "test-token-1" + cal.objects.return_value = mock_objects + + # Mock save_object to return an event + mock_event = Mock() + mock_event.icalendar_instance = Mock() + mock_event.icalendar_instance.subcomponents = [{"SUMMARY": "Test"}] + cal.save_object.return_value = mock_event + + # Mock objects_by_sync_token + mock_changed = Mock() + mock_changed.sync_token = "test-token-2" + mock_changed.__iter__ = Mock(return_value=iter([])) + mock_changed.__len__ = Mock(return_value=0) + cal.objects_by_sync_token.return_value = mock_changed + + return cal + + def create_mock_checker(self) -> Mock: + """Helper to create a mock checker object""" + checker = Mock() + checker._features_checked = FeatureSet() + checker.features_checked = checker._features_checked + checker.debug_mode = None + checker._client_obj = Mock() + checker._client_obj.features = FeatureSet() + checker.expected_features = FeatureSet() + checker.calendar = self.create_mock_calendar() + return checker + + def test_uses_save_object_not_save_event(self) -> None: + """CheckSyncToken should use cal.save_object() not cal.save_event()""" + checker = self.create_mock_checker() + check = CheckSyncToken(checker) + + # Run the check + check._run_check() + + # 
Verify save_object was called (not save_event) + assert checker.calendar.save_object.called + + # Verify it was called with Event as first parameter + call_args = checker.calendar.save_object.call_args + from caldav.calendarobjectresource import Event + assert call_args[0][0] == Event + + def test_sync_token_unsupported_exits_early(self) -> None: + """If sync tokens aren't supported, check should exit early""" + checker = self.create_mock_checker() + + # Mock no sync token support + mock_objects = Mock() + mock_objects.sync_token = "" + checker.calendar.objects.return_value = mock_objects + + check = CheckSyncToken(checker) + check._run_check() + + # Should set sync-token to unsupported + result = checker._features_checked.is_supported("sync-token", return_type=bool) + assert result is False + + # Should not try to create test events + assert not checker.calendar.save_object.called + + def test_handles_sync_token_exception(self) -> None: + """If objects() raises exception, should mark as unsupported""" + checker = self.create_mock_checker() + + # Mock exception on objects() + from caldav.lib.error import ReportError + checker.calendar.objects.side_effect = ReportError() + + check = CheckSyncToken(checker) + check._run_check() + + # Should set sync-token to unsupported + result = checker._features_checked.is_supported("sync-token", return_type=bool) + assert result is False + + def test_detects_time_based_tokens(self) -> None: + """Should detect time-based tokens when changes aren't seen immediately""" + checker = self.create_mock_checker() + + # First sync: no changes immediately + empty_result = Mock() + empty_result.__iter__ = Mock(return_value=iter([])) + empty_result.__len__ = Mock(return_value=0) + empty_result.sync_token = "test-token-2" + + # After sleep: changes appear + changed_result = Mock() + changed_result.__iter__ = Mock(return_value=iter([Mock()])) + changed_result.__len__ = Mock(return_value=1) + changed_result.sync_token = "test-token-3" + + # Mock 
delete test + delete_result = Mock() + delete_result.__iter__ = Mock(return_value=iter([])) + delete_result.__len__ = Mock(return_value=0) + delete_result.sync_token = "test-token-4" + + checker.calendar.objects_by_sync_token.side_effect = [ + empty_result, # Immediate check after creating + empty_result, # First modification (no sleep) + changed_result, # After sleep + empty_result, # After second modification + delete_result, # After deletion + ] + + check = CheckSyncToken(checker) + check._run_check() + + # Should detect time-based behaviour + result = checker._features_checked.is_supported("sync-token", return_type=dict) + assert result is not None + assert result.get("behaviour") == "time-based" + + def test_detects_fragile_tokens(self) -> None: + """Should detect fragile tokens when extra content appears""" + checker = self.create_mock_checker() + + # Immediately after getting token, return content (shouldn't happen) + fragile_result = Mock() + fragile_result.__iter__ = Mock(return_value=iter([Mock()])) + fragile_result.__len__ = Mock(return_value=1) + fragile_result.sync_token = "test-token-2" + + # After modification, return content (correct behaviour when modified) + modified_result = Mock() + modified_result.__iter__ = Mock(return_value=iter([Mock()])) + modified_result.__len__ = Mock(return_value=1) + modified_result.sync_token = "test-token-3" + + # After deletion test + delete_result = Mock() + delete_result.__iter__ = Mock(return_value=iter([])) + delete_result.__len__ = Mock(return_value=0) + delete_result.sync_token = "test-token-4" + + checker.calendar.objects_by_sync_token.side_effect = [ + fragile_result, # Immediate check shows content (fragile!) 
+ modified_result, # Modification check (shows the change) + delete_result, # After deletion + ] + + check = CheckSyncToken(checker) + check._run_check() + + # Should detect fragile support + result = checker._features_checked.is_supported("sync-token", return_type=dict) + assert result is not None + assert result.get("support") == "fragile"