From d9bd9e9420b337a75aa4e59c4d642776f56fed6a Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 17:11:21 -0700 Subject: [PATCH 01/30] docs(spec): add spec for python-sdk Spec artifacts: - research.md: feasibility analysis and codebase exploration - requirements.md: user stories and acceptance criteria - design.md: architecture and technical decisions - tasks.md: POC-first implementation plan (22 tasks) Ready for implementation. --- specs/python-sdk/.ralph-state.json | 12 ++ specs/python-sdk/design.md | 311 +++++++++++++++++++++++++++++ specs/python-sdk/requirements.md | 145 ++++++++++++++ specs/python-sdk/research.md | 95 +++++++++ specs/python-sdk/tasks.md | 214 ++++++++++++++++++++ 5 files changed, 777 insertions(+) create mode 100644 specs/python-sdk/.ralph-state.json create mode 100644 specs/python-sdk/design.md create mode 100644 specs/python-sdk/requirements.md create mode 100644 specs/python-sdk/research.md create mode 100644 specs/python-sdk/tasks.md diff --git a/specs/python-sdk/.ralph-state.json b/specs/python-sdk/.ralph-state.json new file mode 100644 index 0000000..0e0303b --- /dev/null +++ b/specs/python-sdk/.ralph-state.json @@ -0,0 +1,12 @@ +{ + "source": "plan", + "name": "python-sdk", + "basePath": "./specs/python-sdk", + "phase": "tasks", + "taskIndex": 0, + "totalTasks": 0, + "taskIteration": 1, + "maxTaskIterations": 5, + "globalIteration": 1, + "maxGlobalIterations": 100 +} diff --git a/specs/python-sdk/design.md b/specs/python-sdk/design.md new file mode 100644 index 0000000..ba7c8d4 --- /dev/null +++ b/specs/python-sdk/design.md @@ -0,0 +1,311 @@ +--- +spec: python-sdk +phase: design +created: 2026-01-16T00:00:00Z +generated: auto +--- + +# Design: Python SDK + +## Overview + +Port of TypeScript SDK to Python. Mirrors module structure, class design, and API surface. Uses httpx for async HTTP, threading for timer-based flush, and inspect for source location. + +## Architecture + +```mermaid +graph TB + subgraph "Public API" + L[Logwell Client] + CL[Child Logger] + end + + subgraph "Internal Components" + Q[BatchQueue] + T[HttpTransport] + SL[SourceLocation] + C[Config] + E[Errors] + end + + L -->|creates| CL + L -->|uses| Q + CL -->|shares| Q + Q -->|sends via| T + L -->|captures| SL + L -->|validates| C + T -->|throws| E + Q -->|throws| E +``` + +## Components + +### Logwell (client.py) + +**Purpose**: Main entry point. Exposes all logging methods. + +**Responsibilities**: +- Validate and store config +- Create transport and queue +- Route log calls to queue +- Capture source location when enabled +- Create child loggers + +**API**: +```python +class Logwell: + def __init__(self, config: LogwellConfig) -> None + + @property + def queue_size(self) -> int + + def debug(self, message: str, metadata: dict | None = None) -> None + def info(self, message: str, metadata: dict | None = None) -> None + def warn(self, message: str, metadata: dict | None = None) -> None + def error(self, message: str, metadata: dict | None = None) -> None + def fatal(self, message: str, metadata: dict | None = None) -> None + def log(self, entry: LogEntry) -> None + + async def flush(self) -> IngestResponse | None + async def shutdown(self) -> None + + def child(self, *, service: str | None = None, + metadata: dict | None = None) -> Logwell +``` + +### BatchQueue (queue.py) + +**Purpose**: Buffer logs and manage auto-flush. + +**Responsibilities**: +- Store logs in thread-safe list +- Trigger flush on batch_size threshold +- Manage flush timer via threading.Timer +- Handle overflow by dropping oldest +- Re-queue failed batches +- Prevent concurrent flushes + +**API**: +```python +class BatchQueue: + def __init__(self, send_batch: SendBatchFn, config: QueueConfig) -> None + + @property + def size(self) -> int + + def add(self, entry: LogEntry) -> None + async def flush(self) -> IngestResponse | None + async def shutdown(self) -> None +``` + +### HttpTransport (transport.py) + +**Purpose**: Send HTTP requests with retry logic. + +**Responsibilities**: +- POST to `/v1/ingest` endpoint +- Add Authorization header +- Retry with exponential backoff + jitter +- Classify errors by HTTP status +- Set retryable flag appropriately + +**API**: +```python +class HttpTransport: + def __init__(self, config: TransportConfig) -> None + async def send(self, logs: list[LogEntry]) -> IngestResponse +``` + +### SourceLocation (source_location.py) + +**Purpose**: Capture caller file/line info. + +**Responsibilities**: +- Use `inspect.stack()` to get call frames +- Skip internal SDK frames +- Return file path and line number + +**API**: +```python +@dataclass +class SourceLocation: + source_file: str + line_number: int + +def capture_source_location(skip_frames: int = 0) -> SourceLocation | None +``` + +### Config (config.py) + +**Purpose**: Validate and merge config with defaults. + +**Responsibilities**: +- Validate API key format with regex +- Validate endpoint as URL +- Validate numeric bounds +- Merge with DEFAULT_CONFIG + +**API**: +```python +DEFAULT_CONFIG: dict[str, Any] +API_KEY_REGEX: re.Pattern + +def validate_api_key_format(api_key: str) -> bool +def validate_config(config: LogwellConfig) -> LogwellConfig +``` + +### Errors (errors.py) + +**Purpose**: Custom exception with error codes. + +**API**: +```python +class LogwellErrorCode(str, Enum): + NETWORK_ERROR = "NETWORK_ERROR" + UNAUTHORIZED = "UNAUTHORIZED" + VALIDATION_ERROR = "VALIDATION_ERROR" + RATE_LIMITED = "RATE_LIMITED" + SERVER_ERROR = "SERVER_ERROR" + QUEUE_OVERFLOW = "QUEUE_OVERFLOW" + INVALID_CONFIG = "INVALID_CONFIG" + +class LogwellError(Exception): + def __init__(self, message: str, code: LogwellErrorCode, + status_code: int | None = None, retryable: bool = False) +``` + +### Types (types.py) + +**Purpose**: Type definitions. + +**API**: +```python +LogLevel = Literal["debug", "info", "warn", "error", "fatal"] + +class LogEntry(TypedDict, total=False): + level: Required[LogLevel] + message: Required[str] + timestamp: str + service: str + metadata: dict[str, Any] + source_file: str + line_number: int + +class LogwellConfig(TypedDict, total=False): + api_key: Required[str] + endpoint: Required[str] + service: str + batch_size: int + flush_interval: float # seconds + max_queue_size: int + max_retries: int + capture_source_location: bool + on_error: Callable[[Exception], None] + on_flush: Callable[[int], None] + +class IngestResponse(TypedDict): + accepted: int + rejected: NotRequired[int] + errors: NotRequired[list[str]] +``` + +## Data Flow + +1. User calls `logger.info("message", {"key": "value"})` +2. `Logwell._add_log()` captures source location if enabled +3. Log entry enriched with timestamp, service, merged metadata +4. `BatchQueue.add()` appends to queue +5. If `len(queue) >= batch_size`, trigger `flush()` +6. If first item, start flush timer +7. On flush: `HttpTransport.send()` POSTs to endpoint +8. On success: clear batch, call `on_flush` +9. On failure: re-queue, call `on_error`, restart timer + +## Technical Decisions + +| Decision | Options | Choice | Rationale | +|----------|---------|--------|-----------| +| HTTP Client | httpx, aiohttp, requests | httpx | Modern, async-native, sync support | +| Timer | threading.Timer, asyncio.create_task | threading.Timer | Works without running event loop | +| Types | TypedDict, dataclass, Pydantic | TypedDict | Zero runtime overhead | +| Packaging | setuptools, poetry, flit | pyproject.toml (hatch) | PEP 517/518 compliant | +| Linting | flake8+black, ruff | ruff | Fast, all-in-one | +| Testing | pytest, unittest | pytest | Industry standard | + +## File Structure + +| File | Action | Purpose | +|------|--------|---------| +| `sdks/python/pyproject.toml` | Create | Package metadata, deps | +| `sdks/python/README.md` | Create | Usage docs | +| `sdks/python/LICENSE` | Create | MIT license | +| `sdks/python/src/logwell/__init__.py` | Create | Public exports | +| `sdks/python/src/logwell/client.py` | Create | Logwell class | +| `sdks/python/src/logwell/types.py` | Create | TypedDict definitions | +| `sdks/python/src/logwell/config.py` | Create | Validation, defaults | +| `sdks/python/src/logwell/errors.py` | Create | LogwellError class | +| `sdks/python/src/logwell/queue.py` | Create | BatchQueue class | +| `sdks/python/src/logwell/transport.py` | Create | HttpTransport class | +| `sdks/python/src/logwell/source_location.py` | Create | Source capture | +| `sdks/python/tests/__init__.py` | Create | Test package | +| `sdks/python/tests/conftest.py` | Create | Pytest fixtures | +| `sdks/python/tests/unit/test_client.py` | Create | Client unit tests | +| `sdks/python/tests/unit/test_config.py` | Create | Config unit tests | +| `sdks/python/tests/unit/test_errors.py` | Create | Error unit tests | +| `sdks/python/tests/unit/test_queue.py` | Create | Queue unit tests | +| `sdks/python/tests/unit/test_source_location.py` | Create | Source loc tests | +| `sdks/python/tests/integration/test_e2e.py` | Create | E2E tests | + +## Error Handling + +| Error | Handling | User Impact | +|-------|----------|-------------| +| Invalid API key format | Raise `LogwellError(INVALID_CONFIG)` at init | Clear error message | +| Invalid endpoint URL | Raise `LogwellError(INVALID_CONFIG)` at init | Clear error message | +| Network failure | Retry with backoff, then raise/callback | Logs re-queued | +| 401 Unauthorized | No retry, raise immediately | `on_error` callback | +| 429 Rate Limited | Retry with backoff | Transparent to user | +| 5xx Server Error | Retry with backoff | Transparent to user | +| Queue overflow | Drop oldest, call `on_error` | Partial data loss | + +## Thread Safety + +```python +class BatchQueue: + def __init__(self, ...): + self._queue: list[LogEntry] = [] + self._lock = threading.Lock() + self._timer: threading.Timer | None = None + self._flushing = False + self._stopped = False + + def add(self, entry: LogEntry) -> None: + with self._lock: + if self._stopped: + return + # Handle overflow, append, maybe trigger flush +``` + +## Async/Sync Pattern + +```python +# Internal async implementation +async def _flush_async(self) -> IngestResponse | None: + ... + +# Public async method +async def flush(self) -> IngestResponse | None: + return await self._flush_async() + +# For sync contexts, use asyncio.run() or similar +def flush_sync(self) -> IngestResponse | None: + return asyncio.get_event_loop().run_until_complete(self._flush_async()) +``` + +## Existing Patterns to Follow + +- TypeScript SDK module structure: `sdks/typescript/src/` +- Test organization: `tests/unit/`, `tests/integration/` +- Config defaults pattern: `sdks/typescript/src/config.ts:7-13` +- Queue re-queue on failure: `sdks/typescript/src/queue.ts:112-113` +- Exponential backoff: `sdks/typescript/src/transport.ts:17-21` diff --git a/specs/python-sdk/requirements.md b/specs/python-sdk/requirements.md new file mode 100644 index 0000000..f7057c7 --- /dev/null +++ b/specs/python-sdk/requirements.md @@ -0,0 +1,145 @@ +--- +spec: python-sdk +phase: requirements +created: 2026-01-16T00:00:00Z +generated: auto +--- + +# Requirements: Python SDK + +## Summary + +Port TypeScript SDK to Python with identical API surface. Must support async operations, automatic batching, retry with backoff, and optional source location capture. + +## User Stories + +### US-1: Basic Logging + +As a Python developer, I want to send structured logs to Logwell so that I can monitor my application. + +**Acceptance Criteria**: +- AC-1.1: Can instantiate `Logwell` client with API key and endpoint +- AC-1.2: Can log at all levels: `debug()`, `info()`, `warn()`, `error()`, `fatal()` +- AC-1.3: Can include arbitrary metadata dict with each log +- AC-1.4: Logs include ISO8601 timestamp automatically +- AC-1.5: Can specify service name in config or per-log + +### US-2: Batch Management + +As a Python developer, I want logs batched automatically so that I minimize HTTP overhead. + +**Acceptance Criteria**: +- AC-2.1: Logs queue until batch_size reached, then auto-flush +- AC-2.2: Logs auto-flush after flush_interval seconds if batch not full +- AC-2.3: Can manually call `flush()` to send immediately +- AC-2.4: Queue drops oldest logs when max_queue_size exceeded +- AC-2.5: `on_error` callback fires on overflow + +### US-3: Graceful Shutdown + +As a Python developer, I want to ensure all logs are sent before my application exits so that I don't lose telemetry. + +**Acceptance Criteria**: +- AC-3.1: `shutdown()` flushes all remaining logs +- AC-3.2: `shutdown()` stops the flush timer +- AC-3.3: `shutdown()` is idempotent (safe to call multiple times) +- AC-3.4: No logs accepted after shutdown + +### US-4: Child Loggers + +As a Python developer, I want to create child loggers with inherited context so that I can add request-scoped metadata. + +**Acceptance Criteria**: +- AC-4.1: `child()` returns new logger sharing parent's queue +- AC-4.2: Child can override service name +- AC-4.3: Child metadata merged with parent metadata +- AC-4.4: Child inherits parent's config (batch size, etc) + +### US-5: Error Handling + +As a Python developer, I want clear error handling so that I can debug issues with log delivery. + +**Acceptance Criteria**: +- AC-5.1: `LogwellError` raised with code, message, retryable flag +- AC-5.2: Network errors are retried with exponential backoff +- AC-5.3: 401 errors marked non-retryable +- AC-5.4: 429 errors marked retryable +- AC-5.5: `on_error` callback receives exceptions + +### US-6: Source Location Capture + +As a Python developer, I want optional file/line info so that I can trace logs back to code. + +**Acceptance Criteria**: +- AC-6.1: Disabled by default (no performance overhead) +- AC-6.2: When enabled, logs include `source_file` and `line_number` +- AC-6.3: Captures caller location, not SDK internals +- AC-6.4: Works in all log methods including `log()` + +### US-7: Configuration Validation + +As a Python developer, I want config validation at startup so that I catch misconfigurations early. + +**Acceptance Criteria**: +- AC-7.1: API key format validated: `lw_[32 chars]` +- AC-7.2: Endpoint validated as valid URL +- AC-7.3: Numeric options validated positive/non-negative +- AC-7.4: Clear error messages on invalid config + +## Functional Requirements + +| ID | Requirement | Priority | Source | +|----|-------------|----------|--------| +| FR-1 | Client exposes debug/info/warn/error/fatal methods | Must | US-1 | +| FR-2 | Client exposes generic log() method | Must | US-1 | +| FR-3 | Logs batched by count threshold | Must | US-2 | +| FR-4 | Logs batched by time interval | Must | US-2 | +| FR-5 | Manual flush() method | Must | US-2 | +| FR-6 | Queue overflow drops oldest | Must | US-2 | +| FR-7 | shutdown() flushes and stops | Must | US-3 | +| FR-8 | child() creates scoped logger | Must | US-4 | +| FR-9 | LogwellError with error codes | Must | US-5 | +| FR-10 | Retry with exponential backoff | Must | US-5 | +| FR-11 | Source location capture | Should | US-6 | +| FR-12 | Config validation at init | Must | US-7 | +| FR-13 | queue_size property | Should | US-2 | +| FR-14 | on_flush callback | Should | US-2 | + +## Non-Functional Requirements + +| ID | Requirement | Category | +|----|-------------|----------| +| NFR-1 | Python 3.9+ compatibility | Compatibility | +| NFR-2 | Thread-safe queue operations | Concurrency | +| NFR-3 | Async-first with sync wrappers | API Design | +| NFR-4 | < 100KB installed size (excluding httpx) | Size | +| NFR-5 | 90%+ test coverage | Quality | +| NFR-6 | Type hints throughout | Quality | +| NFR-7 | Documented public API | Documentation | + +## Out of Scope + +- Sync-only fallback without httpx +- Custom serializers for metadata +- Automatic exception logging +- Integration with Python logging module +- OTLP protocol support (future) +- Compression of payloads + +## Dependencies + +| Dependency | Version | Purpose | +|------------|---------|---------| +| httpx | >= 0.25.0 | Async HTTP client | +| typing_extensions | >= 4.0.0 | Backport typing features (3.9) | + +### Dev Dependencies + +| Dependency | Version | Purpose | +|------------|---------|---------| +| pytest | >= 8.0.0 | Test framework | +| pytest-asyncio | >= 0.23.0 | Async test support | +| respx | >= 0.21.0 | HTTP mocking | +| mypy | >= 1.8.0 | Type checking | +| ruff | >= 0.4.0 | Linting/formatting | +| pytest-cov | >= 4.1.0 | Coverage reporting | diff --git a/specs/python-sdk/research.md b/specs/python-sdk/research.md new file mode 100644 index 0000000..a68d8df --- /dev/null +++ b/specs/python-sdk/research.md @@ -0,0 +1,95 @@ +--- +spec: python-sdk +phase: research +created: 2026-01-16T00:00:00Z +generated: auto +--- + +# Research: Python SDK + +## Executive Summary + +Port TypeScript SDK to Python with full API parity. Uses `httpx` for async HTTP, `threading.Timer` for flush intervals, and `inspect` module for source location capture. High feasibility - standard Python patterns map directly to TypeScript implementation. + +## Codebase Analysis + +### TypeScript SDK Structure (Reference Implementation) + +| Module | Purpose | Python Equivalent | +|--------|---------|-------------------| +| `client.ts` | Main Logwell class, log methods, child loggers | `client.py` - identical API | +| `types.ts` | TypedDict/Literal types | `types.py` - typing module | +| `config.ts` | Validation, defaults, API key regex | `config.py` - same logic | +| `errors.ts` | LogwellError class, error codes | `errors.py` - Exception subclass | +| `queue.ts` | BatchQueue with timer-based flush | `queue.py` - threading.Timer | +| `transport.ts` | HTTP transport with retry/backoff | `transport.py` - httpx async | +| `source-location.ts` | Stack frame parsing | `source_location.py` - inspect | + +### Existing Patterns + +1. **TypeScript API Surface** (`sdks/typescript/src/client.ts:56-234`) + - Constructor with config validation + - Log level methods: `debug`, `info`, `warn`, `error`, `fatal` + - Generic `log()` method + - `flush()` and `shutdown()` async methods + - `child()` for nested loggers with context inheritance + +2. **Queue Management** (`sdks/typescript/src/queue.ts:30-157`) + - Timer-based auto-flush + - Batch size threshold triggering + - Queue overflow (drop oldest) + - Re-queue on failure + - Concurrent flush prevention + +3. **Error Handling** (`sdks/typescript/src/transport.ts:115-129`) + - Error codes: NETWORK_ERROR, UNAUTHORIZED, VALIDATION_ERROR, RATE_LIMITED, SERVER_ERROR, QUEUE_OVERFLOW, INVALID_CONFIG + - Retryable flag per error type + - Exponential backoff with jitter + +### Dependencies + +| Python Package | Purpose | Alternative | +|---------------|---------|-------------| +| `httpx` | Async HTTP client | `aiohttp` (heavier) | +| `pytest` | Testing framework | - | +| `pytest-asyncio` | Async test support | - | +| `respx` | HTTP mocking for httpx | `pytest-httpx` | +| `ruff` | Linting/formatting | `black` + `flake8` | +| `mypy` | Type checking | `pyright` | + +### Constraints + +1. **Python 3.9+ minimum** - `typing.Literal`, `|` union syntax (3.10+), `typing.TypedDict` +2. **Async/sync duality** - SDK must work in both sync and async contexts +3. **No runtime dependencies** except `httpx` - keep SDK lightweight +4. **Thread safety** - Queue must handle concurrent access +5. **GIL considerations** - Timer-based flush runs in separate thread + +## Feasibility Assessment + +| Aspect | Assessment | Notes | +|--------|------------|-------| +| Technical Viability | High | Direct 1:1 mapping to TS patterns | +| Effort Estimate | M | ~15-20 tasks, 3-5 days | +| Risk Level | Low | Standard Python async patterns | + +### Technical Mapping + +| TypeScript | Python | +|------------|--------| +| `interface LogEntry` | `TypedDict` or `dataclass` | +| `type LogLevel` | `Literal['debug', 'info', ...]` | +| `setTimeout`/`clearTimeout` | `threading.Timer` | +| `fetch()` | `httpx.AsyncClient.post()` | +| `Promise` | `async/await`, `asyncio.Future` | +| `Error.stack` | `inspect.stack()` | +| `class X { private y }` | `_y` naming convention | + +## Recommendations + +1. **Use `httpx`** - Modern, async-native HTTP client, better than `requests` for async +2. **Use `threading.Lock`** - For thread-safe queue operations +3. **Support both sync/async** - Provide `flush()` and `async flush()` variants +4. **Use `inspect.stack()`** - More reliable than parsing `traceback` strings +5. **Use `pyproject.toml`** - Modern Python packaging standard +6. **Target Python 3.9+** - Broadest compatibility with modern typing diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md new file mode 100644 index 0000000..46b7ffc --- /dev/null +++ b/specs/python-sdk/tasks.md @@ -0,0 +1,214 @@ +--- +spec: python-sdk +phase: tasks +total_tasks: 22 +created: 2026-01-16T00:00:00Z +generated: auto +--- + +# Tasks: Python SDK + +## Phase 1: Make It Work (POC) + +Focus: Validate Python SDK works end-to-end. Skip tests, accept minimal error handling. + +- [ ] 1.1 Create package structure + - **Do**: Create `sdks/python/` directory with `pyproject.toml`, `src/logwell/__init__.py`, empty module files + - **Files**: `sdks/python/pyproject.toml`, `sdks/python/src/logwell/__init__.py`, `sdks/python/src/logwell/py.typed` + - **Done when**: `cd sdks/python && pip install -e .` succeeds + - **Verify**: `python -c "import logwell; print(logwell.__version__)"` + - **Commit**: `feat(python-sdk): scaffold package structure` + - _Requirements: NFR-1_ + - _Design: File Structure_ + +- [ ] 1.2 Implement types module + - **Do**: Create `types.py` with LogLevel, LogEntry, LogwellConfig, IngestResponse TypedDicts + - **Files**: `sdks/python/src/logwell/types.py` + - **Done when**: Types importable, mypy passes on module + - **Verify**: `python -c "from logwell.types import LogLevel, LogEntry"` + - **Commit**: `feat(python-sdk): add type definitions` + - _Requirements: FR-1, NFR-6_ + - _Design: Types_ + +- [ ] 1.3 Implement errors module + - **Do**: Create `errors.py` with LogwellErrorCode enum and LogwellError exception class + - **Files**: `sdks/python/src/logwell/errors.py` + - **Done when**: Can raise and catch LogwellError with code/message/retryable + - **Verify**: `python -c "from logwell.errors import LogwellError, LogwellErrorCode; raise LogwellError('test', LogwellErrorCode.NETWORK_ERROR)"` + - **Commit**: `feat(python-sdk): add error types` + - _Requirements: FR-9_ + - _Design: Errors_ + +- [ ] 1.4 Implement config module + - **Do**: Create `config.py` with DEFAULT_CONFIG, API_KEY_REGEX, validate_api_key_format(), validate_config() + - **Files**: `sdks/python/src/logwell/config.py` + - **Done when**: Validates API key format, endpoint URL, merges defaults + - **Verify**: `python -c "from logwell.config import validate_config; print(validate_config({'api_key': 'lw_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 'endpoint': 'https://example.com'}))"` + - **Commit**: `feat(python-sdk): add config validation` + - _Requirements: FR-12, AC-7.1, AC-7.2, AC-7.3_ + - _Design: Config_ + +- [ ] 1.5 Implement transport module + - **Do**: Create `transport.py` with HttpTransport class, send() with retry and backoff + - **Files**: `sdks/python/src/logwell/transport.py` + - **Done when**: Can POST to endpoint with auth header, retries on failure + - **Verify**: Manual test against local server or mock + - **Commit**: `feat(python-sdk): add HTTP transport with retry` + - _Requirements: FR-10, AC-5.2, AC-5.3, AC-5.4_ + - _Design: HttpTransport_ + +- [ ] 1.6 Implement queue module + - **Do**: Create `queue.py` with BatchQueue class, add(), flush(), shutdown(), timer management + - **Files**: `sdks/python/src/logwell/queue.py` + - **Done when**: Queues logs, auto-flushes on batch_size, timer-based flush works + - **Verify**: Manual test with print statements + - **Commit**: `feat(python-sdk): add batch queue with auto-flush` + - _Requirements: FR-3, FR-4, FR-5, FR-6, AC-2.1, AC-2.2, AC-2.3, AC-2.4_ + - _Design: BatchQueue_ + +- [ ] 1.7 Implement source_location module + - **Do**: Create `source_location.py` with SourceLocation dataclass and capture_source_location() + - **Files**: `sdks/python/src/logwell/source_location.py` + - **Done when**: Returns file/line of caller, skips SDK frames + - **Verify**: `python -c "from logwell.source_location import capture_source_location; print(capture_source_location(0))"` + - **Commit**: `feat(python-sdk): add source location capture` + - _Requirements: FR-11, AC-6.1, AC-6.2, AC-6.3_ + - _Design: SourceLocation_ + +- [ ] 1.8 Implement client module + - **Do**: Create `client.py` with Logwell class, all log methods, flush(), shutdown(), child() + - **Files**: `sdks/python/src/logwell/client.py` + - **Done when**: Can instantiate, log, flush, create child loggers + - **Verify**: Manual test logging to local server + - **Commit**: `feat(python-sdk): add Logwell client class` + - _Requirements: FR-1, FR-2, FR-7, FR-8, AC-1.1 through AC-4.4_ + - _Design: Logwell_ + +- [ ] 1.9 Wire up __init__.py exports + - **Do**: Export Logwell, LogwellError, LogwellErrorCode, types from `__init__.py` + - **Files**: `sdks/python/src/logwell/__init__.py` + - **Done when**: `from logwell import Logwell, LogwellError` works + - **Verify**: `python -c "from logwell import Logwell, LogwellError, LogwellErrorCode"` + - **Commit**: `feat(python-sdk): expose public API` + - _Requirements: NFR-7_ + - _Design: Architecture_ + +- [ ] 1.10 POC Checkpoint + - **Do**: Test full flow: instantiate, log, flush against mock or real server + - **Done when**: Logs appear in server, no errors + - **Verify**: Run manual E2E test script + - **Commit**: `feat(python-sdk): complete POC` + +## Phase 2: Refactoring + +After POC validated, clean up code. + +- [ ] 2.1 Add thread safety to queue + - **Do**: Add threading.Lock to BatchQueue, protect queue/timer operations + - **Files**: `sdks/python/src/logwell/queue.py` + - **Done when**: No race conditions under concurrent add/flush + - **Verify**: `mypy sdks/python/src && ruff check sdks/python/src` + - **Commit**: `refactor(python-sdk): add thread safety to queue` + - _Requirements: NFR-2_ + - _Design: Thread Safety_ + +- [ ] 2.2 Improve error messages + - **Do**: Add detailed context to all LogwellError raises + - **Files**: `sdks/python/src/logwell/*.py` + - **Done when**: Each error includes actionable message + - **Verify**: `mypy sdks/python/src` + - **Commit**: `refactor(python-sdk): improve error messages` + - _Requirements: AC-7.4_ + - _Design: Error Handling_ + +- [ ] 2.3 Add type hints throughout + - **Do**: Ensure all functions have full type annotations, run mypy strict + - **Files**: `sdks/python/src/logwell/*.py` + - **Done when**: `mypy --strict sdks/python/src` passes + - **Verify**: `mypy --strict sdks/python/src` + - **Commit**: `refactor(python-sdk): add strict type hints` + - _Requirements: NFR-6_ + - _Design: Types_ + +- [ ] 2.4 Add README and LICENSE + - **Do**: Create README.md with usage examples, create LICENSE (MIT) + - **Files**: `sdks/python/README.md`, `sdks/python/LICENSE` + - **Done when**: README has install, basic usage, API reference sections + - **Verify**: Visual inspection + - **Commit**: `docs(python-sdk): add README and LICENSE` + - _Requirements: NFR-7_ + +## Phase 3: Testing + +- [ ] 3.1 Create test fixtures + - **Do**: Create conftest.py with valid/invalid configs, mock responses + - **Files**: `sdks/python/tests/__init__.py`, `sdks/python/tests/conftest.py` + - **Done when**: Fixtures importable in tests + - **Verify**: `pytest sdks/python/tests --collect-only` + - **Commit**: `test(python-sdk): add test fixtures` + - _Design: File Structure_ + +- [ ] 3.2 Unit tests for config + - **Do**: Test validate_config, validate_api_key_format, edge cases + - **Files**: `sdks/python/tests/unit/__init__.py`, `sdks/python/tests/unit/test_config.py` + - **Done when**: 100% coverage of config.py + - **Verify**: `pytest sdks/python/tests/unit/test_config.py -v` + - **Commit**: `test(python-sdk): add config unit tests` + - _Requirements: AC-7.1, AC-7.2, AC-7.3, AC-7.4_ + +- [ ] 3.3 Unit tests for errors + - **Do**: Test LogwellError construction, attributes, inheritance + - **Files**: `sdks/python/tests/unit/test_errors.py` + - **Done when**: All error codes tested + - **Verify**: `pytest sdks/python/tests/unit/test_errors.py -v` + - **Commit**: `test(python-sdk): add error unit tests` + - _Requirements: FR-9_ + +- [ ] 3.4 Unit tests for queue + - **Do**: Test add, flush, overflow, timer, shutdown, concurrent ops + - **Files**: `sdks/python/tests/unit/test_queue.py` + - **Done when**: All BatchQueue methods tested + - **Verify**: `pytest sdks/python/tests/unit/test_queue.py -v` + - **Commit**: `test(python-sdk): add queue unit tests` + - _Requirements: AC-2.1 through AC-2.5_ + +- [ ] 3.5 Unit tests for source_location + - **Do**: Test capture at different frame depths, invalid frames + - **Files**: `sdks/python/tests/unit/test_source_location.py` + - **Done when**: Source location capture verified + - **Verify**: `pytest sdks/python/tests/unit/test_source_location.py -v` + - **Commit**: `test(python-sdk): add source location tests` + - _Requirements: AC-6.1, AC-6.2, AC-6.3_ + +- [ ] 3.6 Unit tests for client + - **Do**: Test all log methods, child loggers, flush, shutdown + - **Files**: `sdks/python/tests/unit/test_client.py` + - **Done when**: Client API fully tested + - **Verify**: `pytest sdks/python/tests/unit/test_client.py -v` + - **Commit**: `test(python-sdk): add client unit tests` + - _Requirements: AC-1.1 through AC-4.4_ + +- [ ] 3.7 Integration tests + - **Do**: Test full flow with mocked HTTP (respx) + - **Files**: `sdks/python/tests/integration/__init__.py`, `sdks/python/tests/integration/test_e2e.py` + - **Done when**: E2E flow tested with mock server + - **Verify**: `pytest sdks/python/tests/integration -v` + - **Commit**: `test(python-sdk): add integration tests` + +## Phase 4: Quality Gates + +- [ ] 4.1 Local quality check + - **Do**: Run mypy, ruff, pytest with coverage + - **Verify**: `cd sdks/python && mypy --strict src && ruff check src && pytest --cov=src --cov-report=term-missing` + - **Done when**: All pass, coverage >= 90% + - **Commit**: `fix(python-sdk): address lint/type issues` (if needed) + +- [ ] 4.2 Create PR + - **Do**: Push branch, create PR with gh CLI + - **Verify**: `gh pr checks --watch` all green + - **Done when**: PR ready for review + +## Notes + +- **POC shortcuts taken**: Minimal error messages, no thread safety initially, manual testing only +- **Production TODOs**: Thread safety (Phase 2), comprehensive tests (Phase 3), type strictness (Phase 2) From a2da1f45300bd45df040f3b40041ed5f865cb39e Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 17:15:05 -0700 Subject: [PATCH 02/30] feat(python-sdk): scaffold package structure - Create pyproject.toml with hatch build, httpx dependency - Add src/logwell/__init__.py with __version__ - Add py.typed marker for type checking - Add minimal README.md for hatchling build --- sdks/python/README.md | 13 ++++++ sdks/python/pyproject.toml | 66 +++++++++++++++++++++++++++++ sdks/python/src/logwell/__init__.py | 4 ++ sdks/python/src/logwell/py.typed | 0 specs/python-sdk/.progress.md | 33 +++++++++++++++ specs/python-sdk/tasks.md | 2 +- 6 files changed, 117 insertions(+), 1 deletion(-) create mode 100644 sdks/python/README.md create mode 100644 sdks/python/pyproject.toml create mode 100644 sdks/python/src/logwell/__init__.py create mode 100644 sdks/python/src/logwell/py.typed create mode 100644 specs/python-sdk/.progress.md diff --git a/sdks/python/README.md b/sdks/python/README.md new file mode 100644 index 0000000..7f9f4e2 --- /dev/null +++ b/sdks/python/README.md @@ -0,0 +1,13 @@ +# Logwell Python SDK + +Official Python SDK for the Logwell logging platform. + +## Installation + +```bash +pip install logwell +``` + +## Usage + +Documentation coming soon. diff --git a/sdks/python/pyproject.toml b/sdks/python/pyproject.toml new file mode 100644 index 0000000..a689b29 --- /dev/null +++ b/sdks/python/pyproject.toml @@ -0,0 +1,66 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "logwell" +version = "0.1.0" +description = "Official Python SDK for Logwell logging platform" +readme = "README.md" +license = "MIT" +authors = [{ name = "Logwell", email = "dev@logwell.io" }] +keywords = ["logging", "logs", "observability", "logwell", "python"] +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Typing :: Typed", +] +requires-python = ">=3.9" +dependencies = [ + "httpx>=0.25.0", + "typing_extensions>=4.0.0", +] + +[project.optional-dependencies] +dev = [ + "pytest>=8.0.0", + "pytest-asyncio>=0.23.0", + "respx>=0.21.0", + "mypy>=1.8.0", + "ruff>=0.4.0", + "pytest-cov>=4.1.0", +] + +[project.urls] +Homepage = "https://github.com/Divkix/Logwell" +Documentation = "https://github.com/Divkix/Logwell/tree/main/sdks/python#readme" +Repository = "https://github.com/Divkix/Logwell" +Issues = "https://github.com/Divkix/Logwell/issues" + +[tool.hatch.build.targets.wheel] +packages = ["src/logwell"] + +[tool.mypy] +python_version = "3.9" +strict = true +warn_return_any = true +warn_unused_configs = true + +[tool.ruff] +target-version = "py39" +line-length = 100 + +[tool.ruff.lint] +select = ["E", "F", "I", "W", "UP", "B", "SIM", "TCH"] + +[tool.pytest.ini_options] +asyncio_mode = "auto" +testpaths = ["tests"] diff --git a/sdks/python/src/logwell/__init__.py b/sdks/python/src/logwell/__init__.py new file mode 100644 index 0000000..f202827 --- /dev/null +++ b/sdks/python/src/logwell/__init__.py @@ -0,0 +1,4 @@ +"""Logwell Python SDK - Official logging client for Logwell platform.""" + +__version__ = "0.1.0" +__all__ = ["__version__"] diff --git a/sdks/python/src/logwell/py.typed b/sdks/python/src/logwell/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md new file mode 100644 index 0000000..b1fbb6b --- /dev/null +++ b/specs/python-sdk/.progress.md @@ -0,0 +1,33 @@ +# Progress: python-sdk + +## Goal +Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity. The server is ready, and we need a Python SDK so Python developers can also use the logging platform. + +## Status +- Phase: Execution (Phase 1 - POC) +- Started: 2026-01-16 +- Artifacts generated: research.md, requirements.md, design.md, tasks.md + +## Completed Tasks +- [x] 1.1 Create package structure - c83c5b5 + +## Current Task +Awaiting next task + +## Learnings + +- TypeScript SDK has clean 1:1 mappable module structure to Python +- Queue uses setTimeout/clearTimeout - maps to threading.Timer in Python +- Transport uses native fetch - httpx is the modern Python equivalent +- Source location uses Error.stack - Python uses inspect.stack() which is more direct +- Child loggers share parent queue reference - same pattern works in Python +- API key regex: `^lw_[A-Za-z0-9_-]{32}$` - same in Python with `re` module +- Config defaults: batchSize=50, flushInterval=5000ms, maxQueueSize=1000, maxRetries=3 +- Exponential backoff formula: `min(baseDelay * 2^attempt, 10000)` + 30% jitter +- Error codes map to enum in Python (LogwellErrorCode) +- TypedDict preferred over dataclass for zero runtime overhead on types +- Python 3.14 on macOS uses externally-managed-environment, need venv for dev +- hatchling requires README.md to exist for build + +## Next +Task 1.2: Implement types module diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index 46b7ffc..a7162dd 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -12,7 +12,7 @@ generated: auto Focus: Validate Python SDK works end-to-end. Skip tests, accept minimal error handling. -- [ ] 1.1 Create package structure +- [x] 1.1 Create package structure - **Do**: Create `sdks/python/` directory with `pyproject.toml`, `src/logwell/__init__.py`, empty module files - **Files**: `sdks/python/pyproject.toml`, `sdks/python/src/logwell/__init__.py`, `sdks/python/src/logwell/py.typed` - **Done when**: `cd sdks/python && pip install -e .` succeeds From 8889e572138d40a842f871bb922bae9ad8160894 Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 17:15:31 -0700 Subject: [PATCH 03/30] chore: add Python ignores to .gitignore --- .gitignore | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/.gitignore b/.gitignore index 0d4b427..f0be618 100644 --- a/.gitignore +++ b/.gitignore @@ -29,3 +29,15 @@ playwright-report/ .playwright/ specs/.current-spec **/.progress.md + +# Python +__pycache__/ +*.py[cod] +*$py.class +.venv/ +*.egg-info/ +dist/ +build/ +.mypy_cache/ +.ruff_cache/ +.pytest_cache/ From 379951bbda4c322744e3211758107fb4d23359be Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 17:17:24 -0700 Subject: [PATCH 04/30] feat(python-sdk): add type definitions Add types.py module with TypedDict definitions: - LogLevel: Literal type for log severity levels - LogEntry: Log entry with level, message, timestamp, metadata - LogwellConfig: Client configuration with api_key, endpoint, callbacks - IngestResponse: API response with accepted/rejected counts Implements FR-1 (log methods), NFR-6 (type hints throughout). --- sdks/python/src/logwell/__init__.py | 10 +++- sdks/python/src/logwell/types.py | 81 +++++++++++++++++++++++++++++ specs/python-sdk/.progress.md | 3 +- specs/python-sdk/tasks.md | 2 +- 4 files changed, 93 insertions(+), 3 deletions(-) create mode 100644 sdks/python/src/logwell/types.py diff --git a/sdks/python/src/logwell/__init__.py b/sdks/python/src/logwell/__init__.py index f202827..eb8b4d4 100644 --- a/sdks/python/src/logwell/__init__.py +++ b/sdks/python/src/logwell/__init__.py @@ -1,4 +1,12 @@ """Logwell Python SDK - Official logging client for Logwell platform.""" +from logwell.types import IngestResponse, LogEntry, LogLevel, LogwellConfig + __version__ = "0.1.0" -__all__ = ["__version__"] +__all__ = [ + "__version__", + "IngestResponse", + "LogEntry", + "LogLevel", + "LogwellConfig", +] diff --git a/sdks/python/src/logwell/types.py b/sdks/python/src/logwell/types.py new file mode 100644 index 0000000..31dedac --- /dev/null +++ b/sdks/python/src/logwell/types.py @@ -0,0 +1,81 @@ +"""Type definitions for the Logwell Python SDK. + +This module provides type definitions for log entries, configuration, +and API responses using TypedDict for zero runtime overhead. +""" + +from __future__ import annotations + +from typing import Any, Callable, Literal + +from typing_extensions import NotRequired, Required, TypedDict + +LogLevel = Literal["debug", "info", "warn", "error", "fatal"] + + +class LogEntry(TypedDict, total=False): + """A log entry to be sent to Logwell. + + Required fields: + level: Log severity level + message: Log message content + + Optional fields: + timestamp: ISO8601 timestamp (auto-generated if not provided) + service: Service name for this log + metadata: Arbitrary key-value metadata + source_file: Source file path where log was called + line_number: Line number where log was called + """ + + level: Required[LogLevel] + message: Required[str] + timestamp: str + service: str + metadata: dict[str, Any] + source_file: str + line_number: int + + +class LogwellConfig(TypedDict, total=False): + """Configuration for the Logwell client. + + Required fields: + api_key: API key in format lw_[32 chars] + endpoint: Logwell server endpoint URL + + Optional fields: + service: Default service name for all logs + batch_size: Number of logs to batch before auto-flush (default: 50) + flush_interval: Seconds between auto-flushes (default: 5.0) + max_queue_size: Maximum queue size before dropping oldest (default: 1000) + max_retries: Maximum retry attempts for failed requests (default: 3) + capture_source_location: Whether to capture file/line info (default: False) + on_error: Callback function for errors + on_flush: Callback function after successful flush with count of logs sent + """ + + api_key: Required[str] + endpoint: Required[str] + service: str + batch_size: int + flush_interval: float + max_queue_size: int + max_retries: int + capture_source_location: bool + on_error: Callable[[Exception], None] + on_flush: Callable[[int], None] + + +class IngestResponse(TypedDict): + """Response from the Logwell ingest API. + + Fields: + accepted: Number of logs accepted + rejected: Number of logs rejected (optional) + errors: List of error messages (optional) + """ + + accepted: int + rejected: NotRequired[int] + errors: NotRequired[list[str]] diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md index b1fbb6b..72115d3 100644 --- a/specs/python-sdk/.progress.md +++ b/specs/python-sdk/.progress.md @@ -10,6 +10,7 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity ## Completed Tasks - [x] 1.1 Create package structure - c83c5b5 +- [x] 1.2 Implement types module - 948626d ## Current Task Awaiting next task @@ -30,4 +31,4 @@ Awaiting next task - hatchling requires README.md to exist for build ## Next -Task 1.2: Implement types module +Task 1.3: Implement errors module diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index a7162dd..b2ed6ec 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -21,7 +21,7 @@ Focus: Validate Python SDK works end-to-end. Skip tests, accept minimal error ha - _Requirements: NFR-1_ - _Design: File Structure_ -- [ ] 1.2 Implement types module +- [x] 1.2 Implement types module - **Do**: Create `types.py` with LogLevel, LogEntry, LogwellConfig, IngestResponse TypedDicts - **Files**: `sdks/python/src/logwell/types.py` - **Done when**: Types importable, mypy passes on module From 9ee5a1d050557bfcdcc252c5b706b0024036da0d Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 17:19:31 -0700 Subject: [PATCH 05/30] feat(python-sdk): add error types --- sdks/python/src/logwell/errors.py | 87 +++++++++++++++++++++++++++++++ specs/python-sdk/.progress.md | 3 +- specs/python-sdk/tasks.md | 2 +- 3 files changed, 90 insertions(+), 2 deletions(-) create mode 100644 sdks/python/src/logwell/errors.py diff --git a/sdks/python/src/logwell/errors.py b/sdks/python/src/logwell/errors.py new file mode 100644 index 0000000..4018b37 --- /dev/null +++ b/sdks/python/src/logwell/errors.py @@ -0,0 +1,87 @@ +"""Error types for the Logwell Python SDK. + +This module provides custom exception classes and error codes +for handling Logwell-specific errors. +""" + +from __future__ import annotations + +from enum import Enum + + +class LogwellErrorCode(str, Enum): + """Error codes for Logwell SDK errors. + + Each code represents a specific category of error that can occur + during SDK operations. + """ + + NETWORK_ERROR = "NETWORK_ERROR" + """Network connectivity or timeout error.""" + + UNAUTHORIZED = "UNAUTHORIZED" + """Invalid or expired API key (401).""" + + VALIDATION_ERROR = "VALIDATION_ERROR" + """Invalid request data or format.""" + + RATE_LIMITED = "RATE_LIMITED" + """Too many requests (429).""" + + SERVER_ERROR = "SERVER_ERROR" + """Server-side error (5xx).""" + + QUEUE_OVERFLOW = "QUEUE_OVERFLOW" + """Queue exceeded max size, logs dropped.""" + + INVALID_CONFIG = "INVALID_CONFIG" + """Invalid configuration value.""" + + +class LogwellError(Exception): + """Custom exception for Logwell SDK errors. + + Attributes: + message: Human-readable error description + code: Error category code + status_code: HTTP status code if applicable + retryable: Whether the operation can be retried + """ + + def __init__( + self, + message: str, + code: LogwellErrorCode, + status_code: int | None = None, + retryable: bool = False, + ) -> None: + """Initialize a LogwellError. + + Args: + message: Human-readable error description + code: Error category code from LogwellErrorCode enum + status_code: HTTP status code if applicable (default: None) + retryable: Whether the operation can be retried (default: False) + """ + super().__init__(message) + self.message = message + self.code = code + self.status_code = status_code + self.retryable = retryable + + def __str__(self) -> str: + """Return string representation of the error.""" + parts = [f"[{self.code.value}] {self.message}"] + if self.status_code is not None: + parts.append(f" (HTTP {self.status_code})") + return "".join(parts) + + def __repr__(self) -> str: + """Return detailed representation of the error.""" + return ( + f"LogwellError(" + f"message={self.message!r}, " + f"code={self.code!r}, " + f"status_code={self.status_code!r}, " + f"retryable={self.retryable!r})" + ) diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md index 72115d3..4ed172a 100644 --- a/specs/python-sdk/.progress.md +++ b/specs/python-sdk/.progress.md @@ -11,6 +11,7 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity ## Completed Tasks - [x] 1.1 Create package structure - c83c5b5 - [x] 1.2 Implement types module - 948626d +- [x] 1.3 Implement errors module - 0ca0585 ## Current Task Awaiting next task @@ -31,4 +32,4 @@ Awaiting next task - hatchling requires README.md to exist for build ## Next -Task 1.3: Implement errors module +Task 1.4: Implement config module diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index b2ed6ec..dc4f5f1 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -30,7 +30,7 @@ Focus: Validate Python SDK works end-to-end. Skip tests, accept minimal error ha - _Requirements: FR-1, NFR-6_ - _Design: Types_ -- [ ] 1.3 Implement errors module +- [x] 1.3 Implement errors module - **Do**: Create `errors.py` with LogwellErrorCode enum and LogwellError exception class - **Files**: `sdks/python/src/logwell/errors.py` - **Done when**: Can raise and catch LogwellError with code/message/retryable From 11c199dc00b23d4cec84f43fe819b6ea9c51c599 Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 17:21:20 -0700 Subject: [PATCH 06/30] feat(python-sdk): add config validation - Add DEFAULT_CONFIG with batch_size, flush_interval, max_queue_size, max_retries, capture_source_location defaults - Add API_KEY_REGEX pattern: lw_[32 alphanumeric chars] - Add validate_api_key_format() function - Add validate_config() that validates required fields, API key format, endpoint URL, numeric bounds, and merges with defaults --- sdks/python/src/logwell/config.py | 146 ++++++++++++++++++++++++++++++ specs/python-sdk/.progress.md | 3 +- specs/python-sdk/tasks.md | 2 +- 3 files changed, 149 insertions(+), 2 deletions(-) create mode 100644 sdks/python/src/logwell/config.py diff --git a/sdks/python/src/logwell/config.py b/sdks/python/src/logwell/config.py new file mode 100644 index 0000000..5d4cd9a --- /dev/null +++ b/sdks/python/src/logwell/config.py @@ -0,0 +1,146 @@ +"""Configuration validation for the Logwell Python SDK. + +This module provides configuration defaults, validation functions, +and config merging utilities. +""" + +from __future__ import annotations + +import re +from typing import Any +from urllib.parse import urlparse + +from logwell.errors import LogwellError, LogwellErrorCode +from logwell.types import LogwellConfig + +# Default configuration values +DEFAULT_CONFIG: dict[str, Any] = { + "batch_size": 50, + "flush_interval": 5.0, # seconds + "max_queue_size": 1000, + "max_retries": 3, + "capture_source_location": False, +} + +# API key format regex: lw_[32 alphanumeric chars including - and _] +API_KEY_REGEX: re.Pattern[str] = re.compile(r"^lw_[A-Za-z0-9_-]{32}$") + + +def validate_api_key_format(api_key: str) -> bool: + """Validate API key format. + + Args: + api_key: API key to validate + + Returns: + True if valid format, False otherwise + """ + if not api_key or not isinstance(api_key, str): + return False + return bool(API_KEY_REGEX.match(api_key)) + + +def _is_valid_url(url: str) -> bool: + """Validate a URL string. + + Args: + url: URL string to validate + + Returns: + True if valid URL with scheme and netloc, False otherwise + """ + try: + result = urlparse(url) + return bool(result.scheme and result.netloc) + except (ValueError, AttributeError): + return False + + +def validate_config(config: LogwellConfig) -> LogwellConfig: + """Validate configuration and return merged config with defaults. + + Args: + config: Configuration dict to validate + + Returns: + Complete configuration with defaults applied + + Raises: + LogwellError: If configuration is invalid (INVALID_CONFIG code) + """ + # Validate required fields + if "api_key" not in config or not config["api_key"]: + raise LogwellError( + "api_key is required", + LogwellErrorCode.INVALID_CONFIG, + ) + + if "endpoint" not in config or not config["endpoint"]: + raise LogwellError( + "endpoint is required", + LogwellErrorCode.INVALID_CONFIG, + ) + + # Validate API key format + if not validate_api_key_format(config["api_key"]): + raise LogwellError( + "Invalid API key format. Expected: lw_[32 characters]", + LogwellErrorCode.INVALID_CONFIG, + ) + + # Validate endpoint URL + if not _is_valid_url(config["endpoint"]): + raise LogwellError( + "Invalid endpoint URL", + LogwellErrorCode.INVALID_CONFIG, + ) + + # Validate numeric options + if "batch_size" in config and config["batch_size"] <= 0: + raise LogwellError( + "batch_size must be positive", + LogwellErrorCode.INVALID_CONFIG, + ) + + if "flush_interval" in config and config["flush_interval"] <= 0: + raise LogwellError( + "flush_interval must be positive", + LogwellErrorCode.INVALID_CONFIG, + ) + + if "max_queue_size" in config and config["max_queue_size"] <= 0: + raise LogwellError( + "max_queue_size must be positive", + LogwellErrorCode.INVALID_CONFIG, + ) + + if "max_retries" in config and config["max_retries"] < 0: + raise LogwellError( + "max_retries must be non-negative", + LogwellErrorCode.INVALID_CONFIG, + ) + + # Return merged config with defaults + merged: LogwellConfig = { + "api_key": config["api_key"], + "endpoint": config["endpoint"], + "batch_size": config.get("batch_size", DEFAULT_CONFIG["batch_size"]), + "flush_interval": config.get("flush_interval", DEFAULT_CONFIG["flush_interval"]), + "max_queue_size": config.get("max_queue_size", DEFAULT_CONFIG["max_queue_size"]), + "max_retries": config.get("max_retries", DEFAULT_CONFIG["max_retries"]), + "capture_source_location": config.get( + "capture_source_location", DEFAULT_CONFIG["capture_source_location"] + ), + } + + # Add optional fields if present + if "service" in config: + merged["service"] = config["service"] + + if "on_error" in config: + merged["on_error"] = config["on_error"] + + if "on_flush" in config: + merged["on_flush"] = config["on_flush"] + + return merged diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md index 4ed172a..a986505 100644 --- a/specs/python-sdk/.progress.md +++ b/specs/python-sdk/.progress.md @@ -12,6 +12,7 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity - [x] 1.1 Create package structure - c83c5b5 - [x] 1.2 Implement types module - 948626d - [x] 1.3 Implement errors module - 0ca0585 +- [x] 1.4 Implement config module - 4412b95 ## Current Task Awaiting next task @@ -32,4 +33,4 @@ Awaiting next task - hatchling requires README.md to exist for build ## Next -Task 1.4: Implement config module +Task 1.5: Implement transport module diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index dc4f5f1..b322cd6 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -39,7 +39,7 @@ Focus: Validate Python SDK works end-to-end. Skip tests, accept minimal error ha - _Requirements: FR-9_ - _Design: Errors_ -- [ ] 1.4 Implement config module +- [x] 1.4 Implement config module - **Do**: Create `config.py` with DEFAULT_CONFIG, API_KEY_REGEX, validate_api_key_format(), validate_config() - **Files**: `sdks/python/src/logwell/config.py` - **Done when**: Validates API key format, endpoint URL, merges defaults From a82ec723a2262150dc65bf91e877c0c36b459964 Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 17:23:24 -0700 Subject: [PATCH 07/30] feat(python-sdk): add HTTP transport with retry MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implement HttpTransport class with: - POST to /v1/ingest endpoint with Bearer auth header - Exponential backoff with jitter (min(base * 2^attempt, 10s) + 30% jitter) - Error classification by HTTP status: - 401 → UNAUTHORIZED (non-retryable) - 400 → VALIDATION_ERROR (non-retryable) - 429 → RATE_LIMITED (retryable) - 5xx → SERVER_ERROR (retryable) - TransportConfig from LogwellConfig helper - Async httpx client with lazy initialization Requirements: FR-10, AC-5.2, AC-5.3, AC-5.4 --- sdks/python/.gitignore | 216 ++++++++++++++++++++++++ sdks/python/src/logwell/transport.py | 241 +++++++++++++++++++++++++++ specs/python-sdk/.progress.md | 3 +- specs/python-sdk/tasks.md | 2 +- 4 files changed, 460 insertions(+), 2 deletions(-) create mode 100644 sdks/python/.gitignore create mode 100644 sdks/python/src/logwell/transport.py diff --git a/sdks/python/.gitignore b/sdks/python/.gitignore new file mode 100644 index 0000000..e15106e --- /dev/null +++ b/sdks/python/.gitignore @@ -0,0 +1,216 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[codz] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py.cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +# Pipfile.lock + +# UV +# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# uv.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +# poetry.lock +# poetry.toml + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python. +# https://pdm-project.org/en/latest/usage/project/#working-with-version-control +# pdm.lock +# pdm.toml +.pdm-python +.pdm-build/ + +# pixi +# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control. +# pixi.lock +# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one +# in the .venv directory. It is recommended not to include this directory in version control. +.pixi + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# Redis +*.rdb +*.aof +*.pid + +# RabbitMQ +mnesia/ +rabbitmq/ +rabbitmq-data/ + +# ActiveMQ +activemq-data/ + +# SageMath parsed files +*.sage.py + +# Environments +.env +.envrc +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +# .idea/ + +# Abstra +# Abstra is an AI-powered process automation framework. +# Ignore directories containing user credentials, local state, and settings. +# Learn more at https://abstra.io/docs +.abstra/ + +# Visual Studio Code +# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore +# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore +# and can be added to the global gitignore or merged into this file. However, if you prefer, +# you could uncomment the following to ignore the entire vscode folder +# .vscode/ + +# Ruff stuff: +.ruff_cache/ + +# PyPI configuration file +.pypirc + +# Marimo +marimo/_static/ +marimo/_lsp/ +__marimo__/ + +# Streamlit +.streamlit/secrets.toml diff --git a/sdks/python/src/logwell/transport.py b/sdks/python/src/logwell/transport.py new file mode 100644 index 0000000..3596293 --- /dev/null +++ b/sdks/python/src/logwell/transport.py @@ -0,0 +1,241 @@ +"""HTTP transport for the Logwell Python SDK. + +This module provides the HttpTransport class for sending log batches +to the Logwell server with retry logic and exponential backoff. +""" + +from __future__ import annotations + +import asyncio +import random +from typing import Any + +import httpx + +from logwell.errors import LogwellError, LogwellErrorCode +from logwell.types import IngestResponse, LogEntry, LogwellConfig + + +class TransportConfig: + """Configuration for HTTP transport. + + Attributes: + endpoint: Logwell server endpoint URL + api_key: API key for authentication + max_retries: Maximum number of retry attempts + timeout: Request timeout in seconds (default: 30) + """ + + def __init__( + self, + endpoint: str, + api_key: str, + max_retries: int = 3, + timeout: float = 30.0, + ) -> None: + self.endpoint = endpoint + self.api_key = api_key + self.max_retries = max_retries + self.timeout = timeout + + @classmethod + def from_logwell_config(cls, config: LogwellConfig) -> TransportConfig: + """Create TransportConfig from LogwellConfig.""" + return cls( + endpoint=config["endpoint"], + api_key=config["api_key"], + max_retries=config.get("max_retries", 3), + ) + + +async def _delay(attempt: int, base_delay: float = 0.1) -> None: + """Delay with exponential backoff and jitter. + + Args: + attempt: Current attempt number (0-indexed) + base_delay: Base delay in seconds (default: 100ms) + + Formula: min(base_delay * 2^attempt, 10) + 30% jitter + """ + delay_secs = min(base_delay * (2**attempt), 10.0) + jitter = random.random() * delay_secs * 0.3 + await asyncio.sleep(delay_secs + jitter) + + +class HttpTransport: + """HTTP transport for sending logs to Logwell server. + + Features: + - Automatic retry with exponential backoff + - Error classification with retryable flag + - Proper error handling for all HTTP status codes + """ + + def __init__(self, config: LogwellConfig | TransportConfig) -> None: + """Initialize the HTTP transport. + + Args: + config: Either a LogwellConfig or TransportConfig + """ + if isinstance(config, TransportConfig): + self._config = config + else: + self._config = TransportConfig.from_logwell_config(config) + + self._ingest_url = f"{self._config.endpoint}/v1/ingest" + self._client: httpx.AsyncClient | None = None + + async def _get_client(self) -> httpx.AsyncClient: + """Get or create the HTTP client.""" + if self._client is None: + self._client = httpx.AsyncClient(timeout=self._config.timeout) + return self._client + + async def close(self) -> None: + """Close the HTTP client.""" + if self._client is not None: + await self._client.aclose() + self._client = None + + async def send(self, logs: list[LogEntry]) -> IngestResponse: + """Send logs to the Logwell server. + + Args: + logs: Array of log entries to send + + Returns: + Response with accepted/rejected counts + + Raises: + LogwellError: On failure after all retries + """ + last_error: LogwellError = LogwellError( + "Max retries exceeded", + LogwellErrorCode.NETWORK_ERROR, + None, + True, + ) + + for attempt in range(self._config.max_retries + 1): + try: + return await self._do_request(logs) + except LogwellError as error: + last_error = error + + # Don't retry non-retryable errors + if not error.retryable: + raise + + # Don't delay after the last attempt + if attempt < self._config.max_retries: + await _delay(attempt) + + raise last_error + + async def _do_request(self, logs: list[LogEntry]) -> IngestResponse: + """Execute the HTTP request. + + Args: + logs: Array of log entries to send + + Returns: + Parsed IngestResponse + + Raises: + LogwellError: On network or HTTP errors + """ + client = await self._get_client() + + try: + response = await client.post( + self._ingest_url, + headers={ + "Authorization": f"Bearer {self._config.api_key}", + "Content-Type": "application/json", + }, + json=logs, + ) + except httpx.TimeoutException as e: + raise LogwellError( + f"Request timeout: {e}", + LogwellErrorCode.NETWORK_ERROR, + None, + True, + ) from e + except httpx.RequestError as e: + raise LogwellError( + f"Network error: {e}", + LogwellErrorCode.NETWORK_ERROR, + None, + True, + ) from e + + # Handle error responses + if not response.is_success: + error_body = self._try_parse_error(response) + raise self._create_error(response.status_code, error_body) + + # Parse successful response + data: IngestResponse = response.json() + return data + + def _try_parse_error(self, response: httpx.Response) -> str: + """Try to parse error message from response body. + + Args: + response: HTTP response + + Returns: + Error message string + """ + try: + body: dict[str, Any] = response.json() + return body.get("message") or body.get("error") or "Unknown error" + except Exception: + return f"HTTP {response.status_code}" + + def _create_error(self, status: int, message: str) -> LogwellError: + """Create appropriate LogwellError based on status code. + + Args: + status: HTTP status code + message: Error message + + Returns: + LogwellError with appropriate code and retryable flag + """ + if status == 401: + return LogwellError( + f"Unauthorized: {message}", + LogwellErrorCode.UNAUTHORIZED, + status, + False, + ) + elif status == 400: + return LogwellError( + f"Validation error: {message}", + LogwellErrorCode.VALIDATION_ERROR, + status, + False, + ) + elif status == 429: + return LogwellError( + f"Rate limited: {message}", + LogwellErrorCode.RATE_LIMITED, + status, + True, + ) + elif status >= 500: + return LogwellError( + f"Server error: {message}", + LogwellErrorCode.SERVER_ERROR, + status, + True, + ) + else: + return LogwellError( + f"HTTP error {status}: {message}", + LogwellErrorCode.SERVER_ERROR, + status, + False, + ) diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md index a986505..d1339e1 100644 --- a/specs/python-sdk/.progress.md +++ b/specs/python-sdk/.progress.md @@ -13,6 +13,7 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity - [x] 1.2 Implement types module - 948626d - [x] 1.3 Implement errors module - 0ca0585 - [x] 1.4 Implement config module - 4412b95 +- [x] 1.5 Implement transport module - 7f6938c ## Current Task Awaiting next task @@ -33,4 +34,4 @@ Awaiting next task - hatchling requires README.md to exist for build ## Next -Task 1.5: Implement transport module +Task 1.6: Implement queue module diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index b322cd6..55f5e79 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -48,7 +48,7 @@ Focus: Validate Python SDK works end-to-end. Skip tests, accept minimal error ha - _Requirements: FR-12, AC-7.1, AC-7.2, AC-7.3_ - _Design: Config_ -- [ ] 1.5 Implement transport module +- [x] 1.5 Implement transport module - **Do**: Create `transport.py` with HttpTransport class, send() with retry and backoff - **Files**: `sdks/python/src/logwell/transport.py` - **Done when**: Can POST to endpoint with auth header, retries on failure From 83cc8413b1be79b98d021491e818fe8fa740a100 Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 17:26:42 -0700 Subject: [PATCH 08/30] feat(python-sdk): add batch queue with auto-flush Implements BatchQueue class with: - Automatic flush on batch_size threshold - Timer-based flush using threading.Timer - Queue overflow protection (drops oldest) - Re-queue on send failure - Graceful shutdown with final flush - Thread-safe operations with threading.Lock --- sdks/python/src/logwell/queue.py | 243 +++++++++++++++++++++++++++++++ specs/python-sdk/.progress.md | 5 +- specs/python-sdk/tasks.md | 2 +- 3 files changed, 248 insertions(+), 2 deletions(-) create mode 100644 sdks/python/src/logwell/queue.py diff --git a/sdks/python/src/logwell/queue.py b/sdks/python/src/logwell/queue.py new file mode 100644 index 0000000..a6d24c3 --- /dev/null +++ b/sdks/python/src/logwell/queue.py @@ -0,0 +1,243 @@ +"""Batch queue for the Logwell Python SDK. + +This module provides the BatchQueue class for buffering logs and managing +automatic flush operations based on batch size and time interval. +""" + +from __future__ import annotations + +import asyncio +import threading +from typing import TYPE_CHECKING, Awaitable, Callable + +from logwell.errors import LogwellError, LogwellErrorCode +from logwell.types import IngestResponse, LogEntry + +if TYPE_CHECKING: + from logwell.types import LogwellConfig + +# Type alias for the send batch callback +SendBatchFn = Callable[[list[LogEntry]], Awaitable[IngestResponse]] + + +class QueueConfig: + """Configuration for the batch queue. + + Attributes: + batch_size: Number of logs to batch before auto-flush + flush_interval: Seconds between auto-flushes + max_queue_size: Maximum queue size before dropping oldest + on_error: Callback function for errors + on_flush: Callback function after successful flush + """ + + def __init__( + self, + batch_size: int = 50, + flush_interval: float = 5.0, + max_queue_size: int = 1000, + on_error: Callable[[Exception], None] | None = None, + on_flush: Callable[[int], None] | None = None, + ) -> None: + self.batch_size = batch_size + self.flush_interval = flush_interval + self.max_queue_size = max_queue_size + self.on_error = on_error + self.on_flush = on_flush + + @classmethod + def from_logwell_config(cls, config: LogwellConfig) -> QueueConfig: + """Create QueueConfig from LogwellConfig.""" + return cls( + batch_size=config.get("batch_size", 50), + flush_interval=config.get("flush_interval", 5.0), + max_queue_size=config.get("max_queue_size", 1000), + on_error=config.get("on_error"), + on_flush=config.get("on_flush"), + ) + + +class BatchQueue: + """Batch queue for buffering and sending logs. + + Features: + - Automatic flush on batch size threshold + - Automatic flush on time interval + - Queue overflow protection (drops oldest) + - Re-queue on send failure + - Graceful shutdown + """ + + def __init__( + self, + send_batch: SendBatchFn, + config: QueueConfig | LogwellConfig, + ) -> None: + """Initialize the batch queue. + + Args: + send_batch: Async callback function to send a batch of logs + config: Either a QueueConfig or LogwellConfig + """ + if isinstance(config, QueueConfig): + self._config = config + else: + self._config = QueueConfig.from_logwell_config(config) + + self._send_batch = send_batch + self._queue: list[LogEntry] = [] + self._lock = threading.Lock() + self._timer: threading.Timer | None = None + self._flushing = False + self._stopped = False + + @property + def size(self) -> int: + """Current number of logs in the queue.""" + with self._lock: + return len(self._queue) + + def add(self, entry: LogEntry) -> None: + """Add a log entry to the queue. + + Triggers flush if batch size is reached. + Drops oldest log if queue overflows. + + Args: + entry: Log entry to add + """ + with self._lock: + if self._stopped: + return + + # Handle queue overflow + if len(self._queue) >= self._config.max_queue_size: + dropped = self._queue.pop(0) + if self._config.on_error: + msg = dropped.get("message", "")[:50] + self._config.on_error( + LogwellError( + f"Queue overflow. Dropped log: {msg}...", + LogwellErrorCode.QUEUE_OVERFLOW, + ) + ) + + self._queue.append(entry) + + # Start timer on first entry + if self._timer is None and not self._stopped: + self._start_timer() + + # Flush immediately if batch size reached + should_flush = len(self._queue) >= self._config.batch_size + + if should_flush: + self._trigger_flush() + + def _trigger_flush(self) -> None: + """Trigger an asynchronous flush operation. + + This method schedules the flush to run in the background + without blocking the caller. + """ + try: + loop = asyncio.get_running_loop() + loop.create_task(self.flush()) + except RuntimeError: + # No running event loop, run in new loop + asyncio.run(self.flush()) + + async def flush(self) -> IngestResponse | None: + """Flush all queued logs immediately. + + Returns: + Response from the server, or None if queue was empty or flush in progress + """ + with self._lock: + # Prevent concurrent flushes + if self._flushing or len(self._queue) == 0: + return None + + self._flushing = True + self._stop_timer() + + # Take current batch + batch = self._queue.copy() + self._queue.clear() + count = len(batch) + + try: + response = await self._send_batch(batch) + if self._config.on_flush: + self._config.on_flush(count) + + # Restart timer if more logs remain (added during flush) + with self._lock: + if len(self._queue) > 0 and not self._stopped: + self._start_timer() + + return response + except Exception as error: + # Re-queue failed logs at the front + with self._lock: + self._queue = batch + self._queue + if self._config.on_error: + self._config.on_error(error) + + # Restart timer to retry + if not self._stopped: + self._start_timer() + + return None + finally: + with self._lock: + self._flushing = False + + async def shutdown(self) -> None: + """Flush remaining logs and stop the queue. + + This method is idempotent - safe to call multiple times. + After shutdown, no more logs will be accepted. + """ + with self._lock: + if self._stopped: + return + + self._stopped = True + self._stop_timer() + self._flushing = False # Reset flushing flag + + # Flush all remaining logs + if self.size > 0: + await self.flush() + + def _start_timer(self) -> None: + """Start the flush timer. + + Note: Must be called while holding the lock. + """ + self._stop_timer() + self._timer = threading.Timer( + self._config.flush_interval, + self._on_timer_expired, + ) + self._timer.daemon = True + self._timer.start() + + def _stop_timer(self) -> None: + """Stop the flush timer. + + Note: Must be called while holding the lock. + """ + if self._timer is not None: + self._timer.cancel() + self._timer = None + + def _on_timer_expired(self) -> None: + """Handle timer expiration by triggering a flush.""" + with self._lock: + self._timer = None + if self._stopped: + return + + self._trigger_flush() diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md index d1339e1..d9b1575 100644 --- a/specs/python-sdk/.progress.md +++ b/specs/python-sdk/.progress.md @@ -14,6 +14,7 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity - [x] 1.3 Implement errors module - 0ca0585 - [x] 1.4 Implement config module - 4412b95 - [x] 1.5 Implement transport module - 7f6938c +- [x] 1.6 Implement queue module - 5e85776 ## Current Task Awaiting next task @@ -32,6 +33,8 @@ Awaiting next task - TypedDict preferred over dataclass for zero runtime overhead on types - Python 3.14 on macOS uses externally-managed-environment, need venv for dev - hatchling requires README.md to exist for build +- BatchQueue uses threading.Timer with daemon=True to auto-cleanup on program exit +- _trigger_flush handles both running event loop (create_task) and sync context (asyncio.run) ## Next -Task 1.6: Implement queue module +Task 1.7: Implement source_location module diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index 55f5e79..3bbedbe 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -57,7 +57,7 @@ Focus: Validate Python SDK works end-to-end. Skip tests, accept minimal error ha - _Requirements: FR-10, AC-5.2, AC-5.3, AC-5.4_ - _Design: HttpTransport_ -- [ ] 1.6 Implement queue module +- [x] 1.6 Implement queue module - **Do**: Create `queue.py` with BatchQueue class, add(), flush(), shutdown(), timer management - **Files**: `sdks/python/src/logwell/queue.py` - **Done when**: Queues logs, auto-flushes on batch_size, timer-based flush works From e2927bcc3a9cdc0f444ceb69e63bf660c0264422 Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 17:29:41 -0700 Subject: [PATCH 09/30] feat(python-sdk): add source location capture Implement source_location module with: - SourceLocation dataclass (frozen, slots) - capture_source_location() function using inspect.stack() - Supports skip_frames parameter to skip SDK internals - Returns None when stack depth exceeded Satisfies: FR-11, AC-6.1, AC-6.2, AC-6.3 --- sdks/python/src/logwell/source_location.py | 63 ++++++++++++++++++++++ specs/python-sdk/.progress.md | 4 +- specs/python-sdk/tasks.md | 2 +- 3 files changed, 67 insertions(+), 2 deletions(-) create mode 100644 sdks/python/src/logwell/source_location.py diff --git a/sdks/python/src/logwell/source_location.py b/sdks/python/src/logwell/source_location.py new file mode 100644 index 0000000..46c3bdf --- /dev/null +++ b/sdks/python/src/logwell/source_location.py @@ -0,0 +1,63 @@ +"""Source location capture for adding file/line info to log entries.""" + +from __future__ import annotations + +import inspect +from dataclasses import dataclass + + +@dataclass(frozen=True, slots=True) +class SourceLocation: + """Source location information captured from call stack. + + Attributes: + source_file: Absolute path to the source file + line_number: Line number in the source file + """ + + source_file: str + line_number: int + + +def capture_source_location(skip_frames: int = 0) -> SourceLocation | None: + """Capture the source location of the caller. + + Uses Python's inspect module to get the call stack and extract + the file path and line number of the caller. + + Args: + skip_frames: Number of stack frames to skip (0 = immediate caller + of this function). Typically you'd use skip_frames=1 to get + the caller of the function that calls capture_source_location. + + Returns: + SourceLocation with source_file and line_number, or None if + capture fails (e.g., skipFrames exceeds stack depth). + + Example: + # In a logging function that calls this + def log(message: str) -> None: + location = capture_source_location(1) # Skip log() frame + # location.source_file = file where log() was called + """ + try: + # inspect.stack() returns list of FrameInfo objects + # Index 0 is this function (capture_source_location) + # Index 1 is the immediate caller + # So we need index 1 + skip_frames + stack = inspect.stack() + + # Target frame: skip capture_source_location frame + user-specified frames + target_index = 1 + skip_frames + + if target_index >= len(stack): + return None + + frame_info = stack[target_index] + + return SourceLocation( + source_file=frame_info.filename, + line_number=frame_info.lineno, + ) + except (IndexError, AttributeError): + return None diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md index d9b1575..13c26d3 100644 --- a/specs/python-sdk/.progress.md +++ b/specs/python-sdk/.progress.md @@ -15,6 +15,7 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity - [x] 1.4 Implement config module - 4412b95 - [x] 1.5 Implement transport module - 7f6938c - [x] 1.6 Implement queue module - 5e85776 +- [x] 1.7 Implement source_location module - 5ae4d54 ## Current Task Awaiting next task @@ -35,6 +36,7 @@ Awaiting next task - hatchling requires README.md to exist for build - BatchQueue uses threading.Timer with daemon=True to auto-cleanup on program exit - _trigger_flush handles both running event loop (create_task) and sync context (asyncio.run) +- Python's inspect.stack() provides structured FrameInfo objects directly - much simpler than parsing JS Error.stack strings ## Next -Task 1.7: Implement source_location module +Task 1.8: Implement client module diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index 3bbedbe..cba7984 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -66,7 +66,7 @@ Focus: Validate Python SDK works end-to-end. Skip tests, accept minimal error ha - _Requirements: FR-3, FR-4, FR-5, FR-6, AC-2.1, AC-2.2, AC-2.3, AC-2.4_ - _Design: BatchQueue_ -- [ ] 1.7 Implement source_location module +- [x] 1.7 Implement source_location module - **Do**: Create `source_location.py` with SourceLocation dataclass and capture_source_location() - **Files**: `sdks/python/src/logwell/source_location.py` - **Done when**: Returns file/line of caller, skips SDK frames From 3c928c0d3ad7c778eb326f0f30f8f71d041927a1 Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 17:33:07 -0700 Subject: [PATCH 10/30] feat(python-sdk): add Logwell client class Main entry point for the Python SDK that provides: - All log level methods (debug, info, warn, error, fatal) - Generic log() method for custom entries - Automatic timestamp generation - Source location capture when enabled - Metadata merging with parent context - Child logger creation with shared queue - Graceful shutdown with flush The client validates config on init, creates transport and queue, and routes logs through the batch queue for efficient delivery. --- sdks/python/src/logwell/client.py | 282 ++++++++++++++++++++++++++++++ specs/python-sdk/.progress.md | 5 +- specs/python-sdk/tasks.md | 2 +- 3 files changed, 287 insertions(+), 2 deletions(-) create mode 100644 sdks/python/src/logwell/client.py diff --git a/sdks/python/src/logwell/client.py b/sdks/python/src/logwell/client.py new file mode 100644 index 0000000..52bcbc6 --- /dev/null +++ b/sdks/python/src/logwell/client.py @@ -0,0 +1,282 @@ +"""Main client class for the Logwell Python SDK. + +This module provides the Logwell class, the primary entry point for +logging to the Logwell platform. +""" + +from __future__ import annotations + +from datetime import datetime, timezone +from typing import TYPE_CHECKING, Any + +from logwell.config import validate_config +from logwell.queue import BatchQueue, QueueConfig +from logwell.source_location import capture_source_location +from logwell.transport import HttpTransport + +if TYPE_CHECKING: + from logwell.types import IngestResponse, LogEntry, LogwellConfig + + +class Logwell: + """Main Logwell client for logging to the Logwell platform. + + Provides methods for logging at different levels with automatic + batching, retry, and queue management. + + Example: + >>> client = Logwell({ + ... 'api_key': 'lw_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', + ... 'endpoint': 'https://logs.example.com', + ... 'service': 'my-app', + ... }) + >>> client.info('User logged in', {'user_id': '123'}) + >>> await client.shutdown() + """ + + def __init__( + self, + config: LogwellConfig, + *, + _queue: BatchQueue | None = None, + _parent_metadata: dict[str, Any] | None = None, + ) -> None: + """Initialize the Logwell client. + + Args: + config: Configuration dict with api_key, endpoint, and optional settings + _queue: Internal: shared queue for child loggers (do not use directly) + _parent_metadata: Internal: inherited metadata from parent (do not use directly) + """ + # Validate and apply defaults + self._config = validate_config(config) + self._parent_metadata = _parent_metadata + self._stopped = False + + # Create transport + self._transport = HttpTransport(self._config) + + # Use existing queue (for child loggers) or create new one + if _queue is not None: + self._queue = _queue + self._owns_queue = False + else: + queue_config = QueueConfig.from_logwell_config(self._config) + self._queue = BatchQueue( + send_batch=self._transport.send, + config=queue_config, + ) + self._owns_queue = True + + @property + def queue_size(self) -> int: + """Current number of logs waiting in the queue.""" + return self._queue.size + + def _add_log(self, entry: LogEntry, skip_frames: int) -> None: + """Internal log method with source location capture. + + Args: + entry: The log entry to add + skip_frames: Number of frames to skip for source location + """ + if self._stopped: + return + + source_file: str | None = None + line_number: int | None = None + + if self._config.get("capture_source_location", False): + location = capture_source_location(skip_frames) + if location: + source_file = location.source_file + line_number = location.line_number + + # Build full entry with defaults + full_entry: LogEntry = { + "level": entry["level"], + "message": entry["message"], + "timestamp": entry.get("timestamp") or datetime.now(timezone.utc).isoformat(), + } + + # Add service from entry, config, or omit + service = entry.get("service") or self._config.get("service") + if service: + full_entry["service"] = service + + # Merge metadata + merged_metadata = self._merge_metadata(entry.get("metadata")) + if merged_metadata: + full_entry["metadata"] = merged_metadata + + # Add source location if captured + if source_file is not None: + full_entry["source_file"] = source_file + if line_number is not None: + full_entry["line_number"] = line_number + + self._queue.add(full_entry) + + def log(self, entry: LogEntry) -> None: + """Log a message at the specified level. + + Args: + entry: Log entry with level, message, and optional metadata + """ + self._add_log(entry, skip_frames=2) + + def debug(self, message: str, metadata: dict[str, Any] | None = None) -> None: + """Log a debug message. + + Args: + message: Log message content + metadata: Optional key-value metadata + """ + entry: LogEntry = {"level": "debug", "message": message} + if metadata: + entry["metadata"] = metadata + self._add_log(entry, skip_frames=2) + + def info(self, message: str, metadata: dict[str, Any] | None = None) -> None: + """Log an info message. + + Args: + message: Log message content + metadata: Optional key-value metadata + """ + entry: LogEntry = {"level": "info", "message": message} + if metadata: + entry["metadata"] = metadata + self._add_log(entry, skip_frames=2) + + def warn(self, message: str, metadata: dict[str, Any] | None = None) -> None: + """Log a warning message. + + Args: + message: Log message content + metadata: Optional key-value metadata + """ + entry: LogEntry = {"level": "warn", "message": message} + if metadata: + entry["metadata"] = metadata + self._add_log(entry, skip_frames=2) + + def error(self, message: str, metadata: dict[str, Any] | None = None) -> None: + """Log an error message. + + Args: + message: Log message content + metadata: Optional key-value metadata + """ + entry: LogEntry = {"level": "error", "message": message} + if metadata: + entry["metadata"] = metadata + self._add_log(entry, skip_frames=2) + + def fatal(self, message: str, metadata: dict[str, Any] | None = None) -> None: + """Log a fatal error message. + + Args: + message: Log message content + metadata: Optional key-value metadata + """ + entry: LogEntry = {"level": "fatal", "message": message} + if metadata: + entry["metadata"] = metadata + self._add_log(entry, skip_frames=2) + + async def flush(self) -> IngestResponse | None: + """Flush all queued logs immediately. + + Returns: + Response from the server, or None if queue was empty + """ + return await self._queue.flush() + + async def shutdown(self) -> None: + """Flush remaining logs and stop the client. + + Call this before process exit to ensure all logs are sent. + This method is idempotent (safe to call multiple times). + """ + self._stopped = True + if self._owns_queue: + await self._queue.shutdown() + await self._transport.close() + + def child( + self, + metadata: dict[str, Any] | None = None, + *, + service: str | None = None, + ) -> Logwell: + """Create a child logger with additional context. + + Child loggers share the same queue as the parent, + but can have their own service name and default metadata. + + Args: + metadata: Additional metadata to include in all logs from this child + service: Override service name for this child logger + + Returns: + A new Logwell instance sharing the parent's queue + + Example: + >>> request_logger = logger.child({'request_id': req.id}) + >>> request_logger.info('Request received') + """ + child_config: LogwellConfig = { + "api_key": self._config["api_key"], + "endpoint": self._config["endpoint"], + "batch_size": self._config.get("batch_size", 50), + "flush_interval": self._config.get("flush_interval", 5.0), + "max_queue_size": self._config.get("max_queue_size", 1000), + "max_retries": self._config.get("max_retries", 3), + "capture_source_location": self._config.get("capture_source_location", False), + } + + # Set service: override > config > none + if service is not None: + child_config["service"] = service + elif "service" in self._config: + child_config["service"] = self._config["service"] + + # Preserve callbacks + if "on_error" in self._config: + child_config["on_error"] = self._config["on_error"] + if "on_flush" in self._config: + child_config["on_flush"] = self._config["on_flush"] + + # Merge metadata: parent -> new + child_metadata: dict[str, Any] | None = None + if self._parent_metadata or metadata: + child_metadata = { + **(self._parent_metadata or {}), + **(metadata or {}), + } + + return Logwell( + child_config, + _queue=self._queue, + _parent_metadata=child_metadata, + ) + + def _merge_metadata( + self, + entry_metadata: dict[str, Any] | None, + ) -> dict[str, Any] | None: + """Merge parent metadata with entry metadata. + + Args: + entry_metadata: Metadata from the log entry + + Returns: + Merged metadata dict, or None if neither exists + """ + if not self._parent_metadata and not entry_metadata: + return None + return { + **(self._parent_metadata or {}), + **(entry_metadata or {}), + } diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md index 13c26d3..b1c8386 100644 --- a/specs/python-sdk/.progress.md +++ b/specs/python-sdk/.progress.md @@ -16,6 +16,7 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity - [x] 1.5 Implement transport module - 7f6938c - [x] 1.6 Implement queue module - 5e85776 - [x] 1.7 Implement source_location module - 5ae4d54 +- [x] 1.8 Implement client module - 9ffb954 ## Current Task Awaiting next task @@ -37,6 +38,8 @@ Awaiting next task - BatchQueue uses threading.Timer with daemon=True to auto-cleanup on program exit - _trigger_flush handles both running event loop (create_task) and sync context (asyncio.run) - Python's inspect.stack() provides structured FrameInfo objects directly - much simpler than parsing JS Error.stack strings +- Client uses _owns_queue flag to track if it created the queue (needed for child loggers sharing parent queue) +- TYPE_CHECKING block prevents circular imports and satisfies ruff TC001 lint rule ## Next -Task 1.8: Implement client module +Task 1.9: Wire up __init__.py exports diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index cba7984..bf9fe6a 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -75,7 +75,7 @@ Focus: Validate Python SDK works end-to-end. Skip tests, accept minimal error ha - _Requirements: FR-11, AC-6.1, AC-6.2, AC-6.3_ - _Design: SourceLocation_ -- [ ] 1.8 Implement client module +- [x] 1.8 Implement client module - **Do**: Create `client.py` with Logwell class, all log methods, flush(), shutdown(), child() - **Files**: `sdks/python/src/logwell/client.py` - **Done when**: Can instantiate, log, flush, create child loggers From 920ba8d3a25b509bbb21c1ab1726677285e0e7c2 Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 17:35:28 -0700 Subject: [PATCH 11/30] feat(python-sdk): expose public API Export Logwell, LogwellError, LogwellErrorCode, and all types from the package's __init__.py for clean imports: from logwell import Logwell, LogwellError, LogwellErrorCode Task: 1.9 Spec: python-sdk --- sdks/python/src/logwell/__init__.py | 5 +++++ specs/python-sdk/.progress.md | 3 ++- specs/python-sdk/tasks.md | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/sdks/python/src/logwell/__init__.py b/sdks/python/src/logwell/__init__.py index eb8b4d4..c158607 100644 --- a/sdks/python/src/logwell/__init__.py +++ b/sdks/python/src/logwell/__init__.py @@ -1,5 +1,7 @@ """Logwell Python SDK - Official logging client for Logwell platform.""" +from logwell.client import Logwell +from logwell.errors import LogwellError, LogwellErrorCode from logwell.types import IngestResponse, LogEntry, LogLevel, LogwellConfig __version__ = "0.1.0" @@ -8,5 +10,8 @@ "IngestResponse", "LogEntry", "LogLevel", + "Logwell", "LogwellConfig", + "LogwellError", + "LogwellErrorCode", ] diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md index b1c8386..21c4574 100644 --- a/specs/python-sdk/.progress.md +++ b/specs/python-sdk/.progress.md @@ -17,6 +17,7 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity - [x] 1.6 Implement queue module - 5e85776 - [x] 1.7 Implement source_location module - 5ae4d54 - [x] 1.8 Implement client module - 9ffb954 +- [x] 1.9 Wire up __init__.py exports - 3c8711d ## Current Task Awaiting next task @@ -42,4 +43,4 @@ Awaiting next task - TYPE_CHECKING block prevents circular imports and satisfies ruff TC001 lint rule ## Next -Task 1.9: Wire up __init__.py exports +Task 1.10: POC Checkpoint diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index bf9fe6a..0e1b939 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -84,7 +84,7 @@ Focus: Validate Python SDK works end-to-end. Skip tests, accept minimal error ha - _Requirements: FR-1, FR-2, FR-7, FR-8, AC-1.1 through AC-4.4_ - _Design: Logwell_ -- [ ] 1.9 Wire up __init__.py exports +- [x] 1.9 Wire up __init__.py exports - **Do**: Export Logwell, LogwellError, LogwellErrorCode, types from `__init__.py` - **Files**: `sdks/python/src/logwell/__init__.py` - **Done when**: `from logwell import Logwell, LogwellError` works From ca26d46d1744472462695065a94b2d9e63ab9909 Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 17:39:05 -0700 Subject: [PATCH 12/30] feat(python-sdk): complete POC POC E2E verification complete: - Client instantiation works - All log levels (debug, info, warn, error, fatal) - Metadata attachment - Child loggers with inherited metadata - Flush and shutdown operations - Type check (mypy) passes - Error handling validated Minor fix: removed slots=True from SourceLocation dataclass for mypy compatibility. --- sdks/python/src/logwell/source_location.py | 2 +- specs/python-sdk/.progress.md | 6 ++++-- specs/python-sdk/tasks.md | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/sdks/python/src/logwell/source_location.py b/sdks/python/src/logwell/source_location.py index 46c3bdf..85a1eba 100644 --- a/sdks/python/src/logwell/source_location.py +++ b/sdks/python/src/logwell/source_location.py @@ -6,7 +6,7 @@ from dataclasses import dataclass -@dataclass(frozen=True, slots=True) +@dataclass(frozen=True) class SourceLocation: """Source location information captured from call stack. diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md index 21c4574..ca9881d 100644 --- a/specs/python-sdk/.progress.md +++ b/specs/python-sdk/.progress.md @@ -18,9 +18,10 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity - [x] 1.7 Implement source_location module - 5ae4d54 - [x] 1.8 Implement client module - 9ffb954 - [x] 1.9 Wire up __init__.py exports - 3c8711d +- [x] 1.10 POC Checkpoint - 0e0046e ## Current Task -Awaiting next task +Awaiting next task (Phase 1 POC complete) ## Learnings @@ -41,6 +42,7 @@ Awaiting next task - Python's inspect.stack() provides structured FrameInfo objects directly - much simpler than parsing JS Error.stack strings - Client uses _owns_queue flag to track if it created the queue (needed for child loggers sharing parent queue) - TYPE_CHECKING block prevents circular imports and satisfies ruff TC001 lint rule +- mypy type stubs may not support newer dataclass features like slots=True - removed for compatibility ## Next -Task 1.10: POC Checkpoint +Phase 2: Refactoring (Task 2.1: Add thread safety to queue) diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index 0e1b939..972be85 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -93,7 +93,7 @@ Focus: Validate Python SDK works end-to-end. Skip tests, accept minimal error ha - _Requirements: NFR-7_ - _Design: Architecture_ -- [ ] 1.10 POC Checkpoint +- [x] 1.10 POC Checkpoint - **Do**: Test full flow: instantiate, log, flush against mock or real server - **Done when**: Logs appear in server, no errors - **Verify**: Run manual E2E test script From fcc0e0a2b6dc37b7d28a41006c7b7b3bc741deb8 Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 17:41:43 -0700 Subject: [PATCH 13/30] refactor(python-sdk): add thread safety to queue - Verify thread safety already implemented in POC (threading.Lock on all ops) - Fix lint issues: TC001 - move type imports to TYPE_CHECKING blocks - Fix lint issues: UP035 - use collections.abc for Awaitable/Callable - All queue/timer operations protected by self._lock --- sdks/python/src/logwell/config.py | 6 ++++-- sdks/python/src/logwell/queue.py | 3 ++- sdks/python/src/logwell/transport.py | 6 ++++-- specs/python-sdk/.progress.md | 9 ++++++--- specs/python-sdk/tasks.md | 2 +- 5 files changed, 17 insertions(+), 9 deletions(-) diff --git a/sdks/python/src/logwell/config.py b/sdks/python/src/logwell/config.py index 5d4cd9a..5c7fe16 100644 --- a/sdks/python/src/logwell/config.py +++ b/sdks/python/src/logwell/config.py @@ -7,11 +7,13 @@ from __future__ import annotations import re -from typing import Any +from typing import TYPE_CHECKING, Any from urllib.parse import urlparse from logwell.errors import LogwellError, LogwellErrorCode -from logwell.types import LogwellConfig + +if TYPE_CHECKING: + from logwell.types import LogwellConfig # Default configuration values DEFAULT_CONFIG: dict[str, Any] = { diff --git a/sdks/python/src/logwell/queue.py b/sdks/python/src/logwell/queue.py index a6d24c3..28fc5bf 100644 --- a/sdks/python/src/logwell/queue.py +++ b/sdks/python/src/logwell/queue.py @@ -8,7 +8,8 @@ import asyncio import threading -from typing import TYPE_CHECKING, Awaitable, Callable +from collections.abc import Awaitable, Callable +from typing import TYPE_CHECKING from logwell.errors import LogwellError, LogwellErrorCode from logwell.types import IngestResponse, LogEntry diff --git a/sdks/python/src/logwell/transport.py b/sdks/python/src/logwell/transport.py index 3596293..453b77f 100644 --- a/sdks/python/src/logwell/transport.py +++ b/sdks/python/src/logwell/transport.py @@ -8,12 +8,14 @@ import asyncio import random -from typing import Any +from typing import TYPE_CHECKING, Any import httpx from logwell.errors import LogwellError, LogwellErrorCode -from logwell.types import IngestResponse, LogEntry, LogwellConfig + +if TYPE_CHECKING: + from logwell.types import IngestResponse, LogEntry, LogwellConfig class TransportConfig: diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md index ca9881d..c0ce859 100644 --- a/specs/python-sdk/.progress.md +++ b/specs/python-sdk/.progress.md @@ -4,7 +4,7 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity. The server is ready, and we need a Python SDK so Python developers can also use the logging platform. ## Status -- Phase: Execution (Phase 1 - POC) +- Phase: Execution (Phase 2 - Refactoring) - Started: 2026-01-16 - Artifacts generated: research.md, requirements.md, design.md, tasks.md @@ -19,9 +19,10 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity - [x] 1.8 Implement client module - 9ffb954 - [x] 1.9 Wire up __init__.py exports - 3c8711d - [x] 1.10 POC Checkpoint - 0e0046e +- [x] 2.1 Add thread safety to queue - f4c7d26 ## Current Task -Awaiting next task (Phase 1 POC complete) +Awaiting next task ## Learnings @@ -43,6 +44,8 @@ Awaiting next task (Phase 1 POC complete) - Client uses _owns_queue flag to track if it created the queue (needed for child loggers sharing parent queue) - TYPE_CHECKING block prevents circular imports and satisfies ruff TC001 lint rule - mypy type stubs may not support newer dataclass features like slots=True - removed for compatibility +- Thread safety was already implemented during POC - task 2.1 verified and fixed lint issues (TC001, UP035) +- UP035: Use collections.abc instead of typing for Awaitable, Callable in Python 3.9+ ## Next -Phase 2: Refactoring (Task 2.1: Add thread safety to queue) +Task 2.2: Improve error messages diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index 972be85..a721971 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -103,7 +103,7 @@ Focus: Validate Python SDK works end-to-end. Skip tests, accept minimal error ha After POC validated, clean up code. -- [ ] 2.1 Add thread safety to queue +- [x] 2.1 Add thread safety to queue - **Do**: Add threading.Lock to BatchQueue, protect queue/timer operations - **Files**: `sdks/python/src/logwell/queue.py` - **Done when**: No race conditions under concurrent add/flush From 25379af5d6e29e4559a01a4e4a28df63aa953fdb Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 17:44:42 -0700 Subject: [PATCH 14/30] refactor(python-sdk): improve error messages - Config errors: Include example usage and explain what's missing/wrong - API key validation: Mask key in error, explain format requirements - Endpoint validation: Show invalid URL, provide correct examples - Numeric config: Show invalid value, explain purpose of each setting - Transport errors: Include endpoint URL and timeout info - HTTP errors: Explain what each status code means and what to do - Queue overflow: Show max size, explain why it happened and fixes Each error now answers: What failed? Why? How to fix it? --- sdks/python/src/logwell/config.py | 33 +++++++++++++++++++++------- sdks/python/src/logwell/queue.py | 6 ++++- sdks/python/src/logwell/transport.py | 29 +++++++++++++++++------- specs/python-sdk/.progress.md | 5 ++++- specs/python-sdk/tasks.md | 2 +- 5 files changed, 56 insertions(+), 19 deletions(-) diff --git a/sdks/python/src/logwell/config.py b/sdks/python/src/logwell/config.py index 5c7fe16..5c84982 100644 --- a/sdks/python/src/logwell/config.py +++ b/sdks/python/src/logwell/config.py @@ -73,52 +73,69 @@ def validate_config(config: LogwellConfig) -> LogwellConfig: # Validate required fields if "api_key" not in config or not config["api_key"]: raise LogwellError( - "api_key is required", + "Configuration missing 'api_key'. " + "Provide your Logwell API key in the config: " + "Logwell({'api_key': 'lw_...', 'endpoint': '...'})", LogwellErrorCode.INVALID_CONFIG, ) if "endpoint" not in config or not config["endpoint"]: raise LogwellError( - "endpoint is required", + "Configuration missing 'endpoint'. " + "Provide your Logwell server URL in the config: " + "Logwell({'api_key': '...', 'endpoint': 'https://logs.example.com'})", LogwellErrorCode.INVALID_CONFIG, ) # Validate API key format if not validate_api_key_format(config["api_key"]): + masked_key = config["api_key"][:10] + "..." if len(config["api_key"]) > 10 else "***" raise LogwellError( - "Invalid API key format. Expected: lw_[32 characters]", + f"Invalid API key format: '{masked_key}'. " + "Expected format: 'lw_' followed by 32 alphanumeric characters. " + "Get your API key from your Logwell project settings.", LogwellErrorCode.INVALID_CONFIG, ) # Validate endpoint URL if not _is_valid_url(config["endpoint"]): raise LogwellError( - "Invalid endpoint URL", + f"Invalid endpoint URL: '{config['endpoint']}'. " + "Expected a valid URL with scheme (http:// or https://) and host. " + "Example: 'https://logs.example.com' or 'http://localhost:3000'", LogwellErrorCode.INVALID_CONFIG, ) # Validate numeric options if "batch_size" in config and config["batch_size"] <= 0: raise LogwellError( - "batch_size must be positive", + f"Invalid batch_size: {config['batch_size']}. " + "batch_size must be a positive integer (e.g., 50). " + "This controls how many logs are batched before auto-flush.", LogwellErrorCode.INVALID_CONFIG, ) if "flush_interval" in config and config["flush_interval"] <= 0: raise LogwellError( - "flush_interval must be positive", + f"Invalid flush_interval: {config['flush_interval']}. " + "flush_interval must be a positive number in seconds (e.g., 5.0). " + "This controls how often logs are automatically flushed.", LogwellErrorCode.INVALID_CONFIG, ) if "max_queue_size" in config and config["max_queue_size"] <= 0: raise LogwellError( - "max_queue_size must be positive", + f"Invalid max_queue_size: {config['max_queue_size']}. " + "max_queue_size must be a positive integer (e.g., 1000). " + "When exceeded, oldest logs are dropped to prevent memory issues.", LogwellErrorCode.INVALID_CONFIG, ) if "max_retries" in config and config["max_retries"] < 0: raise LogwellError( - "max_retries must be non-negative", + f"Invalid max_retries: {config['max_retries']}. " + "max_retries must be 0 or greater (e.g., 3). " + "Set to 0 to disable retries on network failures.", LogwellErrorCode.INVALID_CONFIG, ) diff --git a/sdks/python/src/logwell/queue.py b/sdks/python/src/logwell/queue.py index 28fc5bf..fe931a3 100644 --- a/sdks/python/src/logwell/queue.py +++ b/sdks/python/src/logwell/queue.py @@ -118,7 +118,11 @@ def add(self, entry: LogEntry) -> None: msg = dropped.get("message", "")[:50] self._config.on_error( LogwellError( - f"Queue overflow. Dropped log: {msg}...", + f"Queue overflow: max_queue_size ({self._config.max_queue_size}) exceeded. " + f"Dropped oldest log: '{msg}...'. " + "Logs are being generated faster than they can be sent. " + "Consider increasing max_queue_size, reducing log volume, " + "or calling flush() more frequently.", LogwellErrorCode.QUEUE_OVERFLOW, ) ) diff --git a/sdks/python/src/logwell/transport.py b/sdks/python/src/logwell/transport.py index 453b77f..5ec0cb2 100644 --- a/sdks/python/src/logwell/transport.py +++ b/sdks/python/src/logwell/transport.py @@ -112,7 +112,8 @@ async def send(self, logs: list[LogEntry]) -> IngestResponse: LogwellError: On failure after all retries """ last_error: LogwellError = LogwellError( - "Max retries exceeded", + f"Failed to send logs after {self._config.max_retries + 1} attempts to {self._ingest_url}. " + "Check your network connection and endpoint URL.", LogwellErrorCode.NETWORK_ERROR, None, True, @@ -159,14 +160,16 @@ async def _do_request(self, logs: list[LogEntry]) -> IngestResponse: ) except httpx.TimeoutException as e: raise LogwellError( - f"Request timeout: {e}", + f"Request to {self._ingest_url} timed out after {self._config.timeout}s. " + f"The server may be slow or unreachable. Error: {e}", LogwellErrorCode.NETWORK_ERROR, None, True, ) from e except httpx.RequestError as e: raise LogwellError( - f"Network error: {e}", + f"Network error connecting to {self._ingest_url}. " + f"Check your internet connection and that the endpoint is reachable. Error: {e}", LogwellErrorCode.NETWORK_ERROR, None, True, @@ -208,35 +211,45 @@ def _create_error(self, status: int, message: str) -> LogwellError: """ if status == 401: return LogwellError( - f"Unauthorized: {message}", + f"Authentication failed (401): {message}. " + "Your API key is invalid, expired, or missing. " + "Verify the api_key in your Logwell config matches your project settings.", LogwellErrorCode.UNAUTHORIZED, status, False, ) elif status == 400: return LogwellError( - f"Validation error: {message}", + f"Invalid log data (400): {message}. " + "The server rejected the log entries. Check that log entries " + "have valid 'level' and 'message' fields.", LogwellErrorCode.VALIDATION_ERROR, status, False, ) elif status == 429: return LogwellError( - f"Rate limited: {message}", + f"Rate limit exceeded (429): {message}. " + "Too many requests sent to the server. The SDK will automatically " + "retry with exponential backoff.", LogwellErrorCode.RATE_LIMITED, status, True, ) elif status >= 500: return LogwellError( - f"Server error: {message}", + f"Server error ({status}): {message}. " + "The Logwell server encountered an error. This is typically temporary. " + "The SDK will automatically retry with exponential backoff.", LogwellErrorCode.SERVER_ERROR, status, True, ) else: return LogwellError( - f"HTTP error {status}: {message}", + f"Unexpected HTTP error ({status}): {message}. " + "The server returned an unexpected status code. " + "Check the server logs or contact support if this persists.", LogwellErrorCode.SERVER_ERROR, status, False, diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md index c0ce859..e499111 100644 --- a/specs/python-sdk/.progress.md +++ b/specs/python-sdk/.progress.md @@ -20,6 +20,7 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity - [x] 1.9 Wire up __init__.py exports - 3c8711d - [x] 1.10 POC Checkpoint - 0e0046e - [x] 2.1 Add thread safety to queue - f4c7d26 +- [x] 2.2 Improve error messages - b7727fe ## Current Task Awaiting next task @@ -46,6 +47,8 @@ Awaiting next task - mypy type stubs may not support newer dataclass features like slots=True - removed for compatibility - Thread safety was already implemented during POC - task 2.1 verified and fixed lint issues (TC001, UP035) - UP035: Use collections.abc instead of typing for Awaitable, Callable in Python 3.9+ +- Actionable error messages should include: what failed, why, and how to fix it +- API keys should be masked in error messages to avoid leaking credentials in logs ## Next -Task 2.2: Improve error messages +Task 2.3: Add type hints throughout diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index a721971..3a249cf 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -112,7 +112,7 @@ After POC validated, clean up code. - _Requirements: NFR-2_ - _Design: Thread Safety_ -- [ ] 2.2 Improve error messages +- [x] 2.2 Improve error messages - **Do**: Add detailed context to all LogwellError raises - **Files**: `sdks/python/src/logwell/*.py` - **Done when**: Each error includes actionable message From 30185888b6105d7a277ddba41133abd7c1d8f1e9 Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 17:46:25 -0700 Subject: [PATCH 15/30] refactor(python-sdk): add strict type hints Verified mypy --strict passes on all source files. Type hints were comprehensive from initial implementation. --- specs/python-sdk/.progress.md | 4 +++- specs/python-sdk/tasks.md | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md index e499111..5b9c263 100644 --- a/specs/python-sdk/.progress.md +++ b/specs/python-sdk/.progress.md @@ -21,6 +21,7 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity - [x] 1.10 POC Checkpoint - 0e0046e - [x] 2.1 Add thread safety to queue - f4c7d26 - [x] 2.2 Improve error messages - b7727fe +- [x] 2.3 Add type hints throughout - (verified) ## Current Task Awaiting next task @@ -49,6 +50,7 @@ Awaiting next task - UP035: Use collections.abc instead of typing for Awaitable, Callable in Python 3.9+ - Actionable error messages should include: what failed, why, and how to fix it - API keys should be masked in error messages to avoid leaking credentials in logs +- mypy --strict passes already - type hints were comprehensive from the start ## Next -Task 2.3: Add type hints throughout +Task 2.4: Add README and LICENSE diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index 3a249cf..05e15b8 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -121,7 +121,7 @@ After POC validated, clean up code. - _Requirements: AC-7.4_ - _Design: Error Handling_ -- [ ] 2.3 Add type hints throughout +- [x] 2.3 Add type hints throughout - **Do**: Ensure all functions have full type annotations, run mypy strict - **Files**: `sdks/python/src/logwell/*.py` - **Done when**: `mypy --strict sdks/python/src` passes From 34fb8d3b8a34caa8d9441149684ed9ee5617e799 Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 17:48:10 -0700 Subject: [PATCH 16/30] docs(python-sdk): add README and LICENSE --- sdks/python/LICENSE | 21 +++ sdks/python/README.md | 241 +++++++++++++++++++++++++++++++++- specs/python-sdk/.progress.md | 3 +- specs/python-sdk/tasks.md | 2 +- 4 files changed, 262 insertions(+), 5 deletions(-) create mode 100644 sdks/python/LICENSE diff --git a/sdks/python/LICENSE b/sdks/python/LICENSE new file mode 100644 index 0000000..b5c7a02 --- /dev/null +++ b/sdks/python/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2026 Logwell + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/sdks/python/README.md b/sdks/python/README.md index 7f9f4e2..a82be8c 100644 --- a/sdks/python/README.md +++ b/sdks/python/README.md @@ -1,6 +1,6 @@ # Logwell Python SDK -Official Python SDK for the Logwell logging platform. +Official Python SDK for the [Logwell](https://github.com/Divkix/Logwell) logging platform. ## Installation @@ -8,6 +8,241 @@ Official Python SDK for the Logwell logging platform. pip install logwell ``` -## Usage +## Quick Start -Documentation coming soon. +```python +import asyncio +from logwell import Logwell + +# Initialize client +client = Logwell({ + 'api_key': 'lw_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', + 'endpoint': 'https://logs.example.com', + 'service': 'my-app', +}) + +# Log messages at different levels +client.debug('Debug message') +client.info('User logged in', {'user_id': '123'}) +client.warn('Disk space low', {'available_gb': 5}) +client.error('Failed to process request', {'request_id': 'abc'}) +client.fatal('Database connection lost') + +# Ensure logs are sent before exit +asyncio.run(client.shutdown()) +``` + +## Configuration + +| Option | Type | Default | Description | +|--------|------|---------|-------------| +| `api_key` | `str` | **required** | API key in format `lw_[32 chars]` | +| `endpoint` | `str` | **required** | Logwell server URL | +| `service` | `str` | `None` | Default service name for all logs | +| `batch_size` | `int` | `50` | Number of logs to batch before auto-flush | +| `flush_interval` | `float` | `5.0` | Seconds between auto-flushes | +| `max_queue_size` | `int` | `1000` | Maximum queue size before dropping oldest | +| `max_retries` | `int` | `3` | Retry attempts for failed requests | +| `capture_source_location` | `bool` | `False` | Capture file/line info | +| `on_error` | `Callable` | `None` | Error callback | +| `on_flush` | `Callable` | `None` | Flush callback | + +### Example with all options + +```python +from logwell import Logwell + +def on_error(err): + print(f'Logging error: {err}') + +def on_flush(count): + print(f'Flushed {count} logs') + +client = Logwell({ + 'api_key': 'lw_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', + 'endpoint': 'https://logs.example.com', + 'service': 'my-app', + 'batch_size': 100, + 'flush_interval': 10.0, + 'max_queue_size': 5000, + 'max_retries': 5, + 'capture_source_location': True, + 'on_error': on_error, + 'on_flush': on_flush, +}) +``` + +## API Reference + +### Logwell + +The main client class. + +#### Constructor + +```python +Logwell(config: LogwellConfig) +``` + +#### Methods + +| Method | Description | +|--------|-------------| +| `debug(message, metadata=None)` | Log at debug level | +| `info(message, metadata=None)` | Log at info level | +| `warn(message, metadata=None)` | Log at warning level | +| `error(message, metadata=None)` | Log at error level | +| `fatal(message, metadata=None)` | Log at fatal level | +| `log(entry)` | Log with explicit LogEntry | +| `flush()` | Async: Flush queued logs immediately | +| `shutdown()` | Async: Flush and stop the client | +| `child(metadata=None, service=None)` | Create child logger with context | +| `queue_size` | Property: Current queue size | + +### Child Loggers + +Create child loggers to add persistent context: + +```python +# Create child logger with request context +request_logger = client.child({'request_id': 'abc-123'}) +request_logger.info('Processing request') # Includes request_id + +# Override service name +db_logger = client.child(service='my-app-db') +db_logger.info('Query executed', {'duration_ms': 45}) +``` + +### Log Entry + +```python +from logwell import LogLevel + +# Using log() with explicit entry +client.log({ + 'level': 'info', + 'message': 'Custom log', + 'metadata': {'key': 'value'}, + 'service': 'override-service', + 'timestamp': '2024-01-01T00:00:00Z', # Optional, auto-generated +}) +``` + +### LogLevel + +Available log levels: `debug`, `info`, `warn`, `error`, `fatal` + +### LogwellConfig + +TypedDict with configuration options. See Configuration section above. + +### IngestResponse + +Response from the server after flushing logs: + +```python +{ + 'accepted': 50, # Logs accepted + 'rejected': 0, # Logs rejected (optional) + 'errors': [], # Error messages (optional) +} +``` + +## Error Handling + +### LogwellError + +All SDK errors are wrapped in `LogwellError`: + +```python +from logwell import Logwell, LogwellError, LogwellErrorCode + +try: + client = Logwell({'api_key': 'invalid', 'endpoint': 'https://example.com'}) +except LogwellError as e: + print(e.message) # Human-readable message + print(e.code) # LogwellErrorCode enum + print(e.status_code) # HTTP status (if applicable) + print(e.retryable) # Whether operation can be retried +``` + +### Error Codes + +| Code | Description | +|------|-------------| +| `INVALID_CONFIG` | Invalid configuration value | +| `NETWORK_ERROR` | Network connectivity or timeout | +| `UNAUTHORIZED` | Invalid or expired API key (401) | +| `VALIDATION_ERROR` | Invalid request data | +| `RATE_LIMITED` | Too many requests (429) | +| `SERVER_ERROR` | Server-side error (5xx) | +| `QUEUE_OVERFLOW` | Queue exceeded max size | + +### Error Callback + +Handle errors without try/catch: + +```python +def handle_error(err: Exception): + if isinstance(err, LogwellError): + if err.code == LogwellErrorCode.NETWORK_ERROR: + print('Network issue, logs will be retried') + elif err.code == LogwellErrorCode.QUEUE_OVERFLOW: + print('Queue full, some logs dropped') + +client = Logwell({ + 'api_key': 'lw_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', + 'endpoint': 'https://logs.example.com', + 'on_error': handle_error, +}) +``` + +## Async Usage + +The SDK uses async for flush and shutdown operations: + +```python +import asyncio +from logwell import Logwell + +async def main(): + client = Logwell({ + 'api_key': 'lw_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', + 'endpoint': 'https://logs.example.com', + }) + + client.info('Starting app') + + # Manual flush + response = await client.flush() + print(f'Sent {response["accepted"]} logs') + + # Shutdown gracefully + await client.shutdown() + +asyncio.run(main()) +``` + +## Source Location Capture + +Enable automatic file/line capture: + +```python +client = Logwell({ + 'api_key': 'lw_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', + 'endpoint': 'https://logs.example.com', + 'capture_source_location': True, +}) + +client.info('This log includes file and line number') +# Log includes: source_file='app.py', line_number=42 +``` + +## Requirements + +- Python 3.9+ +- httpx >= 0.25.0 + +## License + +MIT License - see [LICENSE](LICENSE) for details. diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md index 5b9c263..7ea10ec 100644 --- a/specs/python-sdk/.progress.md +++ b/specs/python-sdk/.progress.md @@ -22,6 +22,7 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity - [x] 2.1 Add thread safety to queue - f4c7d26 - [x] 2.2 Improve error messages - b7727fe - [x] 2.3 Add type hints throughout - (verified) +- [x] 2.4 Add README and LICENSE - b6f61bd ## Current Task Awaiting next task @@ -53,4 +54,4 @@ Awaiting next task - mypy --strict passes already - type hints were comprehensive from the start ## Next -Task 2.4: Add README and LICENSE +Task 3.1: Create test fixtures diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index 05e15b8..a9bbd2b 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -130,7 +130,7 @@ After POC validated, clean up code. - _Requirements: NFR-6_ - _Design: Types_ -- [ ] 2.4 Add README and LICENSE +- [x] 2.4 Add README and LICENSE - **Do**: Create README.md with usage examples, create LICENSE (MIT) - **Files**: `sdks/python/README.md`, `sdks/python/LICENSE` - **Done when**: README has install, basic usage, API reference sections From 9e8a29bf68bedbe415c43959159ba08b8ab51296 Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 17:51:18 -0700 Subject: [PATCH 17/30] test(python-sdk): add test fixtures Add pytest fixtures for SDK testing: - Valid/invalid configuration fixtures - Mock HTTP response fixtures (success, error, rate limit) - Sample log entry fixtures - Callback capture helpers for testing events - Factory fixtures for creating test data --- sdks/python/tests/__init__.py | 1 + sdks/python/tests/conftest.py | 477 ++++++++++++++++++++++++++++++++++ specs/python-sdk/.progress.md | 8 +- specs/python-sdk/tasks.md | 2 +- 4 files changed, 485 insertions(+), 3 deletions(-) create mode 100644 sdks/python/tests/__init__.py create mode 100644 sdks/python/tests/conftest.py diff --git a/sdks/python/tests/__init__.py b/sdks/python/tests/__init__.py new file mode 100644 index 0000000..067ca77 --- /dev/null +++ b/sdks/python/tests/__init__.py @@ -0,0 +1 @@ +"""Tests for the Logwell Python SDK.""" diff --git a/sdks/python/tests/conftest.py b/sdks/python/tests/conftest.py new file mode 100644 index 0000000..c31edca --- /dev/null +++ b/sdks/python/tests/conftest.py @@ -0,0 +1,477 @@ +"""Pytest fixtures for Logwell SDK tests. + +Provides reusable fixtures for: +- Valid/invalid configurations +- Mock HTTP responses +- Sample log entries +""" + +from __future__ import annotations + +from datetime import datetime, timezone +from typing import TYPE_CHECKING, Any +from unittest.mock import MagicMock + +import httpx +import pytest + +if TYPE_CHECKING: + from collections.abc import Callable + + from logwell.types import IngestResponse, LogEntry, LogwellConfig + + +# ============================================================================= +# Valid Configurations +# ============================================================================= + + +@pytest.fixture +def valid_api_key() -> str: + """A valid API key in lw_[32 chars] format.""" + return "lw_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + +@pytest.fixture +def valid_endpoint() -> str: + """A valid HTTPS endpoint URL.""" + return "https://logs.example.com" + + +@pytest.fixture +def valid_config(valid_api_key: str, valid_endpoint: str) -> LogwellConfig: + """Minimal valid configuration with required fields only.""" + return { + "api_key": valid_api_key, + "endpoint": valid_endpoint, + } + + +@pytest.fixture +def valid_config_full(valid_api_key: str, valid_endpoint: str) -> LogwellConfig: + """Complete valid configuration with all fields.""" + return { + "api_key": valid_api_key, + "endpoint": valid_endpoint, + "service": "test-service", + "batch_size": 100, + "flush_interval": 10.0, + "max_queue_size": 500, + "max_retries": 5, + "capture_source_location": True, + } + + +@pytest.fixture +def valid_config_localhost() -> LogwellConfig: + """Valid config with localhost endpoint (for local testing).""" + return { + "api_key": "lw_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + "endpoint": "http://localhost:3000", + } + + +# ============================================================================= +# Invalid Configurations +# ============================================================================= + + +@pytest.fixture +def invalid_config_missing_api_key(valid_endpoint: str) -> dict[str, Any]: + """Config missing required api_key field.""" + return { + "endpoint": valid_endpoint, + } + + +@pytest.fixture +def invalid_config_missing_endpoint(valid_api_key: str) -> dict[str, Any]: + """Config missing required endpoint field.""" + return { + "api_key": valid_api_key, + } + + +@pytest.fixture +def invalid_config_empty_api_key(valid_endpoint: str) -> dict[str, Any]: + """Config with empty api_key string.""" + return { + "api_key": "", + "endpoint": valid_endpoint, + } + + +@pytest.fixture +def invalid_config_empty_endpoint(valid_api_key: str) -> dict[str, Any]: + """Config with empty endpoint string.""" + return { + "api_key": valid_api_key, + "endpoint": "", + } + + +@pytest.fixture +def invalid_config_bad_api_key_format(valid_endpoint: str) -> dict[str, Any]: + """Config with malformed API key (wrong prefix).""" + return { + "api_key": "bad_key_format", + "endpoint": valid_endpoint, + } + + +@pytest.fixture +def invalid_config_short_api_key(valid_endpoint: str) -> dict[str, Any]: + """Config with API key too short.""" + return { + "api_key": "lw_short", + "endpoint": valid_endpoint, + } + + +@pytest.fixture +def invalid_config_long_api_key(valid_endpoint: str) -> dict[str, Any]: + """Config with API key too long.""" + return { + "api_key": "lw_" + "a" * 40, + "endpoint": valid_endpoint, + } + + +@pytest.fixture +def invalid_config_bad_endpoint_format(valid_api_key: str) -> dict[str, Any]: + """Config with malformed endpoint URL (missing scheme).""" + return { + "api_key": valid_api_key, + "endpoint": "logs.example.com", + } + + +@pytest.fixture +def invalid_config_bad_endpoint_relative(valid_api_key: str) -> dict[str, Any]: + """Config with relative endpoint path.""" + return { + "api_key": valid_api_key, + "endpoint": "/api/logs", + } + + +@pytest.fixture +def invalid_config_negative_batch_size( + valid_api_key: str, valid_endpoint: str +) -> dict[str, Any]: + """Config with negative batch_size.""" + return { + "api_key": valid_api_key, + "endpoint": valid_endpoint, + "batch_size": -1, + } + + +@pytest.fixture +def invalid_config_zero_batch_size( + valid_api_key: str, valid_endpoint: str +) -> dict[str, Any]: + """Config with zero batch_size.""" + return { + "api_key": valid_api_key, + "endpoint": valid_endpoint, + "batch_size": 0, + } + + +@pytest.fixture +def invalid_config_negative_flush_interval( + valid_api_key: str, valid_endpoint: str +) -> dict[str, Any]: + """Config with negative flush_interval.""" + return { + "api_key": valid_api_key, + "endpoint": valid_endpoint, + "flush_interval": -1.0, + } + + +@pytest.fixture +def invalid_config_negative_max_queue_size( + valid_api_key: str, valid_endpoint: str +) -> dict[str, Any]: + """Config with negative max_queue_size.""" + return { + "api_key": valid_api_key, + "endpoint": valid_endpoint, + "max_queue_size": -100, + } + + +@pytest.fixture +def invalid_config_negative_max_retries( + valid_api_key: str, valid_endpoint: str +) -> dict[str, Any]: + """Config with negative max_retries.""" + return { + "api_key": valid_api_key, + "endpoint": valid_endpoint, + "max_retries": -1, + } + + +@pytest.fixture +def invalid_configs( + invalid_config_missing_api_key: dict[str, Any], + invalid_config_missing_endpoint: dict[str, Any], + invalid_config_empty_api_key: dict[str, Any], + invalid_config_empty_endpoint: dict[str, Any], + invalid_config_bad_api_key_format: dict[str, Any], + invalid_config_short_api_key: dict[str, Any], + invalid_config_long_api_key: dict[str, Any], + invalid_config_bad_endpoint_format: dict[str, Any], + invalid_config_negative_batch_size: dict[str, Any], + invalid_config_zero_batch_size: dict[str, Any], + invalid_config_negative_flush_interval: dict[str, Any], + invalid_config_negative_max_queue_size: dict[str, Any], + invalid_config_negative_max_retries: dict[str, Any], +) -> list[dict[str, Any]]: + """Collection of all invalid configurations for parametrized tests.""" + return [ + invalid_config_missing_api_key, + invalid_config_missing_endpoint, + invalid_config_empty_api_key, + invalid_config_empty_endpoint, + invalid_config_bad_api_key_format, + invalid_config_short_api_key, + invalid_config_long_api_key, + invalid_config_bad_endpoint_format, + invalid_config_negative_batch_size, + invalid_config_zero_batch_size, + invalid_config_negative_flush_interval, + invalid_config_negative_max_queue_size, + invalid_config_negative_max_retries, + ] + + +# ============================================================================= +# Mock HTTP Responses +# ============================================================================= + + +@pytest.fixture +def mock_success_response() -> IngestResponse: + """Successful ingest API response.""" + return { + "accepted": 10, + } + + +@pytest.fixture +def mock_partial_success_response() -> IngestResponse: + """Partial success response with some rejections.""" + return { + "accepted": 8, + "rejected": 2, + "errors": ["Invalid log format at index 3", "Missing timestamp at index 7"], + } + + +@pytest.fixture +def mock_full_rejection_response() -> IngestResponse: + """Response where all logs were rejected.""" + return { + "accepted": 0, + "rejected": 10, + "errors": ["All logs failed validation"], + } + + +@pytest.fixture +def mock_httpx_success_response(mock_success_response: IngestResponse) -> httpx.Response: + """Mock httpx.Response for successful request.""" + return httpx.Response( + status_code=200, + json=mock_success_response, + ) + + +@pytest.fixture +def mock_httpx_unauthorized_response() -> httpx.Response: + """Mock httpx.Response for 401 Unauthorized.""" + return httpx.Response( + status_code=401, + json={"error": "Invalid API key"}, + ) + + +@pytest.fixture +def mock_httpx_rate_limited_response() -> httpx.Response: + """Mock httpx.Response for 429 Rate Limited.""" + return httpx.Response( + status_code=429, + json={"error": "Too many requests"}, + headers={"Retry-After": "60"}, + ) + + +@pytest.fixture +def mock_httpx_server_error_response() -> httpx.Response: + """Mock httpx.Response for 500 Server Error.""" + return httpx.Response( + status_code=500, + json={"error": "Internal server error"}, + ) + + +@pytest.fixture +def mock_httpx_validation_error_response() -> httpx.Response: + """Mock httpx.Response for 400 Bad Request.""" + return httpx.Response( + status_code=400, + json={"error": "Validation failed", "details": ["Invalid log level"]}, + ) + + +# ============================================================================= +# Sample Log Entries +# ============================================================================= + + +@pytest.fixture +def sample_log_entry() -> LogEntry: + """A minimal valid log entry.""" + return { + "level": "info", + "message": "Test log message", + } + + +@pytest.fixture +def sample_log_entry_full() -> LogEntry: + """A complete log entry with all fields populated.""" + return { + "level": "error", + "message": "Something went wrong", + "timestamp": datetime.now(timezone.utc).isoformat(), + "service": "test-service", + "metadata": {"user_id": "123", "request_id": "abc-def"}, + "source_file": "/app/main.py", + "line_number": 42, + } + + +@pytest.fixture +def sample_log_entries() -> list[LogEntry]: + """A batch of varied log entries.""" + return [ + {"level": "debug", "message": "Debug message"}, + {"level": "info", "message": "Info message"}, + {"level": "warn", "message": "Warning message"}, + {"level": "error", "message": "Error message"}, + {"level": "fatal", "message": "Fatal message"}, + ] + + +@pytest.fixture +def sample_log_entry_with_metadata() -> LogEntry: + """Log entry with complex metadata.""" + return { + "level": "info", + "message": "User action", + "metadata": { + "user_id": 12345, + "action": "login", + "ip_address": "192.168.1.1", + "nested": {"key": "value"}, + }, + } + + +# ============================================================================= +# Callback Fixtures +# ============================================================================= + + +@pytest.fixture +def mock_on_error() -> MagicMock: + """Mock on_error callback for testing error handling.""" + return MagicMock() + + +@pytest.fixture +def mock_on_flush() -> MagicMock: + """Mock on_flush callback for testing flush events.""" + return MagicMock() + + +@pytest.fixture +def capture_errors() -> tuple[list[Exception], Callable[[Exception], None]]: + """Capture errors in a list for assertions. + + Returns: + Tuple of (error_list, callback_function) + """ + errors: list[Exception] = [] + + def on_error(error: Exception) -> None: + errors.append(error) + + return errors, on_error + + +@pytest.fixture +def capture_flushes() -> tuple[list[int], Callable[[int], None]]: + """Capture flush counts in a list for assertions. + + Returns: + Tuple of (count_list, callback_function) + """ + counts: list[int] = [] + + def on_flush(count: int) -> None: + counts.append(count) + + return counts, on_flush + + +# ============================================================================= +# Test Helpers +# ============================================================================= + + +@pytest.fixture +def timestamp_now() -> str: + """Current UTC timestamp in ISO format.""" + return datetime.now(timezone.utc).isoformat() + + +@pytest.fixture +def make_log_entry() -> Callable[..., LogEntry]: + """Factory fixture for creating log entries with custom fields.""" + + def _make( + level: str = "info", + message: str = "test message", + **kwargs: Any, + ) -> LogEntry: + entry: LogEntry = { + "level": level, # type: ignore[typeddict-item] + "message": message, + } + entry.update(kwargs) # type: ignore[typeddict-item] + return entry + + return _make + + +@pytest.fixture +def make_config( + valid_api_key: str, valid_endpoint: str +) -> Callable[..., LogwellConfig]: + """Factory fixture for creating configs with custom overrides.""" + + def _make(**overrides: Any) -> LogwellConfig: + config: LogwellConfig = { + "api_key": valid_api_key, + "endpoint": valid_endpoint, + } + config.update(overrides) # type: ignore[typeddict-item] + return config + + return _make diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md index 7ea10ec..bffd959 100644 --- a/specs/python-sdk/.progress.md +++ b/specs/python-sdk/.progress.md @@ -4,7 +4,7 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity. The server is ready, and we need a Python SDK so Python developers can also use the logging platform. ## Status -- Phase: Execution (Phase 2 - Refactoring) +- Phase: Execution (Phase 3 - Testing) - Started: 2026-01-16 - Artifacts generated: research.md, requirements.md, design.md, tasks.md @@ -23,6 +23,7 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity - [x] 2.2 Improve error messages - b7727fe - [x] 2.3 Add type hints throughout - (verified) - [x] 2.4 Add README and LICENSE - b6f61bd +- [x] 3.1 Create test fixtures - 607f87a ## Current Task Awaiting next task @@ -52,6 +53,9 @@ Awaiting next task - Actionable error messages should include: what failed, why, and how to fix it - API keys should be masked in error messages to avoid leaking credentials in logs - mypy --strict passes already - type hints were comprehensive from the start +- pytest --collect-only exits 5 with no tests (expected) but fixtures load fine +- respx added for HTTP mocking in integration tests +- 39 fixtures defined covering valid/invalid configs, mock responses, sample entries ## Next -Task 3.1: Create test fixtures +Task 3.2: Unit tests for config diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index a9bbd2b..37f329e 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -140,7 +140,7 @@ After POC validated, clean up code. ## Phase 3: Testing -- [ ] 3.1 Create test fixtures +- [x] 3.1 Create test fixtures - **Do**: Create conftest.py with valid/invalid configs, mock responses - **Files**: `sdks/python/tests/__init__.py`, `sdks/python/tests/conftest.py` - **Done when**: Fixtures importable in tests From ae134b39b25b686d103d5167bfdd8a15dd298c55 Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 17:56:10 -0700 Subject: [PATCH 18/30] test(python-sdk): add config unit tests - Add tests/unit/__init__.py and tests/unit/test_config.py - 78 tests covering validate_api_key_format and validate_config - 100% code coverage of config.py module - Test cases for: - Valid API keys (lowercase, uppercase, mixed case, numbers, hyphens, underscores) - Invalid API keys (wrong prefix, length, invalid chars, empty, None, wrong types) - Missing/empty required fields (api_key, endpoint) - Invalid endpoint URLs (no scheme, relative paths) - Numeric bounds (batch_size, flush_interval, max_queue_size, max_retries) - Default value merging - Optional fields (service, on_error, on_flush, capture_source_location) - Edge cases and validation order --- sdks/python/tests/unit/__init__.py | 1 + sdks/python/tests/unit/test_config.py | 835 ++++++++++++++++++++++++++ specs/python-sdk/.progress.md | 7 +- specs/python-sdk/tasks.md | 2 +- 4 files changed, 843 insertions(+), 2 deletions(-) create mode 100644 sdks/python/tests/unit/__init__.py create mode 100644 sdks/python/tests/unit/test_config.py diff --git a/sdks/python/tests/unit/__init__.py b/sdks/python/tests/unit/__init__.py new file mode 100644 index 0000000..6756570 --- /dev/null +++ b/sdks/python/tests/unit/__init__.py @@ -0,0 +1 @@ +"""Unit tests for the Logwell Python SDK.""" diff --git a/sdks/python/tests/unit/test_config.py b/sdks/python/tests/unit/test_config.py new file mode 100644 index 0000000..ba9454e --- /dev/null +++ b/sdks/python/tests/unit/test_config.py @@ -0,0 +1,835 @@ +"""Unit tests for config.py - validate_config and validate_api_key_format. + +Tests cover: +- validate_api_key_format: valid keys, invalid keys (wrong prefix, wrong length, invalid chars) +- validate_config: missing/empty required fields, invalid formats, numeric bounds +- validate_config: default value merging and optional field handling +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +import pytest + +from logwell.config import ( + API_KEY_REGEX, + DEFAULT_CONFIG, + validate_api_key_format, + validate_config, +) +from logwell.errors import LogwellError, LogwellErrorCode + +if TYPE_CHECKING: + from collections.abc import Callable + + from logwell.types import LogwellConfig + + +# ============================================================================= +# validate_api_key_format Tests +# ============================================================================= + + +class TestValidateApiKeyFormat: + """Tests for validate_api_key_format function.""" + + def test_valid_api_key_lowercase(self) -> None: + """Valid key with lowercase alphanumeric chars.""" + assert validate_api_key_format("lw_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") is True + + def test_valid_api_key_uppercase(self) -> None: + """Valid key with uppercase alphanumeric chars.""" + assert validate_api_key_format("lw_AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA") is True + + def test_valid_api_key_mixed_case(self) -> None: + """Valid key with mixed case alphanumeric chars.""" + assert validate_api_key_format("lw_AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPp") is True + + def test_valid_api_key_with_numbers(self) -> None: + """Valid key with numbers.""" + assert validate_api_key_format("lw_12345678901234567890123456789012") is True + + def test_valid_api_key_with_hyphens(self) -> None: + """Valid key with hyphens.""" + # 32 chars: abcd-efgh-ijkl-mnop-qrst-uvwx012 + assert validate_api_key_format("lw_abcd-efgh-ijkl-mnop-qrst-uvwx012") is True + + def test_valid_api_key_with_underscores(self) -> None: + """Valid key with underscores after prefix.""" + # 32 chars: abcd_efgh_ijkl_mnop_qrst_uvwx_012 + assert validate_api_key_format("lw_abcd_efgh_ijkl_mnop_qrst_uvwx012") is True + + def test_valid_api_key_mixed_special_chars(self) -> None: + """Valid key with mixed hyphens, underscores, and alphanumeric.""" + # 32 chars: aB3_Cd5-Ef7_Gh9-Ij1_Kl3-Mn5_Op7XY + assert validate_api_key_format("lw_aB3_Cd5-Ef7_Gh9-Ij1_Kl3-Mn5Op7XY") is True + + def test_invalid_api_key_wrong_prefix(self) -> None: + """Invalid key with wrong prefix.""" + assert validate_api_key_format("pk_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") is False + + def test_invalid_api_key_no_prefix(self) -> None: + """Invalid key with no prefix.""" + assert validate_api_key_format("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") is False + + def test_invalid_api_key_too_short(self) -> None: + """Invalid key that is too short.""" + assert validate_api_key_format("lw_short") is False + + def test_invalid_api_key_too_long(self) -> None: + """Invalid key that is too long.""" + assert validate_api_key_format("lw_" + "a" * 40) is False + + def test_invalid_api_key_31_chars_after_prefix(self) -> None: + """Invalid key with exactly 31 chars after prefix (off by one).""" + assert validate_api_key_format("lw_" + "a" * 31) is False + + def test_invalid_api_key_33_chars_after_prefix(self) -> None: + """Invalid key with exactly 33 chars after prefix (off by one).""" + assert validate_api_key_format("lw_" + "a" * 33) is False + + def test_invalid_api_key_special_chars(self) -> None: + """Invalid key with invalid special characters.""" + assert validate_api_key_format("lw_aaaaaaaaaa!@#$%^&*()aaaaaaaaaa") is False + + def test_invalid_api_key_spaces(self) -> None: + """Invalid key with spaces.""" + assert validate_api_key_format("lw_aaaa aaaa aaaa aaaa aaaa aaaa a") is False + + def test_invalid_api_key_empty_string(self) -> None: + """Invalid key - empty string.""" + assert validate_api_key_format("") is False + + def test_invalid_api_key_none(self) -> None: + """Invalid key - None value.""" + assert validate_api_key_format(None) is False # type: ignore[arg-type] + + def test_invalid_api_key_number(self) -> None: + """Invalid key - number instead of string.""" + assert validate_api_key_format(12345) is False # type: ignore[arg-type] + + def test_invalid_api_key_list(self) -> None: + """Invalid key - list instead of string.""" + assert validate_api_key_format(["lw_aaa"]) is False # type: ignore[arg-type] + + def test_invalid_api_key_dict(self) -> None: + """Invalid key - dict instead of string.""" + assert validate_api_key_format({"key": "value"}) is False # type: ignore[arg-type] + + def test_api_key_regex_pattern(self) -> None: + """Verify the regex pattern is correct.""" + # Pattern should be: ^lw_[A-Za-z0-9_-]{32}$ + assert API_KEY_REGEX.pattern == r"^lw_[A-Za-z0-9_-]{32}$" + + +# ============================================================================= +# validate_config Tests - Missing/Empty Required Fields +# ============================================================================= + + +class TestValidateConfigMissingFields: + """Tests for validate_config with missing or empty required fields.""" + + def test_missing_api_key(self, valid_endpoint: str) -> None: + """Raises LogwellError when api_key is missing.""" + config: dict[str, Any] = {"endpoint": valid_endpoint} + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "api_key" in exc_info.value.message + + def test_empty_api_key(self, valid_endpoint: str) -> None: + """Raises LogwellError when api_key is empty string.""" + config: dict[str, Any] = {"api_key": "", "endpoint": valid_endpoint} + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "api_key" in exc_info.value.message + + def test_none_api_key(self, valid_endpoint: str) -> None: + """Raises LogwellError when api_key is None.""" + config: dict[str, Any] = {"api_key": None, "endpoint": valid_endpoint} + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "api_key" in exc_info.value.message + + def test_missing_endpoint(self, valid_api_key: str) -> None: + """Raises LogwellError when endpoint is missing.""" + config: dict[str, Any] = {"api_key": valid_api_key} + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "endpoint" in exc_info.value.message + + def test_empty_endpoint(self, valid_api_key: str) -> None: + """Raises LogwellError when endpoint is empty string.""" + config: dict[str, Any] = {"api_key": valid_api_key, "endpoint": ""} + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "endpoint" in exc_info.value.message + + def test_none_endpoint(self, valid_api_key: str) -> None: + """Raises LogwellError when endpoint is None.""" + config: dict[str, Any] = {"api_key": valid_api_key, "endpoint": None} + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "endpoint" in exc_info.value.message + + def test_both_missing(self) -> None: + """Raises LogwellError when both required fields are missing.""" + config: dict[str, Any] = {} + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + # api_key is checked first + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "api_key" in exc_info.value.message + + +# ============================================================================= +# validate_config Tests - Invalid API Key Format +# ============================================================================= + + +class TestValidateConfigInvalidApiKey: + """Tests for validate_config with invalid API key formats.""" + + def test_wrong_prefix(self, valid_endpoint: str) -> None: + """Raises LogwellError when API key has wrong prefix.""" + config: dict[str, Any] = { + "api_key": "pk_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "endpoint": valid_endpoint, + } + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "Invalid API key format" in exc_info.value.message + + def test_too_short(self, valid_endpoint: str) -> None: + """Raises LogwellError when API key is too short.""" + config: dict[str, Any] = { + "api_key": "lw_short", + "endpoint": valid_endpoint, + } + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "Invalid API key format" in exc_info.value.message + + def test_too_long(self, valid_endpoint: str) -> None: + """Raises LogwellError when API key is too long.""" + config: dict[str, Any] = { + "api_key": "lw_" + "a" * 40, + "endpoint": valid_endpoint, + } + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "Invalid API key format" in exc_info.value.message + + def test_invalid_chars(self, valid_endpoint: str) -> None: + """Raises LogwellError when API key has invalid characters.""" + config: dict[str, Any] = { + "api_key": "lw_aaaaaaaaaa!@#$aaaaaaaaaaaaaaaa", + "endpoint": valid_endpoint, + } + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "Invalid API key format" in exc_info.value.message + + def test_error_message_masks_key(self, valid_endpoint: str) -> None: + """Error message masks the API key for security.""" + long_key = "lw_this_is_a_very_long_invalid_key_that_should_be_masked" + config: dict[str, Any] = { + "api_key": long_key, + "endpoint": valid_endpoint, + } + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + # Key should be masked after first 10 chars + assert long_key not in exc_info.value.message + assert "lw_this_is..." in exc_info.value.message + + def test_error_message_short_key_masked(self, valid_endpoint: str) -> None: + """Error message masks short API keys as ***.""" + short_key = "lw_abc" + config: dict[str, Any] = { + "api_key": short_key, + "endpoint": valid_endpoint, + } + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + # Short keys should show *** + assert short_key not in exc_info.value.message + assert "***" in exc_info.value.message + + +# ============================================================================= +# validate_config Tests - Invalid Endpoint URL +# ============================================================================= + + +class TestValidateConfigInvalidEndpoint: + """Tests for validate_config with invalid endpoint URLs.""" + + def test_missing_scheme(self, valid_api_key: str) -> None: + """Raises LogwellError when endpoint has no scheme.""" + config: dict[str, Any] = { + "api_key": valid_api_key, + "endpoint": "logs.example.com", + } + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "Invalid endpoint URL" in exc_info.value.message + + def test_relative_path(self, valid_api_key: str) -> None: + """Raises LogwellError when endpoint is a relative path.""" + config: dict[str, Any] = { + "api_key": valid_api_key, + "endpoint": "/api/logs", + } + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "Invalid endpoint URL" in exc_info.value.message + + def test_scheme_only(self, valid_api_key: str) -> None: + """Raises LogwellError when endpoint is scheme only.""" + config: dict[str, Any] = { + "api_key": valid_api_key, + "endpoint": "https://", + } + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "Invalid endpoint URL" in exc_info.value.message + + def test_valid_http_endpoint(self, valid_api_key: str) -> None: + """Accepts HTTP endpoint (for local development).""" + config: dict[str, Any] = { + "api_key": valid_api_key, + "endpoint": "http://localhost:3000", + } + + result = validate_config(config) # type: ignore[arg-type] + assert result["endpoint"] == "http://localhost:3000" + + def test_valid_https_endpoint(self, valid_api_key: str) -> None: + """Accepts HTTPS endpoint.""" + config: dict[str, Any] = { + "api_key": valid_api_key, + "endpoint": "https://logs.example.com", + } + + result = validate_config(config) # type: ignore[arg-type] + assert result["endpoint"] == "https://logs.example.com" + + def test_endpoint_with_path(self, valid_api_key: str) -> None: + """Accepts endpoint with path.""" + config: dict[str, Any] = { + "api_key": valid_api_key, + "endpoint": "https://logs.example.com/v1", + } + + result = validate_config(config) # type: ignore[arg-type] + assert result["endpoint"] == "https://logs.example.com/v1" + + def test_endpoint_with_port(self, valid_api_key: str) -> None: + """Accepts endpoint with port.""" + config: dict[str, Any] = { + "api_key": valid_api_key, + "endpoint": "https://logs.example.com:8443", + } + + result = validate_config(config) # type: ignore[arg-type] + assert result["endpoint"] == "https://logs.example.com:8443" + + +# ============================================================================= +# validate_config Tests - Numeric Bounds +# ============================================================================= + + +class TestValidateConfigNumericBounds: + """Tests for validate_config with numeric boundary conditions.""" + + # batch_size tests + def test_batch_size_negative(self, valid_config: LogwellConfig) -> None: + """Raises LogwellError when batch_size is negative.""" + config = dict(valid_config) + config["batch_size"] = -1 + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "batch_size" in exc_info.value.message + + def test_batch_size_zero(self, valid_config: LogwellConfig) -> None: + """Raises LogwellError when batch_size is zero.""" + config = dict(valid_config) + config["batch_size"] = 0 + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "batch_size" in exc_info.value.message + + def test_batch_size_positive(self, valid_config: LogwellConfig) -> None: + """Accepts batch_size when positive.""" + config = dict(valid_config) + config["batch_size"] = 1 + + result = validate_config(config) # type: ignore[arg-type] + assert result["batch_size"] == 1 + + def test_batch_size_large(self, valid_config: LogwellConfig) -> None: + """Accepts large batch_size values.""" + config = dict(valid_config) + config["batch_size"] = 10000 + + result = validate_config(config) # type: ignore[arg-type] + assert result["batch_size"] == 10000 + + # flush_interval tests + def test_flush_interval_negative(self, valid_config: LogwellConfig) -> None: + """Raises LogwellError when flush_interval is negative.""" + config = dict(valid_config) + config["flush_interval"] = -1.0 + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "flush_interval" in exc_info.value.message + + def test_flush_interval_zero(self, valid_config: LogwellConfig) -> None: + """Raises LogwellError when flush_interval is zero.""" + config = dict(valid_config) + config["flush_interval"] = 0.0 + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "flush_interval" in exc_info.value.message + + def test_flush_interval_small_positive(self, valid_config: LogwellConfig) -> None: + """Accepts small positive flush_interval.""" + config = dict(valid_config) + config["flush_interval"] = 0.001 + + result = validate_config(config) # type: ignore[arg-type] + assert result["flush_interval"] == 0.001 + + def test_flush_interval_integer(self, valid_config: LogwellConfig) -> None: + """Accepts integer flush_interval.""" + config = dict(valid_config) + config["flush_interval"] = 10 + + result = validate_config(config) # type: ignore[arg-type] + assert result["flush_interval"] == 10 + + # max_queue_size tests + def test_max_queue_size_negative(self, valid_config: LogwellConfig) -> None: + """Raises LogwellError when max_queue_size is negative.""" + config = dict(valid_config) + config["max_queue_size"] = -100 + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "max_queue_size" in exc_info.value.message + + def test_max_queue_size_zero(self, valid_config: LogwellConfig) -> None: + """Raises LogwellError when max_queue_size is zero.""" + config = dict(valid_config) + config["max_queue_size"] = 0 + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "max_queue_size" in exc_info.value.message + + def test_max_queue_size_positive(self, valid_config: LogwellConfig) -> None: + """Accepts positive max_queue_size.""" + config = dict(valid_config) + config["max_queue_size"] = 1 + + result = validate_config(config) # type: ignore[arg-type] + assert result["max_queue_size"] == 1 + + # max_retries tests + def test_max_retries_negative(self, valid_config: LogwellConfig) -> None: + """Raises LogwellError when max_retries is negative.""" + config = dict(valid_config) + config["max_retries"] = -1 + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "max_retries" in exc_info.value.message + + def test_max_retries_zero(self, valid_config: LogwellConfig) -> None: + """Accepts max_retries of zero (disables retries).""" + config = dict(valid_config) + config["max_retries"] = 0 + + result = validate_config(config) # type: ignore[arg-type] + assert result["max_retries"] == 0 + + def test_max_retries_positive(self, valid_config: LogwellConfig) -> None: + """Accepts positive max_retries.""" + config = dict(valid_config) + config["max_retries"] = 10 + + result = validate_config(config) # type: ignore[arg-type] + assert result["max_retries"] == 10 + + +# ============================================================================= +# validate_config Tests - Default Value Merging +# ============================================================================= + + +class TestValidateConfigDefaults: + """Tests for validate_config default value merging.""" + + def test_applies_all_defaults(self, valid_config: LogwellConfig) -> None: + """Applies all default values when not provided.""" + result = validate_config(valid_config) + + assert result["batch_size"] == DEFAULT_CONFIG["batch_size"] + assert result["flush_interval"] == DEFAULT_CONFIG["flush_interval"] + assert result["max_queue_size"] == DEFAULT_CONFIG["max_queue_size"] + assert result["max_retries"] == DEFAULT_CONFIG["max_retries"] + assert result["capture_source_location"] == DEFAULT_CONFIG["capture_source_location"] + + def test_preserves_provided_values(self, valid_config_full: LogwellConfig) -> None: + """Preserves user-provided values over defaults.""" + result = validate_config(valid_config_full) + + assert result["batch_size"] == 100 + assert result["flush_interval"] == 10.0 + assert result["max_queue_size"] == 500 + assert result["max_retries"] == 5 + assert result["capture_source_location"] is True + + def test_partial_overrides(self, valid_config: LogwellConfig) -> None: + """Allows partial override of defaults.""" + config = dict(valid_config) + config["batch_size"] = 200 + config["max_retries"] = 10 + + result = validate_config(config) # type: ignore[arg-type] + + # Overridden values + assert result["batch_size"] == 200 + assert result["max_retries"] == 10 + # Default values + assert result["flush_interval"] == DEFAULT_CONFIG["flush_interval"] + assert result["max_queue_size"] == DEFAULT_CONFIG["max_queue_size"] + assert result["capture_source_location"] == DEFAULT_CONFIG["capture_source_location"] + + def test_default_config_values(self) -> None: + """Verify DEFAULT_CONFIG values are correct.""" + assert DEFAULT_CONFIG["batch_size"] == 50 + assert DEFAULT_CONFIG["flush_interval"] == 5.0 + assert DEFAULT_CONFIG["max_queue_size"] == 1000 + assert DEFAULT_CONFIG["max_retries"] == 3 + assert DEFAULT_CONFIG["capture_source_location"] is False + + +# ============================================================================= +# validate_config Tests - Optional Fields +# ============================================================================= + + +class TestValidateConfigOptionalFields: + """Tests for validate_config optional field handling.""" + + def test_service_preserved(self, valid_config: LogwellConfig) -> None: + """Preserves service name when provided.""" + config = dict(valid_config) + config["service"] = "my-service" + + result = validate_config(config) # type: ignore[arg-type] + assert result["service"] == "my-service" + + def test_service_not_added_by_default(self, valid_config: LogwellConfig) -> None: + """Does not add service when not provided.""" + result = validate_config(valid_config) + assert "service" not in result + + def test_on_error_callback_preserved( + self, valid_config: LogwellConfig, mock_on_error: Any + ) -> None: + """Preserves on_error callback when provided.""" + config = dict(valid_config) + config["on_error"] = mock_on_error + + result = validate_config(config) # type: ignore[arg-type] + assert result["on_error"] is mock_on_error + + def test_on_error_not_added_by_default(self, valid_config: LogwellConfig) -> None: + """Does not add on_error when not provided.""" + result = validate_config(valid_config) + assert "on_error" not in result + + def test_on_flush_callback_preserved( + self, valid_config: LogwellConfig, mock_on_flush: Any + ) -> None: + """Preserves on_flush callback when provided.""" + config = dict(valid_config) + config["on_flush"] = mock_on_flush + + result = validate_config(config) # type: ignore[arg-type] + assert result["on_flush"] is mock_on_flush + + def test_on_flush_not_added_by_default(self, valid_config: LogwellConfig) -> None: + """Does not add on_flush when not provided.""" + result = validate_config(valid_config) + assert "on_flush" not in result + + def test_capture_source_location_true(self, valid_config: LogwellConfig) -> None: + """Accepts capture_source_location=True.""" + config = dict(valid_config) + config["capture_source_location"] = True + + result = validate_config(config) # type: ignore[arg-type] + assert result["capture_source_location"] is True + + def test_capture_source_location_false(self, valid_config: LogwellConfig) -> None: + """Accepts capture_source_location=False.""" + config = dict(valid_config) + config["capture_source_location"] = False + + result = validate_config(config) # type: ignore[arg-type] + assert result["capture_source_location"] is False + + +# ============================================================================= +# validate_config Tests - Return Value Structure +# ============================================================================= + + +class TestValidateConfigReturnValue: + """Tests for validate_config return value structure.""" + + def test_returns_logwell_config_type(self, valid_config: LogwellConfig) -> None: + """Returns a LogwellConfig dict.""" + result = validate_config(valid_config) + + # Required fields present + assert "api_key" in result + assert "endpoint" in result + + # Default fields present + assert "batch_size" in result + assert "flush_interval" in result + assert "max_queue_size" in result + assert "max_retries" in result + assert "capture_source_location" in result + + def test_returns_copy_not_reference(self, valid_config: LogwellConfig) -> None: + """Returns a new dict, not a reference to input.""" + result = validate_config(valid_config) + + # Modify result should not affect input + result["batch_size"] = 9999 + assert valid_config.get("batch_size") != 9999 + + def test_all_values_present_in_full_config( + self, valid_config_full: LogwellConfig + ) -> None: + """Full config returns all provided values.""" + result = validate_config(valid_config_full) + + assert result["api_key"] == valid_config_full["api_key"] + assert result["endpoint"] == valid_config_full["endpoint"] + assert result["service"] == valid_config_full["service"] + assert result["batch_size"] == valid_config_full["batch_size"] + assert result["flush_interval"] == valid_config_full["flush_interval"] + assert result["max_queue_size"] == valid_config_full["max_queue_size"] + assert result["max_retries"] == valid_config_full["max_retries"] + assert ( + result["capture_source_location"] + == valid_config_full["capture_source_location"] + ) + + +# ============================================================================= +# Edge Cases +# ============================================================================= + + +class TestIsValidUrlEdgeCases: + """Edge cases for _is_valid_url internal function (via validate_config).""" + + def test_url_that_triggers_exception( + self, valid_api_key: str + ) -> None: + """Test URL that might trigger urlparse exception. + + urlparse is very permissive and rarely throws, but we can test + by mocking to ensure the exception path returns False. + """ + from unittest.mock import patch + + config: dict[str, Any] = { + "api_key": valid_api_key, + "endpoint": "https://valid.example.com", + } + + # Mock urlparse to raise an exception + with patch("logwell.config.urlparse") as mock_urlparse: + mock_urlparse.side_effect = ValueError("Mock error") + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "Invalid endpoint URL" in exc_info.value.message + + def test_url_with_attribute_error( + self, valid_api_key: str + ) -> None: + """Test URL that causes AttributeError in urlparse.""" + from unittest.mock import patch + + config: dict[str, Any] = { + "api_key": valid_api_key, + "endpoint": "https://valid.example.com", + } + + # Mock urlparse to raise AttributeError + with patch("logwell.config.urlparse") as mock_urlparse: + mock_urlparse.side_effect = AttributeError("Mock attribute error") + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + assert "Invalid endpoint URL" in exc_info.value.message + + +class TestValidateConfigEdgeCases: + """Edge case tests for validate_config.""" + + def test_api_key_exactly_32_chars_after_prefix( + self, valid_endpoint: str + ) -> None: + """Accepts API key with exactly 32 chars after prefix.""" + config: dict[str, Any] = { + "api_key": "lw_" + "a" * 32, + "endpoint": valid_endpoint, + } + + result = validate_config(config) # type: ignore[arg-type] + assert result["api_key"] == "lw_" + "a" * 32 + + def test_endpoint_with_trailing_slash(self, valid_api_key: str) -> None: + """Accepts endpoint with trailing slash.""" + config: dict[str, Any] = { + "api_key": valid_api_key, + "endpoint": "https://logs.example.com/", + } + + result = validate_config(config) # type: ignore[arg-type] + assert result["endpoint"] == "https://logs.example.com/" + + def test_endpoint_with_query_params(self, valid_api_key: str) -> None: + """Accepts endpoint with query parameters.""" + config: dict[str, Any] = { + "api_key": valid_api_key, + "endpoint": "https://logs.example.com?project=test", + } + + result = validate_config(config) # type: ignore[arg-type] + assert result["endpoint"] == "https://logs.example.com?project=test" + + def test_validates_in_order(self, valid_endpoint: str) -> None: + """Validates api_key before endpoint.""" + config: dict[str, Any] = { + "api_key": "", # Invalid + "endpoint": "invalid", # Also invalid + } + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + # api_key error should come first + assert "api_key" in exc_info.value.message + + def test_api_key_format_checked_before_numeric_bounds( + self, valid_endpoint: str + ) -> None: + """API key format checked before numeric options.""" + config: dict[str, Any] = { + "api_key": "invalid_key", + "endpoint": valid_endpoint, + "batch_size": -1, # Also invalid + } + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + # api_key format error should come first + assert "Invalid API key format" in exc_info.value.message + + def test_endpoint_checked_before_numeric_bounds( + self, valid_api_key: str + ) -> None: + """Endpoint URL checked before numeric options.""" + config: dict[str, Any] = { + "api_key": valid_api_key, + "endpoint": "invalid-url", + "batch_size": -1, # Also invalid + } + + with pytest.raises(LogwellError) as exc_info: + validate_config(config) # type: ignore[arg-type] + + # endpoint error should come first + assert "Invalid endpoint URL" in exc_info.value.message diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md index bffd959..ffe9444 100644 --- a/specs/python-sdk/.progress.md +++ b/specs/python-sdk/.progress.md @@ -24,6 +24,7 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity - [x] 2.3 Add type hints throughout - (verified) - [x] 2.4 Add README and LICENSE - b6f61bd - [x] 3.1 Create test fixtures - 607f87a +- [x] 3.2 Unit tests for config - a592689 ## Current Task Awaiting next task @@ -56,6 +57,10 @@ Awaiting next task - pytest --collect-only exits 5 with no tests (expected) but fixtures load fine - respx added for HTTP mocking in integration tests - 39 fixtures defined covering valid/invalid configs, mock responses, sample entries +- pytest-cov installed for coverage reporting +- 78 tests written for config.py with 100% coverage +- Tests cover: validate_api_key_format (valid/invalid keys), validate_config (missing fields, invalid formats, numeric bounds), default merging, optional fields, return value structure, edge cases +- Mock urlparse to test exception handling in _is_valid_url (lines 57-58 coverage) ## Next -Task 3.2: Unit tests for config +Task 3.3: Unit tests for errors diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index 37f329e..1ca99dc 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -148,7 +148,7 @@ After POC validated, clean up code. - **Commit**: `test(python-sdk): add test fixtures` - _Design: File Structure_ -- [ ] 3.2 Unit tests for config +- [x] 3.2 Unit tests for config - **Do**: Test validate_config, validate_api_key_format, edge cases - **Files**: `sdks/python/tests/unit/__init__.py`, `sdks/python/tests/unit/test_config.py` - **Done when**: 100% coverage of config.py From 43877df0572cc4dd0f29a8638315edf81869a686 Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 17:58:47 -0700 Subject: [PATCH 19/30] test(python-sdk): add error unit tests --- sdks/python/tests/unit/test_errors.py | 528 ++++++++++++++++++++++++++ specs/python-sdk/.progress.md | 4 +- specs/python-sdk/tasks.md | 2 +- 3 files changed, 532 insertions(+), 2 deletions(-) create mode 100644 sdks/python/tests/unit/test_errors.py diff --git a/sdks/python/tests/unit/test_errors.py b/sdks/python/tests/unit/test_errors.py new file mode 100644 index 0000000..1051112 --- /dev/null +++ b/sdks/python/tests/unit/test_errors.py @@ -0,0 +1,528 @@ +"""Unit tests for errors.py - LogwellErrorCode and LogwellError. + +Tests cover: +- LogwellErrorCode: All 7 error codes exist with correct values +- LogwellError: Construction, attributes, inheritance from Exception +- LogwellError: __str__ and __repr__ methods +- LogwellError: Default values for optional attributes +""" + +from __future__ import annotations + +import pytest + +from logwell.errors import LogwellError, LogwellErrorCode + + +# ============================================================================= +# LogwellErrorCode Tests +# ============================================================================= + + +class TestLogwellErrorCode: + """Tests for LogwellErrorCode enum.""" + + def test_network_error_exists(self) -> None: + """NETWORK_ERROR code exists.""" + assert hasattr(LogwellErrorCode, "NETWORK_ERROR") + + def test_network_error_value(self) -> None: + """NETWORK_ERROR has correct string value.""" + assert LogwellErrorCode.NETWORK_ERROR.value == "NETWORK_ERROR" + + def test_unauthorized_exists(self) -> None: + """UNAUTHORIZED code exists.""" + assert hasattr(LogwellErrorCode, "UNAUTHORIZED") + + def test_unauthorized_value(self) -> None: + """UNAUTHORIZED has correct string value.""" + assert LogwellErrorCode.UNAUTHORIZED.value == "UNAUTHORIZED" + + def test_validation_error_exists(self) -> None: + """VALIDATION_ERROR code exists.""" + assert hasattr(LogwellErrorCode, "VALIDATION_ERROR") + + def test_validation_error_value(self) -> None: + """VALIDATION_ERROR has correct string value.""" + assert LogwellErrorCode.VALIDATION_ERROR.value == "VALIDATION_ERROR" + + def test_rate_limited_exists(self) -> None: + """RATE_LIMITED code exists.""" + assert hasattr(LogwellErrorCode, "RATE_LIMITED") + + def test_rate_limited_value(self) -> None: + """RATE_LIMITED has correct string value.""" + assert LogwellErrorCode.RATE_LIMITED.value == "RATE_LIMITED" + + def test_server_error_exists(self) -> None: + """SERVER_ERROR code exists.""" + assert hasattr(LogwellErrorCode, "SERVER_ERROR") + + def test_server_error_value(self) -> None: + """SERVER_ERROR has correct string value.""" + assert LogwellErrorCode.SERVER_ERROR.value == "SERVER_ERROR" + + def test_queue_overflow_exists(self) -> None: + """QUEUE_OVERFLOW code exists.""" + assert hasattr(LogwellErrorCode, "QUEUE_OVERFLOW") + + def test_queue_overflow_value(self) -> None: + """QUEUE_OVERFLOW has correct string value.""" + assert LogwellErrorCode.QUEUE_OVERFLOW.value == "QUEUE_OVERFLOW" + + def test_invalid_config_exists(self) -> None: + """INVALID_CONFIG code exists.""" + assert hasattr(LogwellErrorCode, "INVALID_CONFIG") + + def test_invalid_config_value(self) -> None: + """INVALID_CONFIG has correct string value.""" + assert LogwellErrorCode.INVALID_CONFIG.value == "INVALID_CONFIG" + + def test_error_code_count(self) -> None: + """Exactly 7 error codes are defined.""" + assert len(LogwellErrorCode) == 7 + + def test_all_codes_are_strings(self) -> None: + """All error codes inherit from str.""" + for code in LogwellErrorCode: + assert isinstance(code, str) + assert isinstance(code.value, str) + + def test_error_code_string_comparison(self) -> None: + """Error codes can be compared as strings.""" + assert LogwellErrorCode.NETWORK_ERROR == "NETWORK_ERROR" + assert LogwellErrorCode.UNAUTHORIZED == "UNAUTHORIZED" + + def test_error_codes_are_unique(self) -> None: + """All error code values are unique.""" + values = [code.value for code in LogwellErrorCode] + assert len(values) == len(set(values)) + + +# ============================================================================= +# LogwellError Construction Tests +# ============================================================================= + + +class TestLogwellErrorConstruction: + """Tests for LogwellError construction.""" + + def test_basic_construction(self) -> None: + """Error can be constructed with required arguments.""" + error = LogwellError( + message="Test error", + code=LogwellErrorCode.NETWORK_ERROR, + ) + assert error.message == "Test error" + assert error.code == LogwellErrorCode.NETWORK_ERROR + + def test_construction_with_all_arguments(self) -> None: + """Error can be constructed with all arguments.""" + error = LogwellError( + message="Rate limited", + code=LogwellErrorCode.RATE_LIMITED, + status_code=429, + retryable=True, + ) + assert error.message == "Rate limited" + assert error.code == LogwellErrorCode.RATE_LIMITED + assert error.status_code == 429 + assert error.retryable is True + + def test_default_status_code_is_none(self) -> None: + """Default status_code is None.""" + error = LogwellError( + message="Test", + code=LogwellErrorCode.VALIDATION_ERROR, + ) + assert error.status_code is None + + def test_default_retryable_is_false(self) -> None: + """Default retryable is False.""" + error = LogwellError( + message="Test", + code=LogwellErrorCode.VALIDATION_ERROR, + ) + assert error.retryable is False + + def test_retryable_true(self) -> None: + """Retryable can be set to True.""" + error = LogwellError( + message="Network timeout", + code=LogwellErrorCode.NETWORK_ERROR, + retryable=True, + ) + assert error.retryable is True + + def test_retryable_false_explicit(self) -> None: + """Retryable can be explicitly set to False.""" + error = LogwellError( + message="Invalid config", + code=LogwellErrorCode.INVALID_CONFIG, + retryable=False, + ) + assert error.retryable is False + + def test_status_code_401(self) -> None: + """Status code 401 for unauthorized.""" + error = LogwellError( + message="Invalid API key", + code=LogwellErrorCode.UNAUTHORIZED, + status_code=401, + ) + assert error.status_code == 401 + + def test_status_code_500(self) -> None: + """Status code 500 for server error.""" + error = LogwellError( + message="Internal server error", + code=LogwellErrorCode.SERVER_ERROR, + status_code=500, + ) + assert error.status_code == 500 + + def test_status_code_503(self) -> None: + """Status code 503 for service unavailable.""" + error = LogwellError( + message="Service unavailable", + code=LogwellErrorCode.SERVER_ERROR, + status_code=503, + ) + assert error.status_code == 503 + + def test_empty_message(self) -> None: + """Error can be created with empty message.""" + error = LogwellError( + message="", + code=LogwellErrorCode.NETWORK_ERROR, + ) + assert error.message == "" + + def test_message_with_special_chars(self) -> None: + """Error message can contain special characters.""" + msg = "Error: 'test' with \"quotes\" and & ampersand" + error = LogwellError( + message=msg, + code=LogwellErrorCode.VALIDATION_ERROR, + ) + assert error.message == msg + + def test_message_with_unicode(self) -> None: + """Error message can contain unicode characters.""" + msg = "Error: æ—ĨæœŦčĒž emoji 🎉 accents ÊàÃŧ" + error = LogwellError( + message=msg, + code=LogwellErrorCode.SERVER_ERROR, + ) + assert error.message == msg + + def test_long_message(self) -> None: + """Error can have a long message.""" + msg = "A" * 10000 + error = LogwellError( + message=msg, + code=LogwellErrorCode.SERVER_ERROR, + ) + assert error.message == msg + assert len(error.message) == 10000 + + +# ============================================================================= +# LogwellError Inheritance Tests +# ============================================================================= + + +class TestLogwellErrorInheritance: + """Tests for LogwellError inheritance from Exception.""" + + def test_is_exception_subclass(self) -> None: + """LogwellError is a subclass of Exception.""" + assert issubclass(LogwellError, Exception) + + def test_instance_is_exception(self) -> None: + """LogwellError instance is an Exception.""" + error = LogwellError( + message="Test", + code=LogwellErrorCode.NETWORK_ERROR, + ) + assert isinstance(error, Exception) + + def test_can_be_raised(self) -> None: + """LogwellError can be raised.""" + with pytest.raises(LogwellError): + raise LogwellError( + message="Test error", + code=LogwellErrorCode.VALIDATION_ERROR, + ) + + def test_can_be_caught_as_exception(self) -> None: + """LogwellError can be caught as Exception.""" + try: + raise LogwellError( + message="Test", + code=LogwellErrorCode.SERVER_ERROR, + ) + except Exception as e: + assert isinstance(e, LogwellError) + assert e.message == "Test" + + def test_can_be_caught_as_logwell_error(self) -> None: + """LogwellError can be caught specifically.""" + try: + raise LogwellError( + message="Specific error", + code=LogwellErrorCode.UNAUTHORIZED, + status_code=401, + ) + except LogwellError as e: + assert e.code == LogwellErrorCode.UNAUTHORIZED + assert e.status_code == 401 + + def test_exception_args_preserved(self) -> None: + """Exception args are preserved (message is first arg).""" + error = LogwellError( + message="Test message", + code=LogwellErrorCode.NETWORK_ERROR, + ) + assert error.args == ("Test message",) + + def test_is_base_exception_subclass(self) -> None: + """LogwellError is a subclass of BaseException.""" + assert issubclass(LogwellError, BaseException) + + +# ============================================================================= +# LogwellError __str__ Tests +# ============================================================================= + + +class TestLogwellErrorStr: + """Tests for LogwellError __str__ method.""" + + def test_str_without_status_code(self) -> None: + """String representation without status code.""" + error = LogwellError( + message="Network timeout", + code=LogwellErrorCode.NETWORK_ERROR, + ) + assert str(error) == "[NETWORK_ERROR] Network timeout" + + def test_str_with_status_code(self) -> None: + """String representation with status code.""" + error = LogwellError( + message="Unauthorized request", + code=LogwellErrorCode.UNAUTHORIZED, + status_code=401, + ) + assert str(error) == "[UNAUTHORIZED] Unauthorized request (HTTP 401)" + + def test_str_with_server_error(self) -> None: + """String representation for server error.""" + error = LogwellError( + message="Internal server error", + code=LogwellErrorCode.SERVER_ERROR, + status_code=500, + ) + assert str(error) == "[SERVER_ERROR] Internal server error (HTTP 500)" + + def test_str_with_rate_limited(self) -> None: + """String representation for rate limited error.""" + error = LogwellError( + message="Too many requests", + code=LogwellErrorCode.RATE_LIMITED, + status_code=429, + ) + assert str(error) == "[RATE_LIMITED] Too many requests (HTTP 429)" + + def test_str_validation_error(self) -> None: + """String representation for validation error.""" + error = LogwellError( + message="Invalid log level", + code=LogwellErrorCode.VALIDATION_ERROR, + ) + assert str(error) == "[VALIDATION_ERROR] Invalid log level" + + def test_str_queue_overflow(self) -> None: + """String representation for queue overflow.""" + error = LogwellError( + message="Queue full, logs dropped", + code=LogwellErrorCode.QUEUE_OVERFLOW, + ) + assert str(error) == "[QUEUE_OVERFLOW] Queue full, logs dropped" + + def test_str_invalid_config(self) -> None: + """String representation for invalid config.""" + error = LogwellError( + message="endpoint must be a valid URL", + code=LogwellErrorCode.INVALID_CONFIG, + ) + assert str(error) == "[INVALID_CONFIG] endpoint must be a valid URL" + + def test_str_empty_message(self) -> None: + """String representation with empty message.""" + error = LogwellError( + message="", + code=LogwellErrorCode.NETWORK_ERROR, + ) + assert str(error) == "[NETWORK_ERROR] " + + def test_str_with_special_chars_in_message(self) -> None: + """String representation with special characters.""" + error = LogwellError( + message="Error & 'value'", + code=LogwellErrorCode.VALIDATION_ERROR, + ) + assert str(error) == "[VALIDATION_ERROR] Error & 'value'" + + +# ============================================================================= +# LogwellError __repr__ Tests +# ============================================================================= + + +class TestLogwellErrorRepr: + """Tests for LogwellError __repr__ method.""" + + def test_repr_basic(self) -> None: + """Repr shows all attributes.""" + error = LogwellError( + message="Test", + code=LogwellErrorCode.NETWORK_ERROR, + ) + repr_str = repr(error) + assert "LogwellError(" in repr_str + assert "message='Test'" in repr_str + assert "LogwellErrorCode.NETWORK_ERROR" in repr_str + assert "status_code=None" in repr_str + assert "retryable=False" in repr_str + + def test_repr_with_all_attributes(self) -> None: + """Repr shows all attributes when set.""" + error = LogwellError( + message="Rate limited", + code=LogwellErrorCode.RATE_LIMITED, + status_code=429, + retryable=True, + ) + repr_str = repr(error) + assert "LogwellError(" in repr_str + assert "message='Rate limited'" in repr_str + assert "LogwellErrorCode.RATE_LIMITED" in repr_str + assert "status_code=429" in repr_str + assert "retryable=True" in repr_str + + def test_repr_with_status_code_500(self) -> None: + """Repr shows status code 500.""" + error = LogwellError( + message="Server error", + code=LogwellErrorCode.SERVER_ERROR, + status_code=500, + ) + repr_str = repr(error) + assert "status_code=500" in repr_str + + def test_repr_message_with_quotes(self) -> None: + """Repr properly escapes quotes in message.""" + error = LogwellError( + message="Error with 'single' quotes", + code=LogwellErrorCode.VALIDATION_ERROR, + ) + repr_str = repr(error) + # Message should be repr'd (escaped) + assert "Error with 'single' quotes" in repr_str + + def test_repr_is_valid_python(self) -> None: + """Repr output format is consistent.""" + error = LogwellError( + message="Test", + code=LogwellErrorCode.UNAUTHORIZED, + status_code=401, + retryable=False, + ) + repr_str = repr(error) + # Should start with class name and have proper format + assert repr_str.startswith("LogwellError(") + assert repr_str.endswith(")") + + +# ============================================================================= +# LogwellError Edge Cases +# ============================================================================= + + +class TestLogwellErrorEdgeCases: + """Edge case tests for LogwellError.""" + + def test_status_code_zero(self) -> None: + """Status code can be zero (unusual but valid).""" + error = LogwellError( + message="Test", + code=LogwellErrorCode.NETWORK_ERROR, + status_code=0, + ) + # 0 is not None, so should be included in str + assert error.status_code == 0 + assert "(HTTP 0)" in str(error) + + def test_status_code_negative(self) -> None: + """Status code can be negative (unusual but no validation).""" + error = LogwellError( + message="Test", + code=LogwellErrorCode.NETWORK_ERROR, + status_code=-1, + ) + assert error.status_code == -1 + + def test_message_multiline(self) -> None: + """Message can be multiline.""" + msg = "Line 1\nLine 2\nLine 3" + error = LogwellError( + message=msg, + code=LogwellErrorCode.SERVER_ERROR, + ) + assert error.message == msg + assert "\n" in str(error) + + def test_all_error_codes_can_create_errors(self) -> None: + """All error codes can be used to create errors.""" + for code in LogwellErrorCode: + error = LogwellError( + message=f"Test {code.value}", + code=code, + ) + assert error.code == code + assert code.value in str(error) + + def test_error_code_in_exception_chain(self) -> None: + """Error can be part of exception chain.""" + original = ValueError("Original error") + try: + try: + raise original + except ValueError as e: + raise LogwellError( + message="Wrapped error", + code=LogwellErrorCode.VALIDATION_ERROR, + ) from e + except LogwellError as e: + assert e.__cause__ is original + assert e.message == "Wrapped error" + + def test_multiple_errors_independent(self) -> None: + """Multiple error instances are independent.""" + error1 = LogwellError( + message="Error 1", + code=LogwellErrorCode.NETWORK_ERROR, + status_code=None, + retryable=True, + ) + error2 = LogwellError( + message="Error 2", + code=LogwellErrorCode.SERVER_ERROR, + status_code=500, + retryable=False, + ) + assert error1.message != error2.message + assert error1.code != error2.code + assert error1.status_code != error2.status_code + assert error1.retryable != error2.retryable diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md index ffe9444..7b16f2b 100644 --- a/specs/python-sdk/.progress.md +++ b/specs/python-sdk/.progress.md @@ -25,6 +25,7 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity - [x] 2.4 Add README and LICENSE - b6f61bd - [x] 3.1 Create test fixtures - 607f87a - [x] 3.2 Unit tests for config - a592689 +- [x] 3.3 Unit tests for errors - 56797be ## Current Task Awaiting next task @@ -61,6 +62,7 @@ Awaiting next task - 78 tests written for config.py with 100% coverage - Tests cover: validate_api_key_format (valid/invalid keys), validate_config (missing fields, invalid formats, numeric bounds), default merging, optional fields, return value structure, edge cases - Mock urlparse to test exception handling in _is_valid_url (lines 57-58 coverage) +- 58 tests for errors module covering: LogwellErrorCode enum (7 codes, values, uniqueness), LogwellError construction, inheritance from Exception, __str__ and __repr__ methods, edge cases ## Next -Task 3.3: Unit tests for errors +Task 3.4: Unit tests for queue diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index 1ca99dc..75d7cf2 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -156,7 +156,7 @@ After POC validated, clean up code. - **Commit**: `test(python-sdk): add config unit tests` - _Requirements: AC-7.1, AC-7.2, AC-7.3, AC-7.4_ -- [ ] 3.3 Unit tests for errors +- [x] 3.3 Unit tests for errors - **Do**: Test LogwellError construction, attributes, inheritance - **Files**: `sdks/python/tests/unit/test_errors.py` - **Done when**: All error codes tested From 1bc2f3b13ab48031a716bd5bd1dc07a3918777e8 Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 18:02:51 -0700 Subject: [PATCH 20/30] test(python-sdk): add queue unit tests 47 tests covering: - QueueConfig: construction, from_logwell_config - BatchQueue: add, flush, size, shutdown methods - Auto-flush on batch_size threshold - Timer-based flush after flush_interval - Queue overflow handling (drop oldest, on_error callback) - Concurrent add/flush operations (thread safety) - Edge cases (error recovery, entries during flush) --- sdks/python/tests/unit/test_queue.py | 898 +++++++++++++++++++++++++++ specs/python-sdk/.progress.md | 6 +- specs/python-sdk/tasks.md | 2 +- 3 files changed, 904 insertions(+), 2 deletions(-) create mode 100644 sdks/python/tests/unit/test_queue.py diff --git a/sdks/python/tests/unit/test_queue.py b/sdks/python/tests/unit/test_queue.py new file mode 100644 index 0000000..ee007e5 --- /dev/null +++ b/sdks/python/tests/unit/test_queue.py @@ -0,0 +1,898 @@ +"""Unit tests for queue.py - BatchQueue class. + +Tests cover: +- QueueConfig: construction and from_logwell_config +- BatchQueue: add, flush, size, shutdown methods +- Auto-flush on batch_size threshold +- Timer-based flush after flush_interval +- Queue overflow handling (drop oldest, call on_error) +- Concurrent add/flush operations (thread safety) +- Graceful shutdown behavior +""" + +from __future__ import annotations + +import asyncio +import threading +import time +from typing import TYPE_CHECKING, Any +from unittest.mock import MagicMock + +import pytest + +from logwell.errors import LogwellError, LogwellErrorCode +from logwell.queue import BatchQueue, QueueConfig +from logwell.types import IngestResponse, LogEntry + +if TYPE_CHECKING: + from collections.abc import Callable + + +# ============================================================================= +# Test Helpers +# ============================================================================= + + +def make_log_entry(message: str = "test", level: str = "info") -> LogEntry: + """Create a simple log entry for testing.""" + return {"level": level, "message": message} # type: ignore[typeddict-item] + + +def make_send_batch_mock( + response: IngestResponse | None = None, + error: Exception | None = None, +) -> tuple[MagicMock, list[list[LogEntry]]]: + """Create a mock send_batch function that tracks calls. + + Args: + response: The response to return (default: {"accepted": 1}) + error: Exception to raise instead of returning response + + Returns: + Tuple of (mock_function, captured_batches_list) + """ + captured: list[list[LogEntry]] = [] + if response is None: + response = {"accepted": 1} + + async def mock_send(batch: list[LogEntry]) -> IngestResponse: + captured.append(batch) + if error: + raise error + return response # type: ignore[return-value] + + mock = MagicMock(side_effect=mock_send) + return mock, captured + + +# ============================================================================= +# QueueConfig Tests +# ============================================================================= + + +class TestQueueConfigConstruction: + """Tests for QueueConfig construction.""" + + def test_default_values(self) -> None: + """QueueConfig has sensible defaults.""" + config = QueueConfig() + assert config.batch_size == 50 + assert config.flush_interval == 5.0 + assert config.max_queue_size == 1000 + assert config.on_error is None + assert config.on_flush is None + + def test_custom_values(self) -> None: + """QueueConfig accepts custom values.""" + on_error = MagicMock() + on_flush = MagicMock() + config = QueueConfig( + batch_size=100, + flush_interval=10.0, + max_queue_size=500, + on_error=on_error, + on_flush=on_flush, + ) + assert config.batch_size == 100 + assert config.flush_interval == 10.0 + assert config.max_queue_size == 500 + assert config.on_error is on_error + assert config.on_flush is on_flush + + def test_partial_custom_values(self) -> None: + """QueueConfig allows partial override of defaults.""" + config = QueueConfig(batch_size=25) + assert config.batch_size == 25 + assert config.flush_interval == 5.0 # default + assert config.max_queue_size == 1000 # default + + +class TestQueueConfigFromLogwellConfig: + """Tests for QueueConfig.from_logwell_config classmethod.""" + + def test_extracts_queue_config_values(self, valid_config_full: Any) -> None: + """Extracts queue-related values from LogwellConfig.""" + config = QueueConfig.from_logwell_config(valid_config_full) + assert config.batch_size == 100 + assert config.flush_interval == 10.0 + assert config.max_queue_size == 500 + + def test_uses_defaults_for_missing_values(self, valid_config: Any) -> None: + """Uses default values for missing config keys.""" + config = QueueConfig.from_logwell_config(valid_config) + assert config.batch_size == 50 + assert config.flush_interval == 5.0 + assert config.max_queue_size == 1000 + assert config.on_error is None + assert config.on_flush is None + + def test_extracts_callbacks(self, valid_config: Any) -> None: + """Extracts on_error and on_flush callbacks.""" + on_error = MagicMock() + on_flush = MagicMock() + logwell_config = dict(valid_config) + logwell_config["on_error"] = on_error + logwell_config["on_flush"] = on_flush + + config = QueueConfig.from_logwell_config(logwell_config) + assert config.on_error is on_error + assert config.on_flush is on_flush + + +# ============================================================================= +# BatchQueue Construction Tests +# ============================================================================= + + +class TestBatchQueueConstruction: + """Tests for BatchQueue construction.""" + + def test_accepts_queue_config(self) -> None: + """BatchQueue accepts QueueConfig.""" + send_batch, _ = make_send_batch_mock() + config = QueueConfig(batch_size=10) + queue = BatchQueue(send_batch, config) + assert queue.size == 0 + + def test_accepts_logwell_config(self, valid_config: Any) -> None: + """BatchQueue accepts LogwellConfig and converts it.""" + send_batch, _ = make_send_batch_mock() + queue = BatchQueue(send_batch, valid_config) + assert queue.size == 0 + + def test_starts_empty(self) -> None: + """BatchQueue starts with size 0.""" + send_batch, _ = make_send_batch_mock() + queue = BatchQueue(send_batch, QueueConfig()) + assert queue.size == 0 + + +# ============================================================================= +# BatchQueue.add() Tests +# ============================================================================= + + +class TestBatchQueueAdd: + """Tests for BatchQueue.add() method.""" + + def test_add_increases_size(self) -> None: + """add() increases queue size by 1.""" + send_batch, _ = make_send_batch_mock() + queue = BatchQueue(send_batch, QueueConfig(batch_size=100)) + + queue.add(make_log_entry("first")) + assert queue.size == 1 + + queue.add(make_log_entry("second")) + assert queue.size == 2 + + def test_add_multiple_entries(self) -> None: + """add() handles multiple entries sequentially.""" + send_batch, _ = make_send_batch_mock() + queue = BatchQueue(send_batch, QueueConfig(batch_size=100)) + + for i in range(10): + queue.add(make_log_entry(f"message_{i}")) + + assert queue.size == 10 + + @pytest.mark.asyncio + async def test_add_after_shutdown_is_ignored(self) -> None: + """add() is ignored after shutdown().""" + send_batch, _ = make_send_batch_mock() + queue = BatchQueue(send_batch, QueueConfig(batch_size=100)) + + queue.add(make_log_entry("before")) + assert queue.size == 1 + + await queue.shutdown() + queue.add(make_log_entry("after")) + # Should still be 0 since shutdown flushed and new add ignored + assert queue.size == 0 + + +# ============================================================================= +# BatchQueue.size Tests +# ============================================================================= + + +class TestBatchQueueSize: + """Tests for BatchQueue.size property.""" + + def test_size_starts_at_zero(self) -> None: + """size is 0 for new queue.""" + send_batch, _ = make_send_batch_mock() + queue = BatchQueue(send_batch, QueueConfig()) + assert queue.size == 0 + + def test_size_reflects_queue_length(self) -> None: + """size accurately reflects number of entries.""" + send_batch, _ = make_send_batch_mock() + queue = BatchQueue(send_batch, QueueConfig(batch_size=100)) + + assert queue.size == 0 + queue.add(make_log_entry()) + assert queue.size == 1 + queue.add(make_log_entry()) + assert queue.size == 2 + + @pytest.mark.asyncio + async def test_size_zero_after_flush(self) -> None: + """size is 0 after flush().""" + send_batch, _ = make_send_batch_mock() + queue = BatchQueue(send_batch, QueueConfig(batch_size=100)) + + queue.add(make_log_entry()) + queue.add(make_log_entry()) + assert queue.size == 2 + + await queue.flush() + assert queue.size == 0 + + +# ============================================================================= +# BatchQueue.flush() Tests +# ============================================================================= + + +class TestBatchQueueFlush: + """Tests for BatchQueue.flush() method.""" + + @pytest.mark.asyncio + async def test_flush_calls_send_batch_with_entries(self) -> None: + """flush() calls send_batch with queued entries.""" + send_batch, captured = make_send_batch_mock() + queue = BatchQueue(send_batch, QueueConfig(batch_size=100)) + + queue.add(make_log_entry("one")) + queue.add(make_log_entry("two")) + + await queue.flush() + + assert len(captured) == 1 + assert len(captured[0]) == 2 + assert captured[0][0]["message"] == "one" + assert captured[0][1]["message"] == "two" + + @pytest.mark.asyncio + async def test_flush_clears_queue(self) -> None: + """flush() empties the queue.""" + send_batch, _ = make_send_batch_mock() + queue = BatchQueue(send_batch, QueueConfig(batch_size=100)) + + queue.add(make_log_entry()) + queue.add(make_log_entry()) + assert queue.size == 2 + + await queue.flush() + assert queue.size == 0 + + @pytest.mark.asyncio + async def test_flush_returns_response(self) -> None: + """flush() returns the IngestResponse from send_batch.""" + response: IngestResponse = {"accepted": 5, "rejected": 0} + send_batch, _ = make_send_batch_mock(response=response) + queue = BatchQueue(send_batch, QueueConfig(batch_size=100)) + + queue.add(make_log_entry()) + + result = await queue.flush() + assert result == response + + @pytest.mark.asyncio + async def test_flush_empty_queue_returns_none(self) -> None: + """flush() on empty queue returns None without calling send_batch.""" + send_batch, captured = make_send_batch_mock() + queue = BatchQueue(send_batch, QueueConfig()) + + result = await queue.flush() + + assert result is None + assert len(captured) == 0 + + @pytest.mark.asyncio + async def test_flush_calls_on_flush_callback(self) -> None: + """flush() calls on_flush callback with count.""" + on_flush = MagicMock() + send_batch, _ = make_send_batch_mock() + config = QueueConfig(batch_size=100, on_flush=on_flush) + queue = BatchQueue(send_batch, config) + + queue.add(make_log_entry()) + queue.add(make_log_entry()) + queue.add(make_log_entry()) + + await queue.flush() + + on_flush.assert_called_once_with(3) + + @pytest.mark.asyncio + async def test_flush_requeues_on_error(self) -> None: + """flush() re-queues entries on send_batch error.""" + error = Exception("Network error") + send_batch, _ = make_send_batch_mock(error=error) + queue = BatchQueue(send_batch, QueueConfig(batch_size=100)) + + queue.add(make_log_entry("one")) + queue.add(make_log_entry("two")) + + await queue.flush() + + # Entries should be re-queued + assert queue.size == 2 + + @pytest.mark.asyncio + async def test_flush_calls_on_error_callback_on_failure(self) -> None: + """flush() calls on_error callback when send_batch fails.""" + error = Exception("Network error") + on_error = MagicMock() + send_batch, _ = make_send_batch_mock(error=error) + config = QueueConfig(batch_size=100, on_error=on_error) + queue = BatchQueue(send_batch, config) + + queue.add(make_log_entry()) + await queue.flush() + + on_error.assert_called_once_with(error) + + @pytest.mark.asyncio + async def test_concurrent_flush_prevented(self) -> None: + """Concurrent flush() calls are prevented.""" + call_count = 0 + flush_started = asyncio.Event() + flush_continue = asyncio.Event() + + async def slow_send(batch: list[LogEntry]) -> IngestResponse: + nonlocal call_count + call_count += 1 + flush_started.set() + # Wait until test signals to continue + await flush_continue.wait() + return {"accepted": len(batch)} + + mock = MagicMock(side_effect=slow_send) + queue = BatchQueue(mock, QueueConfig(batch_size=100)) + + queue.add(make_log_entry()) + queue.add(make_log_entry()) + + # Start first flush in background + task1 = asyncio.create_task(queue.flush()) + + # Wait for first flush to start + await flush_started.wait() + + # Try second flush while first is in progress + result2 = await queue.flush() + + # Second flush should return None (skipped) + assert result2 is None + + # Let first flush complete + flush_continue.set() + await task1 + + # Only one actual send should have happened + assert call_count == 1 + + +# ============================================================================= +# Auto-Flush on Batch Size Tests +# ============================================================================= + + +class TestAutoFlushOnBatchSize: + """Tests for automatic flush when batch_size threshold is reached.""" + + @pytest.mark.asyncio + async def test_auto_flush_triggers_at_batch_size(self) -> None: + """Auto-flush triggers when batch_size entries are added.""" + send_batch, captured = make_send_batch_mock() + queue = BatchQueue(send_batch, QueueConfig(batch_size=3)) + + # Add up to batch_size + queue.add(make_log_entry("one")) + queue.add(make_log_entry("two")) + # Size should be 2, no flush yet + + queue.add(make_log_entry("three")) + # This should trigger auto-flush + + # Give async flush time to complete + await asyncio.sleep(0.1) + + assert len(captured) >= 1 + assert queue.size == 0 + + @pytest.mark.asyncio + async def test_auto_flush_sends_batch_size_entries(self) -> None: + """Auto-flush sends exactly batch_size entries.""" + send_batch, captured = make_send_batch_mock() + config = QueueConfig(batch_size=5) + queue = BatchQueue(send_batch, config) + + for i in range(5): + queue.add(make_log_entry(f"msg_{i}")) + + await asyncio.sleep(0.1) + + assert len(captured) == 1 + assert len(captured[0]) == 5 + + @pytest.mark.asyncio + async def test_batch_size_of_one_flushes_immediately(self) -> None: + """batch_size=1 causes immediate flush on each add.""" + send_batch, captured = make_send_batch_mock() + queue = BatchQueue(send_batch, QueueConfig(batch_size=1)) + + queue.add(make_log_entry("first")) + await asyncio.sleep(0.1) + assert len(captured) >= 1 + + queue.add(make_log_entry("second")) + await asyncio.sleep(0.1) + assert len(captured) >= 2 + + +# ============================================================================= +# Timer-Based Flush Tests +# ============================================================================= + + +class TestTimerBasedFlush: + """Tests for timer-based automatic flush.""" + + @pytest.mark.asyncio + async def test_timer_starts_on_first_add(self) -> None: + """Timer starts when first entry is added.""" + send_batch, captured = make_send_batch_mock() + config = QueueConfig(batch_size=100, flush_interval=0.1) + queue = BatchQueue(send_batch, config) + + queue.add(make_log_entry()) + + # Wait for timer to fire + await asyncio.sleep(0.2) + + assert len(captured) >= 1 + assert queue.size == 0 + + @pytest.mark.asyncio + async def test_timer_flush_with_partial_batch(self) -> None: + """Timer flushes even when batch_size not reached.""" + send_batch, captured = make_send_batch_mock() + config = QueueConfig(batch_size=100, flush_interval=0.1) + queue = BatchQueue(send_batch, config) + + # Add fewer entries than batch_size + queue.add(make_log_entry("one")) + queue.add(make_log_entry("two")) + + # Wait for timer + await asyncio.sleep(0.2) + + assert len(captured) >= 1 + assert len(captured[0]) == 2 + + @pytest.mark.asyncio + async def test_timer_reset_after_flush(self) -> None: + """Timer is reset after manual flush if queue not empty.""" + send_batch, captured = make_send_batch_mock() + config = QueueConfig(batch_size=100, flush_interval=0.15) + queue = BatchQueue(send_batch, config) + + queue.add(make_log_entry("first")) + await queue.flush() + + # Add more entries + queue.add(make_log_entry("second")) + + # Wait for timer + await asyncio.sleep(0.25) + + # Should have flushed both times + assert len(captured) >= 2 + + +# ============================================================================= +# Queue Overflow Tests +# ============================================================================= + + +class TestQueueOverflow: + """Tests for queue overflow handling.""" + + def test_overflow_drops_oldest_entry(self) -> None: + """Overflow drops oldest entry when max_queue_size exceeded.""" + send_batch, _ = make_send_batch_mock() + config = QueueConfig(batch_size=100, max_queue_size=3) + queue = BatchQueue(send_batch, config) + + queue.add(make_log_entry("one")) + queue.add(make_log_entry("two")) + queue.add(make_log_entry("three")) + assert queue.size == 3 + + # This should drop "one" + queue.add(make_log_entry("four")) + assert queue.size == 3 + + @pytest.mark.asyncio + async def test_overflow_preserves_newest_entries(self) -> None: + """Overflow keeps newest entries, drops oldest.""" + send_batch, captured = make_send_batch_mock() + config = QueueConfig(batch_size=100, max_queue_size=3) + queue = BatchQueue(send_batch, config) + + queue.add(make_log_entry("one")) + queue.add(make_log_entry("two")) + queue.add(make_log_entry("three")) + queue.add(make_log_entry("four")) # Drops "one" + queue.add(make_log_entry("five")) # Drops "two" + + await queue.flush() + + assert len(captured[0]) == 3 + messages = [e["message"] for e in captured[0]] + assert messages == ["three", "four", "five"] + + def test_overflow_calls_on_error(self) -> None: + """Overflow calls on_error callback with LogwellError.""" + on_error = MagicMock() + send_batch, _ = make_send_batch_mock() + config = QueueConfig(batch_size=100, max_queue_size=2, on_error=on_error) + queue = BatchQueue(send_batch, config) + + queue.add(make_log_entry("one")) + queue.add(make_log_entry("two")) + queue.add(make_log_entry("three")) # Overflow! + + on_error.assert_called_once() + error = on_error.call_args[0][0] + assert isinstance(error, LogwellError) + assert error.code == LogwellErrorCode.QUEUE_OVERFLOW + + def test_overflow_error_includes_dropped_message(self) -> None: + """Overflow error message includes preview of dropped log.""" + on_error = MagicMock() + send_batch, _ = make_send_batch_mock() + config = QueueConfig(batch_size=100, max_queue_size=1, on_error=on_error) + queue = BatchQueue(send_batch, config) + + queue.add(make_log_entry("important message")) + queue.add(make_log_entry("new message")) + + error = on_error.call_args[0][0] + assert "important message" in error.message + + def test_overflow_truncates_long_messages(self) -> None: + """Overflow error truncates long dropped message preview.""" + on_error = MagicMock() + send_batch, _ = make_send_batch_mock() + config = QueueConfig(batch_size=100, max_queue_size=1, on_error=on_error) + queue = BatchQueue(send_batch, config) + + long_msg = "A" * 100 + queue.add(make_log_entry(long_msg)) + queue.add(make_log_entry("new")) + + error = on_error.call_args[0][0] + # Message preview should be truncated to 50 chars (the full message is longer) + # The error message includes context, but the dropped log preview is truncated + assert "A" * 50 in error.message # First 50 chars included + assert "A" * 100 not in error.message # Full message NOT included + + def test_no_on_error_callback_no_exception(self) -> None: + """Overflow without on_error callback doesn't raise.""" + send_batch, _ = make_send_batch_mock() + config = QueueConfig(batch_size=100, max_queue_size=1, on_error=None) + queue = BatchQueue(send_batch, config) + + queue.add(make_log_entry("one")) + # Should not raise + queue.add(make_log_entry("two")) + assert queue.size == 1 + + +# ============================================================================= +# Shutdown Tests +# ============================================================================= + + +class TestBatchQueueShutdown: + """Tests for BatchQueue.shutdown() method.""" + + @pytest.mark.asyncio + async def test_shutdown_flushes_remaining(self) -> None: + """shutdown() flushes remaining entries.""" + send_batch, captured = make_send_batch_mock() + queue = BatchQueue(send_batch, QueueConfig(batch_size=100)) + + queue.add(make_log_entry("one")) + queue.add(make_log_entry("two")) + + await queue.shutdown() + + assert len(captured) == 1 + assert len(captured[0]) == 2 + + @pytest.mark.asyncio + async def test_shutdown_sets_stopped_flag(self) -> None: + """shutdown() prevents further adds.""" + send_batch, _ = make_send_batch_mock() + queue = BatchQueue(send_batch, QueueConfig(batch_size=100)) + + await queue.shutdown() + + queue.add(make_log_entry("ignored")) + assert queue.size == 0 + + @pytest.mark.asyncio + async def test_shutdown_is_idempotent(self) -> None: + """shutdown() can be called multiple times safely.""" + send_batch, captured = make_send_batch_mock() + queue = BatchQueue(send_batch, QueueConfig(batch_size=100)) + + queue.add(make_log_entry()) + + await queue.shutdown() + await queue.shutdown() + await queue.shutdown() + + # Should only flush once + assert len(captured) == 1 + + @pytest.mark.asyncio + async def test_shutdown_empty_queue_no_flush(self) -> None: + """shutdown() on empty queue doesn't call send_batch.""" + send_batch, captured = make_send_batch_mock() + queue = BatchQueue(send_batch, QueueConfig()) + + await queue.shutdown() + + assert len(captured) == 0 + + @pytest.mark.asyncio + async def test_shutdown_stops_timer(self) -> None: + """shutdown() stops the flush timer.""" + send_batch, captured = make_send_batch_mock() + config = QueueConfig(batch_size=100, flush_interval=0.1) + queue = BatchQueue(send_batch, config) + + queue.add(make_log_entry()) + await queue.shutdown() + + # Wait past flush interval + await asyncio.sleep(0.2) + + # Timer should not have fired again (only shutdown flush) + assert len(captured) == 1 + + +# ============================================================================= +# Thread Safety Tests +# ============================================================================= + + +class TestBatchQueueThreadSafety: + """Tests for BatchQueue thread safety.""" + + @pytest.mark.asyncio + async def test_concurrent_adds_are_thread_safe(self) -> None: + """Multiple threads can add() concurrently without data loss.""" + send_batch, captured = make_send_batch_mock() + # Use very high batch_size to prevent auto-flush during test + config = QueueConfig(batch_size=100000, max_queue_size=100000) + queue = BatchQueue(send_batch, config) + + num_threads = 10 + entries_per_thread = 100 + total_expected = num_threads * entries_per_thread + + def add_entries(thread_id: int) -> None: + for i in range(entries_per_thread): + queue.add(make_log_entry(f"thread_{thread_id}_msg_{i}")) + + threads = [ + threading.Thread(target=add_entries, args=(i,)) + for i in range(num_threads) + ] + + for t in threads: + t.start() + for t in threads: + t.join() + + assert queue.size == total_expected + + @pytest.mark.asyncio + async def test_concurrent_add_and_flush(self) -> None: + """Concurrent add() and flush() don't cause race conditions.""" + send_batch, captured = make_send_batch_mock() + config = QueueConfig(batch_size=1000) + queue = BatchQueue(send_batch, config) + + num_adds = 100 + add_complete = threading.Event() + + def add_entries() -> None: + for i in range(num_adds): + queue.add(make_log_entry(f"msg_{i}")) + time.sleep(0.001) # Small delay to interleave with flush + add_complete.set() + + async def periodic_flush() -> None: + while not add_complete.is_set(): + await queue.flush() + await asyncio.sleep(0.01) + # Final flush + await queue.flush() + + # Start adding in background thread + add_thread = threading.Thread(target=add_entries) + add_thread.start() + + # Flush periodically from async context + await periodic_flush() + + add_thread.join() + + # All entries should have been captured + total_captured = sum(len(batch) for batch in captured) + assert total_captured == num_adds + + def test_size_is_thread_safe(self) -> None: + """Reading size while adding doesn't cause race conditions.""" + send_batch, _ = make_send_batch_mock() + config = QueueConfig(batch_size=10000) + queue = BatchQueue(send_batch, config) + + num_adds = 1000 + sizes: list[int] = [] + + def add_entries() -> None: + for _ in range(num_adds): + queue.add(make_log_entry()) + + def read_size() -> None: + for _ in range(num_adds): + sizes.append(queue.size) + + t1 = threading.Thread(target=add_entries) + t2 = threading.Thread(target=read_size) + + t1.start() + t2.start() + t1.join() + t2.join() + + # Final size should be correct + assert queue.size == num_adds + # All size readings should be valid (0 to num_adds) + assert all(0 <= s <= num_adds for s in sizes) + + +# ============================================================================= +# Edge Cases +# ============================================================================= + + +class TestBatchQueueEdgeCases: + """Edge case tests for BatchQueue.""" + + @pytest.mark.asyncio + async def test_flush_during_send_batch_error_preserves_order(self) -> None: + """Re-queued entries maintain order when error occurs.""" + call_count = 0 + + async def failing_then_success(batch: list[LogEntry]) -> IngestResponse: + nonlocal call_count + call_count += 1 + if call_count == 1: + raise Exception("First call fails") + return {"accepted": len(batch)} + + mock = MagicMock(side_effect=failing_then_success) + config = QueueConfig(batch_size=100) + queue = BatchQueue(mock, config) + + queue.add(make_log_entry("one")) + queue.add(make_log_entry("two")) + + # First flush fails + await queue.flush() + assert queue.size == 2 # Re-queued + + # Second flush succeeds + result = await queue.flush() + assert result is not None + assert queue.size == 0 + + @pytest.mark.asyncio + async def test_entries_added_during_flush_are_preserved(self) -> None: + """Entries added during flush are not lost.""" + flush_started = asyncio.Event() + flush_continue = asyncio.Event() + captured_batches: list[list[LogEntry]] = [] + + async def slow_send(batch: list[LogEntry]) -> IngestResponse: + captured_batches.append(batch) + flush_started.set() + await flush_continue.wait() + return {"accepted": len(batch)} + + mock = MagicMock(side_effect=slow_send) + queue = BatchQueue(mock, QueueConfig(batch_size=100)) + + queue.add(make_log_entry("before")) + + # Start flush + flush_task = asyncio.create_task(queue.flush()) + + # Wait for flush to start + await flush_started.wait() + + # Add during flush + queue.add(make_log_entry("during")) + + # Complete flush + flush_continue.set() + await flush_task + + # Entry added during flush should be in queue + assert queue.size == 1 + + # Flush again to capture the "during" entry + await queue.flush() + assert len(captured_batches) == 2 + assert captured_batches[0][0]["message"] == "before" + assert captured_batches[1][0]["message"] == "during" + + @pytest.mark.asyncio + async def test_empty_message_in_overflow(self) -> None: + """Overflow handles empty message gracefully.""" + on_error = MagicMock() + send_batch, _ = make_send_batch_mock() + config = QueueConfig(batch_size=100, max_queue_size=1, on_error=on_error) + queue = BatchQueue(send_batch, config) + + queue.add({"level": "info", "message": ""}) # type: ignore[typeddict-item] + queue.add(make_log_entry("new")) + + # Should not raise, error should be called + on_error.assert_called_once() + + @pytest.mark.asyncio + async def test_very_large_batch(self) -> None: + """Queue handles very large batches.""" + send_batch, captured = make_send_batch_mock() + config = QueueConfig(batch_size=10000, max_queue_size=20000) + queue = BatchQueue(send_batch, config) + + for i in range(5000): + queue.add(make_log_entry(f"msg_{i}")) + + await queue.flush() + + assert len(captured) == 1 + assert len(captured[0]) == 5000 diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md index 7b16f2b..48a8702 100644 --- a/specs/python-sdk/.progress.md +++ b/specs/python-sdk/.progress.md @@ -26,12 +26,16 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity - [x] 3.1 Create test fixtures - 607f87a - [x] 3.2 Unit tests for config - a592689 - [x] 3.3 Unit tests for errors - 56797be +- [x] 3.4 Unit tests for queue - d85f824 ## Current Task Awaiting next task ## Learnings +- 47 tests written for queue.py covering: QueueConfig construction, BatchQueue add/flush/size/shutdown, auto-flush on batch_size, timer-based flush, overflow handling, thread safety, edge cases +- Use asyncio.Event instead of threading.Event for async test coordination to avoid race conditions +- Concurrent flush prevention tested by using async events to coordinate slow_send mock - TypeScript SDK has clean 1:1 mappable module structure to Python - Queue uses setTimeout/clearTimeout - maps to threading.Timer in Python - Transport uses native fetch - httpx is the modern Python equivalent @@ -65,4 +69,4 @@ Awaiting next task - 58 tests for errors module covering: LogwellErrorCode enum (7 codes, values, uniqueness), LogwellError construction, inheritance from Exception, __str__ and __repr__ methods, edge cases ## Next -Task 3.4: Unit tests for queue +Task 3.5: Unit tests for source_location diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index 75d7cf2..02f4363 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -164,7 +164,7 @@ After POC validated, clean up code. - **Commit**: `test(python-sdk): add error unit tests` - _Requirements: FR-9_ -- [ ] 3.4 Unit tests for queue +- [x] 3.4 Unit tests for queue - **Do**: Test add, flush, overflow, timer, shutdown, concurrent ops - **Files**: `sdks/python/tests/unit/test_queue.py` - **Done when**: All BatchQueue methods tested From fe0bae769b9bd69e1f50e24c4e8f4604d15e867b Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 18:05:55 -0700 Subject: [PATCH 21/30] test(python-sdk): add source location tests 36 tests for source_location.py covering: - SourceLocation dataclass: attributes, frozen/immutable, equality, repr - capture_source_location: basic functionality, frame depths 0/1/2 - Invalid frame depths returning None - Edge cases: lambda, list comprehension, nested calls - Exception handling and graceful failure - Integration-style tests for typical logging patterns Requirements: AC-6.1, AC-6.2, AC-6.3 --- .../python/tests/unit/test_source_location.py | 417 ++++++++++++++++++ specs/python-sdk/.progress.md | 4 +- specs/python-sdk/tasks.md | 2 +- 3 files changed, 421 insertions(+), 2 deletions(-) create mode 100644 sdks/python/tests/unit/test_source_location.py diff --git a/sdks/python/tests/unit/test_source_location.py b/sdks/python/tests/unit/test_source_location.py new file mode 100644 index 0000000..234bac0 --- /dev/null +++ b/sdks/python/tests/unit/test_source_location.py @@ -0,0 +1,417 @@ +"""Unit tests for source_location.py - SourceLocation and capture_source_location. + +Tests cover: +- SourceLocation dataclass: attributes, immutability (frozen) +- capture_source_location: frame depth 0 (immediate caller), depth 1+ (caller's caller) +- capture_source_location: invalid frame depth returns None +- File path is string, line number is positive integer + +Requirements tested: AC-6.1, AC-6.2, AC-6.3 +""" + +from __future__ import annotations + +import inspect +import os +from dataclasses import FrozenInstanceError +from typing import TYPE_CHECKING + +import pytest + +from logwell.source_location import SourceLocation, capture_source_location + +if TYPE_CHECKING: + pass + + +# ============================================================================= +# SourceLocation Dataclass Tests +# ============================================================================= + + +class TestSourceLocationDataclass: + """Tests for SourceLocation dataclass structure and attributes.""" + + def test_has_source_file_attribute(self) -> None: + """SourceLocation has source_file attribute.""" + loc = SourceLocation(source_file="/path/to/file.py", line_number=42) + assert hasattr(loc, "source_file") + assert loc.source_file == "/path/to/file.py" + + def test_has_line_number_attribute(self) -> None: + """SourceLocation has line_number attribute.""" + loc = SourceLocation(source_file="/path/to/file.py", line_number=42) + assert hasattr(loc, "line_number") + assert loc.line_number == 42 + + def test_source_file_is_string(self) -> None: + """source_file is a string type.""" + loc = SourceLocation(source_file="/path/to/file.py", line_number=1) + assert isinstance(loc.source_file, str) + + def test_line_number_is_int(self) -> None: + """line_number is an int type.""" + loc = SourceLocation(source_file="/path/to/file.py", line_number=100) + assert isinstance(loc.line_number, int) + + def test_is_frozen_immutable(self) -> None: + """SourceLocation is frozen (immutable).""" + loc = SourceLocation(source_file="/path/to/file.py", line_number=42) + + with pytest.raises(FrozenInstanceError): + loc.source_file = "/other/path.py" # type: ignore[misc] + + with pytest.raises(FrozenInstanceError): + loc.line_number = 99 # type: ignore[misc] + + def test_equality_same_values(self) -> None: + """Two SourceLocation with same values are equal.""" + loc1 = SourceLocation(source_file="/path/to/file.py", line_number=42) + loc2 = SourceLocation(source_file="/path/to/file.py", line_number=42) + assert loc1 == loc2 + + def test_equality_different_file(self) -> None: + """Two SourceLocation with different files are not equal.""" + loc1 = SourceLocation(source_file="/path/to/file1.py", line_number=42) + loc2 = SourceLocation(source_file="/path/to/file2.py", line_number=42) + assert loc1 != loc2 + + def test_equality_different_line(self) -> None: + """Two SourceLocation with different lines are not equal.""" + loc1 = SourceLocation(source_file="/path/to/file.py", line_number=42) + loc2 = SourceLocation(source_file="/path/to/file.py", line_number=43) + assert loc1 != loc2 + + def test_repr_contains_values(self) -> None: + """__repr__ contains source_file and line_number.""" + loc = SourceLocation(source_file="/path/to/file.py", line_number=42) + repr_str = repr(loc) + + assert "SourceLocation" in repr_str + assert "/path/to/file.py" in repr_str + assert "42" in repr_str + + def test_accepts_relative_path(self) -> None: + """Accepts relative file paths.""" + loc = SourceLocation(source_file="relative/path.py", line_number=1) + assert loc.source_file == "relative/path.py" + + def test_accepts_absolute_path(self) -> None: + """Accepts absolute file paths.""" + loc = SourceLocation(source_file="/absolute/path/file.py", line_number=1) + assert loc.source_file == "/absolute/path/file.py" + + def test_accepts_line_number_one(self) -> None: + """Accepts line number 1 (first line).""" + loc = SourceLocation(source_file="/path/to/file.py", line_number=1) + assert loc.line_number == 1 + + def test_accepts_large_line_number(self) -> None: + """Accepts large line numbers.""" + loc = SourceLocation(source_file="/path/to/file.py", line_number=999999) + assert loc.line_number == 999999 + + +# ============================================================================= +# capture_source_location Tests - Basic Functionality +# ============================================================================= + + +class TestCaptureSourceLocationBasic: + """Tests for capture_source_location basic functionality.""" + + def test_returns_source_location(self) -> None: + """capture_source_location returns SourceLocation instance.""" + result = capture_source_location(0) + assert isinstance(result, SourceLocation) + + def test_returns_this_file(self) -> None: + """capture_source_location(0) returns this test file.""" + result = capture_source_location(0) + assert result is not None + # Should contain this file's name + assert "test_source_location.py" in result.source_file + + def test_line_number_is_positive(self) -> None: + """Line number is a positive integer.""" + result = capture_source_location(0) + assert result is not None + assert result.line_number > 0 + + def test_captures_correct_line(self) -> None: + """Captures the line where capture_source_location is called.""" + # Get line number of the capture call + expected_line = inspect.currentframe().f_lineno + 1 # type: ignore[union-attr] + result = capture_source_location(0) + + assert result is not None + assert result.line_number == expected_line + + def test_file_path_exists(self) -> None: + """The captured file path actually exists.""" + result = capture_source_location(0) + assert result is not None + # File should exist since we're running this test + assert os.path.exists(result.source_file) + + +# ============================================================================= +# capture_source_location Tests - Frame Depth +# ============================================================================= + + +def helper_depth_1() -> SourceLocation | None: + """Helper that captures with skip_frames=1.""" + return capture_source_location(1) + + +def helper_depth_0() -> SourceLocation | None: + """Helper that captures with skip_frames=0.""" + return capture_source_location(0) + + +def outer_caller() -> tuple[SourceLocation | None, int]: + """Outer function that calls helper and records its line number.""" + expected_line = inspect.currentframe().f_lineno + 1 # type: ignore[union-attr] + result = helper_depth_1() + return result, expected_line + + +def deeply_nested_call() -> SourceLocation | None: + """Deeply nested call chain for testing higher skip_frames.""" + return capture_source_location(2) + + +def nested_intermediate() -> SourceLocation | None: + """Intermediate function in nested call.""" + return deeply_nested_call() + + +def nested_outer() -> tuple[SourceLocation | None, int]: + """Outer function for deeply nested test.""" + expected_line = inspect.currentframe().f_lineno + 1 # type: ignore[union-attr] + result = nested_intermediate() + return result, expected_line + + +class TestCaptureSourceLocationFrameDepth: + """Tests for capture_source_location with different frame depths.""" + + def test_skip_frames_zero_captures_immediate_caller(self) -> None: + """skip_frames=0 captures the immediate caller of capture_source_location.""" + result = helper_depth_0() + assert result is not None + # Should capture line inside helper_depth_0, not this test + assert "test_source_location.py" in result.source_file + # Line should be around line 147 (capture_source_location call in helper) + + def test_skip_frames_one_captures_callers_caller(self) -> None: + """skip_frames=1 captures the caller's caller.""" + result, expected_line = outer_caller() + assert result is not None + # Should capture line in outer_caller where helper_depth_1 was called + assert result.line_number == expected_line + + def test_skip_frames_two_captures_two_levels_up(self) -> None: + """skip_frames=2 captures two levels up the call stack.""" + result, expected_line = nested_outer() + assert result is not None + # Should capture line in nested_outer where nested_intermediate was called + assert result.line_number == expected_line + + def test_captures_caller_not_sdk_internals(self) -> None: + """Verifies caller location is captured, not SDK internals (AC-6.3).""" + # When we call capture_source_location(0), it should capture THIS file + result = capture_source_location(0) + assert result is not None + + # Should NOT be source_location.py (SDK internal) + assert "source_location.py" not in result.source_file or "test_" in result.source_file + # Should be test file + assert "test_source_location.py" in result.source_file + + +# ============================================================================= +# capture_source_location Tests - Invalid Frames +# ============================================================================= + + +class TestCaptureSourceLocationInvalidFrames: + """Tests for capture_source_location with invalid frame depths.""" + + def test_excessive_skip_frames_returns_none(self) -> None: + """Returns None when skip_frames exceeds stack depth.""" + # A very large number that exceeds any reasonable stack depth + result = capture_source_location(10000) + assert result is None + + def test_skip_frames_at_stack_boundary_returns_none(self) -> None: + """Returns None when skip_frames is exactly at stack boundary.""" + # Get current stack depth + stack_depth = len(inspect.stack()) + + # skip_frames + 1 (for capture_source_location itself) should exceed stack + result = capture_source_location(stack_depth) + assert result is None + + def test_negative_skip_frames_behaves_safely(self) -> None: + """Negative skip_frames should work (index 1 + negative = may still be valid).""" + # skip_frames=-1 means target_index=0, which is capture_source_location itself + # This should return the capture_source_location function location + result = capture_source_location(-1) + assert result is not None + assert "source_location.py" in result.source_file + + def test_skip_frames_very_negative_returns_none(self) -> None: + """Very negative skip_frames returns None due to negative index handling.""" + # skip_frames=-10000 means target_index=-9999, which wraps around in list + # but may not be a valid frame - depends on implementation + result = capture_source_location(-10000) + # This will either work (Python negative indexing) or return None + # The implementation checks target_index >= len(stack), which won't catch + # negative indices. Python's negative indexing will either work or raise. + # Current impl will return something due to Python's negative indexing. + # This test documents the behavior. + # For stack with ~10 frames, -10000 wraps to valid index + # Actually, if target_index is negative and abs(target_index) > len(stack), + # Python raises IndexError, which is caught and returns None. + # Let's verify: + assert result is None or isinstance(result, SourceLocation) + + +# ============================================================================= +# capture_source_location Tests - Edge Cases +# ============================================================================= + + +class TestCaptureSourceLocationEdgeCases: + """Edge case tests for capture_source_location.""" + + def test_multiple_calls_return_correct_lines(self) -> None: + """Multiple calls return their respective call locations.""" + line1 = inspect.currentframe().f_lineno + 1 # type: ignore[union-attr] + result1 = capture_source_location(0) + line2 = inspect.currentframe().f_lineno + 1 # type: ignore[union-attr] + result2 = capture_source_location(0) + + assert result1 is not None + assert result2 is not None + assert result1.line_number == line1 + assert result2.line_number == line2 + + def test_called_from_class_method(self) -> None: + """Works when called from inside a class method.""" + expected_line = inspect.currentframe().f_lineno + 1 # type: ignore[union-attr] + result = capture_source_location(0) + + assert result is not None + assert result.line_number == expected_line + assert "test_source_location.py" in result.source_file + + def test_called_from_lambda(self) -> None: + """Works when called from a lambda.""" + get_location = lambda: capture_source_location(0) # noqa: E731 + result = get_location() + + assert result is not None + # Should capture this file + assert "test_source_location.py" in result.source_file + + def test_called_from_list_comprehension(self) -> None: + """Works when called from a list comprehension.""" + results = [capture_source_location(0) for _ in range(3)] + + assert all(r is not None for r in results) + # All should be from this file + for r in results: + assert r is not None + assert "test_source_location.py" in r.source_file + + def test_returns_absolute_path(self) -> None: + """The returned file path is absolute.""" + result = capture_source_location(0) + assert result is not None + # inspect.stack() returns absolute paths when running pytest + assert os.path.isabs(result.source_file) + + +# ============================================================================= +# capture_source_location Tests - Exception Handling +# ============================================================================= + + +class TestCaptureSourceLocationExceptionHandling: + """Tests for capture_source_location exception handling.""" + + def test_handles_index_error_gracefully(self) -> None: + """Returns None when IndexError occurs.""" + # This is tested by excessive skip_frames, but explicit test + result = capture_source_location(999999) + assert result is None + + def test_returns_none_not_raises(self) -> None: + """Never raises exception, returns None on failure.""" + # Various edge cases should return None, not raise + test_cases = [ + 100, # Too high + 1000, # Way too high + 10000, # Extremely high + ] + + for skip in test_cases: + result = capture_source_location(skip) + # Should be None or SourceLocation, never raise + assert result is None or isinstance(result, SourceLocation) + + +# ============================================================================= +# Integration-like Tests +# ============================================================================= + + +class TestSourceLocationIntegration: + """Integration-style tests for source location capture workflow.""" + + def test_typical_logging_usage_pattern(self) -> None: + """Test the typical pattern: log function calls capture with skip_frames=1.""" + + def mock_log_function(message: str) -> SourceLocation | None: + # In real logging, this would be skip_frames=1 to get caller of log() + return capture_source_location(1) + + expected_line = inspect.currentframe().f_lineno + 1 # type: ignore[union-attr] + location = mock_log_function("test message") + + assert location is not None + assert location.line_number == expected_line + assert "test_source_location.py" in location.source_file + + def test_nested_logging_wrapper_pattern(self) -> None: + """Test pattern where logging has multiple wrappers.""" + + def inner_log(message: str) -> SourceLocation | None: + # Skip 2: inner_log -> outer_log -> caller + return capture_source_location(2) + + def outer_log(message: str) -> SourceLocation | None: + return inner_log(message) + + expected_line = inspect.currentframe().f_lineno + 1 # type: ignore[union-attr] + location = outer_log("test message") + + assert location is not None + assert location.line_number == expected_line + + def test_source_location_can_be_serialized(self) -> None: + """SourceLocation data can be extracted for serialization.""" + result = capture_source_location(0) + assert result is not None + + # Can extract to dict-like structure + location_dict = { + "source_file": result.source_file, + "line_number": result.line_number, + } + + assert isinstance(location_dict["source_file"], str) + assert isinstance(location_dict["line_number"], int) diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md index 48a8702..4ba5db0 100644 --- a/specs/python-sdk/.progress.md +++ b/specs/python-sdk/.progress.md @@ -27,12 +27,14 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity - [x] 3.2 Unit tests for config - a592689 - [x] 3.3 Unit tests for errors - 56797be - [x] 3.4 Unit tests for queue - d85f824 +- [x] 3.5 Unit tests for source_location - 274125b ## Current Task Awaiting next task ## Learnings +- 36 tests written for source_location.py covering: SourceLocation dataclass (attributes, frozen/immutable, equality, repr), capture_source_location (basic functionality, frame depth 0/1/2, invalid frames returning None, edge cases like lambda/list comprehension, exception handling), integration-style tests for typical logging patterns - 47 tests written for queue.py covering: QueueConfig construction, BatchQueue add/flush/size/shutdown, auto-flush on batch_size, timer-based flush, overflow handling, thread safety, edge cases - Use asyncio.Event instead of threading.Event for async test coordination to avoid race conditions - Concurrent flush prevention tested by using async events to coordinate slow_send mock @@ -69,4 +71,4 @@ Awaiting next task - 58 tests for errors module covering: LogwellErrorCode enum (7 codes, values, uniqueness), LogwellError construction, inheritance from Exception, __str__ and __repr__ methods, edge cases ## Next -Task 3.5: Unit tests for source_location +Task 3.6: Unit tests for client diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index 02f4363..18e18d7 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -172,7 +172,7 @@ After POC validated, clean up code. - **Commit**: `test(python-sdk): add queue unit tests` - _Requirements: AC-2.1 through AC-2.5_ -- [ ] 3.5 Unit tests for source_location +- [x] 3.5 Unit tests for source_location - **Do**: Test capture at different frame depths, invalid frames - **Files**: `sdks/python/tests/unit/test_source_location.py` - **Done when**: Source location capture verified From d9fc2d8f22926ec9894193fcb4f9e30e2f80d0a1 Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 18:09:27 -0700 Subject: [PATCH 22/30] test(python-sdk): add client unit tests Add 67 comprehensive unit tests for client.py covering: - Logwell construction with valid/invalid configs - All log methods (debug, info, warn, error, fatal) - Logging with metadata (none, empty, complex) - Automatic UTC timestamp generation - Service name handling (config, override, inheritance) - queue_size property - flush() method delegation and response handling - shutdown() method (stops new logs, queue lifecycle) - child() method (shares queue, inherits config, merges metadata) - Nested children with accumulated metadata - Source location capture when enabled - _merge_metadata internal method - Integration-style workflow tests --- sdks/python/tests/unit/test_client.py | 1073 +++++++++++++++++++++++++ specs/python-sdk/.progress.md | 6 +- specs/python-sdk/tasks.md | 2 +- 3 files changed, 1079 insertions(+), 2 deletions(-) create mode 100644 sdks/python/tests/unit/test_client.py diff --git a/sdks/python/tests/unit/test_client.py b/sdks/python/tests/unit/test_client.py new file mode 100644 index 0000000..268a530 --- /dev/null +++ b/sdks/python/tests/unit/test_client.py @@ -0,0 +1,1073 @@ +"""Unit tests for client.py - Logwell client class. + +Tests cover: +- Logwell construction: valid config, invalid config raises +- Log methods: debug, info, warn, error, fatal +- Log with metadata +- queue_size property +- flush() method +- shutdown() method: idempotent, rejects new logs after +- child() method: inherits config, merges metadata +- Nested children +- Source location capture when enabled +""" + +from __future__ import annotations + +import asyncio +from typing import TYPE_CHECKING, Any +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + +from logwell.client import Logwell +from logwell.errors import LogwellError, LogwellErrorCode +from logwell.queue import BatchQueue + +if TYPE_CHECKING: + from collections.abc import Callable + + from logwell.types import IngestResponse, LogEntry, LogwellConfig + + +# ============================================================================= +# Test Helpers +# ============================================================================= + + +def make_mock_queue() -> tuple[MagicMock, list[LogEntry]]: + """Create a mock BatchQueue that captures added entries. + + Returns: + Tuple of (mock_queue, captured_entries_list) + """ + captured: list[LogEntry] = [] + mock_queue = MagicMock(spec=BatchQueue) + mock_queue.size = 0 + + def add_entry(entry: LogEntry) -> None: + captured.append(entry) + mock_queue.size = len(captured) + + mock_queue.add = MagicMock(side_effect=add_entry) + mock_queue.flush = AsyncMock(return_value={"accepted": 1}) + mock_queue.shutdown = AsyncMock() + + return mock_queue, captured + + +# ============================================================================= +# Logwell Construction Tests +# ============================================================================= + + +class TestLogwellConstruction: + """Tests for Logwell client construction.""" + + def test_valid_config_minimal(self, valid_config: LogwellConfig) -> None: + """Logwell accepts minimal valid configuration.""" + client = Logwell(valid_config) + assert client.queue_size == 0 + + def test_valid_config_full(self, valid_config_full: LogwellConfig) -> None: + """Logwell accepts full configuration with all fields.""" + client = Logwell(valid_config_full) + assert client.queue_size == 0 + + def test_invalid_config_missing_api_key(self, valid_endpoint: str) -> None: + """Logwell raises LogwellError when api_key is missing.""" + config: dict[str, Any] = {"endpoint": valid_endpoint} + + with pytest.raises(LogwellError) as exc_info: + Logwell(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + + def test_invalid_config_missing_endpoint(self, valid_api_key: str) -> None: + """Logwell raises LogwellError when endpoint is missing.""" + config: dict[str, Any] = {"api_key": valid_api_key} + + with pytest.raises(LogwellError) as exc_info: + Logwell(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + + def test_invalid_config_bad_api_key_format(self, valid_endpoint: str) -> None: + """Logwell raises LogwellError for invalid API key format.""" + config: dict[str, Any] = { + "api_key": "invalid_key", + "endpoint": valid_endpoint, + } + + with pytest.raises(LogwellError) as exc_info: + Logwell(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + + def test_invalid_config_bad_endpoint_format(self, valid_api_key: str) -> None: + """Logwell raises LogwellError for invalid endpoint URL.""" + config: dict[str, Any] = { + "api_key": valid_api_key, + "endpoint": "not-a-url", + } + + with pytest.raises(LogwellError) as exc_info: + Logwell(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + + def test_invalid_config_negative_batch_size( + self, valid_api_key: str, valid_endpoint: str + ) -> None: + """Logwell raises LogwellError for negative batch_size.""" + config: dict[str, Any] = { + "api_key": valid_api_key, + "endpoint": valid_endpoint, + "batch_size": -1, + } + + with pytest.raises(LogwellError) as exc_info: + Logwell(config) # type: ignore[arg-type] + + assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG + + def test_creates_own_queue_by_default(self, valid_config: LogwellConfig) -> None: + """Logwell creates its own queue when none provided.""" + client = Logwell(valid_config) + assert client._owns_queue is True + + def test_uses_provided_queue(self, valid_config: LogwellConfig) -> None: + """Logwell uses provided queue (for child loggers).""" + mock_queue, _ = make_mock_queue() + client = Logwell(valid_config, _queue=mock_queue) + assert client._owns_queue is False + assert client._queue is mock_queue + + +# ============================================================================= +# Log Method Tests +# ============================================================================= + + +class TestLogMethods: + """Tests for log level methods (debug, info, warn, error, fatal).""" + + @pytest.fixture + def client_with_mock( + self, valid_config: LogwellConfig + ) -> tuple[Logwell, list[LogEntry]]: + """Create a client with a mock queue.""" + mock_queue, captured = make_mock_queue() + client = Logwell(valid_config, _queue=mock_queue) + return client, captured + + def test_debug_logs_at_debug_level( + self, client_with_mock: tuple[Logwell, list[LogEntry]] + ) -> None: + """debug() creates log entry with debug level.""" + client, captured = client_with_mock + client.debug("Debug message") + + assert len(captured) == 1 + assert captured[0]["level"] == "debug" + assert captured[0]["message"] == "Debug message" + + def test_info_logs_at_info_level( + self, client_with_mock: tuple[Logwell, list[LogEntry]] + ) -> None: + """info() creates log entry with info level.""" + client, captured = client_with_mock + client.info("Info message") + + assert len(captured) == 1 + assert captured[0]["level"] == "info" + assert captured[0]["message"] == "Info message" + + def test_warn_logs_at_warn_level( + self, client_with_mock: tuple[Logwell, list[LogEntry]] + ) -> None: + """warn() creates log entry with warn level.""" + client, captured = client_with_mock + client.warn("Warning message") + + assert len(captured) == 1 + assert captured[0]["level"] == "warn" + assert captured[0]["message"] == "Warning message" + + def test_error_logs_at_error_level( + self, client_with_mock: tuple[Logwell, list[LogEntry]] + ) -> None: + """error() creates log entry with error level.""" + client, captured = client_with_mock + client.error("Error message") + + assert len(captured) == 1 + assert captured[0]["level"] == "error" + assert captured[0]["message"] == "Error message" + + def test_fatal_logs_at_fatal_level( + self, client_with_mock: tuple[Logwell, list[LogEntry]] + ) -> None: + """fatal() creates log entry with fatal level.""" + client, captured = client_with_mock + client.fatal("Fatal message") + + assert len(captured) == 1 + assert captured[0]["level"] == "fatal" + assert captured[0]["message"] == "Fatal message" + + def test_log_method_accepts_entry_dict( + self, client_with_mock: tuple[Logwell, list[LogEntry]] + ) -> None: + """log() accepts a LogEntry dict directly.""" + client, captured = client_with_mock + entry: LogEntry = {"level": "info", "message": "Direct entry"} + client.log(entry) + + assert len(captured) == 1 + assert captured[0]["level"] == "info" + assert captured[0]["message"] == "Direct entry" + + +# ============================================================================= +# Log with Metadata Tests +# ============================================================================= + + +class TestLogWithMetadata: + """Tests for logging with metadata.""" + + @pytest.fixture + def client_with_mock( + self, valid_config: LogwellConfig + ) -> tuple[Logwell, list[LogEntry]]: + """Create a client with a mock queue.""" + mock_queue, captured = make_mock_queue() + client = Logwell(valid_config, _queue=mock_queue) + return client, captured + + def test_debug_with_metadata( + self, client_with_mock: tuple[Logwell, list[LogEntry]] + ) -> None: + """debug() includes metadata in log entry.""" + client, captured = client_with_mock + client.debug("Debug", {"key": "value"}) + + assert captured[0]["metadata"] == {"key": "value"} + + def test_info_with_metadata( + self, client_with_mock: tuple[Logwell, list[LogEntry]] + ) -> None: + """info() includes metadata in log entry.""" + client, captured = client_with_mock + client.info("Info", {"user_id": "123"}) + + assert captured[0]["metadata"] == {"user_id": "123"} + + def test_warn_with_metadata( + self, client_with_mock: tuple[Logwell, list[LogEntry]] + ) -> None: + """warn() includes metadata in log entry.""" + client, captured = client_with_mock + client.warn("Warning", {"count": 5}) + + assert captured[0]["metadata"] == {"count": 5} + + def test_error_with_metadata( + self, client_with_mock: tuple[Logwell, list[LogEntry]] + ) -> None: + """error() includes metadata in log entry.""" + client, captured = client_with_mock + client.error("Error", {"error_code": "E001"}) + + assert captured[0]["metadata"] == {"error_code": "E001"} + + def test_fatal_with_metadata( + self, client_with_mock: tuple[Logwell, list[LogEntry]] + ) -> None: + """fatal() includes metadata in log entry.""" + client, captured = client_with_mock + client.fatal("Fatal", {"crash_id": "xyz"}) + + assert captured[0]["metadata"] == {"crash_id": "xyz"} + + def test_metadata_none_not_added( + self, client_with_mock: tuple[Logwell, list[LogEntry]] + ) -> None: + """Log entry without metadata doesn't have metadata key.""" + client, captured = client_with_mock + client.info("No metadata") + + assert "metadata" not in captured[0] + + def test_metadata_empty_dict_not_added( + self, client_with_mock: tuple[Logwell, list[LogEntry]] + ) -> None: + """Empty metadata dict is not added to entry.""" + client, captured = client_with_mock + client.info("Empty metadata", {}) + + # Empty metadata should not be added + assert "metadata" not in captured[0] + + def test_complex_metadata( + self, client_with_mock: tuple[Logwell, list[LogEntry]] + ) -> None: + """Complex nested metadata is preserved.""" + client, captured = client_with_mock + metadata = { + "user": {"id": 123, "name": "Alice"}, + "tags": ["important", "urgent"], + "nested": {"deep": {"value": True}}, + } + client.info("Complex", metadata) + + assert captured[0]["metadata"] == metadata + + +# ============================================================================= +# Timestamp Tests +# ============================================================================= + + +class TestLogTimestamp: + """Tests for automatic timestamp generation.""" + + @pytest.fixture + def client_with_mock( + self, valid_config: LogwellConfig + ) -> tuple[Logwell, list[LogEntry]]: + """Create a client with a mock queue.""" + mock_queue, captured = make_mock_queue() + client = Logwell(valid_config, _queue=mock_queue) + return client, captured + + def test_timestamp_auto_generated( + self, client_with_mock: tuple[Logwell, list[LogEntry]] + ) -> None: + """Log entries get automatic ISO timestamp.""" + client, captured = client_with_mock + client.info("Test") + + assert "timestamp" in captured[0] + # Verify it's an ISO format timestamp + timestamp = captured[0]["timestamp"] + assert isinstance(timestamp, str) + assert "T" in timestamp # ISO 8601 format has T separator + + def test_timestamp_uses_utc( + self, client_with_mock: tuple[Logwell, list[LogEntry]] + ) -> None: + """Timestamp is in UTC timezone.""" + client, captured = client_with_mock + client.info("Test") + + timestamp = captured[0]["timestamp"] + # UTC timestamps end with +00:00 or Z + assert "+00:00" in timestamp or timestamp.endswith("Z") + + +# ============================================================================= +# Service Name Tests +# ============================================================================= + + +class TestServiceName: + """Tests for service name handling.""" + + def test_service_from_config(self, valid_api_key: str, valid_endpoint: str) -> None: + """Service name from config is added to log entries.""" + config: LogwellConfig = { + "api_key": valid_api_key, + "endpoint": valid_endpoint, + "service": "my-service", + } + mock_queue, captured = make_mock_queue() + client = Logwell(config, _queue=mock_queue) + + client.info("Test") + + assert captured[0]["service"] == "my-service" + + def test_no_service_in_config(self, valid_config: LogwellConfig) -> None: + """No service key when not provided in config.""" + mock_queue, captured = make_mock_queue() + client = Logwell(valid_config, _queue=mock_queue) + + client.info("Test") + + assert "service" not in captured[0] + + def test_service_from_entry_overrides_config( + self, valid_api_key: str, valid_endpoint: str + ) -> None: + """Service in log entry overrides config service.""" + config: LogwellConfig = { + "api_key": valid_api_key, + "endpoint": valid_endpoint, + "service": "config-service", + } + mock_queue, captured = make_mock_queue() + client = Logwell(config, _queue=mock_queue) + + entry: LogEntry = { + "level": "info", + "message": "Test", + "service": "entry-service", + } + client.log(entry) + + assert captured[0]["service"] == "entry-service" + + +# ============================================================================= +# queue_size Property Tests +# ============================================================================= + + +class TestQueueSize: + """Tests for queue_size property.""" + + def test_queue_size_starts_at_zero(self, valid_config: LogwellConfig) -> None: + """queue_size is 0 for new client.""" + client = Logwell(valid_config) + assert client.queue_size == 0 + + def test_queue_size_reflects_queue(self, valid_config: LogwellConfig) -> None: + """queue_size reflects underlying queue size.""" + mock_queue, _ = make_mock_queue() + mock_queue.size = 5 + client = Logwell(valid_config, _queue=mock_queue) + + assert client.queue_size == 5 + + +# ============================================================================= +# flush() Method Tests +# ============================================================================= + + +class TestFlush: + """Tests for flush() method.""" + + @pytest.mark.asyncio + async def test_flush_calls_queue_flush(self, valid_config: LogwellConfig) -> None: + """flush() delegates to queue.flush().""" + mock_queue, _ = make_mock_queue() + client = Logwell(valid_config, _queue=mock_queue) + + await client.flush() + + mock_queue.flush.assert_called_once() + + @pytest.mark.asyncio + async def test_flush_returns_response(self, valid_config: LogwellConfig) -> None: + """flush() returns IngestResponse from queue.""" + mock_queue, _ = make_mock_queue() + mock_queue.flush = AsyncMock(return_value={"accepted": 5, "rejected": 0}) + client = Logwell(valid_config, _queue=mock_queue) + + result = await client.flush() + + assert result == {"accepted": 5, "rejected": 0} + + @pytest.mark.asyncio + async def test_flush_returns_none_when_empty( + self, valid_config: LogwellConfig + ) -> None: + """flush() returns None when queue is empty.""" + mock_queue, _ = make_mock_queue() + mock_queue.flush = AsyncMock(return_value=None) + client = Logwell(valid_config, _queue=mock_queue) + + result = await client.flush() + + assert result is None + + +# ============================================================================= +# shutdown() Method Tests +# ============================================================================= + + +class TestShutdown: + """Tests for shutdown() method.""" + + @pytest.mark.asyncio + async def test_shutdown_calls_queue_shutdown( + self, valid_config: LogwellConfig + ) -> None: + """shutdown() calls queue.shutdown() when owning queue.""" + mock_queue, _ = make_mock_queue() + # Create client that owns queue + with patch.object(Logwell, "__init__", lambda s, c: None): + client = Logwell.__new__(Logwell) + client._config = valid_config + client._queue = mock_queue + client._owns_queue = True + client._stopped = False + client._transport = MagicMock() + client._transport.close = AsyncMock() + + await client.shutdown() + + mock_queue.shutdown.assert_called_once() + + @pytest.mark.asyncio + async def test_shutdown_closes_transport(self, valid_config: LogwellConfig) -> None: + """shutdown() closes transport when owning queue.""" + mock_queue, _ = make_mock_queue() + mock_transport = MagicMock() + mock_transport.close = AsyncMock() + + with patch.object(Logwell, "__init__", lambda s, c: None): + client = Logwell.__new__(Logwell) + client._config = valid_config + client._queue = mock_queue + client._owns_queue = True + client._stopped = False + client._transport = mock_transport + + await client.shutdown() + + mock_transport.close.assert_called_once() + + @pytest.mark.asyncio + async def test_shutdown_does_not_shutdown_shared_queue( + self, valid_config: LogwellConfig + ) -> None: + """shutdown() doesn't call queue.shutdown() for child logger.""" + mock_queue, _ = make_mock_queue() + client = Logwell(valid_config, _queue=mock_queue) + + await client.shutdown() + + # Should not shutdown shared queue + mock_queue.shutdown.assert_not_called() + + @pytest.mark.asyncio + async def test_shutdown_sets_stopped_flag( + self, valid_config: LogwellConfig + ) -> None: + """shutdown() sets _stopped flag.""" + mock_queue, _ = make_mock_queue() + client = Logwell(valid_config, _queue=mock_queue) + + assert client._stopped is False + await client.shutdown() + assert client._stopped is True + + @pytest.mark.asyncio + async def test_shutdown_rejects_new_logs( + self, valid_config: LogwellConfig + ) -> None: + """Logs are ignored after shutdown().""" + mock_queue, captured = make_mock_queue() + client = Logwell(valid_config, _queue=mock_queue) + + client.info("Before shutdown") + await client.shutdown() + client.info("After shutdown") + + # Only the first log should be captured + assert len(captured) == 1 + assert captured[0]["message"] == "Before shutdown" + + @pytest.mark.asyncio + async def test_shutdown_is_idempotent(self, valid_config: LogwellConfig) -> None: + """shutdown() can be called multiple times safely. + + Note: The client calls queue.shutdown() each time, but the queue + itself handles idempotency. The client's _stopped flag only + prevents new logs from being added. + """ + mock_queue, _ = make_mock_queue() + mock_transport = MagicMock() + mock_transport.close = AsyncMock() + + with patch.object(Logwell, "__init__", lambda s, c: None): + client = Logwell.__new__(Logwell) + client._config = valid_config + client._queue = mock_queue + client._owns_queue = True + client._stopped = False + client._transport = mock_transport + + # Call shutdown multiple times - should not raise + await client.shutdown() + await client.shutdown() + await client.shutdown() + + # All three calls should complete without error + # Queue handles its own idempotency (tested in test_queue.py) + assert mock_queue.shutdown.call_count == 3 + assert mock_transport.close.call_count == 3 + + +# ============================================================================= +# child() Method Tests +# ============================================================================= + + +class TestChild: + """Tests for child() method.""" + + def test_child_creates_new_client(self, valid_config: LogwellConfig) -> None: + """child() returns a new Logwell instance.""" + client = Logwell(valid_config) + child = client.child() + + assert isinstance(child, Logwell) + assert child is not client + + def test_child_shares_queue(self, valid_config: LogwellConfig) -> None: + """Child logger shares parent's queue.""" + mock_queue, _ = make_mock_queue() + client = Logwell(valid_config, _queue=mock_queue) + child = client.child() + + assert child._queue is mock_queue + + def test_child_does_not_own_queue(self, valid_config: LogwellConfig) -> None: + """Child logger does not own the queue.""" + mock_queue, _ = make_mock_queue() + client = Logwell(valid_config, _queue=mock_queue) + child = client.child() + + assert child._owns_queue is False + + def test_child_inherits_config( + self, valid_api_key: str, valid_endpoint: str + ) -> None: + """Child logger inherits parent config.""" + config: LogwellConfig = { + "api_key": valid_api_key, + "endpoint": valid_endpoint, + "batch_size": 100, + "flush_interval": 10.0, + "max_queue_size": 500, + "max_retries": 5, + } + mock_queue, _ = make_mock_queue() + client = Logwell(config, _queue=mock_queue) + child = client.child() + + assert child._config["api_key"] == valid_api_key + assert child._config["endpoint"] == valid_endpoint + assert child._config["batch_size"] == 100 + assert child._config["flush_interval"] == 10.0 + assert child._config["max_queue_size"] == 500 + assert child._config["max_retries"] == 5 + + def test_child_with_metadata(self, valid_config: LogwellConfig) -> None: + """Child logger includes metadata in all logs.""" + mock_queue, captured = make_mock_queue() + client = Logwell(valid_config, _queue=mock_queue) + child = client.child({"request_id": "abc123"}) + + child.info("Test message") + + assert captured[0]["metadata"]["request_id"] == "abc123" + + def test_child_metadata_merges_with_log_metadata( + self, valid_config: LogwellConfig + ) -> None: + """Child metadata merges with per-log metadata.""" + mock_queue, captured = make_mock_queue() + client = Logwell(valid_config, _queue=mock_queue) + child = client.child({"request_id": "abc123"}) + + child.info("Test", {"user_id": "user456"}) + + assert captured[0]["metadata"]["request_id"] == "abc123" + assert captured[0]["metadata"]["user_id"] == "user456" + + def test_child_log_metadata_overrides_child_metadata( + self, valid_config: LogwellConfig + ) -> None: + """Per-log metadata takes precedence over child metadata.""" + mock_queue, captured = make_mock_queue() + client = Logwell(valid_config, _queue=mock_queue) + child = client.child({"key": "child_value"}) + + child.info("Test", {"key": "log_value"}) + + assert captured[0]["metadata"]["key"] == "log_value" + + def test_child_with_service_override( + self, valid_api_key: str, valid_endpoint: str + ) -> None: + """Child logger can override service name.""" + config: LogwellConfig = { + "api_key": valid_api_key, + "endpoint": valid_endpoint, + "service": "parent-service", + } + mock_queue, captured = make_mock_queue() + client = Logwell(config, _queue=mock_queue) + child = client.child(service="child-service") + + child.info("Test") + + assert captured[0]["service"] == "child-service" + + def test_child_inherits_parent_service( + self, valid_api_key: str, valid_endpoint: str + ) -> None: + """Child logger inherits parent's service if not overridden.""" + config: LogwellConfig = { + "api_key": valid_api_key, + "endpoint": valid_endpoint, + "service": "parent-service", + } + mock_queue, captured = make_mock_queue() + client = Logwell(config, _queue=mock_queue) + child = client.child() + + child.info("Test") + + assert captured[0]["service"] == "parent-service" + + def test_child_inherits_callbacks( + self, valid_api_key: str, valid_endpoint: str + ) -> None: + """Child logger inherits on_error and on_flush callbacks.""" + on_error = MagicMock() + on_flush = MagicMock() + config: LogwellConfig = { + "api_key": valid_api_key, + "endpoint": valid_endpoint, + "on_error": on_error, + "on_flush": on_flush, + } + mock_queue, _ = make_mock_queue() + client = Logwell(config, _queue=mock_queue) + child = client.child() + + assert child._config["on_error"] is on_error + assert child._config["on_flush"] is on_flush + + def test_child_inherits_capture_source_location( + self, valid_api_key: str, valid_endpoint: str + ) -> None: + """Child inherits capture_source_location setting.""" + config: LogwellConfig = { + "api_key": valid_api_key, + "endpoint": valid_endpoint, + "capture_source_location": True, + } + mock_queue, _ = make_mock_queue() + client = Logwell(config, _queue=mock_queue) + child = client.child() + + assert child._config["capture_source_location"] is True + + +# ============================================================================= +# Nested Children Tests +# ============================================================================= + + +class TestNestedChildren: + """Tests for nested child loggers.""" + + def test_grandchild_shares_root_queue(self, valid_config: LogwellConfig) -> None: + """Grandchild shares the same queue as root.""" + mock_queue, _ = make_mock_queue() + root = Logwell(valid_config, _queue=mock_queue) + child = root.child() + grandchild = child.child() + + assert grandchild._queue is mock_queue + + def test_nested_metadata_accumulates(self, valid_config: LogwellConfig) -> None: + """Nested children accumulate metadata from ancestors.""" + mock_queue, captured = make_mock_queue() + root = Logwell(valid_config, _queue=mock_queue) + child = root.child({"level1": "value1"}) + grandchild = child.child({"level2": "value2"}) + + grandchild.info("Test") + + assert captured[0]["metadata"]["level1"] == "value1" + assert captured[0]["metadata"]["level2"] == "value2" + + def test_nested_metadata_overrides_parent( + self, valid_config: LogwellConfig + ) -> None: + """Deeper child can override ancestor's metadata key.""" + mock_queue, captured = make_mock_queue() + root = Logwell(valid_config, _queue=mock_queue) + child = root.child({"key": "parent_value"}) + grandchild = child.child({"key": "grandchild_value"}) + + grandchild.info("Test") + + assert captured[0]["metadata"]["key"] == "grandchild_value" + + def test_deeply_nested_children(self, valid_config: LogwellConfig) -> None: + """Deeply nested children work correctly.""" + mock_queue, captured = make_mock_queue() + root = Logwell(valid_config, _queue=mock_queue) + + current = root + for i in range(10): + current = current.child({f"level_{i}": f"value_{i}"}) + + current.info("Deep log") + + # All 10 levels of metadata should be present + for i in range(10): + assert captured[0]["metadata"][f"level_{i}"] == f"value_{i}" + + def test_sibling_children_independent(self, valid_config: LogwellConfig) -> None: + """Sibling children have independent metadata.""" + mock_queue, captured = make_mock_queue() + root = Logwell(valid_config, _queue=mock_queue) + child1 = root.child({"child": "one"}) + child2 = root.child({"child": "two"}) + + child1.info("From child1") + child2.info("From child2") + + assert captured[0]["metadata"]["child"] == "one" + assert captured[1]["metadata"]["child"] == "two" + + +# ============================================================================= +# Source Location Capture Tests +# ============================================================================= + + +class TestSourceLocationCapture: + """Tests for source location capture when enabled.""" + + def test_source_location_disabled_by_default( + self, valid_config: LogwellConfig + ) -> None: + """Source location not captured when disabled (default).""" + mock_queue, captured = make_mock_queue() + client = Logwell(valid_config, _queue=mock_queue) + + client.info("Test") + + assert "source_file" not in captured[0] + assert "line_number" not in captured[0] + + def test_source_location_captured_when_enabled( + self, valid_api_key: str, valid_endpoint: str + ) -> None: + """Source location captured when capture_source_location=True.""" + config: LogwellConfig = { + "api_key": valid_api_key, + "endpoint": valid_endpoint, + "capture_source_location": True, + } + mock_queue, captured = make_mock_queue() + client = Logwell(config, _queue=mock_queue) + + client.info("Test") + + assert "source_file" in captured[0] + assert "line_number" in captured[0] + assert isinstance(captured[0]["line_number"], int) + assert captured[0]["line_number"] > 0 + + def test_source_location_points_to_caller( + self, valid_api_key: str, valid_endpoint: str + ) -> None: + """Source location points to calling code, not SDK internals.""" + config: LogwellConfig = { + "api_key": valid_api_key, + "endpoint": valid_endpoint, + "capture_source_location": True, + } + mock_queue, captured = make_mock_queue() + client = Logwell(config, _queue=mock_queue) + + client.info("Test") # Line number should point to this line + + # Source file should be this test file + assert "test_client.py" in captured[0]["source_file"] + + def test_source_location_for_all_log_methods( + self, valid_api_key: str, valid_endpoint: str + ) -> None: + """All log methods capture source location.""" + config: LogwellConfig = { + "api_key": valid_api_key, + "endpoint": valid_endpoint, + "capture_source_location": True, + } + mock_queue, captured = make_mock_queue() + client = Logwell(config, _queue=mock_queue) + + client.debug("debug") + client.info("info") + client.warn("warn") + client.error("error") + client.fatal("fatal") + + for entry in captured: + assert "source_file" in entry + assert "line_number" in entry + + def test_child_inherits_source_location_setting( + self, valid_api_key: str, valid_endpoint: str + ) -> None: + """Child logger inherits source location capture setting.""" + config: LogwellConfig = { + "api_key": valid_api_key, + "endpoint": valid_endpoint, + "capture_source_location": True, + } + mock_queue, captured = make_mock_queue() + client = Logwell(config, _queue=mock_queue) + child = client.child({"request_id": "test"}) + + child.info("Child log") + + assert "source_file" in captured[0] + assert "line_number" in captured[0] + + +# ============================================================================= +# _merge_metadata Tests +# ============================================================================= + + +class TestMergeMetadata: + """Tests for _merge_metadata internal method.""" + + def test_no_parent_no_entry_returns_none( + self, valid_config: LogwellConfig + ) -> None: + """Returns None when no parent or entry metadata.""" + mock_queue, _ = make_mock_queue() + client = Logwell(valid_config, _queue=mock_queue) + + result = client._merge_metadata(None) + + assert result is None + + def test_parent_only_returns_parent(self, valid_config: LogwellConfig) -> None: + """Returns parent metadata when no entry metadata.""" + mock_queue, _ = make_mock_queue() + client = Logwell( + valid_config, _queue=mock_queue, _parent_metadata={"parent": "value"} + ) + + result = client._merge_metadata(None) + + assert result == {"parent": "value"} + + def test_entry_only_returns_entry(self, valid_config: LogwellConfig) -> None: + """Returns entry metadata when no parent metadata.""" + mock_queue, _ = make_mock_queue() + client = Logwell(valid_config, _queue=mock_queue) + + result = client._merge_metadata({"entry": "value"}) + + assert result == {"entry": "value"} + + def test_both_merges_with_entry_priority( + self, valid_config: LogwellConfig + ) -> None: + """Merges both, entry takes precedence.""" + mock_queue, _ = make_mock_queue() + client = Logwell( + valid_config, + _queue=mock_queue, + _parent_metadata={"key": "parent", "parent_only": "p"}, + ) + + result = client._merge_metadata({"key": "entry", "entry_only": "e"}) + + assert result == {"key": "entry", "parent_only": "p", "entry_only": "e"} + + +# ============================================================================= +# Integration-style Tests +# ============================================================================= + + +class TestIntegration: + """Integration-style tests for common usage patterns.""" + + @pytest.mark.asyncio + async def test_basic_workflow(self, valid_config: LogwellConfig) -> None: + """Test basic log -> flush workflow.""" + mock_queue, captured = make_mock_queue() + client = Logwell(valid_config, _queue=mock_queue) + + client.info("Starting task") + client.debug("Processing item", {"item_id": 1}) + client.info("Task complete") + + response = await client.flush() + + assert len(captured) == 3 + assert response == {"accepted": 1} + + @pytest.mark.asyncio + async def test_request_scoped_logging( + self, valid_api_key: str, valid_endpoint: str + ) -> None: + """Test request-scoped logging pattern.""" + config: LogwellConfig = { + "api_key": valid_api_key, + "endpoint": valid_endpoint, + "service": "api-server", + } + mock_queue, captured = make_mock_queue() + client = Logwell(config, _queue=mock_queue) + + # Simulate request handling + request_logger = client.child({"request_id": "req-123", "method": "GET"}) + request_logger.info("Request received") + + # Handler with user context + user_logger = request_logger.child({"user_id": "user-456"}) + user_logger.info("Processing user request") + user_logger.debug("Fetching data") + + request_logger.info("Request complete", {"status": 200}) + + # All logs should have request_id + for entry in captured: + assert entry["metadata"]["request_id"] == "req-123" + assert entry["service"] == "api-server" + + # User logs should have user_id + assert captured[1]["metadata"]["user_id"] == "user-456" + assert captured[2]["metadata"]["user_id"] == "user-456" + + @pytest.mark.asyncio + async def test_graceful_shutdown(self, valid_config: LogwellConfig) -> None: + """Test graceful shutdown with multiple children.""" + mock_queue, captured = make_mock_queue() + mock_transport = MagicMock() + mock_transport.close = AsyncMock() + + with patch.object(Logwell, "__init__", lambda s, c: None): + client = Logwell.__new__(Logwell) + client._config = valid_config + client._queue = mock_queue + client._owns_queue = True + client._stopped = False + client._transport = mock_transport + client._parent_metadata = None + + child1 = client.child({"worker": 1}) + child2 = client.child({"worker": 2}) + + child1.info("Worker 1 log") + child2.info("Worker 2 log") + + # Only root client owns the queue, so only it should shutdown + await child1.shutdown() + await child2.shutdown() + assert mock_queue.shutdown.call_count == 0 + + await client.shutdown() + assert mock_queue.shutdown.call_count == 1 diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md index 4ba5db0..866fef9 100644 --- a/specs/python-sdk/.progress.md +++ b/specs/python-sdk/.progress.md @@ -28,6 +28,7 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity - [x] 3.3 Unit tests for errors - 56797be - [x] 3.4 Unit tests for queue - d85f824 - [x] 3.5 Unit tests for source_location - 274125b +- [x] 3.6 Unit tests for client - d9875f4 ## Current Task Awaiting next task @@ -69,6 +70,9 @@ Awaiting next task - Tests cover: validate_api_key_format (valid/invalid keys), validate_config (missing fields, invalid formats, numeric bounds), default merging, optional fields, return value structure, edge cases - Mock urlparse to test exception handling in _is_valid_url (lines 57-58 coverage) - 58 tests for errors module covering: LogwellErrorCode enum (7 codes, values, uniqueness), LogwellError construction, inheritance from Exception, __str__ and __repr__ methods, edge cases +- 67 tests written for client.py covering: Logwell construction (valid/invalid configs), log methods (debug/info/warn/error/fatal), logging with metadata, timestamp auto-generation (UTC), service name handling, queue_size property, flush() method, shutdown() (idempotent, rejects new logs), child() method (inherits config, shares queue, merges metadata), nested children (accumulates metadata), source location capture, _merge_metadata internal method, integration-style workflows +- Client's shutdown() idempotency is handled at the queue level, not at the client level - client always calls queue.shutdown() but queue handles its own stopped state +- Mock queue pattern: create MagicMock(spec=BatchQueue) with size property and async methods to capture log entries ## Next -Task 3.6: Unit tests for client +Task 3.7: Integration tests diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index 18e18d7..8cb1775 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -180,7 +180,7 @@ After POC validated, clean up code. - **Commit**: `test(python-sdk): add source location tests` - _Requirements: AC-6.1, AC-6.2, AC-6.3_ -- [ ] 3.6 Unit tests for client +- [x] 3.6 Unit tests for client - **Do**: Test all log methods, child loggers, flush, shutdown - **Files**: `sdks/python/tests/unit/test_client.py` - **Done when**: Client API fully tested From d2c5a41ec2ea650df0575baa38e798bcf4ac3c57 Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 18:13:47 -0700 Subject: [PATCH 23/30] test(python-sdk): add integration tests Add 22 integration tests using respx for HTTP mocking: - Full flow tests (instantiate -> log -> flush -> verify) - All log levels (debug, info, warn, error, fatal) - Batching behavior and auto-flush on batch_size - Retry on 5xx server errors and 429 rate limits - Non-retryable error handling (401, 400) - Shutdown behavior (flush remaining, idempotent, reject after) - Child logger tests (same endpoint, metadata inheritance) - Nested children metadata accumulation - on_flush callback verification - Source location capture when enabled - Complex metadata serialization --- sdks/python/tests/integration/__init__.py | 1 + sdks/python/tests/integration/test_e2e.py | 722 ++++++++++++++++++++++ specs/python-sdk/.progress.md | 4 +- specs/python-sdk/tasks.md | 2 +- 4 files changed, 727 insertions(+), 2 deletions(-) create mode 100644 sdks/python/tests/integration/__init__.py create mode 100644 sdks/python/tests/integration/test_e2e.py diff --git a/sdks/python/tests/integration/__init__.py b/sdks/python/tests/integration/__init__.py new file mode 100644 index 0000000..765bf49 --- /dev/null +++ b/sdks/python/tests/integration/__init__.py @@ -0,0 +1 @@ +"""Integration tests for Logwell Python SDK.""" diff --git a/sdks/python/tests/integration/test_e2e.py b/sdks/python/tests/integration/test_e2e.py new file mode 100644 index 0000000..0f12f33 --- /dev/null +++ b/sdks/python/tests/integration/test_e2e.py @@ -0,0 +1,722 @@ +"""End-to-end integration tests for Logwell Python SDK. + +Tests full flow with mocked HTTP using respx: +- Instantiate -> log -> flush -> verify HTTP request sent +- Multiple logs batched together +- Retry on server error (5xx) +- Error handling (401, 400) +- Shutdown flushes remaining logs +- Child logger logs go to same endpoint +""" + +from __future__ import annotations + +import json +from typing import TYPE_CHECKING, Any + +import httpx +import pytest +import respx + +from logwell import Logwell, LogwellError, LogwellErrorCode + +if TYPE_CHECKING: + from logwell.types import LogwellConfig + + +# ============================================================================= +# Fixtures +# ============================================================================= + + +@pytest.fixture +def valid_config() -> LogwellConfig: + """Valid config for integration tests.""" + return { + "api_key": "lw_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "endpoint": "https://logs.example.com", + "service": "integration-test", + "batch_size": 10, + "flush_interval": 1.0, + "max_retries": 3, + } + + +@pytest.fixture +def mock_server() -> respx.MockRouter: + """Start respx mock server for integration tests.""" + with respx.mock(assert_all_called=False) as router: + yield router + + +# ============================================================================= +# Full Flow Tests +# ============================================================================= + + +class TestFullFlow: + """Test complete logging flow from instantiate to flush.""" + + @pytest.mark.asyncio + async def test_log_and_flush_sends_http_request( + self, valid_config: LogwellConfig, mock_server: respx.MockRouter + ) -> None: + """Full flow: instantiate -> log -> flush -> verify HTTP request sent.""" + # Setup mock + mock_server.post("https://logs.example.com/v1/ingest").mock( + return_value=httpx.Response(200, json={"accepted": 1}) + ) + + # Execute + client = Logwell(valid_config) + client.info("Test message", {"key": "value"}) + response = await client.flush() + await client.shutdown() + + # Verify + assert response is not None + assert response["accepted"] == 1 + assert mock_server.calls.call_count == 1 + + # Verify request payload + request = mock_server.calls.last.request + assert request.headers["Authorization"] == "Bearer lw_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + assert request.headers["Content-Type"] == "application/json" + + body = json.loads(request.content) + assert len(body) == 1 + assert body[0]["level"] == "info" + assert body[0]["message"] == "Test message" + assert body[0]["metadata"]["key"] == "value" + assert body[0]["service"] == "integration-test" + assert "timestamp" in body[0] + + @pytest.mark.asyncio + async def test_all_log_levels_send_correct_level( + self, valid_config: LogwellConfig, mock_server: respx.MockRouter + ) -> None: + """All log levels (debug, info, warn, error, fatal) send correctly.""" + # Setup mock + mock_server.post("https://logs.example.com/v1/ingest").mock( + return_value=httpx.Response(200, json={"accepted": 5}) + ) + + # Execute + client = Logwell(valid_config) + client.debug("Debug message") + client.info("Info message") + client.warn("Warn message") + client.error("Error message") + client.fatal("Fatal message") + await client.flush() + await client.shutdown() + + # Verify + request = mock_server.calls.last.request + body = json.loads(request.content) + + levels = [entry["level"] for entry in body] + assert levels == ["debug", "info", "warn", "error", "fatal"] + + +# ============================================================================= +# Batching Tests +# ============================================================================= + + +class TestBatching: + """Test log batching behavior.""" + + @pytest.mark.asyncio + async def test_multiple_logs_batched_together( + self, valid_config: LogwellConfig, mock_server: respx.MockRouter + ) -> None: + """Multiple logs are batched into a single HTTP request.""" + # Setup mock + mock_server.post("https://logs.example.com/v1/ingest").mock( + return_value=httpx.Response(200, json={"accepted": 5}) + ) + + # Execute + client = Logwell(valid_config) + for i in range(5): + client.info(f"Message {i}") + response = await client.flush() + await client.shutdown() + + # Verify single request with all logs + assert response is not None + assert response["accepted"] == 5 + assert mock_server.calls.call_count == 1 + + body = json.loads(mock_server.calls.last.request.content) + assert len(body) == 5 + messages = [entry["message"] for entry in body] + assert messages == [f"Message {i}" for i in range(5)] + + @pytest.mark.asyncio + async def test_auto_flush_on_batch_size( + self, valid_config: LogwellConfig, mock_server: respx.MockRouter + ) -> None: + """Logs are auto-flushed when batch size is reached.""" + # Use smaller batch size + config = {**valid_config, "batch_size": 5} + + # Setup mock + mock_server.post("https://logs.example.com/v1/ingest").mock( + return_value=httpx.Response(200, json={"accepted": 5}) + ) + + # Execute + client = Logwell(config) + for i in range(5): + client.info(f"Message {i}") + + # Give time for auto-flush to complete + import asyncio + await asyncio.sleep(0.1) + + await client.shutdown() + + # Verify flush occurred + assert mock_server.calls.call_count >= 1 + + +# ============================================================================= +# Retry Tests +# ============================================================================= + + +class TestRetry: + """Test retry behavior on server errors.""" + + @pytest.mark.asyncio + async def test_retry_on_500_server_error( + self, valid_config: LogwellConfig, mock_server: respx.MockRouter + ) -> None: + """Retries on 5xx server errors with exponential backoff.""" + # Setup mock: fail twice, succeed third time + responses = [ + httpx.Response(500, json={"error": "Internal server error"}), + httpx.Response(503, json={"error": "Service unavailable"}), + httpx.Response(200, json={"accepted": 1}), + ] + mock_server.post("https://logs.example.com/v1/ingest").mock( + side_effect=responses + ) + + # Execute + client = Logwell(valid_config) + client.info("Test message") + response = await client.flush() + await client.shutdown() + + # Verify retries occurred and succeeded + assert response is not None + assert response["accepted"] == 1 + assert mock_server.calls.call_count == 3 + + @pytest.mark.asyncio + async def test_retry_on_429_rate_limit( + self, valid_config: LogwellConfig, mock_server: respx.MockRouter + ) -> None: + """Retries on 429 rate limit with exponential backoff.""" + # Setup mock: rate limit then succeed + responses = [ + httpx.Response(429, json={"error": "Too many requests"}), + httpx.Response(200, json={"accepted": 1}), + ] + mock_server.post("https://logs.example.com/v1/ingest").mock( + side_effect=responses + ) + + # Execute + client = Logwell(valid_config) + client.info("Test message") + response = await client.flush() + await client.shutdown() + + # Verify retry succeeded + assert response is not None + assert response["accepted"] == 1 + assert mock_server.calls.call_count == 2 + + @pytest.mark.asyncio + async def test_max_retries_exhausted_requeues_logs( + self, valid_config: LogwellConfig, mock_server: respx.MockRouter + ) -> None: + """When max retries exhausted, logs are requeued for next flush.""" + # Use low retry count + config = {**valid_config, "max_retries": 1} + + # Setup mock: always fail + mock_server.post("https://logs.example.com/v1/ingest").mock( + return_value=httpx.Response(500, json={"error": "Server error"}) + ) + + # Execute + client = Logwell(config) + client.info("Test message") + response = await client.flush() + + # Verify flush failed (returns None on error) + assert response is None + # Logs should still be in queue (requeued after failure) + assert client.queue_size == 1 + + await client.shutdown() + + +# ============================================================================= +# Error Handling Tests +# ============================================================================= + + +class TestErrorHandling: + """Test error handling for various HTTP errors.""" + + @pytest.mark.asyncio + async def test_401_unauthorized_not_retried( + self, valid_config: LogwellConfig, mock_server: respx.MockRouter + ) -> None: + """401 Unauthorized is not retried and triggers on_error callback.""" + errors: list[Exception] = [] + + # Use max_retries=0 to ensure only 1 attempt per flush + config = {**valid_config, "on_error": lambda e: errors.append(e), "max_retries": 0} + + # Setup mock + mock_server.post("https://logs.example.com/v1/ingest").mock( + return_value=httpx.Response(401, json={"error": "Invalid API key"}) + ) + + # Execute + client = Logwell(config) + client.info("Test message") + await client.flush() + + # After flush fails, logs are requeued. Check before shutdown to see single attempt. + # Note: shutdown will also try to flush requeued logs, so we check intermediate state + first_flush_calls = mock_server.calls.call_count + assert first_flush_calls == 1 # No retries for 401 + + await client.shutdown() + + # Verify error callback was triggered + assert len(errors) >= 1 + assert isinstance(errors[0], LogwellError) + assert errors[0].code == LogwellErrorCode.UNAUTHORIZED + assert not errors[0].retryable + + @pytest.mark.asyncio + async def test_400_bad_request_not_retried( + self, valid_config: LogwellConfig, mock_server: respx.MockRouter + ) -> None: + """400 Bad Request is not retried.""" + errors: list[Exception] = [] + + # Use max_retries=0 to ensure only 1 attempt per flush + config = {**valid_config, "on_error": lambda e: errors.append(e), "max_retries": 0} + + # Setup mock + mock_server.post("https://logs.example.com/v1/ingest").mock( + return_value=httpx.Response(400, json={"error": "Validation failed"}) + ) + + # Execute + client = Logwell(config) + client.info("Test message") + await client.flush() + + # Verify no retry on first flush (400 is non-retryable) + first_flush_calls = mock_server.calls.call_count + assert first_flush_calls == 1 # No retries for 400 + + await client.shutdown() + + # Verify error + assert len(errors) >= 1 + assert isinstance(errors[0], LogwellError) + assert errors[0].code == LogwellErrorCode.VALIDATION_ERROR + + @pytest.mark.asyncio + async def test_on_error_callback_receives_exception( + self, valid_config: LogwellConfig, mock_server: respx.MockRouter + ) -> None: + """on_error callback receives LogwellError with full details.""" + errors: list[Exception] = [] + + config = {**valid_config, "on_error": lambda e: errors.append(e)} + + # Setup mock + mock_server.post("https://logs.example.com/v1/ingest").mock( + return_value=httpx.Response(500, json={"error": "Internal error"}) + ) + + # Execute (will fail after retries) + client = Logwell(config) + client.info("Test message") + await client.flush() + await client.shutdown() + + # Verify error callback received + assert len(errors) >= 1 + assert isinstance(errors[-1], LogwellError) + assert errors[-1].code == LogwellErrorCode.SERVER_ERROR + + +# ============================================================================= +# Shutdown Tests +# ============================================================================= + + +class TestShutdown: + """Test shutdown behavior.""" + + @pytest.mark.asyncio + async def test_shutdown_flushes_remaining_logs( + self, valid_config: LogwellConfig, mock_server: respx.MockRouter + ) -> None: + """Shutdown flushes any remaining logs in the queue.""" + # Setup mock + mock_server.post("https://logs.example.com/v1/ingest").mock( + return_value=httpx.Response(200, json={"accepted": 3}) + ) + + # Execute + client = Logwell(valid_config) + client.info("Message 1") + client.info("Message 2") + client.info("Message 3") + + # Don't call flush, just shutdown + await client.shutdown() + + # Verify logs were flushed during shutdown + assert mock_server.calls.call_count >= 1 + body = json.loads(mock_server.calls.last.request.content) + assert len(body) == 3 + + @pytest.mark.asyncio + async def test_shutdown_is_idempotent( + self, valid_config: LogwellConfig, mock_server: respx.MockRouter + ) -> None: + """Shutdown can be called multiple times safely.""" + # Setup mock + mock_server.post("https://logs.example.com/v1/ingest").mock( + return_value=httpx.Response(200, json={"accepted": 1}) + ) + + # Execute + client = Logwell(valid_config) + client.info("Test message") + + # Call shutdown multiple times + await client.shutdown() + await client.shutdown() + await client.shutdown() + + # Should only flush once + assert mock_server.calls.call_count == 1 + + @pytest.mark.asyncio + async def test_logs_after_shutdown_are_ignored( + self, valid_config: LogwellConfig, mock_server: respx.MockRouter + ) -> None: + """Logs added after shutdown are silently ignored.""" + # Setup mock + mock_server.post("https://logs.example.com/v1/ingest").mock( + return_value=httpx.Response(200, json={"accepted": 1}) + ) + + # Execute + client = Logwell(valid_config) + client.info("Before shutdown") + await client.shutdown() + + # These should be ignored + client.info("After shutdown 1") + client.info("After shutdown 2") + + # Verify only the first log was sent + assert mock_server.calls.call_count == 1 + body = json.loads(mock_server.calls.last.request.content) + assert len(body) == 1 + assert body[0]["message"] == "Before shutdown" + + +# ============================================================================= +# Child Logger Tests +# ============================================================================= + + +class TestChildLogger: + """Test child logger behavior.""" + + @pytest.mark.asyncio + async def test_child_logger_logs_to_same_endpoint( + self, valid_config: LogwellConfig, mock_server: respx.MockRouter + ) -> None: + """Child logger logs go to the same endpoint as parent.""" + # Setup mock + mock_server.post("https://logs.example.com/v1/ingest").mock( + return_value=httpx.Response(200, json={"accepted": 2}) + ) + + # Execute + client = Logwell(valid_config) + child = client.child({"child_key": "child_value"}) + + client.info("Parent log") + child.info("Child log") + + await client.flush() + await client.shutdown() + + # Verify both logs sent to same endpoint + assert mock_server.calls.call_count == 1 + body = json.loads(mock_server.calls.last.request.content) + assert len(body) == 2 + + @pytest.mark.asyncio + async def test_child_logger_inherits_parent_metadata( + self, valid_config: LogwellConfig, mock_server: respx.MockRouter + ) -> None: + """Child logger metadata is merged with parent metadata.""" + # Setup mock + mock_server.post("https://logs.example.com/v1/ingest").mock( + return_value=httpx.Response(200, json={"accepted": 1}) + ) + + # Execute + client = Logwell(valid_config) + child = client.child({"request_id": "abc123"}) + + child.info("Child log", {"extra": "data"}) + + await client.flush() + await client.shutdown() + + # Verify metadata is merged + body = json.loads(mock_server.calls.last.request.content) + assert len(body) == 1 + assert body[0]["metadata"]["request_id"] == "abc123" + assert body[0]["metadata"]["extra"] == "data" + + @pytest.mark.asyncio + async def test_nested_children_accumulate_metadata( + self, valid_config: LogwellConfig, mock_server: respx.MockRouter + ) -> None: + """Nested child loggers accumulate metadata from all ancestors.""" + # Setup mock + mock_server.post("https://logs.example.com/v1/ingest").mock( + return_value=httpx.Response(200, json={"accepted": 1}) + ) + + # Execute + client = Logwell(valid_config) + child1 = client.child({"tenant_id": "tenant-123"}) + child2 = child1.child({"user_id": "user-456"}) + grandchild = child2.child({"session_id": "session-789"}) + + grandchild.info("Grandchild log") + + await client.flush() + await client.shutdown() + + # Verify all metadata accumulated + body = json.loads(mock_server.calls.last.request.content) + metadata = body[0]["metadata"] + assert metadata["tenant_id"] == "tenant-123" + assert metadata["user_id"] == "user-456" + assert metadata["session_id"] == "session-789" + + @pytest.mark.asyncio + async def test_child_logger_can_override_service( + self, valid_config: LogwellConfig, mock_server: respx.MockRouter + ) -> None: + """Child logger can override the service name.""" + # Setup mock + mock_server.post("https://logs.example.com/v1/ingest").mock( + return_value=httpx.Response(200, json={"accepted": 2}) + ) + + # Execute + client = Logwell(valid_config) + child = client.child(service="child-service") + + client.info("Parent log") + child.info("Child log") + + await client.flush() + await client.shutdown() + + # Verify services + body = json.loads(mock_server.calls.last.request.content) + assert body[0]["service"] == "integration-test" # Parent + assert body[1]["service"] == "child-service" # Child override + + @pytest.mark.asyncio + async def test_child_logger_shares_queue_with_parent( + self, valid_config: LogwellConfig, mock_server: respx.MockRouter + ) -> None: + """Child logger shares the same queue as parent.""" + # Setup mock + mock_server.post("https://logs.example.com/v1/ingest").mock( + return_value=httpx.Response(200, json={"accepted": 3}) + ) + + # Execute + client = Logwell(valid_config) + child1 = client.child({"key1": "value1"}) + child2 = client.child({"key2": "value2"}) + + client.info("Parent log") + child1.info("Child1 log") + child2.info("Child2 log") + + # All logs should be in the same queue + assert client.queue_size == 3 + + await client.flush() + await client.shutdown() + + # All sent in single request + assert mock_server.calls.call_count == 1 + + +# ============================================================================= +# On Flush Callback Tests +# ============================================================================= + + +class TestOnFlushCallback: + """Test on_flush callback behavior.""" + + @pytest.mark.asyncio + async def test_on_flush_callback_receives_count( + self, valid_config: LogwellConfig, mock_server: respx.MockRouter + ) -> None: + """on_flush callback receives the count of flushed logs.""" + flush_counts: list[int] = [] + + config = {**valid_config, "on_flush": lambda count: flush_counts.append(count)} + + # Setup mock + mock_server.post("https://logs.example.com/v1/ingest").mock( + return_value=httpx.Response(200, json={"accepted": 5}) + ) + + # Execute + client = Logwell(config) + for i in range(5): + client.info(f"Message {i}") + await client.flush() + await client.shutdown() + + # Verify callback received correct count + assert len(flush_counts) == 1 + assert flush_counts[0] == 5 + + +# ============================================================================= +# Source Location Tests +# ============================================================================= + + +class TestSourceLocation: + """Test source location capture in integration context.""" + + @pytest.mark.asyncio + async def test_source_location_captured_when_enabled( + self, valid_config: LogwellConfig, mock_server: respx.MockRouter + ) -> None: + """Source location is captured when capture_source_location is enabled.""" + config = {**valid_config, "capture_source_location": True} + + # Setup mock + mock_server.post("https://logs.example.com/v1/ingest").mock( + return_value=httpx.Response(200, json={"accepted": 1}) + ) + + # Execute + client = Logwell(config) + client.info("Test message") + await client.flush() + await client.shutdown() + + # Verify source location is present + body = json.loads(mock_server.calls.last.request.content) + assert "source_file" in body[0] + assert "line_number" in body[0] + assert body[0]["source_file"].endswith("test_e2e.py") + + @pytest.mark.asyncio + async def test_source_location_not_captured_when_disabled( + self, valid_config: LogwellConfig, mock_server: respx.MockRouter + ) -> None: + """Source location is not captured when capture_source_location is disabled.""" + config = {**valid_config, "capture_source_location": False} + + # Setup mock + mock_server.post("https://logs.example.com/v1/ingest").mock( + return_value=httpx.Response(200, json={"accepted": 1}) + ) + + # Execute + client = Logwell(config) + client.info("Test message") + await client.flush() + await client.shutdown() + + # Verify source location is not present + body = json.loads(mock_server.calls.last.request.content) + assert "source_file" not in body[0] + assert "line_number" not in body[0] + + +# ============================================================================= +# Metadata Tests +# ============================================================================= + + +class TestMetadata: + """Test metadata handling in integration context.""" + + @pytest.mark.asyncio + async def test_complex_metadata_serialized_correctly( + self, valid_config: LogwellConfig, mock_server: respx.MockRouter + ) -> None: + """Complex nested metadata is serialized correctly in HTTP request.""" + # Setup mock + mock_server.post("https://logs.example.com/v1/ingest").mock( + return_value=httpx.Response(200, json={"accepted": 1}) + ) + + # Execute + client = Logwell(valid_config) + client.info( + "Test message", + { + "string": "value", + "number": 42, + "float": 3.14, + "boolean": True, + "null": None, + "array": [1, 2, 3], + "nested": {"deep": {"value": "found"}}, + }, + ) + await client.flush() + await client.shutdown() + + # Verify metadata serialized correctly + body = json.loads(mock_server.calls.last.request.content) + metadata = body[0]["metadata"] + assert metadata["string"] == "value" + assert metadata["number"] == 42 + assert metadata["float"] == 3.14 + assert metadata["boolean"] is True + assert metadata["null"] is None + assert metadata["array"] == [1, 2, 3] + assert metadata["nested"]["deep"]["value"] == "found" diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md index 866fef9..22868f4 100644 --- a/specs/python-sdk/.progress.md +++ b/specs/python-sdk/.progress.md @@ -29,6 +29,7 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity - [x] 3.4 Unit tests for queue - d85f824 - [x] 3.5 Unit tests for source_location - 274125b - [x] 3.6 Unit tests for client - d9875f4 +- [x] 3.7 Integration tests - be80638 ## Current Task Awaiting next task @@ -73,6 +74,7 @@ Awaiting next task - 67 tests written for client.py covering: Logwell construction (valid/invalid configs), log methods (debug/info/warn/error/fatal), logging with metadata, timestamp auto-generation (UTC), service name handling, queue_size property, flush() method, shutdown() (idempotent, rejects new logs), child() method (inherits config, shares queue, merges metadata), nested children (accumulates metadata), source location capture, _merge_metadata internal method, integration-style workflows - Client's shutdown() idempotency is handled at the queue level, not at the client level - client always calls queue.shutdown() but queue handles its own stopped state - Mock queue pattern: create MagicMock(spec=BatchQueue) with size property and async methods to capture log entries +- 22 integration tests written using respx for HTTP mocking covering: full flow (instantiate->log->flush->verify), all log levels, batching, auto-flush on batch_size, retry on 5xx/429, non-retryable 401/400 errors, shutdown flush, shutdown idempotency, child loggers, metadata inheritance, nested children, on_flush callback, source location capture ## Next -Task 3.7: Integration tests +Task 4.1: Local quality check diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index 8cb1775..45f7255 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -188,7 +188,7 @@ After POC validated, clean up code. - **Commit**: `test(python-sdk): add client unit tests` - _Requirements: AC-1.1 through AC-4.4_ -- [ ] 3.7 Integration tests +- [x] 3.7 Integration tests - **Do**: Test full flow with mocked HTTP (respx) - **Files**: `sdks/python/tests/integration/__init__.py`, `sdks/python/tests/integration/test_e2e.py` - **Done when**: E2E flow tested with mock server From 195b1f51bac0937eb54b12219c737c5628878087 Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 18:16:45 -0700 Subject: [PATCH 24/30] fix(python-sdk): address lint/type issues - Fix E501 line too long errors in queue.py and transport.py - All quality checks now pass: - mypy --strict: no issues - ruff check: all checks passed - pytest: 308 tests, 98% coverage --- sdks/python/src/logwell/queue.py | 3 ++- sdks/python/src/logwell/transport.py | 4 ++-- specs/python-sdk/.progress.md | 3 ++- specs/python-sdk/tasks.md | 2 +- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/sdks/python/src/logwell/queue.py b/sdks/python/src/logwell/queue.py index fe931a3..4c11171 100644 --- a/sdks/python/src/logwell/queue.py +++ b/sdks/python/src/logwell/queue.py @@ -118,7 +118,8 @@ def add(self, entry: LogEntry) -> None: msg = dropped.get("message", "")[:50] self._config.on_error( LogwellError( - f"Queue overflow: max_queue_size ({self._config.max_queue_size}) exceeded. " + f"Queue overflow: max_queue_size " + f"({self._config.max_queue_size}) exceeded. " f"Dropped oldest log: '{msg}...'. " "Logs are being generated faster than they can be sent. " "Consider increasing max_queue_size, reducing log volume, " diff --git a/sdks/python/src/logwell/transport.py b/sdks/python/src/logwell/transport.py index 5ec0cb2..5fec1f8 100644 --- a/sdks/python/src/logwell/transport.py +++ b/sdks/python/src/logwell/transport.py @@ -112,8 +112,8 @@ async def send(self, logs: list[LogEntry]) -> IngestResponse: LogwellError: On failure after all retries """ last_error: LogwellError = LogwellError( - f"Failed to send logs after {self._config.max_retries + 1} attempts to {self._ingest_url}. " - "Check your network connection and endpoint URL.", + f"Failed to send logs after {self._config.max_retries + 1} attempts " + f"to {self._ingest_url}. Check your network connection and endpoint URL.", LogwellErrorCode.NETWORK_ERROR, None, True, diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md index 22868f4..d0826ba 100644 --- a/specs/python-sdk/.progress.md +++ b/specs/python-sdk/.progress.md @@ -30,6 +30,7 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity - [x] 3.5 Unit tests for source_location - 274125b - [x] 3.6 Unit tests for client - d9875f4 - [x] 3.7 Integration tests - be80638 +- [x] 4.1 Local quality check - c46348e ## Current Task Awaiting next task @@ -77,4 +78,4 @@ Awaiting next task - 22 integration tests written using respx for HTTP mocking covering: full flow (instantiate->log->flush->verify), all log levels, batching, auto-flush on batch_size, retry on 5xx/429, non-retryable 401/400 errors, shutdown flush, shutdown idempotency, child loggers, metadata inheritance, nested children, on_flush callback, source location capture ## Next -Task 4.1: Local quality check +Task 4.2: Create PR diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index 45f7255..9380783 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -197,7 +197,7 @@ After POC validated, clean up code. ## Phase 4: Quality Gates -- [ ] 4.1 Local quality check +- [x] 4.1 Local quality check - **Do**: Run mypy, ruff, pytest with coverage - **Verify**: `cd sdks/python && mypy --strict src && ruff check src && pytest --cov=src --cov-report=term-missing` - **Done when**: All pass, coverage >= 90% From 0b23b2dc4a12da8dae5ac77b9361b30978b9fc40 Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 18:22:47 -0700 Subject: [PATCH 25/30] chore(python-sdk): complete spec - PR #11 created All 22 tasks completed: - Phase 1: POC implementation - Phase 2: Refactoring (thread safety, type hints, docs) - Phase 3: Testing (unit + integration) - Phase 4: Quality gates + PR PR: https://github.com/Divkix/Logwell/pull/11 --- specs/python-sdk/.progress.md | 7 ++++--- specs/python-sdk/.ralph-state.json | 6 +++--- specs/python-sdk/tasks.md | 2 +- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/specs/python-sdk/.progress.md b/specs/python-sdk/.progress.md index d0826ba..f0b2f6b 100644 --- a/specs/python-sdk/.progress.md +++ b/specs/python-sdk/.progress.md @@ -4,7 +4,7 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity. The server is ready, and we need a Python SDK so Python developers can also use the logging platform. ## Status -- Phase: Execution (Phase 3 - Testing) +- Phase: Complete - Started: 2026-01-16 - Artifacts generated: research.md, requirements.md, design.md, tasks.md @@ -31,9 +31,10 @@ Port the TypeScript SDK at `sdks/typescript/` to Python with full feature parity - [x] 3.6 Unit tests for client - d9875f4 - [x] 3.7 Integration tests - be80638 - [x] 4.1 Local quality check - c46348e +- [x] 4.2 Create PR - PR #11 (https://github.com/Divkix/Logwell/pull/11) ## Current Task -Awaiting next task +All tasks complete - PR ready for review ## Learnings @@ -78,4 +79,4 @@ Awaiting next task - 22 integration tests written using respx for HTTP mocking covering: full flow (instantiate->log->flush->verify), all log levels, batching, auto-flush on batch_size, retry on 5xx/429, non-retryable 401/400 errors, shutdown flush, shutdown idempotency, child loggers, metadata inheritance, nested children, on_flush callback, source location capture ## Next -Task 4.2: Create PR +All tasks complete. PR #11 ready for review at https://github.com/Divkix/Logwell/pull/11 diff --git a/specs/python-sdk/.ralph-state.json b/specs/python-sdk/.ralph-state.json index 0e0303b..c9282da 100644 --- a/specs/python-sdk/.ralph-state.json +++ b/specs/python-sdk/.ralph-state.json @@ -2,9 +2,9 @@ "source": "plan", "name": "python-sdk", "basePath": "./specs/python-sdk", - "phase": "tasks", - "taskIndex": 0, - "totalTasks": 0, + "phase": "complete", + "taskIndex": 22, + "totalTasks": 22, "taskIteration": 1, "maxTaskIterations": 5, "globalIteration": 1, diff --git a/specs/python-sdk/tasks.md b/specs/python-sdk/tasks.md index 9380783..dc62703 100644 --- a/specs/python-sdk/tasks.md +++ b/specs/python-sdk/tasks.md @@ -203,7 +203,7 @@ After POC validated, clean up code. - **Done when**: All pass, coverage >= 90% - **Commit**: `fix(python-sdk): address lint/type issues` (if needed) -- [ ] 4.2 Create PR +- [x] 4.2 Create PR - **Do**: Push branch, create PR with gh CLI - **Verify**: `gh pr checks --watch` all green - **Done when**: PR ready for review From d304bde2097a52d3b3ed527bd04f5e8dd08b06ff Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 18:37:35 -0700 Subject: [PATCH 26/30] fix(python-sdk): remove unused imports in test files - Remove unused asyncio import from test_client.py - Remove unused Any import from test_e2e.py --- sdks/python/tests/integration/test_e2e.py | 2 +- sdks/python/tests/unit/test_client.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/sdks/python/tests/integration/test_e2e.py b/sdks/python/tests/integration/test_e2e.py index 0f12f33..32e462d 100644 --- a/sdks/python/tests/integration/test_e2e.py +++ b/sdks/python/tests/integration/test_e2e.py @@ -12,7 +12,7 @@ from __future__ import annotations import json -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING import httpx import pytest diff --git a/sdks/python/tests/unit/test_client.py b/sdks/python/tests/unit/test_client.py index 268a530..52585d0 100644 --- a/sdks/python/tests/unit/test_client.py +++ b/sdks/python/tests/unit/test_client.py @@ -14,7 +14,6 @@ from __future__ import annotations -import asyncio from typing import TYPE_CHECKING, Any from unittest.mock import AsyncMock, MagicMock, patch From b95552763277ed1c29ce1f207d26ab486e54a012 Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 18:37:41 -0700 Subject: [PATCH 27/30] ci(python-sdk): add CI workflow with PyPI publishing - Add lint job: ruff check/format, mypy --strict - Add unit tests across Python 3.10-3.13 - Add integration tests with respx mocking - Add coverage report with 90% threshold - Add build verification with twine - Add PyPI publish via trusted publisher (OIDC) - Auto-publish on main when version changes - Add coverage[toml] to dev dependencies --- .github/workflows/sdk-python.yml | 272 +++++++++++++++++++++++++++++++ sdks/python/pyproject.toml | 1 + 2 files changed, 273 insertions(+) create mode 100644 .github/workflows/sdk-python.yml diff --git a/.github/workflows/sdk-python.yml b/.github/workflows/sdk-python.yml new file mode 100644 index 0000000..3034883 --- /dev/null +++ b/.github/workflows/sdk-python.yml @@ -0,0 +1,272 @@ +name: SDK Python + +on: + push: + branches: [main] + paths: + - "sdks/python/**" + - ".github/workflows/sdk-python.yml" + pull_request: + branches: [main] + paths: + - "sdks/python/**" + - ".github/workflows/sdk-python.yml" + workflow_dispatch: + +permissions: + contents: read + id-token: write + +# Cancel in-progress runs on same branch/PR +concurrency: + group: sdk-py-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +defaults: + run: + working-directory: sdks/python + +jobs: + # ============================================================================= + # Lint & Type Check + # ============================================================================= + lint: + name: Lint & Type Check + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install uv + uses: astral-sh/setup-uv@v4 + + - name: Install dependencies + run: | + uv venv + uv pip install -e ".[dev]" + + - name: Run ruff linter + run: uv run ruff check src tests + + - name: Run ruff formatter check + run: uv run ruff format --check src tests + + - name: Run mypy type check + run: uv run mypy --strict src + + # ============================================================================= + # Unit Tests + # ============================================================================= + test-unit: + name: Unit Tests + runs-on: ubuntu-latest + timeout-minutes: 10 + strategy: + matrix: + python-version: ["3.10", "3.11", "3.12", "3.13"] + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Install uv + uses: astral-sh/setup-uv@v4 + + - name: Install dependencies + run: | + uv venv + uv pip install -e ".[dev]" + + - name: Run unit tests + run: uv run pytest tests/unit -v --tb=short + + # ============================================================================= + # Integration Tests + # ============================================================================= + test-integration: + name: Integration Tests + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install uv + uses: astral-sh/setup-uv@v4 + + - name: Install dependencies + run: | + uv venv + uv pip install -e ".[dev]" + + - name: Run integration tests + run: uv run pytest tests/integration -v --tb=short + + # ============================================================================= + # Coverage + # ============================================================================= + coverage: + name: Coverage Report + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install uv + uses: astral-sh/setup-uv@v4 + + - name: Install dependencies + run: | + uv venv + uv pip install -e ".[dev]" + + - name: Run tests with coverage + run: uv run pytest --cov=src --cov-report=term-missing --cov-report=xml --cov-fail-under=90 + + - name: Upload coverage to job summary + run: | + echo "## 📊 Coverage Report" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + uv run coverage report --format=markdown >> $GITHUB_STEP_SUMMARY + + # ============================================================================= + # Build & Verify + # ============================================================================= + build: + name: Build & Verify + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install uv + uses: astral-sh/setup-uv@v4 + + - name: Install build tools + run: uv pip install --system build twine + + - name: Build package + run: python -m build + + - name: Verify package with twine + run: twine check dist/* + + - name: Test install from wheel + run: | + uv venv test-install + uv pip install --python test-install dist/*.whl + test-install/bin/python -c "from logwell import Logwell; print('Import OK')" + + - name: Upload build artifacts + uses: actions/upload-artifact@v4 + with: + name: sdk-python-dist + path: sdks/python/dist/ + retention-days: 7 + + # ============================================================================= + # Publish to PyPI (only on main, only if version changed) + # ============================================================================= + publish: + name: Publish to PyPI + runs-on: ubuntu-latest + timeout-minutes: 10 + needs: [lint, test-unit, test-integration, coverage, build] + if: github.ref == 'refs/heads/main' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') + environment: + name: pypi + url: https://pypi.org/project/logwell/ + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install uv + uses: astral-sh/setup-uv@v4 + + - name: Install build tools + run: uv pip install --system build + + - name: Build package + run: python -m build + + - name: Check if version exists on PyPI + id: version-check + run: | + PACKAGE_NAME=$(python -c "import tomllib; print(tomllib.load(open('pyproject.toml', 'rb'))['project']['name'])") + LOCAL_VERSION=$(python -c "import tomllib; print(tomllib.load(open('pyproject.toml', 'rb'))['project']['version'])") + echo "package_name=$PACKAGE_NAME" >> $GITHUB_OUTPUT + echo "local_version=$LOCAL_VERSION" >> $GITHUB_OUTPUT + + # Check if this version already exists on PyPI + if pip index versions "$PACKAGE_NAME" 2>/dev/null | grep -q "$LOCAL_VERSION"; then + echo "Version $LOCAL_VERSION already exists on PyPI" + echo "should_publish=false" >> $GITHUB_OUTPUT + else + echo "Version $LOCAL_VERSION is new, will publish" + echo "should_publish=true" >> $GITHUB_OUTPUT + fi + + - name: Publish to PyPI (Trusted Publisher) + if: steps.version-check.outputs.should_publish == 'true' + uses: pypa/gh-action-pypi-publish@release/v1 + with: + packages-dir: sdks/python/dist/ + + - name: Add publish info to job summary + if: steps.version-check.outputs.should_publish == 'true' + run: | + echo "## đŸ“Ļ SDK Published to PyPI" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**Package:** \`${{ steps.version-check.outputs.package_name }}\`" >> $GITHUB_STEP_SUMMARY + echo "**Version:** \`${{ steps.version-check.outputs.local_version }}\`" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "Install with:" >> $GITHUB_STEP_SUMMARY + echo "\`\`\`bash" >> $GITHUB_STEP_SUMMARY + echo "pip install ${{ steps.version-check.outputs.package_name }}==${{ steps.version-check.outputs.local_version }}" >> $GITHUB_STEP_SUMMARY + echo "\`\`\`" >> $GITHUB_STEP_SUMMARY + + - name: Skip publish info + if: steps.version-check.outputs.should_publish == 'false' + run: | + echo "## â„šī¸ SDK Publish Skipped" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "Version \`${{ steps.version-check.outputs.local_version }}\` already exists on PyPI." >> $GITHUB_STEP_SUMMARY + echo "Bump the version in \`sdks/python/pyproject.toml\` to trigger a new publish." >> $GITHUB_STEP_SUMMARY diff --git a/sdks/python/pyproject.toml b/sdks/python/pyproject.toml index a689b29..dd18817 100644 --- a/sdks/python/pyproject.toml +++ b/sdks/python/pyproject.toml @@ -37,6 +37,7 @@ dev = [ "mypy>=1.8.0", "ruff>=0.4.0", "pytest-cov>=4.1.0", + "coverage[toml]>=7.0.0", ] [project.urls] From d425a29f7c8ac434eaea9ad42b34615ca42b5f61 Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 18:44:41 -0700 Subject: [PATCH 28/30] fix(python-sdk): resolve lint errors in test files - Remove unused Callable and IngestResponse imports from test_client.py - Remove unused Callable import from test_config.py - Move IngestResponse and LogEntry to TYPE_CHECKING block in test_queue.py - Remove empty TYPE_CHECKING block from test_source_location.py - Run ruff format to fix import sorting --- sdks/python/tests/conftest.py | 16 +-- sdks/python/tests/integration/test_e2e.py | 9 +- sdks/python/tests/unit/test_client.py | 108 +++++------------- sdks/python/tests/unit/test_config.py | 31 ++--- sdks/python/tests/unit/test_errors.py | 1 - sdks/python/tests/unit/test_queue.py | 8 +- .../python/tests/unit/test_source_location.py | 5 - 7 files changed, 43 insertions(+), 135 deletions(-) diff --git a/sdks/python/tests/conftest.py b/sdks/python/tests/conftest.py index c31edca..faec2d6 100644 --- a/sdks/python/tests/conftest.py +++ b/sdks/python/tests/conftest.py @@ -156,9 +156,7 @@ def invalid_config_bad_endpoint_relative(valid_api_key: str) -> dict[str, Any]: @pytest.fixture -def invalid_config_negative_batch_size( - valid_api_key: str, valid_endpoint: str -) -> dict[str, Any]: +def invalid_config_negative_batch_size(valid_api_key: str, valid_endpoint: str) -> dict[str, Any]: """Config with negative batch_size.""" return { "api_key": valid_api_key, @@ -168,9 +166,7 @@ def invalid_config_negative_batch_size( @pytest.fixture -def invalid_config_zero_batch_size( - valid_api_key: str, valid_endpoint: str -) -> dict[str, Any]: +def invalid_config_zero_batch_size(valid_api_key: str, valid_endpoint: str) -> dict[str, Any]: """Config with zero batch_size.""" return { "api_key": valid_api_key, @@ -204,9 +200,7 @@ def invalid_config_negative_max_queue_size( @pytest.fixture -def invalid_config_negative_max_retries( - valid_api_key: str, valid_endpoint: str -) -> dict[str, Any]: +def invalid_config_negative_max_retries(valid_api_key: str, valid_endpoint: str) -> dict[str, Any]: """Config with negative max_retries.""" return { "api_key": valid_api_key, @@ -461,9 +455,7 @@ def _make( @pytest.fixture -def make_config( - valid_api_key: str, valid_endpoint: str -) -> Callable[..., LogwellConfig]: +def make_config(valid_api_key: str, valid_endpoint: str) -> Callable[..., LogwellConfig]: """Factory fixture for creating configs with custom overrides.""" def _make(**overrides: Any) -> LogwellConfig: diff --git a/sdks/python/tests/integration/test_e2e.py b/sdks/python/tests/integration/test_e2e.py index 32e462d..8a96330 100644 --- a/sdks/python/tests/integration/test_e2e.py +++ b/sdks/python/tests/integration/test_e2e.py @@ -174,6 +174,7 @@ async def test_auto_flush_on_batch_size( # Give time for auto-flush to complete import asyncio + await asyncio.sleep(0.1) await client.shutdown() @@ -201,9 +202,7 @@ async def test_retry_on_500_server_error( httpx.Response(503, json={"error": "Service unavailable"}), httpx.Response(200, json={"accepted": 1}), ] - mock_server.post("https://logs.example.com/v1/ingest").mock( - side_effect=responses - ) + mock_server.post("https://logs.example.com/v1/ingest").mock(side_effect=responses) # Execute client = Logwell(valid_config) @@ -226,9 +225,7 @@ async def test_retry_on_429_rate_limit( httpx.Response(429, json={"error": "Too many requests"}), httpx.Response(200, json={"accepted": 1}), ] - mock_server.post("https://logs.example.com/v1/ingest").mock( - side_effect=responses - ) + mock_server.post("https://logs.example.com/v1/ingest").mock(side_effect=responses) # Execute client = Logwell(valid_config) diff --git a/sdks/python/tests/unit/test_client.py b/sdks/python/tests/unit/test_client.py index 52585d0..5cbb3a1 100644 --- a/sdks/python/tests/unit/test_client.py +++ b/sdks/python/tests/unit/test_client.py @@ -24,9 +24,7 @@ from logwell.queue import BatchQueue if TYPE_CHECKING: - from collections.abc import Callable - - from logwell.types import IngestResponse, LogEntry, LogwellConfig + from logwell.types import LogEntry, LogwellConfig # ============================================================================= @@ -152,9 +150,7 @@ class TestLogMethods: """Tests for log level methods (debug, info, warn, error, fatal).""" @pytest.fixture - def client_with_mock( - self, valid_config: LogwellConfig - ) -> tuple[Logwell, list[LogEntry]]: + def client_with_mock(self, valid_config: LogwellConfig) -> tuple[Logwell, list[LogEntry]]: """Create a client with a mock queue.""" mock_queue, captured = make_mock_queue() client = Logwell(valid_config, _queue=mock_queue) @@ -237,53 +233,41 @@ class TestLogWithMetadata: """Tests for logging with metadata.""" @pytest.fixture - def client_with_mock( - self, valid_config: LogwellConfig - ) -> tuple[Logwell, list[LogEntry]]: + def client_with_mock(self, valid_config: LogwellConfig) -> tuple[Logwell, list[LogEntry]]: """Create a client with a mock queue.""" mock_queue, captured = make_mock_queue() client = Logwell(valid_config, _queue=mock_queue) return client, captured - def test_debug_with_metadata( - self, client_with_mock: tuple[Logwell, list[LogEntry]] - ) -> None: + def test_debug_with_metadata(self, client_with_mock: tuple[Logwell, list[LogEntry]]) -> None: """debug() includes metadata in log entry.""" client, captured = client_with_mock client.debug("Debug", {"key": "value"}) assert captured[0]["metadata"] == {"key": "value"} - def test_info_with_metadata( - self, client_with_mock: tuple[Logwell, list[LogEntry]] - ) -> None: + def test_info_with_metadata(self, client_with_mock: tuple[Logwell, list[LogEntry]]) -> None: """info() includes metadata in log entry.""" client, captured = client_with_mock client.info("Info", {"user_id": "123"}) assert captured[0]["metadata"] == {"user_id": "123"} - def test_warn_with_metadata( - self, client_with_mock: tuple[Logwell, list[LogEntry]] - ) -> None: + def test_warn_with_metadata(self, client_with_mock: tuple[Logwell, list[LogEntry]]) -> None: """warn() includes metadata in log entry.""" client, captured = client_with_mock client.warn("Warning", {"count": 5}) assert captured[0]["metadata"] == {"count": 5} - def test_error_with_metadata( - self, client_with_mock: tuple[Logwell, list[LogEntry]] - ) -> None: + def test_error_with_metadata(self, client_with_mock: tuple[Logwell, list[LogEntry]]) -> None: """error() includes metadata in log entry.""" client, captured = client_with_mock client.error("Error", {"error_code": "E001"}) assert captured[0]["metadata"] == {"error_code": "E001"} - def test_fatal_with_metadata( - self, client_with_mock: tuple[Logwell, list[LogEntry]] - ) -> None: + def test_fatal_with_metadata(self, client_with_mock: tuple[Logwell, list[LogEntry]]) -> None: """fatal() includes metadata in log entry.""" client, captured = client_with_mock client.fatal("Fatal", {"crash_id": "xyz"}) @@ -309,9 +293,7 @@ def test_metadata_empty_dict_not_added( # Empty metadata should not be added assert "metadata" not in captured[0] - def test_complex_metadata( - self, client_with_mock: tuple[Logwell, list[LogEntry]] - ) -> None: + def test_complex_metadata(self, client_with_mock: tuple[Logwell, list[LogEntry]]) -> None: """Complex nested metadata is preserved.""" client, captured = client_with_mock metadata = { @@ -333,9 +315,7 @@ class TestLogTimestamp: """Tests for automatic timestamp generation.""" @pytest.fixture - def client_with_mock( - self, valid_config: LogwellConfig - ) -> tuple[Logwell, list[LogEntry]]: + def client_with_mock(self, valid_config: LogwellConfig) -> tuple[Logwell, list[LogEntry]]: """Create a client with a mock queue.""" mock_queue, captured = make_mock_queue() client = Logwell(valid_config, _queue=mock_queue) @@ -354,9 +334,7 @@ def test_timestamp_auto_generated( assert isinstance(timestamp, str) assert "T" in timestamp # ISO 8601 format has T separator - def test_timestamp_uses_utc( - self, client_with_mock: tuple[Logwell, list[LogEntry]] - ) -> None: + def test_timestamp_uses_utc(self, client_with_mock: tuple[Logwell, list[LogEntry]]) -> None: """Timestamp is in UTC timezone.""" client, captured = client_with_mock client.info("Test") @@ -471,9 +449,7 @@ async def test_flush_returns_response(self, valid_config: LogwellConfig) -> None assert result == {"accepted": 5, "rejected": 0} @pytest.mark.asyncio - async def test_flush_returns_none_when_empty( - self, valid_config: LogwellConfig - ) -> None: + async def test_flush_returns_none_when_empty(self, valid_config: LogwellConfig) -> None: """flush() returns None when queue is empty.""" mock_queue, _ = make_mock_queue() mock_queue.flush = AsyncMock(return_value=None) @@ -493,9 +469,7 @@ class TestShutdown: """Tests for shutdown() method.""" @pytest.mark.asyncio - async def test_shutdown_calls_queue_shutdown( - self, valid_config: LogwellConfig - ) -> None: + async def test_shutdown_calls_queue_shutdown(self, valid_config: LogwellConfig) -> None: """shutdown() calls queue.shutdown() when owning queue.""" mock_queue, _ = make_mock_queue() # Create client that owns queue @@ -545,9 +519,7 @@ async def test_shutdown_does_not_shutdown_shared_queue( mock_queue.shutdown.assert_not_called() @pytest.mark.asyncio - async def test_shutdown_sets_stopped_flag( - self, valid_config: LogwellConfig - ) -> None: + async def test_shutdown_sets_stopped_flag(self, valid_config: LogwellConfig) -> None: """shutdown() sets _stopped flag.""" mock_queue, _ = make_mock_queue() client = Logwell(valid_config, _queue=mock_queue) @@ -557,9 +529,7 @@ async def test_shutdown_sets_stopped_flag( assert client._stopped is True @pytest.mark.asyncio - async def test_shutdown_rejects_new_logs( - self, valid_config: LogwellConfig - ) -> None: + async def test_shutdown_rejects_new_logs(self, valid_config: LogwellConfig) -> None: """Logs are ignored after shutdown().""" mock_queue, captured = make_mock_queue() client = Logwell(valid_config, _queue=mock_queue) @@ -635,9 +605,7 @@ def test_child_does_not_own_queue(self, valid_config: LogwellConfig) -> None: assert child._owns_queue is False - def test_child_inherits_config( - self, valid_api_key: str, valid_endpoint: str - ) -> None: + def test_child_inherits_config(self, valid_api_key: str, valid_endpoint: str) -> None: """Child logger inherits parent config.""" config: LogwellConfig = { "api_key": valid_api_key, @@ -668,9 +636,7 @@ def test_child_with_metadata(self, valid_config: LogwellConfig) -> None: assert captured[0]["metadata"]["request_id"] == "abc123" - def test_child_metadata_merges_with_log_metadata( - self, valid_config: LogwellConfig - ) -> None: + def test_child_metadata_merges_with_log_metadata(self, valid_config: LogwellConfig) -> None: """Child metadata merges with per-log metadata.""" mock_queue, captured = make_mock_queue() client = Logwell(valid_config, _queue=mock_queue) @@ -681,9 +647,7 @@ def test_child_metadata_merges_with_log_metadata( assert captured[0]["metadata"]["request_id"] == "abc123" assert captured[0]["metadata"]["user_id"] == "user456" - def test_child_log_metadata_overrides_child_metadata( - self, valid_config: LogwellConfig - ) -> None: + def test_child_log_metadata_overrides_child_metadata(self, valid_config: LogwellConfig) -> None: """Per-log metadata takes precedence over child metadata.""" mock_queue, captured = make_mock_queue() client = Logwell(valid_config, _queue=mock_queue) @@ -693,9 +657,7 @@ def test_child_log_metadata_overrides_child_metadata( assert captured[0]["metadata"]["key"] == "log_value" - def test_child_with_service_override( - self, valid_api_key: str, valid_endpoint: str - ) -> None: + def test_child_with_service_override(self, valid_api_key: str, valid_endpoint: str) -> None: """Child logger can override service name.""" config: LogwellConfig = { "api_key": valid_api_key, @@ -710,9 +672,7 @@ def test_child_with_service_override( assert captured[0]["service"] == "child-service" - def test_child_inherits_parent_service( - self, valid_api_key: str, valid_endpoint: str - ) -> None: + def test_child_inherits_parent_service(self, valid_api_key: str, valid_endpoint: str) -> None: """Child logger inherits parent's service if not overridden.""" config: LogwellConfig = { "api_key": valid_api_key, @@ -727,9 +687,7 @@ def test_child_inherits_parent_service( assert captured[0]["service"] == "parent-service" - def test_child_inherits_callbacks( - self, valid_api_key: str, valid_endpoint: str - ) -> None: + def test_child_inherits_callbacks(self, valid_api_key: str, valid_endpoint: str) -> None: """Child logger inherits on_error and on_flush callbacks.""" on_error = MagicMock() on_flush = MagicMock() @@ -791,9 +749,7 @@ def test_nested_metadata_accumulates(self, valid_config: LogwellConfig) -> None: assert captured[0]["metadata"]["level1"] == "value1" assert captured[0]["metadata"]["level2"] == "value2" - def test_nested_metadata_overrides_parent( - self, valid_config: LogwellConfig - ) -> None: + def test_nested_metadata_overrides_parent(self, valid_config: LogwellConfig) -> None: """Deeper child can override ancestor's metadata key.""" mock_queue, captured = make_mock_queue() root = Logwell(valid_config, _queue=mock_queue) @@ -841,9 +797,7 @@ def test_sibling_children_independent(self, valid_config: LogwellConfig) -> None class TestSourceLocationCapture: """Tests for source location capture when enabled.""" - def test_source_location_disabled_by_default( - self, valid_config: LogwellConfig - ) -> None: + def test_source_location_disabled_by_default(self, valid_config: LogwellConfig) -> None: """Source location not captured when disabled (default).""" mock_queue, captured = make_mock_queue() client = Logwell(valid_config, _queue=mock_queue) @@ -938,9 +892,7 @@ def test_child_inherits_source_location_setting( class TestMergeMetadata: """Tests for _merge_metadata internal method.""" - def test_no_parent_no_entry_returns_none( - self, valid_config: LogwellConfig - ) -> None: + def test_no_parent_no_entry_returns_none(self, valid_config: LogwellConfig) -> None: """Returns None when no parent or entry metadata.""" mock_queue, _ = make_mock_queue() client = Logwell(valid_config, _queue=mock_queue) @@ -952,9 +904,7 @@ def test_no_parent_no_entry_returns_none( def test_parent_only_returns_parent(self, valid_config: LogwellConfig) -> None: """Returns parent metadata when no entry metadata.""" mock_queue, _ = make_mock_queue() - client = Logwell( - valid_config, _queue=mock_queue, _parent_metadata={"parent": "value"} - ) + client = Logwell(valid_config, _queue=mock_queue, _parent_metadata={"parent": "value"}) result = client._merge_metadata(None) @@ -969,9 +919,7 @@ def test_entry_only_returns_entry(self, valid_config: LogwellConfig) -> None: assert result == {"entry": "value"} - def test_both_merges_with_entry_priority( - self, valid_config: LogwellConfig - ) -> None: + def test_both_merges_with_entry_priority(self, valid_config: LogwellConfig) -> None: """Merges both, entry takes precedence.""" mock_queue, _ = make_mock_queue() client = Logwell( @@ -1009,9 +957,7 @@ async def test_basic_workflow(self, valid_config: LogwellConfig) -> None: assert response == {"accepted": 1} @pytest.mark.asyncio - async def test_request_scoped_logging( - self, valid_api_key: str, valid_endpoint: str - ) -> None: + async def test_request_scoped_logging(self, valid_api_key: str, valid_endpoint: str) -> None: """Test request-scoped logging pattern.""" config: LogwellConfig = { "api_key": valid_api_key, diff --git a/sdks/python/tests/unit/test_config.py b/sdks/python/tests/unit/test_config.py index ba9454e..8443577 100644 --- a/sdks/python/tests/unit/test_config.py +++ b/sdks/python/tests/unit/test_config.py @@ -21,8 +21,6 @@ from logwell.errors import LogwellError, LogwellErrorCode if TYPE_CHECKING: - from collections.abc import Callable - from logwell.types import LogwellConfig @@ -680,9 +678,7 @@ def test_returns_copy_not_reference(self, valid_config: LogwellConfig) -> None: result["batch_size"] = 9999 assert valid_config.get("batch_size") != 9999 - def test_all_values_present_in_full_config( - self, valid_config_full: LogwellConfig - ) -> None: + def test_all_values_present_in_full_config(self, valid_config_full: LogwellConfig) -> None: """Full config returns all provided values.""" result = validate_config(valid_config_full) @@ -693,10 +689,7 @@ def test_all_values_present_in_full_config( assert result["flush_interval"] == valid_config_full["flush_interval"] assert result["max_queue_size"] == valid_config_full["max_queue_size"] assert result["max_retries"] == valid_config_full["max_retries"] - assert ( - result["capture_source_location"] - == valid_config_full["capture_source_location"] - ) + assert result["capture_source_location"] == valid_config_full["capture_source_location"] # ============================================================================= @@ -707,9 +700,7 @@ def test_all_values_present_in_full_config( class TestIsValidUrlEdgeCases: """Edge cases for _is_valid_url internal function (via validate_config).""" - def test_url_that_triggers_exception( - self, valid_api_key: str - ) -> None: + def test_url_that_triggers_exception(self, valid_api_key: str) -> None: """Test URL that might trigger urlparse exception. urlparse is very permissive and rarely throws, but we can test @@ -732,9 +723,7 @@ def test_url_that_triggers_exception( assert exc_info.value.code == LogwellErrorCode.INVALID_CONFIG assert "Invalid endpoint URL" in exc_info.value.message - def test_url_with_attribute_error( - self, valid_api_key: str - ) -> None: + def test_url_with_attribute_error(self, valid_api_key: str) -> None: """Test URL that causes AttributeError in urlparse.""" from unittest.mock import patch @@ -757,9 +746,7 @@ def test_url_with_attribute_error( class TestValidateConfigEdgeCases: """Edge case tests for validate_config.""" - def test_api_key_exactly_32_chars_after_prefix( - self, valid_endpoint: str - ) -> None: + def test_api_key_exactly_32_chars_after_prefix(self, valid_endpoint: str) -> None: """Accepts API key with exactly 32 chars after prefix.""" config: dict[str, Any] = { "api_key": "lw_" + "a" * 32, @@ -802,9 +789,7 @@ def test_validates_in_order(self, valid_endpoint: str) -> None: # api_key error should come first assert "api_key" in exc_info.value.message - def test_api_key_format_checked_before_numeric_bounds( - self, valid_endpoint: str - ) -> None: + def test_api_key_format_checked_before_numeric_bounds(self, valid_endpoint: str) -> None: """API key format checked before numeric options.""" config: dict[str, Any] = { "api_key": "invalid_key", @@ -818,9 +803,7 @@ def test_api_key_format_checked_before_numeric_bounds( # api_key format error should come first assert "Invalid API key format" in exc_info.value.message - def test_endpoint_checked_before_numeric_bounds( - self, valid_api_key: str - ) -> None: + def test_endpoint_checked_before_numeric_bounds(self, valid_api_key: str) -> None: """Endpoint URL checked before numeric options.""" config: dict[str, Any] = { "api_key": valid_api_key, diff --git a/sdks/python/tests/unit/test_errors.py b/sdks/python/tests/unit/test_errors.py index 1051112..4a6a5c9 100644 --- a/sdks/python/tests/unit/test_errors.py +++ b/sdks/python/tests/unit/test_errors.py @@ -13,7 +13,6 @@ from logwell.errors import LogwellError, LogwellErrorCode - # ============================================================================= # LogwellErrorCode Tests # ============================================================================= diff --git a/sdks/python/tests/unit/test_queue.py b/sdks/python/tests/unit/test_queue.py index ee007e5..92fd37c 100644 --- a/sdks/python/tests/unit/test_queue.py +++ b/sdks/python/tests/unit/test_queue.py @@ -22,10 +22,9 @@ from logwell.errors import LogwellError, LogwellErrorCode from logwell.queue import BatchQueue, QueueConfig -from logwell.types import IngestResponse, LogEntry if TYPE_CHECKING: - from collections.abc import Callable + from logwell.types import IngestResponse, LogEntry # ============================================================================= @@ -713,10 +712,7 @@ def add_entries(thread_id: int) -> None: for i in range(entries_per_thread): queue.add(make_log_entry(f"thread_{thread_id}_msg_{i}")) - threads = [ - threading.Thread(target=add_entries, args=(i,)) - for i in range(num_threads) - ] + threads = [threading.Thread(target=add_entries, args=(i,)) for i in range(num_threads)] for t in threads: t.start() diff --git a/sdks/python/tests/unit/test_source_location.py b/sdks/python/tests/unit/test_source_location.py index 234bac0..f0ecd3c 100644 --- a/sdks/python/tests/unit/test_source_location.py +++ b/sdks/python/tests/unit/test_source_location.py @@ -14,16 +14,11 @@ import inspect import os from dataclasses import FrozenInstanceError -from typing import TYPE_CHECKING import pytest from logwell.source_location import SourceLocation, capture_source_location -if TYPE_CHECKING: - pass - - # ============================================================================= # SourceLocation Dataclass Tests # ============================================================================= From 1ddaee9532a5aaf1bce41cf352d4b39410b355d6 Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 18:51:58 -0700 Subject: [PATCH 29/30] chore(python-sdk): update author name to Divkix --- sdks/python/pyproject.toml | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/sdks/python/pyproject.toml b/sdks/python/pyproject.toml index dd18817..520c299 100644 --- a/sdks/python/pyproject.toml +++ b/sdks/python/pyproject.toml @@ -8,7 +8,7 @@ version = "0.1.0" description = "Official Python SDK for Logwell logging platform" readme = "README.md" license = "MIT" -authors = [{ name = "Logwell", email = "dev@logwell.io" }] +authors = [{ name = "Divkix", email = "divkix@divkix.me" }] keywords = ["logging", "logs", "observability", "logwell", "python"] classifiers = [ "Development Status :: 4 - Beta", @@ -24,10 +24,7 @@ classifiers = [ "Typing :: Typed", ] requires-python = ">=3.9" -dependencies = [ - "httpx>=0.25.0", - "typing_extensions>=4.0.0", -] +dependencies = ["httpx>=0.25.0", "typing_extensions>=4.0.0"] [project.optional-dependencies] dev = [ From 60b8de21a802523cad70fae7c34248eec8d515bb Mon Sep 17 00:00:00 2001 From: Divanshu Chauhan Date: Fri, 16 Jan 2026 18:56:47 -0700 Subject: [PATCH 30/30] all done --- specs/python-sdk/.ralph-state.json | 12 ------------ 1 file changed, 12 deletions(-) delete mode 100644 specs/python-sdk/.ralph-state.json diff --git a/specs/python-sdk/.ralph-state.json b/specs/python-sdk/.ralph-state.json deleted file mode 100644 index c9282da..0000000 --- a/specs/python-sdk/.ralph-state.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "source": "plan", - "name": "python-sdk", - "basePath": "./specs/python-sdk", - "phase": "complete", - "taskIndex": 22, - "totalTasks": 22, - "taskIteration": 1, - "maxTaskIterations": 5, - "globalIteration": 1, - "maxGlobalIterations": 100 -}