From ca74278f13c3a2208c81a9aa4a903c09236acc16 Mon Sep 17 00:00:00 2001 From: Vitor Hugo Date: Thu, 12 Feb 2026 00:19:41 -0300 Subject: [PATCH 1/5] feat: queue, OpenTelemetry config, uvloop support and docs - Add genesis.queue with in-memory and Redis backends (slot/semaphore) - Add genesis.loop with use_uvloop() helper - Add genesis.observability.otel_config for OTEL env configuration - CLI: expose loop and otel config options - Docs: queue example and tool docs, otel-config, uvloop - Example: examples/queue.py - Docker: local_stream.conf.xml for FreeSWITCH Co-authored-by: Cursor --- README.md | 2 + .../autoload_configs/local_stream.conf.xml | 4 + docs/content/docs/Examples/_index.md | 27 ++ .../docs/Examples/fastapi-click2call.md | 10 +- docs/content/docs/Examples/group-call.md | 24 +- docs/content/docs/Examples/ivr.md | 40 +- docs/content/docs/Examples/queue.md | 116 ++++++ docs/content/docs/Observability/_index.md | 1 + .../content/docs/Observability/otel-config.md | 56 +++ docs/content/docs/Tools/_index.md | 2 + docs/content/docs/Tools/queue/_index.md | 171 ++++++++ docs/content/docs/Tools/queue/backends.md | 78 ++++ docs/content/docs/Tools/uvloop.md | 29 ++ examples/queue.py | 45 +++ genesis/__init__.py | 18 + genesis/cli/__init__.py | 43 ++- genesis/cli/consumer.py | 3 + genesis/cli/outbound.py | 3 + genesis/exceptions.py | 6 + genesis/group/load_balancer.py | 52 +-- genesis/loop.py | 38 ++ genesis/observability/__init__.py | 16 + genesis/observability/otel_config.py | 118 ++++++ genesis/queue/__init__.py | 22 ++ genesis/queue/backends.py | 151 ++++++++ genesis/queue/core.py | 214 ++++++++++ genesis/queue/redis_backend.py | 160 ++++++++ poetry.lock | 364 ++++++++++++++---- pyproject.toml | 10 +- tests/test_cli.py | 5 +- tests/test_loop.py | 11 + tests/test_otel_config.py | 148 +++++++ tests/test_queue.py | 238 ++++++++++++ 33 files changed, 2052 insertions(+), 173 deletions(-) create mode 100644 docker/freeswitch/conf/autoload_configs/local_stream.conf.xml 
create mode 100644 docs/content/docs/Examples/queue.md create mode 100644 docs/content/docs/Observability/otel-config.md create mode 100644 docs/content/docs/Tools/queue/_index.md create mode 100644 docs/content/docs/Tools/queue/backends.md create mode 100644 docs/content/docs/Tools/uvloop.md create mode 100644 examples/queue.py create mode 100644 genesis/loop.py create mode 100644 genesis/observability/otel_config.py create mode 100644 genesis/queue/__init__.py create mode 100644 genesis/queue/backends.py create mode 100644 genesis/queue/core.py create mode 100644 genesis/queue/redis_backend.py create mode 100644 tests/test_loop.py create mode 100644 tests/test_otel_config.py create mode 100644 tests/test_queue.py diff --git a/README.md b/README.md index c248a9b..71d44e0 100644 --- a/README.md +++ b/README.md @@ -24,6 +24,8 @@ Install Genesis using `pip`: pip install genesis ``` +For better asyncio performance on Unix (Linux and macOS), use the optional uvloop extra: `pip install genesis[uvloop]`. See the [Installation Guide](https://otoru.github.io/Genesis/docs/installation/) for details. + ## Quickstart ### Inbound Socket Mode diff --git a/docker/freeswitch/conf/autoload_configs/local_stream.conf.xml b/docker/freeswitch/conf/autoload_configs/local_stream.conf.xml new file mode 100644 index 0000000..ff60a23 --- /dev/null +++ b/docker/freeswitch/conf/autoload_configs/local_stream.conf.xml @@ -0,0 +1,4 @@ + + + + diff --git a/docs/content/docs/Examples/_index.md b/docs/content/docs/Examples/_index.md index 9071979..fab12b4 100644 --- a/docs/content/docs/Examples/_index.md +++ b/docs/content/docs/Examples/_index.md @@ -48,10 +48,37 @@ docker-compose down For more details about the Docker setup, see the [docker/freeswitch/README.md](https://github.com/Otoru/Genesis/blob/main/docker/freeswitch/README.md) file. +## Testing with a SIP Client + +You can test the examples using a SIP client (e.g. Linphone, Zoiper, or X-Lite): + +1. 
Configure your SIP client to connect to FreeSWITCH: + - **Server:** `127.0.0.1:5060` + - **Username:** `1000` or `1001` + - **Password:** `1000` or `1001` (same as username) + - **Domain:** `127.0.0.1` + +2. Register the SIP client. + +## Dialplan Configuration + +The Docker environment includes a dialplan entry that routes calls to `9999` to the outbound socket: + +```xml + + + + + +``` + +Calls to `9999` trigger FreeSWITCH to connect to your application at `127.0.0.1:9696`. + ## Available Examples {{< cards cols="1" >}} {{< card link="fastapi-click2call/" title="Click2Call API" icon="code" subtitle="REST API endpoint for click2call functionality using FastAPI." >}} {{< card link="ivr/" title="IVR" icon="phone" subtitle="Simple IVR system using Outbound mode with DTMF interaction." >}} {{< card link="group-call/" title="Group Call" icon="users" subtitle="Simultaneous originate that calls multiple destinations and bridges with the first to answer." >}} + {{< card link="queue/" title="Queue" icon="view-list" subtitle="Outbound with a queue: one call at a time; others wait in line (FIFO)." >}} {{< /cards >}} diff --git a/docs/content/docs/Examples/fastapi-click2call.md b/docs/content/docs/Examples/fastapi-click2call.md index 6ef8b8e..ab87e1d 100644 --- a/docs/content/docs/Examples/fastapi-click2call.md +++ b/docs/content/docs/Examples/fastapi-click2call.md @@ -64,20 +64,20 @@ The example uses a **per-request connection**, opening a new connection to FreeS {{% steps %}} -### 1. Clone the Repository +### Clone the Repository ```bash git clone https://github.com/Otoru/Genesis.git cd Genesis ``` -### 2. Install Dependencies +### Install Dependencies ```bash poetry install --with examples ``` -### 3. Configure FreeSWITCH Connection +### Configure FreeSWITCH Connection Set environment variables for your FreeSWITCH connection: @@ -87,7 +87,7 @@ export FS_PORT=8021 export FS_PASSWORD=ClueCon ``` -### 4. 
Run the Server +### Run the Server ```bash uvicorn examples.click2call:app --reload @@ -95,7 +95,7 @@ uvicorn examples.click2call:app --reload The API will be available at `http://localhost:8000`. -### 5. Test the Endpoint +### Test the Endpoint ```bash curl -X POST "http://localhost:8000/" \ diff --git a/docs/content/docs/Examples/group-call.md b/docs/content/docs/Examples/group-call.md index c4048a3..11d2ef7 100644 --- a/docs/content/docs/Examples/group-call.md +++ b/docs/content/docs/Examples/group-call.md @@ -91,12 +91,30 @@ sequenceDiagram ## Running the Example -Start FreeSWITCH (see [Examples environment]({{< relref "../Examples/_index.md" >}})) and run: +{{% steps %}} + +### Start FreeSWITCH + +Make sure FreeSWITCH is running (see [Examples environment]({{< relref "../Examples/_index.md" >}})). + +### Run the example ```bash python examples/group_call.py ``` -The example will ring the group `["user/1001", "user/1002", "user/1003"]` in parallel mode, wait for the first callee to answer, create and bridge the caller (`user/1000`) with the answered callee, then hang up all channels after 5 seconds. +### Make test calls + +- Register multiple SIP clients: user `1000` , `1001`, `1002` and `1003`. +- Run the example; the first callee to answer is connected to the caller. + +### View Logs + +To see what's happening in FreeSWITCH: + +```bash +docker exec -it genesis-freeswitch fs_cli -x "show channels" +docker logs genesis-freeswitch -f +``` -To test this properly, you'll need multiple SIP clients registered: user `1000` (caller) and users `1001`, `1002`, `1003` (callees). The first callee to answer will be connected to the caller. +{{% /steps %}} diff --git a/docs/content/docs/Examples/ivr.md b/docs/content/docs/Examples/ivr.md index 6b0f009..bdc63ba 100644 --- a/docs/content/docs/Examples/ivr.md +++ b/docs/content/docs/Examples/ivr.md @@ -72,11 +72,11 @@ This example demonstrates Outbound Socket mode, where FreeSWITCH connects to you {{% steps %}} -### 1. 
Start FreeSWITCH +### Start FreeSWITCH Make sure FreeSWITCH is running in Docker (see [Examples environment]({{< relref "../Examples/_index.md" >}})). -### 2. Start the IVR Server +### Start the IVR Server In a terminal, run the IVR example: @@ -86,7 +86,7 @@ python examples/ivr.py The server will start listening on `0.0.0.0:9696` and wait for FreeSWITCH to connect. -### 3. Make a Test Call +### Make a Test Call In another terminal, use FreeSWITCH CLI to originate a call to the IVR: @@ -98,14 +98,14 @@ This command: - Creates a call from user `1000` (a test user configured in the Docker environment) - Routes it to number `9999` (configured in the dialplan to connect to your outbound socket) -### 4. Interact with the IVR +### Interact with the IVR Once the call is connected: - You'll hear the welcome message - Press `1`, `2`, or `3` to select an option - The IVR will respond to your selection -### 5. View Logs +### View Logs To see what's happening in FreeSWITCH: @@ -115,33 +115,3 @@ docker logs genesis-freeswitch -f ``` {{% /steps %}} - -## Testing with a SIP Client - -You can also test using a SIP client (like Linphone, Zoiper, or X-Lite): - -1. Configure your SIP client to connect to FreeSWITCH: - - **Server:** `127.0.0.1:5060` - - **Username:** `1000` or `1001` - - **Password:** `1000` or `1001` (same as username) - - **Domain:** `127.0.0.1` - -2. Register the SIP client - -3. Make a call to `9999` - -4. The call will be routed to your IVR application - -## Dialplan Configuration - -The Docker environment includes a dialplan entry that routes calls to `9999` to your outbound socket: - -```xml - - - - - -``` - -This means any call to `9999` will trigger FreeSWITCH to connect to your application at `127.0.0.1:9696`. 
diff --git a/docs/content/docs/Examples/queue.md b/docs/content/docs/Examples/queue.md new file mode 100644 index 0000000..5935b6c --- /dev/null +++ b/docs/content/docs/Examples/queue.md @@ -0,0 +1,116 @@ +--- +title: Queue +weight: 25 +parent: Examples +--- + +Outbound example: one extension calls another through the app. The caller hears hold music (or a message) until a queue slot is free, then we bridge them to the callee. Only one bridge at a time, so you keep control (e.g. one agent per queue). + +## Example Code + +```python {filename="examples/queue.py" base_url="https://github.com/Otoru/Genesis/blob/main"} +""" +Queue example. + +One extension calls another via the app: the caller is held (music or message) +until a queue slot is free, then we bridge them to the callee. Only one +bridge at a time so you keep control (e.g. one agent per queue). +""" + +import asyncio +import os + +from genesis import Outbound, Session, Queue, Channel +from genesis.types import ChannelState + +HOST = os.getenv("HOST", "0.0.0.0") +PORT = int(os.getenv("PORT", "9696")) +CALLEE = "user/1001" +HOLD_SOUND = os.getenv("HOLD_SOUND", "local_stream://moh") + +queue = Queue() # in-memory by default + + +async def handler(session: Session) -> None: + if session.channel is None: + return + await session.channel.answer() + await session.channel.playback(HOLD_SOUND, block=False) + + async with queue.slot("support"): + callee = await Channel.create(session, CALLEE) + await callee.wait(ChannelState.EXECUTE, timeout=30.0) + await session.channel.bridge(callee) + + +async def main() -> None: + server = Outbound(handler=handler, host=HOST, port=PORT) + await server.start() + + +if __name__ == "__main__": + asyncio.run(main()) +``` + +## Flow + +{{% steps %}} + +### FreeSWITCH sends the call + +FreeSWITCH sends the call to your app (outbound socket). 
+ +### Answer and play hold sound + +We answer and start playing a hold sound (`playback(..., block=False)`), so the caller hears it while waiting. + +### Wait for a queue slot + +The handler waits for a slot in the `"support"` queue (`async with queue.slot("support")`). If another call is already in the slot, this call waits (caller keeps hearing the hold sound). + +### Originate callee and bridge + +When we get the slot, we originate the callee (`Channel.create(session, CALLEE)`), wait for them to answer, then bridge the caller to the callee. The bridge replaces the hold playback. + +### Release the slot + +When the handler leaves the `async with` block, the slot is released and the next waiting caller can be served. + +{{% /steps %}} + +## Running the Example + +{{% steps %}} + +### Start FreeSWITCH + +Make sure FreeSWITCH is running (see [Examples environment]({{< relref "../Examples/_index.md" >}})). + +### Run the queue example + +```bash +python examples/queue.py +``` + +### Make test calls + +- You need two SIP clients: caller and callee (`user/1001`). See [Examples environment]({{< relref "../Examples/_index.md" >}}) (Docker includes MOH). +- Call the number that hits this dialplan. You hear hold music until your turn, then you're bridged to the callee. +- Place a second call while the first is still connected: the second caller hears hold music until the first call ends. 
+ +### View Logs + +To see what's happening in FreeSWITCH: + +```bash +docker exec -it genesis-freeswitch fs_cli -x "show channels" +docker logs genesis-freeswitch -f +``` + +{{% /steps %}} + +## Related + +- [Queue]({{< relref "../Tools/queue/_index.md" >}}) - Queue API and backends +- [Outbound Socket]({{< relref "../Quickstart/outbound.md" >}}) - Outbound basics +- [Channel]({{< relref "../Tools/channel.md" >}}) - Creating channels and bridge diff --git a/docs/content/docs/Observability/_index.md b/docs/content/docs/Observability/_index.md index 33bedac..47a9052 100644 --- a/docs/content/docs/Observability/_index.md +++ b/docs/content/docs/Observability/_index.md @@ -12,4 +12,5 @@ Genesis ships with **OpenTelemetry** for tracing, logging, and metrics. You get {{< card link="logging/" title="Logging" icon="terminal" subtitle="Structured logs with trace correlation and optional JSON output." >}} {{< card link="server/" title="Server" icon="server" subtitle="Health, readiness, and metrics over HTTP." >}} {{< card link="metrics/" title="Metrics" icon="chart-bar" subtitle="Counters and histograms for commands, events, channels, and ring groups." >}} + {{< card link="otel-config/" title="OTEL configuration" icon="cog" subtitle="Configure OpenTelemetry via OTEL_SDK_DISABLED, OTEL_SERVICE_NAME, and OTEL_RESOURCE_ATTRIBUTES." >}} {{< /cards >}} diff --git a/docs/content/docs/Observability/otel-config.md b/docs/content/docs/Observability/otel-config.md new file mode 100644 index 0000000..5ad305c --- /dev/null +++ b/docs/content/docs/Observability/otel-config.md @@ -0,0 +1,56 @@ +--- +title: Configuration +weight: 70 +--- + +Genesis supports configuring OpenTelemetry via standard environment variables. When you run the CLI (`genesis consumer` or `genesis outbound`), these variables control the metrics resource and whether the SDK is enabled. + +## Supported variables + +- **`OTEL_SDK_DISABLED`** + - Disables the OpenTelemetry SDK when set to `true` (case-insensitive). 
+ - When disabled, the CLI does not set a meter provider; metrics are no-ops. + - Default: not set (SDK enabled). + +- **`OTEL_SERVICE_NAME`** + - Sets the `service.name` resource attribute for metrics (and traces if you configure a tracer provider). + - Default: `genesis`. + +- **`OTEL_RESOURCE_ATTRIBUTES`** + - Extra resource attributes as comma-separated key-value pairs: `key1=value1,key2=value2`. + - If `service.name` is present here, it is overridden by `OTEL_SERVICE_NAME` when that variable is set. + - Example: `deployment.environment=production,service.version=1.0.0`. + +- **`OTEL_EXPORTER_OTLP_ENDPOINT`** + - Base URL for OTLP/HTTP export (traces and metrics). When set, the CLI configures an OTLP HTTP exporter so telemetry is sent to this endpoint (e.g. an OpenTelemetry Collector). + - Default for HTTP per spec: `http://localhost:4318` (collector OTLP HTTP receiver). + +- **`OTEL_EXPORTER_OTLP_METRICS_ENDPOINT`** + - Overrides the metrics endpoint (if unset, `OTEL_EXPORTER_OTLP_ENDPOINT` is used). + +- **`OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`** + - Overrides the traces endpoint (if unset, `OTEL_EXPORTER_OTLP_ENDPOINT` is used). When set (or when `OTEL_EXPORTER_OTLP_ENDPOINT` is set), the CLI also sets a TracerProvider with OTLP HTTP span exporter. + +## Examples + +Disable OpenTelemetry (e.g. in tests or when using another instrumentation): + +```bash +export OTEL_SDK_DISABLED=true +genesis consumer ... +``` + +Set a custom service name and environment: + +```bash +export OTEL_SERVICE_NAME=my-call-center +export OTEL_RESOURCE_ATTRIBUTES=deployment.environment=production +genesis outbound ... +``` + +Send metrics and traces to an OTLP collector over HTTP: + +```bash +export OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318 +genesis consumer ... 
+``` diff --git a/docs/content/docs/Tools/_index.md b/docs/content/docs/Tools/_index.md index 15e28fd..d46b2c5 100644 --- a/docs/content/docs/Tools/_index.md +++ b/docs/content/docs/Tools/_index.md @@ -6,7 +6,9 @@ weight: 50 Useful utilities and patterns to streamline your development with Genesis. {{< cards cols="1" >}} + {{< card link="uvloop/" title="uvloop" icon="lightning-bolt" subtitle="Optional fast asyncio event loop on Unix; install with genesis[uvloop]." >}} {{< card link="filtrate/" title="Filtrate" icon="code" subtitle="Filter events based on key-value pairs using decorators." >}} {{< card link="channel/" title="Channel Abstraction" icon="phone" subtitle="Create and manage outbound channels for call origination and bridging." >}} + {{< card link="queue/" title="Queue" icon="view-list" subtitle="FIFO queue with concurrency limit; slot and semaphore API; in-memory or Redis backend." >}} {{< card link="ring-group/" title="Ring Group" icon="users" subtitle="Call multiple destinations simultaneously or sequentially, connect to first answer." >}} {{< /cards >}} diff --git a/docs/content/docs/Tools/queue/_index.md b/docs/content/docs/Tools/queue/_index.md new file mode 100644 index 0000000..45394e5 --- /dev/null +++ b/docs/content/docs/Tools/queue/_index.md @@ -0,0 +1,171 @@ +--- +title: Queue +weight: 40 +--- + +The queue abstraction lets you limit concurrency per logical queue and process callers in FIFO order. It uses a **context manager** and **semaphore-like** API: you enter a slot, do your work, and release on exit. + +## Overview + +Use the queue when you want to: + +- Enqueue calls and process them one (or N) at a time per queue +- Control how many calls are "inside" a given flow at once (e.g. one at a time for a ring group) +- Share queue state across app instances via Redis when scaling out + +The queue is **generic**: it does not know about ring groups or FreeSWITCH. 
You use it to acquire a "slot" (FIFO + concurrency limit); what you do inside the slot (ring group, IVR, etc.) is up to you. + +## Flow + +With `max_concurrent=1`, only one caller holds the slot at a time. Others wait in FIFO order and acquire when the slot is released: + +```mermaid +sequenceDiagram + box Callers + participant A as Caller A + participant B as Caller B + participant C as Caller C + end + box Queue + participant Q as Queue + end + + A->>Q: enqueue + Q-->>A: slot acquired + A->>A: work (ring, bridge…) + + B->>Q: enqueue + C->>Q: enqueue + Note over Q: B and C waiting (FIFO) + + A->>Q: release + Q-->>B: slot acquired + B->>B: work + B->>Q: release + Q-->>C: slot acquired + C->>C: work + C->>Q: release +``` + +- **Enqueue**: On `slot(...)` enter, you join the queue (FIFO). You block until you are at the head and a slot is free. +- **Slot acquired**: You run your code (e.g. ring group, bridge). With `max_concurrent=1`, only one caller is in this phase at a time. +- **Release**: On exit, you free the slot; the next caller in line acquires it. + +With `max_concurrent=2`, two callers can hold a slot at once; the rest wait in line. + +## Basic Example + +```python +import asyncio +from genesis import Outbound, Session, Queue, RingGroup, RingMode + +queue = Queue() # in-memory by default + +async def handler(session: Session): + # Only one call at a time in "sales" queue; others wait in line + async with queue.slot("sales"): + answered = await RingGroup.ring( + session, + ["user/1001", "user/1002"], + RingMode.PARALLEL, + timeout=30.0, + ) + if answered: + await session.channel.bridge(answered) + +app = Outbound(handler, "127.0.0.1", 9000) +asyncio.run(app.start()) +``` + +## API + +### `queue.slot()` + +Use `async with queue.slot(...)` to acquire a slot and release it when the block ends. 
+ +- **Enter**: enqueues this call, then blocks until it is at the head of the queue and a slot is free +- **Exit**: releases the slot so the next caller can proceed + +```python +async with queue.slot("sales"): + # do work + pass + +# With explicit item_id (e.g. for Redis / tracing) +async with queue.slot("sales", item_id=session.uuid): + pass + +# Allow 2 concurrent +async with queue.slot("support", max_concurrent=2): + pass + +# Optional timeout: raise QueueTimeoutError if not acquired in 30s +try: + async with queue.slot("sales", timeout=30.0): + # do work + pass +except QueueTimeoutError: + # Caller waited too long; item was removed from queue + pass +``` + +### `queue.semaphore()` + +Returns a reusable object you can use as a context manager. Same semantics as a slot, but you can keep a reference and use it in several places. + +```python +sem = queue.semaphore("support", max_concurrent=2) + +async with sem: + # do work + pass + +# Optional: pass item_id when used as callable +async with sem(item_id=session.uuid): + pass +``` + +For in-memory and Redis, see [Backends]({{< relref "backends.md" >}}). + +## Parameters + +**`queue.slot(queue_id, *, item_id=None, max_concurrent=1, timeout=None)`** + +- `queue_id`: Logical queue name (e.g. `"sales"`, `"support"`) +- `item_id`: Optional identifier for this entry (e.g. `session.uuid`). If omitted, a UUID is generated. Useful with Redis for tracing +- `max_concurrent`: Maximum number of callers allowed inside this queue at once. First use for a given `queue_id` sets this for that queue in the backend +- `timeout`: Optional seconds to wait for a slot. 
If the wait exceeds this, the item is removed from the queue and `genesis.exceptions.QueueTimeoutError` is raised + +**`queue.semaphore(queue_id, max_concurrent=1, timeout=None)`** + +- `queue_id`: Name of the queue +- `max_concurrent`: Max concurrent slots when using this semaphore +- `timeout`: Optional seconds per acquire + +## Timeout + +You can pass **`timeout`** (seconds) to `queue.slot()` or `queue.semaphore()` so that if the caller does not get a slot within that time, the wait is aborted instead of blocking indefinitely. + +- **Behavior**
The timeout covers both (1) waiting for your turn in the FIFO queue and (2) waiting for a free concurrency slot. If the time is exceeded, your entry is removed from the queue and the next caller can proceed. +- **Exception**
`genesis.exceptions.QueueTimeoutError` is raised. Handle it to e.g. play a message and hang up. +- **Use case**: Avoid callers waiting forever when the queue is congested; after a limit (e.g. 60 seconds), you can play "all agents are busy" and disconnect. + +## Use Cases + +- **Ring group with one call at a time**: use a single slot per queue so only one caller is "in" the ring group at once; others wait in line +- **Bounded concurrency**: use `max_concurrent > 1` to allow N calls in the same flow (e.g. support pool) +- **Scaling**: use `RedisBackend` so several app instances share the same queue and respect global order and limits + +## Observability + +The queue reports: + +- **Metrics**: `genesis.queue.operations` (acquire/release counts), `genesis.queue.wait_duration` (time waiting for a slot) +- **Tracing**: spans for `queue.wait_and_acquire` with attributes `queue.id` and `queue.item_id` + +## Related + +- [Backends]({{< relref "backends.md" >}}) - In-memory and Redis +- [Ring Group]({{< relref "../ring-group/_index.md" >}}) - Often used inside a queue slot to ring agents +- [Outbound Socket]({{< relref "../../Quickstart/outbound.md" >}}) - Typical place to use the queue in the session handler +- [Queue Example]({{< relref "../../Examples/queue.md" >}}) - Full runnable example diff --git a/docs/content/docs/Tools/queue/backends.md b/docs/content/docs/Tools/queue/backends.md new file mode 100644 index 0000000..7f558fb --- /dev/null +++ b/docs/content/docs/Tools/queue/backends.md @@ -0,0 +1,78 @@ +--- +title: Backends +weight: 41 +--- + +Backends store queue state (FIFO order and concurrency). Choose the backend based on whether you run a single instance or multiple instances of your application. 
+ +## Single Instance + +If you run a single process, use the default in-memory backend: + +```python +from genesis import Queue + +queue = Queue() # InMemoryBackend by default +``` + +- State lives in process memory +- No extra dependencies +- Omit the backend for simplicity + +## Multiple Instances + +If you run multiple instances (horizontal scaling), pass `RedisBackend` so all instances share the same queue state: + +```python +from genesis import Queue +from genesis.queue import RedisBackend + +queue = Queue(RedisBackend(url="redis://localhost:6379")) + +async with queue.slot("sales", item_id=session.uuid): + # ... +``` + +- State lives in Redis (list + counter per queue, pub/sub to wake waiters) +- Each instance enqueues its own call and waits until it is that call's turn; the **process that holds the ESL session** must be the one that runs the handler. Redis only stores order and concurrency. +- Optional **timeout** on `queue.slot()` / `queue.semaphore()` is supported by both backends; when it expires, the item is removed and `genesis.exceptions.QueueTimeoutError` is raised. + +## Custom Redis Key Prefix + +To avoid key collisions in Redis, set a custom prefix: + +```python +backend = RedisBackend( + url="redis://localhost:6379", + key_prefix="myapp:queue:" +) +queue = Queue(backend) +``` + +## Parameters + +**`Queue(backend=None)`** + +- `backend`: Backend to use (FIFO + concurrency state). Default: `InMemoryBackend`, so `Queue()` is enough for single-process use. + +**`InMemoryBackend()`** + +- No arguments + +**`RedisBackend(url="redis://localhost:6379", key_prefix="genesis:queue:")`** + +- `url`: Redis connection URL +- `key_prefix`: Prefix for Redis keys (default: `"genesis:queue:"`) + +## Best Practices + +1. Create the backend once and reuse the same `Queue` instance (e.g. at app startup) +2. Use `InMemoryBackend` for single-instance deployments, `RedisBackend` when running multiple instances +3. 
With Redis, pass `item_id=session.uuid` (or similar) when acquiring a slot so you can correlate metrics and traces across instances +4. If Redis becomes unavailable, `RedisBackend` will raise; ensure your application handles these errors + +## Related + +- [Queue]({{< relref "_index.md" >}}) - API and usage +- [Ring Group]({{< relref "../ring-group/_index.md" >}}) - Often used inside a queue slot +- [Observability / Metrics]({{< relref "../../Observability/metrics.md" >}}) - Queue metrics diff --git a/docs/content/docs/Tools/uvloop.md b/docs/content/docs/Tools/uvloop.md new file mode 100644 index 0000000..f7af6fc --- /dev/null +++ b/docs/content/docs/Tools/uvloop.md @@ -0,0 +1,29 @@ +--- +title: uvloop +weight: 15 +--- + +[uvloop](https://github.com/MagicStack/uvloop) is a fast, drop-in replacement for the default asyncio event loop, built on libuv. + +On **Unix** (Linux and macOS), using uvloop can improve asyncio performance. + +{{< callout type="warning" >}} +uvloop is **not supported on Windows**. +{{< /callout >}} + +## Installation + +Install Genesis with the uvloop extra: + +```bash +pip install genesis[uvloop] +``` + +## Usage + +When the extra is installed, the Genesis CLI uses uvloop automatically. + +## See also + +- [uvloop on GitHub](https://github.com/MagicStack/uvloop) +- [Installation Guide]({{< relref "../installation.md" >}}) — base Genesis installation diff --git a/examples/queue.py b/examples/queue.py new file mode 100644 index 0000000..540b6da --- /dev/null +++ b/examples/queue.py @@ -0,0 +1,45 @@ +""" +Queue example. + +One extension calls another via the app: the caller is held (music or message) +until a queue slot is free, then we bridge them to the callee. Only one +bridge at a time so you keep control (e.g. one agent per queue). 
+""" + +import asyncio +import os + +from genesis import Outbound, Session, Queue, Channel +from genesis.types import ChannelState + +HOST = os.getenv("HOST", "0.0.0.0") +PORT = int(os.getenv("PORT", "9696")) +CALLEE = "user/1001" +# Sound to play while caller waits. Default: music on hold (project Docker has MOH). +# Alternative: ivr/8000/ivr-one_moment_please.wav (Callie voice) +HOLD_SOUND = os.getenv("HOLD_SOUND", "local_stream://moh") + +queue = Queue() # in-memory by default + + +async def handler(session: Session) -> None: + if session.channel is None: + return + await session.channel.answer() + + # Start hold sound (block=False so it plays while we wait for a slot) + await session.channel.playback(HOLD_SOUND, block=False) + + async with queue.slot("support"): + callee = await Channel.create(session, CALLEE) + await callee.wait(ChannelState.EXECUTE, timeout=30.0) + await session.channel.bridge(callee) + + +async def main() -> None: + server = Outbound(handler=handler, host=HOST, port=PORT) + await server.start() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/genesis/__init__.py b/genesis/__init__.py index f14b6ce..c1585d8 100644 --- a/genesis/__init__.py +++ b/genesis/__init__.py @@ -6,12 +6,22 @@ from .protocol.parser import ESLEvent from .inbound import Inbound from .channel import Channel +from .exceptions import QueueTimeoutError from .group import ( RingGroup, RingMode, InMemoryLoadBalancer, RedisLoadBalancer, ) +from .queue import ( + Queue, + QueueBackend, + QueueSemaphore, + QueueSlot, + InMemoryBackend, + RedisBackend, +) +from .loop import use_uvloop __all__ = [ "Inbound", @@ -25,5 +35,13 @@ "RingMode", "InMemoryLoadBalancer", "RedisLoadBalancer", + "Queue", + "QueueBackend", + "QueueSemaphore", + "QueueSlot", + "QueueTimeoutError", + "InMemoryBackend", + "RedisBackend", + "use_uvloop", ] __version__ = importlib.metadata.version("genesis") diff --git a/genesis/cli/__init__.py b/genesis/cli/__init__.py index c4eeaaa..3de27f6 
100644 --- a/genesis/cli/__init__.py +++ b/genesis/cli/__init__.py @@ -11,15 +11,24 @@ import typer from rich import print -from opentelemetry import metrics +from opentelemetry import metrics, trace from opentelemetry.sdk.metrics import MeterProvider -from opentelemetry.sdk.resources import Resource +from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import BatchSpanProcessor from opentelemetry.exporter.prometheus import PrometheusMetricReader -from prometheus_client import start_http_server +from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter +from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter from genesis.cli.consumer import consumer from genesis.cli.outbound import outbound from genesis.observability import reconfigure_logger, logger +from genesis.observability.otel_config import ( + create_resource, + get_otel_exporter_otlp_metrics_endpoint, + get_otel_exporter_otlp_traces_endpoint, + is_otel_sdk_disabled, +) app = typer.Typer(rich_markup_mode="rich") @@ -49,14 +58,26 @@ def callback( reconfigure_logger(json) try: - # Setup OpenTelemetry - metric_reader = PrometheusMetricReader() - provider = MeterProvider( - resource=Resource.create({"service.name": "genesis"}), - metric_readers=[metric_reader], - ) - metrics.set_meter_provider(provider) - + # Setup OpenTelemetry (honors OTEL_SDK_DISABLED, OTEL_* env vars) + if not is_otel_sdk_disabled(): + resource = create_resource() + metric_readers: list = [PrometheusMetricReader()] + if get_otel_exporter_otlp_metrics_endpoint(): + metric_readers.append( + PeriodicExportingMetricReader( + OTLPMetricExporter(), + export_interval_millis=60_000, + ) + ) + metrics.set_meter_provider( + MeterProvider(resource=resource, metric_readers=metric_readers) + ) + if get_otel_exporter_otlp_traces_endpoint(): + tracer_provider = 
TracerProvider(resource=resource) + tracer_provider.add_span_processor( + BatchSpanProcessor(OTLPSpanExporter()) + ) + trace.set_tracer_provider(tracer_provider) except Exception as e: logger.warning(f"Failed to setup OpenTelemetry: {e}") diff --git a/genesis/cli/consumer.py b/genesis/cli/consumer.py index b49736d..45bab4b 100644 --- a/genesis/cli/consumer.py +++ b/genesis/cli/consumer.py @@ -7,6 +7,7 @@ import typer from genesis.cli import watcher +from genesis.loop import use_uvloop from genesis.observability import logger from genesis.consumer import Consumer from genesis.cli.exceptions import CLIExcpetion @@ -79,6 +80,8 @@ def _run( levels = get_log_level_map() logger.setLevel(levels.get(loglevel.upper(), logging.INFO)) + use_uvloop() + if reload: asyncio.run(_run_with_reload(consumer_app, path)) else: diff --git a/genesis/cli/outbound.py b/genesis/cli/outbound.py index 3d6fc46..9e47c87 100644 --- a/genesis/cli/outbound.py +++ b/genesis/cli/outbound.py @@ -7,6 +7,7 @@ import typer from genesis.cli import watcher +from genesis.loop import use_uvloop from genesis.observability import logger from genesis.outbound import Outbound from genesis.cli.exceptions import CLIExcpetion @@ -174,6 +175,8 @@ def _run( levels = get_log_level_map() logger.setLevel(levels.get(loglevel.upper(), logging.INFO)) + use_uvloop() + if reload: asyncio.run(_run_with_reload(outbound_app, path)) else: diff --git a/genesis/exceptions.py b/genesis/exceptions.py index 2d35050..2e6a8ef 100644 --- a/genesis/exceptions.py +++ b/genesis/exceptions.py @@ -50,3 +50,9 @@ class TimeoutError(GenesisError): """Occurs when an operation times out (e.g., waiting for an event).""" ... + + +class QueueTimeoutError(TimeoutError): + """Occurs when waiting for a queue slot exceeds the optional timeout.""" + + ... 
diff --git a/genesis/group/load_balancer.py b/genesis/group/load_balancer.py index e407c50..08fecb7 100644 --- a/genesis/group/load_balancer.py +++ b/genesis/group/load_balancer.py @@ -7,58 +7,15 @@ from __future__ import annotations -import subprocess -import sys -from typing import TYPE_CHECKING, Protocol, List, Optional, Any, Awaitable +from typing import Any, List, Optional, Protocol from abc import ABC, abstractmethod -if TYPE_CHECKING: - try: - import redis.asyncio as redis - except ImportError: - redis = None -else: - try: - import redis.asyncio as redis - except ImportError: - redis = None +import redis.asyncio as redis async def _create_redis_client(url: str = "redis://localhost:6379") -> Any: - """ - Create a Redis async client, installing redis package if needed. - - Internal helper function. - - Args: - url: Redis connection URL (default: "redis://localhost:6379") - - Returns: - Redis async client instance - - Raises: - RuntimeError: If redis package cannot be installed or imported - """ - # Import here to handle optional dependency - try: - import redis.asyncio as redis_module - except ImportError: - # Try to install redis automatically - try: - subprocess.check_call( - [sys.executable, "-m", "pip", "install", "redis>=5.0.0"], - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL, - ) - # Re-import after installation - import redis.asyncio as redis_module - except (subprocess.CalledProcessError, ImportError): - raise RuntimeError( - "Redis package is required for RedisLoadBalancer. " - "Install it with: pip install redis" - ) - - return await redis_module.from_url(url) + """Create a Redis async client.""" + return await redis.from_url(url) class LoadBalancerBackend(Protocol): @@ -165,7 +122,6 @@ class RedisLoadBalancer: Redis-based load balancer backend. Tracks call counts in Redis. Suitable for horizontal scaling. - The redis package is automatically installed when needed. 
Args: url: Redis connection URL (default: "redis://localhost:6379") diff --git a/genesis/loop.py b/genesis/loop.py new file mode 100644 index 0000000..6580b05 --- /dev/null +++ b/genesis/loop.py @@ -0,0 +1,38 @@ +""" +Event loop utilities. + +Provides optional uvloop integration for improved asyncio performance on Unix. +""" + +import asyncio + + +def use_uvloop() -> bool: + """ + Set the current event loop policy to use uvloop, when available. + + uvloop is a fast, drop-in replacement for the default asyncio event loop, + built on libuv. It is only supported on Unix (Linux and macOS); on Windows + or when uvloop is not installed, this function does nothing. + + Call this once at application startup, before creating any event loop + (e.g. before asyncio.run()). + + Returns: + True if uvloop was successfully installed as the event loop policy, + False otherwise (uvloop not installed or not supported on this platform). + + Example: + >>> from genesis import use_uvloop + >>> use_uvloop() + True + >>> import asyncio + >>> asyncio.run(my_main()) + """ + try: + import uvloop + + asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) + return True + except (ImportError, OSError, AttributeError): + return False diff --git a/genesis/observability/__init__.py b/genesis/observability/__init__.py index 00d9703..5ee4fd8 100644 --- a/genesis/observability/__init__.py +++ b/genesis/observability/__init__.py @@ -7,6 +7,15 @@ JSONFormatter, get_log_level, ) +from .otel_config import ( + create_resource, + get_otel_exporter_otlp_endpoint, + get_otel_exporter_otlp_metrics_endpoint, + get_otel_exporter_otlp_traces_endpoint, + get_otel_resource_attributes, + get_otel_service_name, + is_otel_sdk_disabled, +) from .server import Observability, AppType, observability __all__ = [ @@ -20,4 +29,11 @@ "Observability", "AppType", "observability", + "create_resource", + "get_otel_exporter_otlp_endpoint", + "get_otel_exporter_otlp_metrics_endpoint", + "get_otel_exporter_otlp_traces_endpoint", + 
"get_otel_resource_attributes", + "get_otel_service_name", + "is_otel_sdk_disabled", ] diff --git a/genesis/observability/otel_config.py b/genesis/observability/otel_config.py new file mode 100644 index 0000000..07459fa --- /dev/null +++ b/genesis/observability/otel_config.py @@ -0,0 +1,118 @@ +""" +OpenTelemetry configuration via environment variables. +------------------------------------------------------ + +Supports the standard OTEL environment variables for configuring +tracing and metrics in Genesis. +""" + +import os +from typing import Dict, Optional + +from opentelemetry.sdk.resources import Resource + + +def _parse_boolean(value: str) -> bool: + """Parse OTEL boolean env: only 'true' (case-insensitive) is True.""" + return value.strip().lower() == "true" + + +def _parse_resource_attributes(value: str) -> Dict[str, str]: + """ + Parse OTEL_RESOURCE_ATTRIBUTES string into a dict. + + Format: key1=value1,key2=value2 + Values may contain equals signs; only the first '=' splits key and value. + """ + attrs: Dict[str, str] = {} + for item in value.split(","): + item = item.strip() + if not item: + continue + idx = item.find("=") + if idx < 0: + continue + key = item[:idx].strip() + val = item[idx + 1 :].strip() + if key: + attrs[key] = val + return attrs + + +def is_otel_sdk_disabled() -> bool: + """ + Return whether the OpenTelemetry SDK is disabled via environment. + + Reads OTEL_SDK_DISABLED. Only the case-insensitive value "true" + disables the SDK (per OpenTelemetry spec). + """ + raw = os.getenv("OTEL_SDK_DISABLED", "").strip() + if not raw: + return False + return _parse_boolean(raw) + + +def get_otel_service_name() -> str: + """ + Return the service name for the OTEL resource. + + Reads OTEL_SERVICE_NAME. Defaults to "genesis" when unset. + """ + return os.getenv("OTEL_SERVICE_NAME", "genesis").strip() or "genesis" + + +def get_otel_resource_attributes() -> Dict[str, str]: + """ + Return resource attributes from OTEL_RESOURCE_ATTRIBUTES. 
+
+    Format: key1=value1,key2=value2. OTEL_SERVICE_NAME is applied
+    separately and takes precedence over service.name here.
+    """
+    raw = os.getenv("OTEL_RESOURCE_ATTRIBUTES", "").strip()
+    if not raw:
+        return {}
+    return _parse_resource_attributes(raw)
+
+
+def get_otel_exporter_otlp_endpoint() -> Optional[str]:
+    """
+    Return the OTLP exporter endpoint for HTTP (all signals).
+
+    Reads OTEL_EXPORTER_OTLP_ENDPOINT. When set, metrics (and traces if
+    configured) can be sent to this endpoint via OTLP/HTTP. Returns None when
+    unset; exporters then apply the OTLP/HTTP spec default, http://localhost:4318.
+    """
+    raw = os.getenv("OTEL_EXPORTER_OTLP_ENDPOINT", "").strip()
+    return raw if raw else None
+
+
+def get_otel_exporter_otlp_metrics_endpoint() -> Optional[str]:
+    """
+    Return the OTLP metrics exporter endpoint (overrides OTEL_EXPORTER_OTLP_ENDPOINT for metrics).
+    """
+    raw = os.getenv("OTEL_EXPORTER_OTLP_METRICS_ENDPOINT", "").strip()
+    return raw if raw else get_otel_exporter_otlp_endpoint()
+
+
+def get_otel_exporter_otlp_traces_endpoint() -> Optional[str]:
+    """
+    Return the OTLP traces exporter endpoint (overrides OTEL_EXPORTER_OTLP_ENDPOINT for traces).
+    """
+    raw = os.getenv("OTEL_EXPORTER_OTLP_TRACES_ENDPOINT", "").strip()
+    return raw if raw else get_otel_exporter_otlp_endpoint()
+
+
+def create_resource() -> Resource:
+    """
+    Create an OpenTelemetry Resource from OTEL environment variables.
+
+    Uses:
+    - OTEL_SERVICE_NAME for service.name (default: "genesis")
+    - OTEL_RESOURCE_ATTRIBUTES for additional key=value pairs
+
+    OTEL_SERVICE_NAME takes precedence over service.name in
+    OTEL_RESOURCE_ATTRIBUTES (per OpenTelemetry spec).
+ """ + attrs: Dict[str, str] = dict(get_otel_resource_attributes()) + attrs["service.name"] = get_otel_service_name() + return Resource.create(attrs) diff --git a/genesis/queue/__init__.py b/genesis/queue/__init__.py new file mode 100644 index 0000000..9869b3e --- /dev/null +++ b/genesis/queue/__init__.py @@ -0,0 +1,22 @@ +""" +Genesis queue +------------- + +FIFO queue with concurrency limit per queue_id. Context-manager and +semaphore-like API; backends: in-memory (single process) or Redis (scalable). +""" + +from genesis.exceptions import QueueTimeoutError +from genesis.queue.backends import InMemoryBackend, QueueBackend +from genesis.queue.core import Queue, QueueSemaphore, QueueSlot +from genesis.queue.redis_backend import RedisBackend + +__all__ = [ + "Queue", + "QueueBackend", + "QueueSemaphore", + "QueueSlot", + "QueueTimeoutError", + "InMemoryBackend", + "RedisBackend", +] diff --git a/genesis/queue/backends.py b/genesis/queue/backends.py new file mode 100644 index 0000000..b0b52ae --- /dev/null +++ b/genesis/queue/backends.py @@ -0,0 +1,151 @@ +""" +Genesis queue backends +---------------------- + +Backend protocol and in-memory implementation for the queue abstraction. +""" + +from __future__ import annotations + +import asyncio +import time +from collections import deque +from typing import Optional, Protocol, runtime_checkable + +from genesis.exceptions import QueueTimeoutError + + +class _QueueState: + """Per-queue state: FIFO deque, lock, condition; semaphore created on first acquire.""" + + __slots__ = ("deque", "lock", "condition", "semaphore") + + def __init__(self) -> None: + self.deque: deque[str] = deque() + self.lock = asyncio.Lock() + self.condition = asyncio.Condition(self.lock) + self.semaphore: asyncio.Semaphore | None = None + + +@runtime_checkable +class QueueBackend(Protocol): + """ + Protocol for queue backends. + + Implementations provide FIFO-ordered, concurrency-limited slots per queue_id. 
+ """ + + async def enqueue(self, queue_id: str, item_id: str) -> None: + """Add item_id to the tail of the queue.""" + ... + + async def wait_and_acquire( + self, + queue_id: str, + item_id: str, + max_concurrent: int, + timeout: Optional[float] = None, + ) -> None: + """ + Block until this item is at the head of the queue and a slot is free, + then consume the head and hold one slot. + + If ``timeout`` is set (seconds) and expires before acquiring, + remove this item from the queue and raise :exc:`QueueTimeoutError`. + """ + ... + + async def release(self, queue_id: str) -> None: + """Release one slot for the queue.""" + ... + + +class InMemoryBackend: + """ + In-memory queue backend. + + Uses a deque and a semaphore per queue_id. Suitable for single-process use. + """ + + def __init__(self) -> None: + """Initialize in-memory backend.""" + self._states: dict[str, _QueueState] = {} + + def _get_or_create_state(self, queue_id: str) -> _QueueState: + """Get or create queue state (deque, lock, condition). Semaphore set in wait_and_acquire.""" + if queue_id not in self._states: + self._states[queue_id] = _QueueState() + return self._states[queue_id] + + async def enqueue(self, queue_id: str, item_id: str) -> None: + """Add item_id to the tail of the queue.""" + state = self._get_or_create_state(queue_id) + async with state.lock: + state.deque.append(item_id) + state.condition.notify_all() + + async def wait_and_acquire( + self, + queue_id: str, + item_id: str, + max_concurrent: int, + timeout: Optional[float] = None, + ) -> None: + """ + Block until this item is at the head and a slot is free, then pop head and acquire. + First call for a queue_id sets max_concurrent for that queue. + If timeout (seconds) expires, remove item from queue and raise QueueTimeoutError. 
+ """ + state = self._get_or_create_state(queue_id) + if state.semaphore is None: + state.semaphore = asyncio.Semaphore(max_concurrent) + deadline = time.monotonic() + timeout if timeout is not None else None + async with state.lock: + while True: + if state.deque and state.deque[0] == item_id: + state.deque.popleft() + state.condition.notify_all() + break + remaining = None + if deadline is not None: + remaining = deadline - time.monotonic() + if remaining <= 0: + try: + state.deque.remove(item_id) + except ValueError: + pass + state.condition.notify_all() + raise QueueTimeoutError() + try: + if remaining is not None: + await asyncio.wait_for( + state.condition.wait(), timeout=remaining + ) + else: + await state.condition.wait() + except asyncio.TimeoutError: + try: + state.deque.remove(item_id) + except ValueError: + pass + state.condition.notify_all() + raise QueueTimeoutError() + if deadline is not None: + remaining = deadline - time.monotonic() + if remaining <= 0: + raise QueueTimeoutError() + try: + await asyncio.wait_for(state.semaphore.acquire(), timeout=remaining) + except asyncio.TimeoutError: + raise QueueTimeoutError() + else: + await state.semaphore.acquire() + + async def release(self, queue_id: str) -> None: + """Release one slot for the queue.""" + if queue_id in self._states: + state = self._states[queue_id] + if state.semaphore is not None: + state.semaphore.release() + async with state.lock: + state.condition.notify_all() diff --git a/genesis/queue/core.py b/genesis/queue/core.py new file mode 100644 index 0000000..e2665a0 --- /dev/null +++ b/genesis/queue/core.py @@ -0,0 +1,214 @@ +""" +Genesis queue core +------------------ + +Queue abstraction with context-manager and semaphore-like API. 
+""" + +from __future__ import annotations + +import time +from contextlib import asynccontextmanager +from typing import Optional +from uuid import uuid4 + +from opentelemetry import metrics, trace + +from genesis.queue.backends import InMemoryBackend, QueueBackend + +tracer = trace.get_tracer(__name__) +meter = metrics.get_meter(__name__) + +queue_operations_counter = meter.create_counter( + "genesis.queue.operations", + description="Queue slot acquire/release operations", + unit="1", +) +queue_wait_duration = meter.create_histogram( + "genesis.queue.wait_duration", + description="Time spent waiting for a slot", + unit="s", +) + + +class QueueSlot: + """ + Async context manager for a single slot acquisition. + + Use via ``async with queue.slot(queue_id):``. On enter, enqueues and blocks + until this item is at the head and a slot is free; on exit, releases the slot. + Optional ``timeout`` (seconds): raise :exc:`~genesis.exceptions.QueueTimeoutError` if not acquired in time. + """ + + __slots__ = ( + "_queue", + "_queue_id", + "_item_id", + "_max_concurrent", + "_timeout", + "_acquired", + "_released", + ) + + def __init__( + self, + queue: "Queue", + queue_id: str, + *, + item_id: Optional[str] = None, + max_concurrent: int = 1, + timeout: Optional[float] = None, + ) -> None: + self._queue = queue + self._queue_id = queue_id + self._item_id = item_id or str(uuid4()) + self._max_concurrent = max_concurrent + self._timeout = timeout + self._acquired = False + self._released = False + + async def __aenter__(self) -> "QueueSlot": + await self._queue._enqueue(self._queue_id, self._item_id) + start = time.monotonic() + with tracer.start_as_current_span( + "queue.wait_and_acquire", + attributes={ + "queue.id": self._queue_id, + "queue.item_id": self._item_id, + }, + ): + await self._queue._backend.wait_and_acquire( + self._queue_id, + self._item_id, + self._max_concurrent, + timeout=self._timeout, + ) + self._acquired = True + elapsed = time.monotonic() - start + 
queue_wait_duration.record(elapsed, attributes={"queue.id": self._queue_id})
+        queue_operations_counter.add(
+            1, attributes={"queue.id": self._queue_id, "op": "acquire"}
+        )
+        return self
+
+    async def __aexit__(self, *args: object) -> None:
+        if self._acquired and not self._released:
+            self._released = True
+            await self._queue._release(self._queue_id)
+            queue_operations_counter.add(
+                1, attributes={"queue.id": self._queue_id, "op": "release"}
+            )
+
+
+class QueueSemaphore:
+    """
+    Semaphore-like handle for a queue: reusable context manager for the same queue_id.
+
+    Use via ``async with queue.semaphore(queue_id):`` or store and reuse:
+    ``sem = queue.semaphore("sales", max_concurrent=2); async with sem: ...``
+    Optional ``timeout`` (seconds) applies to each acquire. NOTE(review): ``__aenter__`` stores the active slot on the instance, so ``async with sem`` is not safe from multiple tasks concurrently — use ``queue.slot()`` or the ``sem(item_id=...)`` call form per task.
+    """
+
+    __slots__ = ("_queue", "_queue_id", "_max_concurrent", "_timeout", "_slot")
+
+    def __init__(
+        self,
+        queue: "Queue",
+        queue_id: str,
+        max_concurrent: int = 1,
+        timeout: Optional[float] = None,
+    ) -> None:
+        self._queue = queue
+        self._queue_id = queue_id
+        self._max_concurrent = max_concurrent
+        self._timeout = timeout
+        self._slot: Optional[QueueSlot] = None
+
+    @asynccontextmanager
+    async def __call__(self, *, item_id: Optional[str] = None):
+        """Acquire a slot with optional item_id (e.g. session uuid)."""
+        slot = QueueSlot(
+            self._queue,
+            self._queue_id,
+            item_id=item_id,
+            max_concurrent=self._max_concurrent,
+            timeout=self._timeout,
+        )
+        async with slot:
+            yield
+
+    async def __aenter__(self) -> "QueueSemaphore":
+        self._slot = QueueSlot(
+            self._queue,
+            self._queue_id,
+            max_concurrent=self._max_concurrent,
+            timeout=self._timeout,
+        )
+        await self._slot.__aenter__()
+        return self
+
+    async def __aexit__(self, *args: object) -> None:
+        if self._slot is not None:
+            await self._slot.__aexit__(*args)
+            self._slot = None
+
+
+class Queue:
+    """
+    FIFO queue with concurrency limit per queue_id.
+ + Uses an in-memory backend by default; pass a backend for Redis or custom + storage. API is context-manager and semaphore-like: + ``async with queue.slot("sales"):`` or + ``sem = queue.semaphore("sales", max_concurrent=2); async with sem: ...`` + """ + + __slots__ = ("_backend",) + + def __init__(self, backend: Optional[QueueBackend] = None) -> None: + self._backend = backend if backend is not None else InMemoryBackend() + + def slot( + self, + queue_id: str, + *, + item_id: Optional[str] = None, + max_concurrent: int = 1, + timeout: Optional[float] = None, + ) -> QueueSlot: + """ + Return a context manager that acquires a slot in the given queue. + + On enter: enqueue (with optional item_id), then block until at head and + a slot is free. On exit: release the slot. + If ``timeout`` (seconds) is set and expires before acquiring, + the item is removed from the queue and :exc:`~genesis.exceptions.QueueTimeoutError` is raised. + """ + return QueueSlot( + self, + queue_id, + item_id=item_id, + max_concurrent=max_concurrent, + timeout=timeout, + ) + + def semaphore( + self, + queue_id: str, + max_concurrent: int = 1, + timeout: Optional[float] = None, + ) -> QueueSemaphore: + """ + Return a semaphore-like handle for the queue. Reusable for multiple + ``async with sem:`` calls with the same concurrency limit. + Optional ``timeout`` (seconds) applies to each acquire. 
+ """ + return QueueSemaphore( + self, queue_id, max_concurrent=max_concurrent, timeout=timeout + ) + + async def _enqueue(self, queue_id: str, item_id: str) -> None: + await self._backend.enqueue(queue_id, item_id) + + async def _release(self, queue_id: str) -> None: + await self._backend.release(queue_id) diff --git a/genesis/queue/redis_backend.py b/genesis/queue/redis_backend.py new file mode 100644 index 0000000..c769c82 --- /dev/null +++ b/genesis/queue/redis_backend.py @@ -0,0 +1,160 @@ +""" +Genesis queue Redis backend +--------------------------- + +Redis-backed queue for multi-process / horizontal scaling. +""" + +from __future__ import annotations + +import asyncio +import time +from typing import Any, Optional + +import redis.asyncio as redis + +from genesis.exceptions import QueueTimeoutError +from genesis.queue.backends import QueueBackend + +# Lua: try to acquire if we're at head and slots available. Keys: waiting_list, in_use_key. Args: item_id, max_concurrent. +SCRIPT_ACQUIRE = """ +local head = redis.call('LINDEX', KEYS[1], 0) +if head == ARGV[1] then + local in_use = tonumber(redis.call('GET', KEYS[2]) or '0') + if in_use < tonumber(ARGV[2]) then + redis.call('LPOP', KEYS[1]) + redis.call('INCR', KEYS[2]) + return 1 + end +end +return 0 +""" + + +class RedisBackend: + """ + Redis-backed queue backend. + + Uses a list for FIFO order and a counter for in-use slots per queue_id. + Suitable for horizontal scaling (multiple app instances). 
+ """ + + def __init__( + self, + url: str = "redis://localhost:6379", + key_prefix: str = "genesis:queue:", + ) -> None: + self._url = url + self._prefix = key_prefix + self._client: Any = None + + async def _get_client(self) -> Any: + if self._client is None: + self._client = await redis.from_url(self._url) + return self._client + + def _waiting_key(self, queue_id: str) -> str: + return f"{self._prefix}{queue_id}:waiting" + + def _in_use_key(self, queue_id: str) -> str: + return f"{self._prefix}{queue_id}:in_use" + + def _channel(self, queue_id: str) -> str: + return f"{self._prefix}{queue_id}:release" + + async def enqueue(self, queue_id: str, item_id: str) -> None: + """Add item_id to the tail of the queue.""" + client = await self._get_client() + key = self._waiting_key(queue_id) + await client.rpush(key, item_id) + + async def _try_acquire( + self, + client: Any, + waiting_key: str, + in_use_key: str, + item_id: str, + max_concurrent: int, + ) -> bool: + """Try to acquire a slot; returns True if acquired.""" + try: + script = client.register_script(SCRIPT_ACQUIRE) + got = await script( + keys=[waiting_key, in_use_key], + args=[item_id, str(max_concurrent)], + ) + return bool(got) + except AttributeError: + pass + head = await client.lindex(waiting_key, 0) + if head is not None: + head = head.decode("utf-8") if isinstance(head, bytes) else head + in_use = int(await client.get(in_use_key) or 0) + if head == item_id and in_use < max_concurrent: + await client.lpop(waiting_key) + await client.incr(in_use_key) + return True + return False + + async def _wait_for_release_signal(self, client: Any, channel: str) -> None: + """Block until a message is published on channel or timeout.""" + sub = client.pubsub() + await sub.subscribe(channel) + try: + async for msg in sub.listen(): + if msg.get("type") == "message": + return + await asyncio.sleep(0) + except asyncio.CancelledError: + raise + finally: + await sub.unsubscribe(channel) + await sub.close() + + async def 
wait_and_acquire( + self, + queue_id: str, + item_id: str, + max_concurrent: int, + timeout: Optional[float] = None, + ) -> None: + """ + Block until this item is at the head and a slot is free, then pop head and acquire. + If timeout (seconds) expires, remove item from waiting list and raise QueueTimeoutError. + """ + client = await self._get_client() + waiting_key = self._waiting_key(queue_id) + in_use_key = self._in_use_key(queue_id) + channel = self._channel(queue_id) + deadline = time.monotonic() + timeout if timeout is not None else None + + while True: + if await self._try_acquire( + client, waiting_key, in_use_key, item_id, max_concurrent + ): + return + wait_timeout = 1.0 + if deadline is not None: + remaining = deadline - time.monotonic() + if remaining <= 0: + await client.lrem(waiting_key, 1, item_id) + raise QueueTimeoutError() + wait_timeout = min(1.0, remaining) + try: + await asyncio.wait_for( + self._wait_for_release_signal(client, channel), + timeout=wait_timeout, + ) + except asyncio.TimeoutError: + if deadline is not None and time.monotonic() >= deadline: + await client.lrem(waiting_key, 1, item_id) + raise QueueTimeoutError() + pass + + async def release(self, queue_id: str) -> None: + """Release one slot for the queue.""" + client = await self._get_client() + in_use_key = self._in_use_key(queue_id) + channel = self._channel(queue_id) + await client.decr(in_use_key) + await client.publish(channel, "1") diff --git a/poetry.lock b/poetry.lock index 1e06e3c..dbba99d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -48,10 +48,10 @@ trio = ["trio (>=0.31.0) ; python_version < \"3.10\"", "trio (>=0.32.0) ; python name = "async-timeout" version = "5.0.1" description = "Timeout context manager for asyncio programs" -optional = true +optional = false python-versions = ">=3.8" groups = ["main"] -markers = "extra == \"redis\" and python_full_version < \"3.11.3\"" +markers = "python_full_version < \"3.11.3\"" files = [ {file = 
"async_timeout-5.0.1-py3-none-any.whl", hash = "sha256:39e3809566ff85354557ec2398b55e096c8364bacac9405a7a1fa429e77fe76c"}, {file = "async_timeout-5.0.1.tar.gz", hash = "sha256:d9321a7a3d5a6a5e187e824d2fa0793ce379a202935782d555d6e9d2735677d3"}, @@ -110,12 +110,135 @@ version = "2026.1.4" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.7" -groups = ["dev"] +groups = ["main", "dev"] files = [ {file = "certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c"}, {file = "certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120"}, ] +[[package]] +name = "charset-normalizer" +version = "3.4.4" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "charset_normalizer-3.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e824f1492727fa856dd6eda4f7cee25f8518a12f3c4a56a74e8095695089cf6d"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4bd5d4137d500351a30687c2d3971758aac9a19208fc110ccb9d7188fbe709e8"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:027f6de494925c0ab2a55eab46ae5129951638a49a34d87f4c3eda90f696b4ad"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f820802628d2694cb7e56db99213f930856014862f3fd943d290ea8438d07ca8"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:798d75d81754988d2565bff1b97ba5a44411867c0cf32b77a7e8f8d84796b10d"}, + {file = 
"charset_normalizer-3.4.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d1bb833febdff5c8927f922386db610b49db6e0d4f4ee29601d71e7c2694313"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:9cd98cdc06614a2f768d2b7286d66805f94c48cde050acdbbb7db2600ab3197e"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:077fbb858e903c73f6c9db43374fd213b0b6a778106bc7032446a8e8b5b38b93"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:244bfb999c71b35de57821b8ea746b24e863398194a4014e4c76adc2bbdfeff0"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:64b55f9dce520635f018f907ff1b0df1fdc31f2795a922fb49dd14fbcdf48c84"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:faa3a41b2b66b6e50f84ae4a68c64fcd0c44355741c6374813a800cd6695db9e"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6515f3182dbe4ea06ced2d9e8666d97b46ef4c75e326b79bb624110f122551db"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc00f04ed596e9dc0da42ed17ac5e596c6ccba999ba6bd92b0e0aef2f170f2d6"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-win32.whl", hash = "sha256:f34be2938726fc13801220747472850852fe6b1ea75869a048d6f896838c896f"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:a61900df84c667873b292c3de315a786dd8dac506704dea57bc957bd31e22c7d"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-win_arm64.whl", hash = "sha256:cead0978fc57397645f12578bfd2d5ea9138ea0fac82b2f63f7f7c6877986a69"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e1fcf0720908f200cd21aa4e6750a48ff6ce4afe7ff5a79a90d5ed8a08296f8"}, + {file = 
"charset_normalizer-3.4.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f819d5fe9234f9f82d75bdfa9aef3a3d72c4d24a6e57aeaebba32a704553aa0"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a59cb51917aa591b1c4e6a43c132f0cdc3c76dbad6155df4e28ee626cc77a0a3"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8ef3c867360f88ac904fd3f5e1f902f13307af9052646963ee08ff4f131adafc"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d9e45d7faa48ee908174d8fe84854479ef838fc6a705c9315372eacbc2f02897"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:840c25fb618a231545cbab0564a799f101b63b9901f2569faecd6b222ac72381"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ca5862d5b3928c4940729dacc329aa9102900382fea192fc5e52eb69d6093815"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9c7f57c3d666a53421049053eaacdd14bbd0a528e2186fcb2e672effd053bb0"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:277e970e750505ed74c832b4bf75dac7476262ee2a013f5574dd49075879e161"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:31fd66405eaf47bb62e8cd575dc621c56c668f27d46a61d975a249930dd5e2a4"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:0d3d8f15c07f86e9ff82319b3d9ef6f4bf907608f53fe9d92b28ea9ae3d1fd89"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:9f7fcd74d410a36883701fafa2482a6af2ff5ba96b9a620e9e0721e28ead5569"}, + {file = 
"charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ebf3e58c7ec8a8bed6d66a75d7fb37b55e5015b03ceae72a8e7c74495551e224"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-win32.whl", hash = "sha256:eecbc200c7fd5ddb9a7f16c7decb07b566c29fa2161a16cf67b8d068bd21690a"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:5ae497466c7901d54b639cf42d5b8c1b6a4fead55215500d2f486d34db48d016"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-win_arm64.whl", hash = "sha256:65e2befcd84bc6f37095f5961e68a6f077bf44946771354a28ad434c2cce0ae1"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a"}, + {file = 
"charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72"}, + {file = 
"charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = 
"sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = 
"sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ce8a0633f41a967713a59c4139d29110c07e826d131a316b50ce11b1d79b4f84"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eaabd426fe94daf8fd157c32e571c85cb12e66692f15516a83a03264b08d06c3"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c4ef880e27901b6cc782f1b95f82da9313c0eb95c3af699103088fa0ac3ce9ac"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2aaba3b0819274cc41757a1da876f810a3e4d7b6eb25699253a4effef9e8e4af"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:778d2e08eda00f4256d7f672ca9fef386071c9202f5e4607920b86d7803387f2"}, + {file = 
"charset_normalizer-3.4.4-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f155a433c2ec037d4e8df17d18922c3a0d9b3232a396690f17175d2946f0218d"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a8bf8d0f749c5757af2142fe7903a9df1d2e8aa3841559b2bad34b08d0e2bcf3"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:194f08cbb32dc406d6e1aea671a68be0823673db2832b38405deba2fb0d88f63"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:6aee717dcfead04c6eb1ce3bd29ac1e22663cdea57f943c87d1eab9a025438d7"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:cd4b7ca9984e5e7985c12bc60a6f173f3c958eae74f3ef6624bb6b26e2abbae4"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_riscv64.whl", hash = "sha256:b7cf1017d601aa35e6bb650b6ad28652c9cd78ee6caff19f3c28d03e1c80acbf"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:e912091979546adf63357d7e2ccff9b44f026c075aeaf25a52d0e95ad2281074"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:5cb4d72eea50c8868f5288b7f7f33ed276118325c1dfd3957089f6b519e1382a"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-win32.whl", hash = "sha256:837c2ce8c5a65a2035be9b3569c684358dfbf109fd3b6969630a87535495ceaa"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:44c2a8734b333e0578090c4cd6b16f275e07aa6614ca8715e6c038e865e70576"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a9768c477b9d7bd54bc0c86dbaebdec6f03306675526c9927c0e8a04e8f94af9"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1bee1e43c28aa63cb16e5c14e582580546b08e535299b8b6158a7c9c768a1f3d"}, + {file = 
"charset_normalizer-3.4.4-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:fd44c878ea55ba351104cb93cc85e74916eb8fa440ca7903e57575e97394f608"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0f04b14ffe5fdc8c4933862d8306109a2c51e0704acfa35d51598eb45a1e89fc"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:cd09d08005f958f370f539f186d10aec3377d55b9eeb0d796025d4886119d76e"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4fe7859a4e3e8457458e2ff592f15ccb02f3da787fcd31e0183879c3ad4692a1"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fa09f53c465e532f4d3db095e0c55b615f010ad81803d383195b6b5ca6cbf5f3"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7fa17817dc5625de8a027cb8b26d9fefa3ea28c8253929b8d6649e705d2835b6"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:5947809c8a2417be3267efc979c47d76a079758166f7d43ef5ae8e9f92751f88"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:4902828217069c3c5c71094537a8e623f5d097858ac6ca8252f7b4d10b7560f1"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:7c308f7e26e4363d79df40ca5b2be1c6ba9f02bdbccfed5abddb7859a6ce72cf"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:2c9d3c380143a1fedbff95a312aa798578371eb29da42106a29019368a475318"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cb01158d8b88ee68f15949894ccc6712278243d95f344770fa7593fa2d94410c"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-win32.whl", hash = 
"sha256:2677acec1a2f8ef614c6888b5b4ae4060cc184174a938ed4e8ef690e15d3e505"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:f8e160feb2aed042cd657a72acc0b481212ed28b1b9a95c0cee1621b524e1966"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-win_arm64.whl", hash = "sha256:b5d84d37db046c5ca74ee7bb47dd6cbc13f80665fdde3e8040bdd3fb015ecb50"}, + {file = "charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f"}, + {file = "charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a"}, +] + [[package]] name = "click" version = "8.1.8" @@ -315,6 +438,24 @@ files = [ {file = "filelock-3.20.3.tar.gz", hash = "sha256:18c57ee915c7ec61cff0ecf7f0f869936c7c30191bb0cf406f1341778d0834e1"}, ] +[[package]] +name = "googleapis-common-protos" +version = "1.72.0" +description = "Common protobufs used in Google APIs" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "googleapis_common_protos-1.72.0-py3-none-any.whl", hash = "sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038"}, + {file = "googleapis_common_protos-1.72.0.tar.gz", hash = "sha256:e55a601c1b32b52d7a3e65f43563e2aa61bcd737998ee672ac9b951cd49319f5"}, +] + +[package.dependencies] +protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0" + +[package.extras] +grpc = ["grpcio (>=1.44.0,<2.0.0)"] + [[package]] name = "h11" version = "0.16.0" @@ -710,6 +851,45 @@ files = [ importlib-metadata = ">=6.0,<8.8.0" typing-extensions = ">=4.5.0" +[[package]] +name = "opentelemetry-exporter-otlp-proto-common" +version = "1.39.1" +description = "OpenTelemetry Protobuf encoding" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "opentelemetry_exporter_otlp_proto_common-1.39.1-py3-none-any.whl", hash = 
"sha256:08f8a5862d64cc3435105686d0216c1365dc5701f86844a8cd56597d0c764fde"}, + {file = "opentelemetry_exporter_otlp_proto_common-1.39.1.tar.gz", hash = "sha256:763370d4737a59741c89a67b50f9e39271639ee4afc999dadfe768541c027464"}, +] + +[package.dependencies] +opentelemetry-proto = "1.39.1" + +[[package]] +name = "opentelemetry-exporter-otlp-proto-http" +version = "1.39.1" +description = "OpenTelemetry Collector Protobuf over HTTP Exporter" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "opentelemetry_exporter_otlp_proto_http-1.39.1-py3-none-any.whl", hash = "sha256:d9f5207183dd752a412c4cd564ca8875ececba13be6e9c6c370ffb752fd59985"}, + {file = "opentelemetry_exporter_otlp_proto_http-1.39.1.tar.gz", hash = "sha256:31bdab9745c709ce90a49a0624c2bd445d31a28ba34275951a6a362d16a0b9cb"}, +] + +[package.dependencies] +googleapis-common-protos = ">=1.52,<2.0" +opentelemetry-api = ">=1.15,<2.0" +opentelemetry-exporter-otlp-proto-common = "1.39.1" +opentelemetry-proto = "1.39.1" +opentelemetry-sdk = ">=1.39.1,<1.40.0" +requests = ">=2.7,<3.0" +typing-extensions = ">=4.5.0" + +[package.extras] +gcp-auth = ["opentelemetry-exporter-credential-provider-gcp (>=0.59b0)"] + [[package]] name = "opentelemetry-exporter-prometheus" version = "0.60b1" @@ -727,6 +907,21 @@ opentelemetry-api = ">=1.12,<2.0" opentelemetry-sdk = ">=1.39.1,<1.40.0" prometheus-client = ">=0.5.0,<1.0.0" +[[package]] +name = "opentelemetry-proto" +version = "1.39.1" +description = "OpenTelemetry Python Proto" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "opentelemetry_proto-1.39.1-py3-none-any.whl", hash = "sha256:22cdc78efd3b3765d09e68bfbd010d4fc254c9818afd0b6b423387d9dee46007"}, + {file = "opentelemetry_proto-1.39.1.tar.gz", hash = "sha256:6c8e05144fc0d3ed4d22c2289c6b126e03bcd0e6a7da0f16cedd2e1c2772e2c8"}, +] + +[package.dependencies] +protobuf = ">=5.0,<7.0" + [[package]] name = "opentelemetry-sdk" version = "1.39.1" @@ -834,6 +1029,26 @@ 
aiohttp = ["aiohttp"] django = ["django"] twisted = ["twisted"] +[[package]] +name = "protobuf" +version = "6.33.5" +description = "" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "protobuf-6.33.5-cp310-abi3-win32.whl", hash = "sha256:d71b040839446bac0f4d162e758bea99c8251161dae9d0983a3b88dee345153b"}, + {file = "protobuf-6.33.5-cp310-abi3-win_amd64.whl", hash = "sha256:3093804752167bcab3998bec9f1048baae6e29505adaf1afd14a37bddede533c"}, + {file = "protobuf-6.33.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:a5cb85982d95d906df1e2210e58f8e4f1e3cdc088e52c921a041f9c9a0386de5"}, + {file = "protobuf-6.33.5-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:9b71e0281f36f179d00cbcb119cb19dec4d14a81393e5ea220f64b286173e190"}, + {file = "protobuf-6.33.5-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:8afa18e1d6d20af15b417e728e9f60f3aa108ee76f23c3b2c07a2c3b546d3afd"}, + {file = "protobuf-6.33.5-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:cbf16ba3350fb7b889fca858fb215967792dc125b35c7976ca4818bee3521cf0"}, + {file = "protobuf-6.33.5-cp39-cp39-win32.whl", hash = "sha256:a3157e62729aafb8df6da2c03aa5c0937c7266c626ce11a278b6eb7963c4e37c"}, + {file = "protobuf-6.33.5-cp39-cp39-win_amd64.whl", hash = "sha256:8f04fa32763dcdb4973d537d6b54e615cc61108c7cb38fe59310c3192d29510a"}, + {file = "protobuf-6.33.5-py3-none-any.whl", hash = "sha256:69915a973dd0f60f31a08b8318b73eab2bd6a392c79184b3612226b0a3f8ec02"}, + {file = "protobuf-6.33.5.tar.gz", hash = "sha256:6ddcac2a081f8b7b9642c09406bc6a4290128fce5f471cddd165960bb9119e5c"}, +] + [[package]] name = "py" version = "1.11.0" @@ -1021,10 +1236,9 @@ windows-terminal = ["colorama (>=0.4.6)"] name = "pyjwt" version = "2.10.1" description = "JSON Web Token implementation in Python" -optional = true +optional = false python-versions = ">=3.9" groups = ["main"] -markers = "extra == \"redis\"" files = [ {file = "PyJWT-2.10.1-py3-none-any.whl", hash = 
"sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb"}, {file = "pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953"}, @@ -1215,10 +1429,9 @@ files = [ name = "redis" version = "5.3.1" description = "Python client for Redis database and key-value store" -optional = true +optional = false python-versions = ">=3.8" groups = ["main"] -markers = "extra == \"redis\"" files = [ {file = "redis-5.3.1-py3-none-any.whl", hash = "sha256:dc1909bd24669cc31b5f67a039700b16ec30571096c5f1f0d9d2324bff31af97"}, {file = "redis-5.3.1.tar.gz", hash = "sha256:ca49577a531ea64039b5a36db3d6cd1a0c7a60c34124d46924a45b956e8cf14c"}, @@ -1232,6 +1445,28 @@ PyJWT = ">=2.9.0" hiredis = ["hiredis (>=3.0.0)"] ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==23.2.1)", "requests (>=2.31.0)"] +[[package]] +name = "requests" +version = "2.32.5" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6"}, + {file = "requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset_normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + [[package]] name = "rich" version = "13.9.4" @@ -1433,6 +1668,24 @@ files = [ [package.dependencies] typing-extensions = ">=4.12.0" +[[package]] +name = "urllib3" +version = "2.6.3" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4"}, + {file = "urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed"}, +] + +[package.extras] +brotli = ["brotli (>=1.2.0) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=1.2.0.0) ; platform_python_implementation != \"CPython\""] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["backports-zstd (>=1.0.0) ; python_version < \"3.14\""] + [[package]] name = "uvicorn" version = "0.32.1" @@ -1462,68 +1715,49 @@ standard = ["colorama (>=0.4) ; sys_platform == \"win32\"", "httptools (>=0.6.3) [[package]] name = "uvloop" -version = "0.22.1" +version = "0.19.0" description = "Fast implementation of asyncio event loop on top of libuv" optional = false -python-versions = ">=3.8.1" +python-versions = ">=3.8.0" groups = ["main"] -markers = "sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"" -files = [ - {file = "uvloop-0.22.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ef6f0d4cc8a9fa1f6a910230cd53545d9a14479311e87e3cb225495952eb672c"}, - {file = "uvloop-0.22.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7cd375a12b71d33d46af85a3343b35d98e8116134ba404bd657b3b1d15988792"}, - {file = "uvloop-0.22.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ac33ed96229b7790eb729702751c0e93ac5bc3bcf52ae9eccbff30da09194b86"}, - {file = "uvloop-0.22.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:481c990a7abe2c6f4fc3d98781cc9426ebd7f03a9aaa7eb03d3bfc68ac2a46bd"}, - {file = "uvloop-0.22.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a592b043a47ad17911add5fbd087c76716d7c9ccc1d64ec9249ceafd735f03c2"}, - {file = 
"uvloop-0.22.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1489cf791aa7b6e8c8be1c5a080bae3a672791fcb4e9e12249b05862a2ca9cec"}, - {file = "uvloop-0.22.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c60ebcd36f7b240b30788554b6f0782454826a0ed765d8430652621b5de674b9"}, - {file = "uvloop-0.22.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b7f102bf3cb1995cfeaee9321105e8f5da76fdb104cdad8986f85461a1b7b77"}, - {file = "uvloop-0.22.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:53c85520781d84a4b8b230e24a5af5b0778efdb39142b424990ff1ef7c48ba21"}, - {file = "uvloop-0.22.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:56a2d1fae65fd82197cb8c53c367310b3eabe1bbb9fb5a04d28e3e3520e4f702"}, - {file = "uvloop-0.22.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:40631b049d5972c6755b06d0bfe8233b1bd9a8a6392d9d1c45c10b6f9e9b2733"}, - {file = "uvloop-0.22.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:535cc37b3a04f6cd2c1ef65fa1d370c9a35b6695df735fcff5427323f2cd5473"}, - {file = "uvloop-0.22.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:fe94b4564e865d968414598eea1a6de60adba0c040ba4ed05ac1300de402cd42"}, - {file = "uvloop-0.22.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:51eb9bd88391483410daad430813d982010f9c9c89512321f5b60e2cddbdddd6"}, - {file = "uvloop-0.22.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:700e674a166ca5778255e0e1dc4e9d79ab2acc57b9171b79e65feba7184b3370"}, - {file = "uvloop-0.22.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b5b1ac819a3f946d3b2ee07f09149578ae76066d70b44df3fa990add49a82e4"}, - {file = "uvloop-0.22.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e047cc068570bac9866237739607d1313b9253c3051ad84738cbb095be0537b2"}, - {file = 
"uvloop-0.22.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:512fec6815e2dd45161054592441ef76c830eddaad55c8aa30952e6fe1ed07c0"}, - {file = "uvloop-0.22.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:561577354eb94200d75aca23fbde86ee11be36b00e52a4eaf8f50fb0c86b7705"}, - {file = "uvloop-0.22.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1cdf5192ab3e674ca26da2eada35b288d2fa49fdd0f357a19f0e7c4e7d5077c8"}, - {file = "uvloop-0.22.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6e2ea3d6190a2968f4a14a23019d3b16870dd2190cd69c8180f7c632d21de68d"}, - {file = "uvloop-0.22.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0530a5fbad9c9e4ee3f2b33b148c6a64d47bbad8000ea63704fa8260f4cf728e"}, - {file = "uvloop-0.22.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bc5ef13bbc10b5335792360623cc378d52d7e62c2de64660616478c32cd0598e"}, - {file = "uvloop-0.22.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1f38ec5e3f18c8a10ded09742f7fb8de0108796eb673f30ce7762ce1b8550cad"}, - {file = "uvloop-0.22.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3879b88423ec7e97cd4eba2a443aa26ed4e59b45e6b76aabf13fe2f27023a142"}, - {file = "uvloop-0.22.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:4baa86acedf1d62115c1dc6ad1e17134476688f08c6efd8a2ab076e815665c74"}, - {file = "uvloop-0.22.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:297c27d8003520596236bdb2335e6b3f649480bd09e00d1e3a99144b691d2a35"}, - {file = "uvloop-0.22.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c1955d5a1dd43198244d47664a5858082a3239766a839b2102a269aaff7a4e25"}, - {file = "uvloop-0.22.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b31dc2fccbd42adc73bc4e7cdbae4fc5086cf378979e53ca5d0301838c5682c6"}, - {file = 
"uvloop-0.22.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:93f617675b2d03af4e72a5333ef89450dfaa5321303ede6e67ba9c9d26878079"}, - {file = "uvloop-0.22.1-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:37554f70528f60cad66945b885eb01f1bb514f132d92b6eeed1c90fd54ed6289"}, - {file = "uvloop-0.22.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:b76324e2dc033a0b2f435f33eb88ff9913c156ef78e153fb210e03c13da746b3"}, - {file = "uvloop-0.22.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:badb4d8e58ee08dad957002027830d5c3b06aea446a6a3744483c2b3b745345c"}, - {file = "uvloop-0.22.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b91328c72635f6f9e0282e4a57da7470c7350ab1c9f48546c0f2866205349d21"}, - {file = "uvloop-0.22.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:daf620c2995d193449393d6c62131b3fbd40a63bf7b307a1527856ace637fe88"}, - {file = "uvloop-0.22.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6cde23eeda1a25c75b2e07d39970f3374105d5eafbaab2a4482be82f272d5a5e"}, - {file = "uvloop-0.22.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:80eee091fe128e425177fbd82f8635769e2f32ec9daf6468286ec57ec0313efa"}, - {file = "uvloop-0.22.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:017bd46f9e7b78e81606329d07141d3da446f8798c6baeec124260e22c262772"}, - {file = "uvloop-0.22.1-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c3e5c6727a57cb6558592a95019e504f605d1c54eb86463ee9f7a2dbd411c820"}, - {file = "uvloop-0.22.1-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:57df59d8b48feb0e613d9b1f5e57b7532e97cbaf0d61f7aa9aa32221e84bc4b6"}, - {file = "uvloop-0.22.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:55502bc2c653ed2e9692e8c55cb95b397d33f9f2911e929dc97c4d6b26d04242"}, - {file = "uvloop-0.22.1-cp38-cp38-musllinux_1_2_x86_64.whl", 
hash = "sha256:4a968a72422a097b09042d5fa2c5c590251ad484acf910a651b4b620acd7f193"}, - {file = "uvloop-0.22.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b45649628d816c030dba3c80f8e2689bab1c89518ed10d426036cdc47874dfc4"}, - {file = "uvloop-0.22.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ea721dd3203b809039fcc2983f14608dae82b212288b346e0bfe46ec2fab0b7c"}, - {file = "uvloop-0.22.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0ae676de143db2b2f60a9696d7eca5bb9d0dd6cc3ac3dad59a8ae7e95f9e1b54"}, - {file = "uvloop-0.22.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:17d4e97258b0172dfa107b89aa1eeba3016f4b1974ce85ca3ef6a66b35cbf659"}, - {file = "uvloop-0.22.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:05e4b5f86e621cf3927631789999e697e58f0d2d32675b67d9ca9eb0bca55743"}, - {file = "uvloop-0.22.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:286322a90bea1f9422a470d5d2ad82d38080be0a29c4dd9b3e6384320a4d11e7"}, - {file = "uvloop-0.22.1.tar.gz", hash = "sha256:6c84bae345b9147082b17371e3dd5d42775bddce91f885499017f4607fdaf39f"}, +markers = "sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\" or extra == \"uvloop\"" +files = [ + {file = "uvloop-0.19.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:de4313d7f575474c8f5a12e163f6d89c0a878bc49219641d49e6f1444369a90e"}, + {file = "uvloop-0.19.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5588bd21cf1fcf06bded085f37e43ce0e00424197e7c10e77afd4bbefffef428"}, + {file = "uvloop-0.19.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b1fd71c3843327f3bbc3237bedcdb6504fd50368ab3e04d0410e52ec293f5b8"}, + {file = "uvloop-0.19.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a05128d315e2912791de6088c34136bfcdd0c7cbc1cf85fd6fd1bb321b7c849"}, + {file = 
"uvloop-0.19.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:cd81bdc2b8219cb4b2556eea39d2e36bfa375a2dd021404f90a62e44efaaf957"}, + {file = "uvloop-0.19.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5f17766fb6da94135526273080f3455a112f82570b2ee5daa64d682387fe0dcd"}, + {file = "uvloop-0.19.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4ce6b0af8f2729a02a5d1575feacb2a94fc7b2e983868b009d51c9a9d2149bef"}, + {file = "uvloop-0.19.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:31e672bb38b45abc4f26e273be83b72a0d28d074d5b370fc4dcf4c4eb15417d2"}, + {file = "uvloop-0.19.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:570fc0ed613883d8d30ee40397b79207eedd2624891692471808a95069a007c1"}, + {file = "uvloop-0.19.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5138821e40b0c3e6c9478643b4660bd44372ae1e16a322b8fc07478f92684e24"}, + {file = "uvloop-0.19.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:91ab01c6cd00e39cde50173ba4ec68a1e578fee9279ba64f5221810a9e786533"}, + {file = "uvloop-0.19.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:47bf3e9312f63684efe283f7342afb414eea4d3011542155c7e625cd799c3b12"}, + {file = "uvloop-0.19.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:da8435a3bd498419ee8c13c34b89b5005130a476bda1d6ca8cfdde3de35cd650"}, + {file = "uvloop-0.19.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:02506dc23a5d90e04d4f65c7791e65cf44bd91b37f24cfc3ef6cf2aff05dc7ec"}, + {file = "uvloop-0.19.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2693049be9d36fef81741fddb3f441673ba12a34a704e7b4361efb75cf30befc"}, + {file = "uvloop-0.19.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7010271303961c6f0fe37731004335401eb9075a12680738731e9c92ddd96ad6"}, + {file = "uvloop-0.19.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:5daa304d2161d2918fa9a17d5635099a2f78ae5b5960e742b2fcfbb7aefaa593"}, + 
{file = "uvloop-0.19.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7207272c9520203fea9b93843bb775d03e1cf88a80a936ce760f60bb5add92f3"}, + {file = "uvloop-0.19.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:78ab247f0b5671cc887c31d33f9b3abfb88d2614b84e4303f1a63b46c046c8bd"}, + {file = "uvloop-0.19.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:472d61143059c84947aa8bb74eabbace30d577a03a1805b77933d6bd13ddebbd"}, + {file = "uvloop-0.19.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45bf4c24c19fb8a50902ae37c5de50da81de4922af65baf760f7c0c42e1088be"}, + {file = "uvloop-0.19.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:271718e26b3e17906b28b67314c45d19106112067205119dddbd834c2b7ce797"}, + {file = "uvloop-0.19.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:34175c9fd2a4bc3adc1380e1261f60306344e3407c20a4d684fd5f3be010fa3d"}, + {file = "uvloop-0.19.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e27f100e1ff17f6feeb1f33968bc185bf8ce41ca557deee9d9bbbffeb72030b7"}, + {file = "uvloop-0.19.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:13dfdf492af0aa0a0edf66807d2b465607d11c4fa48f4a1fd41cbea5b18e8e8b"}, + {file = "uvloop-0.19.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6e3d4e85ac060e2342ff85e90d0c04157acb210b9ce508e784a944f852a40e67"}, + {file = "uvloop-0.19.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8ca4956c9ab567d87d59d49fa3704cf29e37109ad348f2d5223c9bf761a332e7"}, + {file = "uvloop-0.19.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f467a5fd23b4fc43ed86342641f3936a68ded707f4627622fa3f82a120e18256"}, + {file = "uvloop-0.19.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:492e2c32c2af3f971473bc22f086513cedfc66a130756145a931a90c3958cb17"}, + {file = "uvloop-0.19.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2df95fca285a9f5bfe730e51945ffe2fa71ccbfdde3b0da5772b4ee4f2e770d5"}, + {file = 
"uvloop-0.19.0.tar.gz", hash = "sha256:0246f4fd1bf2bf702e06b0d45ee91677ee5c31242f39aab4ea6fe0c51aedd0fd"}, ] [package.extras] -dev = ["Cython (>=3.0,<4.0)", "setuptools (>=60)"] -docs = ["Sphinx (>=4.1.2,<4.2.0)", "sphinx_rtd_theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"] -test = ["aiohttp (>=3.10.5)", "flake8 (>=6.1,<7.0)", "mypy (>=0.800)", "psutil", "pyOpenSSL (>=25.3.0,<25.4.0)", "pycodestyle (>=2.11.0,<2.12.0)"] +docs = ["Sphinx (>=4.1.2,<4.2.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"] +test = ["Cython (>=0.29.36,<0.30.0)", "aiohttp (==3.9.0b0) ; python_version >= \"3.12\"", "aiohttp (>=3.8.1) ; python_version < \"3.12\"", "flake8 (>=5.0,<6.0)", "mypy (>=0.800)", "psutil", "pyOpenSSL (>=23.0.0,<23.1.0)", "pycodestyle (>=2.9.0,<2.10.0)"] [[package]] name = "virtualenv" @@ -1809,9 +2043,9 @@ test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more_it type = ["pytest-mypy"] [extras] -redis = ["redis"] +uvloop = ["uvloop"] [metadata] lock-version = "2.1" python-versions = ">=3.10,<3.13" -content-hash = "b0dec5a80eb2783d2b5abcb5308f265f53e5e2c837d113e2347e27300b98e76c" +content-hash = "ce773bfb444d195c8d338f3056b8bd1980360b296de807d249530d567d6454fb" diff --git a/pyproject.toml b/pyproject.toml index 44d208b..c68c51d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,10 +36,12 @@ opentelemetry-api = "^1.39.1" opentelemetry-exporter-prometheus = "^0.60b1" fastapi = "^0.128.0" uvicorn = {extras = ["standard"], version = "^0.32.0"} -redis = {version = "^5.0.0", optional = true} +redis = "^5.0.0" +uvloop = {version = "^0.19.0", optional = true} +opentelemetry-exporter-otlp-proto-http = "^1.39.1" [tool.poetry.extras] -redis = ["redis"] +uvloop = ["uvloop"] [tool.poetry.group.dev.dependencies] pytest = "^8.3.4" @@ -92,3 +94,7 @@ warn_unused_ignores = true [[tool.mypy.overrides]] module = "redis.*" ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "genesis.loop" 
+ignore_missing_imports = true diff --git a/tests/test_cli.py b/tests/test_cli.py index b571a47..d13f8ac 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -34,10 +34,7 @@ def simple_info(msg, *args, **kwargs): monkeypatch.setattr(genesis.cli.logger, "info", simple_info) monkeypatch.setattr(genesis.cli.logger, "warning", lambda *args, **kwargs: None) - # We still keep the patch to avoid actual network binding if possible, - # but the env var ensures if it DOES bind, it won't conflict. - with patch("genesis.cli.start_http_server"): - yield + yield runner = CliRunner() diff --git a/tests/test_loop.py b/tests/test_loop.py new file mode 100644 index 0000000..ec27efa --- /dev/null +++ b/tests/test_loop.py @@ -0,0 +1,11 @@ +"""Tests for genesis loop utilities (uvloop support).""" + +import pytest + +from genesis.loop import use_uvloop + + +def test_use_uvloop_returns_bool() -> None: + """use_uvloop() returns True if uvloop was set, False otherwise.""" + result = use_uvloop() + assert isinstance(result, bool) diff --git a/tests/test_otel_config.py b/tests/test_otel_config.py new file mode 100644 index 0000000..7ea2b5d --- /dev/null +++ b/tests/test_otel_config.py @@ -0,0 +1,148 @@ +import pytest + +from genesis.observability.otel_config import ( + create_resource, + get_otel_exporter_otlp_endpoint, + get_otel_exporter_otlp_metrics_endpoint, + get_otel_exporter_otlp_traces_endpoint, + get_otel_resource_attributes, + get_otel_service_name, + is_otel_sdk_disabled, +) + + +def test_is_otel_sdk_disabled_unset(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.delenv("OTEL_SDK_DISABLED", raising=False) + assert is_otel_sdk_disabled() is False + + +def test_is_otel_sdk_disabled_true(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("OTEL_SDK_DISABLED", "true") + assert is_otel_sdk_disabled() is True + + +def test_is_otel_sdk_disabled_true_case_insensitive( + monkeypatch: pytest.MonkeyPatch, +) -> None: + monkeypatch.setenv("OTEL_SDK_DISABLED", "TRUE") + 
assert is_otel_sdk_disabled() is True + + +def test_is_otel_sdk_disabled_false(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("OTEL_SDK_DISABLED", "false") + assert is_otel_sdk_disabled() is False + + +def test_get_otel_service_name_default(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.delenv("OTEL_SERVICE_NAME", raising=False) + assert get_otel_service_name() == "genesis" + + +def test_get_otel_service_name_custom(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("OTEL_SERVICE_NAME", "my-service") + assert get_otel_service_name() == "my-service" + + +def test_get_otel_resource_attributes_empty(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.delenv("OTEL_RESOURCE_ATTRIBUTES", raising=False) + assert get_otel_resource_attributes() == {} + + +def test_get_otel_resource_attributes_single(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("OTEL_RESOURCE_ATTRIBUTES", "deployment.environment=prod") + assert get_otel_resource_attributes() == {"deployment.environment": "prod"} + + +def test_get_otel_resource_attributes_multiple(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv( + "OTEL_RESOURCE_ATTRIBUTES", + "deployment.environment=prod,service.version=1.0.0", + ) + assert get_otel_resource_attributes() == { + "deployment.environment": "prod", + "service.version": "1.0.0", + } + + +def test_get_otel_resource_attributes_value_with_equals( + monkeypatch: pytest.MonkeyPatch, +) -> None: + monkeypatch.setenv("OTEL_RESOURCE_ATTRIBUTES", "key=value=with=equals") + assert get_otel_resource_attributes() == {"key": "value=with=equals"} + + +def test_create_resource_default(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.delenv("OTEL_SERVICE_NAME", raising=False) + monkeypatch.delenv("OTEL_RESOURCE_ATTRIBUTES", raising=False) + resource = create_resource() + assert resource.attributes["service.name"] == "genesis" + + +def test_create_resource_custom_service_name(monkeypatch: pytest.MonkeyPatch) -> None: 
+ monkeypatch.setenv("OTEL_SERVICE_NAME", "custom-app") + monkeypatch.delenv("OTEL_RESOURCE_ATTRIBUTES", raising=False) + resource = create_resource() + assert resource.attributes["service.name"] == "custom-app" + + +def test_create_resource_service_name_overrides_attributes( + monkeypatch: pytest.MonkeyPatch, +) -> None: + monkeypatch.setenv("OTEL_SERVICE_NAME", "from-env") + monkeypatch.setenv("OTEL_RESOURCE_ATTRIBUTES", "service.name=from-attributes") + resource = create_resource() + assert resource.attributes["service.name"] == "from-env" + + +def test_create_resource_with_attributes(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.delenv("OTEL_SERVICE_NAME", raising=False) + monkeypatch.setenv( + "OTEL_RESOURCE_ATTRIBUTES", + "deployment.environment=staging,service.version=2.0", + ) + resource = create_resource() + assert resource.attributes["service.name"] == "genesis" + assert resource.attributes["deployment.environment"] == "staging" + assert resource.attributes["service.version"] == "2.0" + + +def test_get_otel_exporter_otlp_endpoint_unset(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.delenv("OTEL_EXPORTER_OTLP_ENDPOINT", raising=False) + assert get_otel_exporter_otlp_endpoint() is None + + +def test_get_otel_exporter_otlp_endpoint_set(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("OTEL_EXPORTER_OTLP_ENDPOINT", "http://localhost:4318") + assert get_otel_exporter_otlp_endpoint() == "http://localhost:4318" + + +def test_get_otel_exporter_otlp_metrics_endpoint_fallback( + monkeypatch: pytest.MonkeyPatch, +) -> None: + monkeypatch.delenv("OTEL_EXPORTER_OTLP_METRICS_ENDPOINT", raising=False) + monkeypatch.setenv("OTEL_EXPORTER_OTLP_ENDPOINT", "http://collector:4318") + assert get_otel_exporter_otlp_metrics_endpoint() == "http://collector:4318" + + +def test_get_otel_exporter_otlp_metrics_endpoint_override( + monkeypatch: pytest.MonkeyPatch, +) -> None: + monkeypatch.setenv("OTEL_EXPORTER_OTLP_ENDPOINT", "http://default:4318") + 
monkeypatch.setenv("OTEL_EXPORTER_OTLP_METRICS_ENDPOINT", "http://metrics:4318") + assert get_otel_exporter_otlp_metrics_endpoint() == "http://metrics:4318" + + +def test_get_otel_exporter_otlp_traces_endpoint_fallback( + monkeypatch: pytest.MonkeyPatch, +) -> None: + monkeypatch.delenv("OTEL_EXPORTER_OTLP_TRACES_ENDPOINT", raising=False) + monkeypatch.setenv("OTEL_EXPORTER_OTLP_ENDPOINT", "http://collector:4318") + assert get_otel_exporter_otlp_traces_endpoint() == "http://collector:4318" + + +def test_get_otel_exporter_otlp_traces_endpoint_override( + monkeypatch: pytest.MonkeyPatch, +) -> None: + monkeypatch.setenv("OTEL_EXPORTER_OTLP_ENDPOINT", "http://default:4318") + monkeypatch.setenv("OTEL_EXPORTER_OTLP_TRACES_ENDPOINT", "http://traces:4318") + assert get_otel_exporter_otlp_traces_endpoint() == "http://traces:4318" diff --git a/tests/test_queue.py b/tests/test_queue.py new file mode 100644 index 0000000..71f69d0 --- /dev/null +++ b/tests/test_queue.py @@ -0,0 +1,238 @@ +"""Tests for genesis queue (slot, semaphore, backends).""" + +import asyncio + +import pytest + +from genesis.queue import InMemoryBackend, Queue, QueueSemaphore, QueueTimeoutError + + +@pytest.mark.asyncio +async def test_in_memory_backend_enqueue_wait_and_acquire_release(): + """Backend: enqueue, wait_and_acquire, release.""" + backend = InMemoryBackend() + await backend.enqueue("q1", "item1") + await backend.wait_and_acquire("q1", "item1", max_concurrent=1) + await backend.release("q1") + + +@pytest.mark.asyncio +async def test_in_memory_backend_fifo_order(): + """Backend: first enqueued acquires first when slot free.""" + backend = InMemoryBackend() + await backend.enqueue("q1", "first") + await backend.enqueue("q1", "second") + + # First acquires + await backend.wait_and_acquire("q1", "first", max_concurrent=1) + # Second must wait until first releases + entered = asyncio.Event() + released = asyncio.Event() + + async def second_acquires(): + await backend.wait_and_acquire("q1", 
"second", max_concurrent=1) + entered.set() + await released.wait() + await backend.release("q1") + + async def first_releases(): + await backend.release("q1") + released.set() + + t2 = asyncio.create_task(second_acquires()) + t1 = asyncio.create_task(first_releases()) + await asyncio.wait_for(entered.wait(), timeout=2.0) + await asyncio.wait_for(t2, timeout=2.0) + t1.cancel() + try: + await t1 + except asyncio.CancelledError: + pass + + +@pytest.mark.asyncio +async def test_queue_slot_context_manager(): + """Queue.slot() is an async context manager; release on exit.""" + backend = InMemoryBackend() + queue = Queue(backend) + entered = asyncio.Event() + + async def use_slot(): + async with queue.slot("sales"): + entered.set() + + t = asyncio.create_task(use_slot()) + await asyncio.wait_for(entered.wait(), timeout=2.0) + await asyncio.wait_for(t, timeout=2.0) + + +@pytest.mark.asyncio +async def test_queue_slot_with_item_id(): + """Queue.slot(queue_id, item_id=...) uses that item_id for ordering.""" + backend = InMemoryBackend() + queue = Queue(backend) + order: list[str] = [] + + async def first(): + async with queue.slot("q", item_id="a"): + order.append("a-in") + order.append("a-out") + + async def second(): + async with queue.slot("q", item_id="b"): + order.append("b-in") + order.append("b-out") + + t1 = asyncio.create_task(first()) + t2 = asyncio.create_task(second()) + await asyncio.gather(t1, t2) + assert order == ["a-in", "a-out", "b-in", "b-out"] + + +@pytest.mark.asyncio +async def test_queue_semaphore_context_manager(): + """Queue.semaphore() returns a reusable context manager.""" + backend = InMemoryBackend() + queue = Queue(backend) + sem = queue.semaphore("support", max_concurrent=1) + entered = asyncio.Event() + + async def use_sem(): + async with sem: + entered.set() + + t = asyncio.create_task(use_sem()) + await asyncio.wait_for(entered.wait(), timeout=2.0) + await asyncio.wait_for(t, timeout=2.0) + + +@pytest.mark.asyncio +async def 
test_queue_semaphore_max_concurrent_two(): + """With max_concurrent=2, two can be inside at once.""" + backend = InMemoryBackend() + queue = Queue(backend) + sem = queue.semaphore("pool", max_concurrent=2) + both_inside = asyncio.Event() + + async def enter(ready: asyncio.Event): + async with sem: + ready.set() + await both_inside.wait() + + r1 = asyncio.Event() + r2 = asyncio.Event() + t1 = asyncio.create_task(enter(r1)) + t2 = asyncio.create_task(enter(r2)) + await asyncio.wait_for(r1.wait(), timeout=2.0) + await asyncio.wait_for(r2.wait(), timeout=2.0) + both_inside.set() + await asyncio.gather(t1, t2) + + +@pytest.mark.asyncio +async def test_queue_slot_semaphore_like(): + """slot() behaves like a semaphore: one in, one out, then next.""" + backend = InMemoryBackend() + queue = Queue(backend) + log: list[str] = [] + + async def worker(name: str): + async with queue.slot("single", item_id=name): + log.append(f"{name}-in") + log.append(f"{name}-out") + + await asyncio.gather( + worker("a"), + worker("b"), + worker("c"), + ) + assert log == ["a-in", "a-out", "b-in", "b-out", "c-in", "c-out"] + + +@pytest.mark.asyncio +async def test_queue_slot_timeout_raises_and_removes_from_queue(): + """With timeout, wait_and_acquire raises QueueTimeoutError and item is removed.""" + backend = InMemoryBackend() + queue = Queue(backend) + entered = asyncio.Event() + + async def holder(): + async with queue.slot("q", item_id="first"): + entered.set() + await asyncio.Event().wait() + + async def waiter(): + await entered.wait() + try: + async with queue.slot("q", item_id="second", timeout=0.2): + pass + pytest.fail("expected QueueTimeoutError") + except QueueTimeoutError: + pass + + t_holder = asyncio.create_task(holder()) + t_waiter = asyncio.create_task(waiter()) + await asyncio.wait_for(t_waiter, timeout=2.0) + t_holder.cancel() + try: + await t_holder + except asyncio.CancelledError: + pass + + +@pytest.mark.asyncio +async def test_queue_slot_timeout_next_in_line_can_acquire(): 
+ """After one item times out, the next in line can acquire.""" + backend = InMemoryBackend() + queue = Queue(backend) + order: list[str] = [] + first_holding = asyncio.Event() + release_first = asyncio.Event() + + async def first_acquires(): + async with queue.slot("q", item_id="a"): + order.append("a-in") + first_holding.set() + await release_first.wait() + order.append("a-out") + + async def second_times_out(): + await first_holding.wait() + try: + async with queue.slot("q", item_id="b", timeout=0.2): + order.append("b-in") + except QueueTimeoutError: + order.append("b-timeout") + order.append("b-done") + release_first.set() + + async def third_acquires(): + await first_holding.wait() + async with queue.slot("q", item_id="c"): + order.append("c-in") + order.append("c-out") + + await asyncio.gather( + first_acquires(), + second_times_out(), + third_acquires(), + ) + assert "a-in" in order and "a-out" in order + assert "b-timeout" in order and "b-done" in order + assert "c-in" in order and "c-out" in order + assert order.index("b-timeout") < order.index("c-in") + + +@pytest.mark.asyncio +async def test_queue_default_in_memory_backend(): + """Queue() without backend uses InMemoryBackend by default.""" + queue = Queue() + entered = asyncio.Event() + + async def use_slot(): + async with queue.slot("default"): + entered.set() + + t = asyncio.create_task(use_slot()) + await asyncio.wait_for(entered.wait(), timeout=2.0) + await asyncio.wait_for(t, timeout=2.0) From 7b8d8d5f34ce5a2d9a40dd1503c84f6f35dbc712 Mon Sep 17 00:00:00 2001 From: Vitor Hugo Date: Tue, 3 Mar 2026 01:32:34 -0300 Subject: [PATCH 2/5] fix: address Sonar issues on queue (S3776, S1192, S2737, S2772, S7497, S108) --- genesis/queue/backends.py | 88 ++++++++++++++++++++-------------- genesis/queue/core.py | 13 +++-- genesis/queue/redis_backend.py | 4 +- tests/test_queue.py | 8 ++-- 4 files changed, 63 insertions(+), 50 deletions(-) diff --git a/genesis/queue/backends.py b/genesis/queue/backends.py index 
b0b52ae..f2fb6f0 100644 --- a/genesis/queue/backends.py +++ b/genesis/queue/backends.py @@ -84,52 +84,46 @@ async def enqueue(self, queue_id: str, item_id: str) -> None: state.deque.append(item_id) state.condition.notify_all() - async def wait_and_acquire( + async def _wait_until_at_head( self, - queue_id: str, + state: _QueueState, item_id: str, - max_concurrent: int, - timeout: Optional[float] = None, + deadline: Optional[float], ) -> None: - """ - Block until this item is at the head and a slot is free, then pop head and acquire. - First call for a queue_id sets max_concurrent for that queue. - If timeout (seconds) expires, remove item from queue and raise QueueTimeoutError. - """ - state = self._get_or_create_state(queue_id) - if state.semaphore is None: - state.semaphore = asyncio.Semaphore(max_concurrent) - deadline = time.monotonic() + timeout if timeout is not None else None - async with state.lock: - while True: - if state.deque and state.deque[0] == item_id: - state.deque.popleft() - state.condition.notify_all() - break - remaining = None - if deadline is not None: - remaining = deadline - time.monotonic() - if remaining <= 0: - try: - state.deque.remove(item_id) - except ValueError: - pass - state.condition.notify_all() - raise QueueTimeoutError() - try: - if remaining is not None: - await asyncio.wait_for( - state.condition.wait(), timeout=remaining - ) - else: - await state.condition.wait() - except asyncio.TimeoutError: + """Wait until item_id is at head of deque; on timeout remove item and raise.""" + while True: + if state.deque and state.deque[0] == item_id: + state.deque.popleft() + state.condition.notify_all() + return + remaining = None + if deadline is not None: + remaining = deadline - time.monotonic() + if remaining <= 0: try: state.deque.remove(item_id) except ValueError: pass state.condition.notify_all() raise QueueTimeoutError() + try: + if remaining is not None: + await asyncio.wait_for(state.condition.wait(), timeout=remaining) + else: + 
await state.condition.wait() + except asyncio.TimeoutError: + try: + state.deque.remove(item_id) + except ValueError: + pass + state.condition.notify_all() + raise QueueTimeoutError() + + async def _acquire_semaphore( + self, state: _QueueState, deadline: Optional[float] + ) -> None: + """Acquire one semaphore slot; raise QueueTimeoutError if deadline exceeded.""" + assert state.semaphore is not None # set in wait_and_acquire before calling if deadline is not None: remaining = deadline - time.monotonic() if remaining <= 0: @@ -141,6 +135,26 @@ async def wait_and_acquire( else: await state.semaphore.acquire() + async def wait_and_acquire( + self, + queue_id: str, + item_id: str, + max_concurrent: int, + timeout: Optional[float] = None, + ) -> None: + """ + Block until this item is at the head and a slot is free, then pop head and acquire. + First call for a queue_id sets max_concurrent for that queue. + If timeout (seconds) expires, remove item from queue and raise QueueTimeoutError. + """ + state = self._get_or_create_state(queue_id) + if state.semaphore is None: + state.semaphore = asyncio.Semaphore(max_concurrent) + deadline = time.monotonic() + timeout if timeout is not None else None + async with state.lock: + await self._wait_until_at_head(state, item_id, deadline) + await self._acquire_semaphore(state, deadline) + async def release(self, queue_id: str) -> None: """Release one slot for the queue.""" if queue_id in self._states: diff --git a/genesis/queue/core.py b/genesis/queue/core.py index e2665a0..5fe2c59 100644 --- a/genesis/queue/core.py +++ b/genesis/queue/core.py @@ -30,6 +30,9 @@ unit="s", ) +ATTR_QUEUE_ID = "queue.id" +ATTR_QUEUE_ITEM_ID = "queue.item_id" + class QueueSlot: """ @@ -73,8 +76,8 @@ async def __aenter__(self) -> "QueueSlot": with tracer.start_as_current_span( "queue.wait_and_acquire", attributes={ - "queue.id": self._queue_id, - "queue.item_id": self._item_id, + ATTR_QUEUE_ID: self._queue_id, + ATTR_QUEUE_ITEM_ID: self._item_id, }, ): 
await self._queue._backend.wait_and_acquire( @@ -85,9 +88,9 @@ async def __aenter__(self) -> "QueueSlot": ) self._acquired = True elapsed = time.monotonic() - start - queue_wait_duration.record(elapsed, attributes={"queue.id": self._queue_id}) + queue_wait_duration.record(elapsed, attributes={ATTR_QUEUE_ID: self._queue_id}) queue_operations_counter.add( - 1, attributes={"queue.id": self._queue_id, "op": "acquire"} + 1, attributes={ATTR_QUEUE_ID: self._queue_id, "op": "acquire"} ) return self @@ -96,7 +99,7 @@ async def __aexit__(self, *args: object) -> None: self._released = True await self._queue._release(self._queue_id) queue_operations_counter.add( - 1, attributes={"queue.id": self._queue_id, "op": "release"} + 1, attributes={ATTR_QUEUE_ID: self._queue_id, "op": "release"} ) diff --git a/genesis/queue/redis_backend.py b/genesis/queue/redis_backend.py index c769c82..3088836 100644 --- a/genesis/queue/redis_backend.py +++ b/genesis/queue/redis_backend.py @@ -105,8 +105,6 @@ async def _wait_for_release_signal(self, client: Any, channel: str) -> None: if msg.get("type") == "message": return await asyncio.sleep(0) - except asyncio.CancelledError: - raise finally: await sub.unsubscribe(channel) await sub.close() @@ -149,7 +147,7 @@ async def wait_and_acquire( if deadline is not None and time.monotonic() >= deadline: await client.lrem(waiting_key, 1, item_id) raise QueueTimeoutError() - pass + # retry loop async def release(self, queue_id: str) -> None: """Release one slot for the queue.""" diff --git a/tests/test_queue.py b/tests/test_queue.py index 71f69d0..3809e81 100644 --- a/tests/test_queue.py +++ b/tests/test_queue.py @@ -47,7 +47,7 @@ async def first_releases(): try: await t1 except asyncio.CancelledError: - pass + raise @pytest.mark.asyncio @@ -168,16 +168,14 @@ async def waiter(): pass pytest.fail("expected QueueTimeoutError") except QueueTimeoutError: - pass + pass # expected t_holder = asyncio.create_task(holder()) t_waiter = asyncio.create_task(waiter()) 
await asyncio.wait_for(t_waiter, timeout=2.0) t_holder.cancel() - try: + with pytest.raises(asyncio.CancelledError): await t_holder - except asyncio.CancelledError: - pass @pytest.mark.asyncio From f5047be667853516c59ad35daacaafd1f8874298 Mon Sep 17 00:00:00 2001 From: Vitor Hugo Date: Tue, 3 Mar 2026 08:21:55 -0300 Subject: [PATCH 3/5] fix: more Sonar queue fixes (S3776, S2737, S108) - backends: extract _remove_item_and_raise_timeout to lower cognitive complexity - test_queue: drop redundant cancel/except in fifo test; use pytest.raises for timeout --- genesis/queue/backends.py | 29 +++++++++++++++++------------ tests/test_queue.py | 11 ++--------- 2 files changed, 19 insertions(+), 21 deletions(-) diff --git a/genesis/queue/backends.py b/genesis/queue/backends.py index f2fb6f0..5fbffe5 100644 --- a/genesis/queue/backends.py +++ b/genesis/queue/backends.py @@ -84,6 +84,21 @@ async def enqueue(self, queue_id: str, item_id: str) -> None: state.deque.append(item_id) state.condition.notify_all() + def _remove_item_and_raise_timeout( + self, + state: _QueueState, + item_id: str, + ) -> None: + """ + Remove item_id from deque if present, notify waiters and raise QueueTimeoutError. 
+ """ + try: + state.deque.remove(item_id) + except ValueError: + pass + state.condition.notify_all() + raise QueueTimeoutError() + async def _wait_until_at_head( self, state: _QueueState, @@ -100,24 +115,14 @@ async def _wait_until_at_head( if deadline is not None: remaining = deadline - time.monotonic() if remaining <= 0: - try: - state.deque.remove(item_id) - except ValueError: - pass - state.condition.notify_all() - raise QueueTimeoutError() + self._remove_item_and_raise_timeout(state, item_id) try: if remaining is not None: await asyncio.wait_for(state.condition.wait(), timeout=remaining) else: await state.condition.wait() except asyncio.TimeoutError: - try: - state.deque.remove(item_id) - except ValueError: - pass - state.condition.notify_all() - raise QueueTimeoutError() + self._remove_item_and_raise_timeout(state, item_id) async def _acquire_semaphore( self, state: _QueueState, deadline: Optional[float] diff --git a/tests/test_queue.py b/tests/test_queue.py index 3809e81..4267f75 100644 --- a/tests/test_queue.py +++ b/tests/test_queue.py @@ -43,11 +43,7 @@ async def first_releases(): t1 = asyncio.create_task(first_releases()) await asyncio.wait_for(entered.wait(), timeout=2.0) await asyncio.wait_for(t2, timeout=2.0) - t1.cancel() - try: - await t1 - except asyncio.CancelledError: - raise + await t1 @pytest.mark.asyncio @@ -163,12 +159,9 @@ async def holder(): async def waiter(): await entered.wait() - try: + with pytest.raises(QueueTimeoutError): async with queue.slot("q", item_id="second", timeout=0.2): pass - pytest.fail("expected QueueTimeoutError") - except QueueTimeoutError: - pass # expected t_holder = asyncio.create_task(holder()) t_waiter = asyncio.create_task(waiter()) From 291598b686931397f5ee2f05ee1eceb406800146 Mon Sep 17 00:00:00 2001 From: Vitor Hugo Date: Tue, 3 Mar 2026 08:29:54 -0300 Subject: [PATCH 4/5] chore: cleanup sonar integration From 8638799915e4c928126c9482711225791f1cedd7 Mon Sep 17 00:00:00 2001 From: Vitor Hugo Date: Sat, 21 Mar 
2026 00:52:16 -0300 Subject: [PATCH 5/5] fix: semaphore race condition, redis client leak, cancelled error handling, cli docstring + redis tests --- genesis/cli/__init__.py | 15 ++--- genesis/queue/backends.py | 13 +++- genesis/queue/redis_backend.py | 15 ++++- tests/test_queue.py | 119 +++++++++++++++++++++++++++++++++ 4 files changed, 150 insertions(+), 12 deletions(-) diff --git a/genesis/cli/__init__.py b/genesis/cli/__init__.py index 001ddc6..c84d3a9 100644 --- a/genesis/cli/__init__.py +++ b/genesis/cli/__init__.py @@ -54,6 +54,13 @@ def callback( typer.Option("--json", help="Output logs in JSON format."), ] = False, ) -> None: + """ + Genesis - [blue]FreeSWITCH Event Socket protocol[/blue] implementation with [bold]asyncio[/bold]. + + Run your FreeSWITCH apps without any external dependencies. + + ℹ️ Read more in the docs: [link]https://otoru.github.io/Genesis/[/link]. + """ reconfigure_logger(json) try: @@ -79,11 +86,3 @@ def callback( trace.set_tracer_provider(tracer_provider) except Exception as e: logger.warning(f"Failed to setup OpenTelemetry: {e}") - - """ - Genesis - [blue]FreeSWITCH Event Socket protocol[/blue] implementation with [bold]asyncio[/bold]. - - Run yours freeswitch apps without any external dependencies. - - ℹ️ Read more in the docs: [link]https://otoru.github.io/Genesis/[/link].
- """ diff --git a/genesis/queue/backends.py b/genesis/queue/backends.py index 5fbffe5..e172bb6 100644 --- a/genesis/queue/backends.py +++ b/genesis/queue/backends.py @@ -18,13 +18,14 @@ class _QueueState: """Per-queue state: FIFO deque, lock, condition; semaphore created on first acquire.""" - __slots__ = ("deque", "lock", "condition", "semaphore") + __slots__ = ("deque", "lock", "condition", "semaphore", "max_concurrent") def __init__(self) -> None: self.deque: deque[str] = deque() self.lock = asyncio.Lock() self.condition = asyncio.Condition(self.lock) self.semaphore: asyncio.Semaphore | None = None + self.max_concurrent: int | None = None @runtime_checkable @@ -153,10 +154,16 @@ async def wait_and_acquire( If timeout (seconds) expires, remove item from queue and raise QueueTimeoutError. """ state = self._get_or_create_state(queue_id) - if state.semaphore is None: - state.semaphore = asyncio.Semaphore(max_concurrent) deadline = time.monotonic() + timeout if timeout is not None else None async with state.lock: + if state.semaphore is None: + state.semaphore = asyncio.Semaphore(max_concurrent) + state.max_concurrent = max_concurrent + elif state.max_concurrent != max_concurrent: + raise ValueError( + f"Queue '{queue_id}' was initialized with max_concurrent={state.max_concurrent}, " + f"got {max_concurrent}." + ) await self._wait_until_at_head(state, item_id, deadline) await self._acquire_semaphore(state, deadline) diff --git a/genesis/queue/redis_backend.py b/genesis/queue/redis_backend.py index 3088836..30d7896 100644 --- a/genesis/queue/redis_backend.py +++ b/genesis/queue/redis_backend.py @@ -14,7 +14,6 @@ import redis.asyncio as redis from genesis.exceptions import QueueTimeoutError -from genesis.queue.backends import QueueBackend # Lua: try to acquire if we're at head and slots available. Keys: waiting_list, in_use_key. Args: item_id, max_concurrent. 
SCRIPT_ACQUIRE = """ @@ -105,6 +104,8 @@ async def _wait_for_release_signal(self, client: Any, channel: str) -> None: if msg.get("type") == "message": return await asyncio.sleep(0) + except asyncio.CancelledError: + raise finally: await sub.unsubscribe(channel) await sub.close() @@ -156,3 +157,15 @@ async def release(self, queue_id: str) -> None: channel = self._channel(queue_id) await client.decr(in_use_key) await client.publish(channel, "1") + + async def close(self) -> None: + """Close the Redis connection.""" + if self._client is not None: + await self._client.aclose() + self._client = None + + async def __aenter__(self) -> "RedisBackend": + return self + + async def __aexit__(self, *args: object) -> None: + await self.close() diff --git a/tests/test_queue.py b/tests/test_queue.py index 4267f75..8ece9ff 100644 --- a/tests/test_queue.py +++ b/tests/test_queue.py @@ -1,10 +1,12 @@ """Tests for genesis queue (slot, semaphore, backends).""" import asyncio +from unittest.mock import AsyncMock, MagicMock import pytest from genesis.queue import InMemoryBackend, Queue, QueueSemaphore, QueueTimeoutError +from genesis.queue.redis_backend import RedisBackend @pytest.mark.asyncio @@ -227,3 +229,120 @@ async def use_slot(): t = asyncio.create_task(use_slot()) await asyncio.wait_for(entered.wait(), timeout=2.0) await asyncio.wait_for(t, timeout=2.0) + + +@pytest.mark.asyncio +async def test_in_memory_backend_max_concurrent_mismatch_raises(): + """Backend: different max_concurrent for the same queue_id raises ValueError.""" + backend = InMemoryBackend() + await backend.enqueue("q1", "item1") + await backend.wait_and_acquire("q1", "item1", max_concurrent=2) + await backend.release("q1") + + await backend.enqueue("q1", "item2") + with pytest.raises(ValueError, match="max_concurrent"): + await backend.wait_and_acquire("q1", "item2", max_concurrent=5) + + +# --------------------------------------------------------------------------- +# RedisBackend (mock-based) +# 
--------------------------------------------------------------------------- + + +def _make_redis_mock(script_result: int = 1) -> MagicMock: + """Return a mock redis client where the Lua script returns script_result.""" + mock_script = AsyncMock(return_value=script_result) + client = AsyncMock() + client.register_script = MagicMock(return_value=mock_script) + return client + + +@pytest.mark.asyncio +async def test_redis_backend_enqueue(): + """RedisBackend.enqueue pushes to the correct Redis key.""" + client = _make_redis_mock() + backend = RedisBackend() + backend._client = client + + await backend.enqueue("q1", "item1") + client.rpush.assert_called_once_with("genesis:queue:q1:waiting", "item1") + + +@pytest.mark.asyncio +async def test_redis_backend_wait_and_acquire_success(): + """RedisBackend: acquire succeeds immediately when Lua script returns 1.""" + client = _make_redis_mock(script_result=1) + backend = RedisBackend() + backend._client = client + + await backend.wait_and_acquire("q1", "item1", max_concurrent=1) + client.register_script.assert_called_once() + + +@pytest.mark.asyncio +async def test_redis_backend_release(): + """RedisBackend.release decrements counter and publishes to release channel.""" + client = _make_redis_mock() + backend = RedisBackend() + backend._client = client + + await backend.release("q1") + client.decr.assert_called_once_with("genesis:queue:q1:in_use") + client.publish.assert_called_once_with("genesis:queue:q1:release", "1") + + +@pytest.mark.asyncio +async def test_redis_backend_wait_and_acquire_timeout(): + """RedisBackend: raises QueueTimeoutError when deadline passes without acquiring.""" + client = _make_redis_mock(script_result=0) # never acquires + + # pubsub() is synchronous; listen() must hang so wait_for can time it out + async def _never_yields(): + await asyncio.Future() + yield # pragma: no cover + + pubsub = MagicMock() + pubsub.subscribe = AsyncMock() + pubsub.unsubscribe = AsyncMock() + pubsub.close = AsyncMock() + 
pubsub.listen = _never_yields + client.pubsub = MagicMock(return_value=pubsub) + + backend = RedisBackend() + backend._client = client + + with pytest.raises(QueueTimeoutError): + await backend.wait_and_acquire("q1", "item1", max_concurrent=1, timeout=0.1) + + +@pytest.mark.asyncio +async def test_redis_backend_close(): + """RedisBackend.close() calls aclose on the client and clears the reference.""" + client = AsyncMock() + backend = RedisBackend() + backend._client = client + + await backend.close() + client.aclose.assert_called_once() + assert backend._client is None + + +@pytest.mark.asyncio +async def test_redis_backend_close_when_no_client(): + """RedisBackend.close() is a no-op when no client exists.""" + backend = RedisBackend() + await backend.close() # must not raise + + +@pytest.mark.asyncio +async def test_redis_backend_context_manager(): + """RedisBackend used as async context manager closes on exit.""" + client = AsyncMock() + backend = RedisBackend() + backend._client = client + + async with backend: + pass + + client.aclose.assert_called_once() + assert backend._client is None