diff --git a/.github/.codespellignorewords b/.github/.codespellignorewords
index 96556582..9131059e 100644
--- a/.github/.codespellignorewords
+++ b/.github/.codespellignorewords
@@ -14,4 +14,5 @@ YML
 SDK
 S3
 Kusto
-NotIn
\ No newline at end of file
+NotIn
+nin
\ No newline at end of file
diff --git a/docs/docs/exosphere/state-manager-setup.md b/docs/docs/exosphere/state-manager-setup.md
index 4799e04b..71436a8c 100644
--- a/docs/docs/exosphere/state-manager-setup.md
+++ b/docs/docs/exosphere/state-manager-setup.md
@@ -29,6 +29,8 @@ The Exosphere state manager is the core backend service that handles workflow ex
     -e MONGO_DATABASE_NAME="your-database-name" \
     -e STATE_MANAGER_SECRET="your-secret-key" \
     -e SECRETS_ENCRYPTION_KEY="your-base64-encoded-encryption-key" \
+    -e TRIGGER_WORKERS="1" \
+    -e TRIGGER_RETENTION_HOURS="720" \
     ghcr.io/exospherehost/exosphere-state-manager:latest
   ```
@@ -84,6 +86,8 @@ The Exosphere state manager is the core backend service that handles workflow ex
   export MONGO_DATABASE_NAME="your-database-name"
   export STATE_MANAGER_SECRET="your-secret-key"
   export SECRETS_ENCRYPTION_KEY="your-base64-encoded-encryption-key"
+  export TRIGGER_WORKERS="1"
+  export TRIGGER_RETENTION_HOURS="720"
   ```

 4. **Run the state manager**:
@@ -151,6 +155,8 @@ The state manager uri and key would be configured accordingly while setting up n
       - MONGO_DATABASE_NAME=${MONGO_DATABASE_NAME}
       - STATE_MANAGER_SECRET=${STATE_MANAGER_SECRET}
       - SECRETS_ENCRYPTION_KEY=${SECRETS_ENCRYPTION_KEY}
+      - TRIGGER_WORKERS=${TRIGGER_WORKERS:-1}
+      - TRIGGER_RETENTION_HOURS=${TRIGGER_RETENTION_HOURS:-720}
     deploy:
       replicas: 3
       update_config:
@@ -182,6 +188,8 @@ The state manager uri and key would be configured accordingly while setting up n
 data:
   MONGO_URI: "your-mongodb-connection-string"
   MONGO_DATABASE_NAME: "your-database-name"
+  TRIGGER_WORKERS: "1"
+  TRIGGER_RETENTION_HOURS: "720"
   LOG_LEVEL: "INFO"
 ```
@@ -212,9 +220,11 @@ The state manager uri and key would be configured accordingly while setting up n
 | Variable | Description | Required | Default |
 |----------|-------------|----------|---------|
 | `MONGO_URI` | MongoDB connection string | Yes | - |
-| `MONGO_DATABASE_NAME` | Database name | Yes | `exosphere` |
+| `MONGO_DATABASE_NAME` | Database name | Yes | `exosphere-state-manager` |
 | `STATE_MANAGER_SECRET` | Secret API key for authentication | Yes | - |
 | `SECRETS_ENCRYPTION_KEY` | Base64-encoded key for data encryption | Yes | - |
+| `TRIGGER_WORKERS` | Number of workers to run the trigger cron | No | `1` |
+| `TRIGGER_RETENTION_HOURS` | Number of hours to retain completed/failed triggers before cleanup | No | `720` (30 days) |
 | `LOG_LEVEL` | Logging level (DEBUG, INFO, WARNING, ERROR) | No | `INFO` |

 ## Monitoring and Health Checks
diff --git a/state-manager/app/config/settings.py b/state-manager/app/config/settings.py
index 5d75fc2b..75cea36d 100644
--- a/state-manager/app/config/settings.py
+++ b/state-manager/app/config/settings.py
@@ -6,13 +6,14 @@
 class Settings(BaseModel):
     """Application settings loaded from environment variables."""
-    
+
     # MongoDB Configuration
     mongo_uri: str = Field(..., description="MongoDB connection URI" )
     mongo_database_name: str = Field(default="exosphere-state-manager", description="MongoDB database name")
     state_manager_secret: str = Field(..., description="Secret key for API authentication")
     secrets_encryption_key: str = Field(..., description="Key for encrypting secrets")
     trigger_workers: int = Field(default=1, description="Number of workers to run the trigger cron")
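+    # 720 hours = 30 days; default retention window used to set expires_at for TTL cleanup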
+    trigger_retention_hours: int = Field(default=720, description="Number of hours to retain completed/failed triggers before cleanup")

     @classmethod
     def from_env(cls) -> "Settings":
@@ -21,7 +22,8 @@ def from_env(cls) -> "Settings":
             mongo_database_name=os.getenv("MONGO_DATABASE_NAME", "exosphere-state-manager"), # type: ignore
             state_manager_secret=os.getenv("STATE_MANAGER_SECRET"), # type: ignore
             secrets_encryption_key=os.getenv("SECRETS_ENCRYPTION_KEY"), # type: ignore
-            trigger_workers=int(os.getenv("TRIGGER_WORKERS", 1)) # type: ignore
+            trigger_workers=int(os.getenv("TRIGGER_WORKERS", 1)), # type: ignore
+            trigger_retention_hours=int(os.getenv("TRIGGER_RETENTION_HOURS", 720)) # type: ignore
         )
diff --git a/state-manager/app/main.py b/state-manager/app/main.py
index b9e5d6ad..0486a0c4 100644
--- a/state-manager/app/main.py
+++ b/state-manager/app/main.py
@@ -38,6 +38,9 @@
 from apscheduler.schedulers.asyncio import AsyncIOScheduler
 from apscheduler.triggers.cron import CronTrigger
 from .tasks.trigger_cron import trigger_cron
+
+# init tasks
+from .tasks.init_tasks import init_tasks

 # Define models list
 DOCUMENT_MODELS = [State, GraphTemplate, RegisteredNode, Store, Run, DatabaseTriggers]
@@ -59,6 +62,10 @@ async def lifespan(app: FastAPI):
     await init_beanie(db, document_models=DOCUMENT_MODELS)
     logger.info("beanie dbs initialized")

+    # performing init tasks
+    await init_tasks()
+    logger.info("init tasks completed")
+
     # initialize secret
     if not settings.state_manager_secret:
         raise ValueError("STATE_MANAGER_SECRET is not set")
diff --git a/state-manager/app/models/db/trigger.py b/state-manager/app/models/db/trigger.py
index 26fe199f..a416193e 100644
--- a/state-manager/app/models/db/trigger.py
+++ b/state-manager/app/models/db/trigger.py
@@ -13,8 +13,9 @@ class DatabaseTriggers(Document):
     namespace: str = Field(..., description="Namespace of the graph")
     trigger_time: datetime = Field(..., description="Trigger time of the trigger")
     trigger_status: TriggerStatusEnum = Field(..., description="Status of the trigger")
+    expires_at: Optional[datetime] = Field(default=None, description="Expiration time for automatic cleanup of completed triggers")

-    class Settings: 
+    class Settings:
         indexes = [
             IndexModel(
                 [
@@ -32,5 +33,20 @@ class Settings:
                 ],
                 name="uniq_graph_type_expr_time",
                 unique=True
+            ),
+            IndexModel(
+                [
+                    ("expires_at", 1),
+                ],
+                name="ttl_expires_at",
+                expireAfterSeconds=0,  # Delete immediately when expires_at is reached
+                partialFilterExpression={
+                    "trigger_status": {
+                        "$in": [
+                            TriggerStatusEnum.TRIGGERED,
+                            TriggerStatusEnum.FAILED
+                        ]
+                    }
+                }
             )
         ]
diff --git a/state-manager/app/tasks/init_tasks.py b/state-manager/app/tasks/init_tasks.py
new file mode 100644
index 00000000..690bb5a8
--- /dev/null
+++ b/state-manager/app/tasks/init_tasks.py
@@ -0,0 +1,20 @@
+# tasks to run when the server starts
+from app.models.db.trigger import DatabaseTriggers
+from app.models.trigger_models import TriggerStatusEnum
+import asyncio
+
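+# One-time cleanup: the TTL index only removes documents whose expires_at holds a date,
+# so TRIGGERED/FAILED rows with no expires_at (e.g. written before this field existed)
+# would otherwise linger; delete them once at startup.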
+async def delete_old_triggers():
+    await DatabaseTriggers.get_pymongo_collection().delete_many(
+        {
+            "trigger_status": {
+                "$in": [TriggerStatusEnum.TRIGGERED, TriggerStatusEnum.FAILED]
+            },
+            "expires_at": None
+        }
+    )
+
+async def init_tasks():
+    await asyncio.gather(
+        *[
+            delete_old_triggers()
+        ])
\ No newline at end of file
diff --git a/state-manager/app/tasks/trigger_cron.py b/state-manager/app/tasks/trigger_cron.py
index ab0771bb..3fca36ea 100644
--- a/state-manager/app/tasks/trigger_cron.py
+++ b/state-manager/app/tasks/trigger_cron.py
@@ -1,4 +1,4 @@
-from datetime import datetime
+from datetime import datetime, timedelta, timezone
 from uuid import uuid4
 from app.models.db.trigger import DatabaseTriggers
 from app.models.trigger_models import TriggerStatusEnum, TriggerTypeEnum
@@ -34,18 +34,24 @@ async def call_trigger_graph(trigger: DatabaseTriggers):
         x_exosphere_request_id=str(uuid4())
     )

-async def mark_as_failed(trigger: DatabaseTriggers):
+async def mark_as_failed(trigger: DatabaseTriggers, retention_hours: int):
+    expires_at = datetime.now(timezone.utc) + timedelta(hours=retention_hours)
+
     await DatabaseTriggers.get_pymongo_collection().update_one(
         {"_id": trigger.id},
-        {"$set": {"trigger_status": TriggerStatusEnum.FAILED}}
+        {"$set": {
+            "trigger_status": TriggerStatusEnum.FAILED,
+            "expires_at": expires_at
+        }}
     )

-async def create_next_triggers(trigger: DatabaseTriggers, cron_time: datetime):
+async def create_next_triggers(trigger: DatabaseTriggers, cron_time: datetime, retention_hours: int):
     assert trigger.expression is not None
     iter = croniter.croniter(trigger.expression, trigger.trigger_time)

     while True:
         next_trigger_time = iter.get_next(datetime)
+        expires_at = next_trigger_time + timedelta(hours=retention_hours)

         try:
             await DatabaseTriggers(
@@ -54,7 +60,8 @@
                 graph_name=trigger.graph_name,
                 namespace=trigger.namespace,
                 trigger_time=next_trigger_time,
-                trigger_status=TriggerStatusEnum.PENDING
+                trigger_status=TriggerStatusEnum.PENDING,
+                expires_at=expires_at
             ).insert()
         except DuplicateKeyError:
             logger.error(f"Duplicate trigger found for expression {trigger.expression}")
@@ -65,24 +72,30 @@
         if next_trigger_time > cron_time:
             break

-async def mark_as_triggered(trigger: DatabaseTriggers):
+async def mark_as_triggered(trigger: DatabaseTriggers, retention_hours: int):
+    expires_at = datetime.now(timezone.utc) + timedelta(hours=retention_hours)
+
     await DatabaseTriggers.get_pymongo_collection().update_one(
         {"_id": trigger.id},
-        {"$set": {"trigger_status": TriggerStatusEnum.TRIGGERED}}
+        {"$set": {
+            "trigger_status": TriggerStatusEnum.TRIGGERED,
+            "expires_at": expires_at
+        }}
     )

-async def handle_trigger(cron_time: datetime):
+async def handle_trigger(cron_time: datetime, retention_hours: int):
     while(trigger:= await get_due_triggers(cron_time)):
         try:
             await call_trigger_graph(trigger)
-            await mark_as_triggered(trigger)
+            await mark_as_triggered(trigger, retention_hours)
         except Exception as e:
-            await mark_as_failed(trigger)
+            await mark_as_failed(trigger, retention_hours)
             logger.error(f"Error calling trigger graph: {e}")
         finally:
-            await create_next_triggers(trigger, cron_time)
+            await create_next_triggers(trigger, cron_time, retention_hours)

 async def trigger_cron():
     cron_time = datetime.now()
+    settings = get_settings()
     logger.info(f"starting trigger_cron: {cron_time}")
-    await asyncio.gather(*[handle_trigger(cron_time) for _ in range(get_settings().trigger_workers)])
\ No newline at end of file
+    await asyncio.gather(*[handle_trigger(cron_time, settings.trigger_retention_hours) for _ in range(settings.trigger_workers)])
\ No newline at end of file
diff --git a/state-manager/app/tasks/verify_graph.py b/state-manager/app/tasks/verify_graph.py
index 7fbb021d..3ac7ee36 100644
--- a/state-manager/app/tasks/verify_graph.py
+++ b/state-manager/app/tasks/verify_graph.py
@@ -10,9 +10,13 @@
 from app.singletons.logs_manager import LogsManager
 from app.models.trigger_models import TriggerStatusEnum, TriggerTypeEnum
 from app.models.db.trigger import DatabaseTriggers
+from app.config.settings import get_settings
+from datetime import timedelta

 logger = LogsManager().get_logger()

+settings = get_settings()
+
 async def verify_node_exists(graph_template: GraphTemplate, registered_nodes: list[RegisteredNode]) -> list[str]:
     errors = []
     template_nodes_set = set([(node.node_name, node.namespace) for node in graph_template.nodes])
@@ -110,6 +114,7 @@ async def create_crons(graph_template: GraphTemplate):
         iter = croniter.croniter(expression, current_time)
         next_trigger_time = iter.get_next(datetime)
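+        # Pre-set expires_at from the scheduled time; trigger_cron overwrites it again
+        # once the trigger is marked TRIGGERED or FAILED.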
+        expires_at = next_trigger_time + timedelta(hours=settings.trigger_retention_hours)

         new_db_triggers.append(
@@ -118,7 +123,8 @@
                 graph_name=graph_template.name,
                 namespace=graph_template.namespace,
                 trigger_status=TriggerStatusEnum.PENDING,
-                trigger_time=next_trigger_time
+                trigger_time=next_trigger_time,
+                expires_at=expires_at
             )
         )
diff --git a/state-manager/tests/unit/tasks/test_trigger_cron.py b/state-manager/tests/unit/tasks/test_trigger_cron.py
new file mode 100644
index 00000000..7179e08a
--- /dev/null
+++ b/state-manager/tests/unit/tasks/test_trigger_cron.py
@@ -0,0 +1,290 @@
+"""
+Tests for trigger TTL (Time To Live) expiration logic.
+Verifies that completed/failed triggers are properly marked for cleanup.
+"""
+import pytest
+from unittest.mock import MagicMock, AsyncMock, patch
+from datetime import datetime, timedelta, timezone
+from pymongo.errors import DuplicateKeyError
+
+from app.tasks.trigger_cron import (
+    mark_as_triggered,
+    mark_as_failed,
+    get_due_triggers,
+    call_trigger_graph,
+    create_next_triggers,
+    handle_trigger,
+    trigger_cron
+)
+from app.models.db.trigger import DatabaseTriggers
+from app.models.trigger_models import TriggerStatusEnum
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("mark_function,expected_status", [
+    (mark_as_triggered, TriggerStatusEnum.TRIGGERED),
+    (mark_as_failed, TriggerStatusEnum.FAILED),
+])
+async def test_mark_trigger_sets_expires_at(mark_function, expected_status):
+    """Test that marking a trigger sets the expires_at field correctly"""
+    # Create a mock trigger
+    trigger = MagicMock(spec=DatabaseTriggers)
+    trigger.id = "test_trigger_id"
+
+    # Mock the database update
+    with patch.object(DatabaseTriggers, 'get_pymongo_collection') as mock_collection:
+        mock_collection.return_value.update_one = AsyncMock()
+
+        # Call the function with retention_hours parameter
+        await mark_function(trigger, retention_hours=24)
+
+        # Verify update_one was called
+        assert mock_collection.return_value.update_one.called
+        call_args = mock_collection.return_value.update_one.call_args
+
+        # Verify the filter (first argument)
+        assert call_args[0][0] == {"_id": trigger.id}
+
+        # Verify the update includes both status and expires_at
+        update_dict = call_args[0][1]["$set"]
+        assert update_dict["trigger_status"] == expected_status
+        assert "expires_at" in update_dict
+
+        # Verify expires_at is approximately 24 hours from now (UTC)
+        expires_at = update_dict["expires_at"]
+        expected_expiry = datetime.now(timezone.utc) + timedelta(hours=24)
+        time_diff = abs((expires_at - expected_expiry).total_seconds())
+        assert time_diff < 2  # Within 2 seconds tolerance
+
+        # Verify expires_at is timezone-aware UTC
+        assert expires_at.tzinfo is not None
+        assert expires_at.tzinfo == timezone.utc
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("mark_function,retention_hours", [
+    (mark_as_triggered, 12),
+    (mark_as_triggered, 24),
+    (mark_as_triggered, 48),
+    (mark_as_failed, 12),
+    (mark_as_failed, 24),
+    (mark_as_failed, 48),
+])
+async def test_mark_trigger_uses_custom_retention_period(mark_function, retention_hours):
+    """Test that custom retention period is respected across all mark functions"""
+    # Create a mock trigger
+    trigger = MagicMock(spec=DatabaseTriggers)
+    trigger.id = "test_trigger_id"
+
+    # Mock the database update
+    with patch.object(DatabaseTriggers, 'get_pymongo_collection') as mock_collection:
+        mock_collection.return_value.update_one = AsyncMock()
+
+        # Call the function with custom retention period
+        await mark_function(trigger, retention_hours=retention_hours)
+
+        # Verify expires_at is approximately retention_hours from now (UTC)
+        call_args = mock_collection.return_value.update_one.call_args
+        update_dict = call_args[0][1]["$set"]
+        expires_at = update_dict["expires_at"]
+        expected_expiry = datetime.now(timezone.utc) + timedelta(hours=retention_hours)
+        time_diff = abs((expires_at - expected_expiry).total_seconds())
+        assert time_diff < 2  # Within 2 seconds tolerance
+
+        # Verify expires_at is timezone-aware UTC
+        assert expires_at.tzinfo is not None
+        assert expires_at.tzinfo == timezone.utc
+
+@pytest.mark.asyncio
+async def test_get_due_triggers_returns_trigger():
+    """Test get_due_triggers returns a PENDING trigger"""
+    cron_time = datetime.now(timezone.utc)
+
+    with patch.object(DatabaseTriggers, 'get_pymongo_collection') as mock_collection:
+        with patch.object(DatabaseTriggers, '__init__', return_value=None):
+            mock_collection.return_value.find_one_and_update = AsyncMock(return_value={"_id": "trigger_id"})
+
+            result = await get_due_triggers(cron_time)
+
+            # Verify the query
+            call_args = mock_collection.return_value.find_one_and_update.call_args
+            assert call_args[0][0] == {
+                "trigger_time": {"$lte": cron_time},
+                "trigger_status": TriggerStatusEnum.PENDING
+            }
+            assert call_args[0][1] == {"$set": {"trigger_status": TriggerStatusEnum.TRIGGERING}}
+
+            # Verify result is not None (data was returned)
+            assert result is not None
+
+
+@pytest.mark.asyncio
+async def test_get_due_triggers_returns_none_when_no_triggers():
+    """Test get_due_triggers returns None when no triggers are due"""
+    cron_time = datetime.now(timezone.utc)
+
+    with patch.object(DatabaseTriggers, 'get_pymongo_collection') as mock_collection:
+        mock_collection.return_value.find_one_and_update = AsyncMock(return_value=None)
+
+        result = await get_due_triggers(cron_time)
+
+        assert result is None
+
+
+@pytest.mark.asyncio
+async def test_call_trigger_graph():
+    """Test call_trigger_graph calls trigger_graph controller"""
+    trigger = MagicMock(spec=DatabaseTriggers)
+    trigger.namespace = "test_ns"
+    trigger.graph_name = "test_graph"
+
+    with patch('app.tasks.trigger_cron.trigger_graph') as mock_trigger_graph:
+        mock_trigger_graph.return_value = AsyncMock()
+
+        await call_trigger_graph(trigger)
+
+        # Verify trigger_graph was called with correct parameters
+        mock_trigger_graph.assert_called_once()
+        call_kwargs = mock_trigger_graph.call_args.kwargs
+        assert call_kwargs['namespace_name'] == "test_ns"
+        assert call_kwargs['graph_name'] == "test_graph"
+        assert 'body' in call_kwargs
+        assert 'x_exosphere_request_id' in call_kwargs
+
+
+@pytest.mark.asyncio
+async def test_create_next_triggers_creates_future_trigger():
+    """Test create_next_triggers creates next trigger in the future"""
+    cron_time = datetime.now(timezone.utc)
+    trigger = MagicMock(spec=DatabaseTriggers)
+    trigger.expression = "0 9 * * *"
+    trigger.trigger_time = cron_time - timedelta(days=1)
+    trigger.graph_name = "test_graph"
+    trigger.namespace = "test_ns"
+
+    with patch('app.tasks.trigger_cron.DatabaseTriggers') as MockDatabaseTriggers:
+        mock_instance = MagicMock()
+        mock_instance.insert = AsyncMock()
+        MockDatabaseTriggers.return_value = mock_instance
+
+        await create_next_triggers(trigger, cron_time, 24)
+
+        # Verify at least one trigger was created
+        assert MockDatabaseTriggers.called
+        assert mock_instance.insert.called
+
+
+@pytest.mark.asyncio
+async def test_create_next_triggers_handles_duplicate_key_error():
+    """Test create_next_triggers handles DuplicateKeyError gracefully"""
+    cron_time = datetime.now(timezone.utc)
+    trigger = MagicMock(spec=DatabaseTriggers)
+    trigger.expression = "0 9 * * *"
+    trigger.trigger_time = cron_time - timedelta(days=1)
+    trigger.graph_name = "test_graph"
+    trigger.namespace = "test_ns"
+
+    with patch('app.tasks.trigger_cron.DatabaseTriggers') as MockDatabaseTriggers:
+        mock_instance = MagicMock()
+        mock_instance.insert = AsyncMock(side_effect=DuplicateKeyError("duplicate"))
+        MockDatabaseTriggers.return_value = mock_instance
+
+        # Should not raise exception
+        await create_next_triggers(trigger, cron_time, 24)
+
+
+@pytest.mark.asyncio
+async def test_create_next_triggers_raises_on_other_exceptions():
+    """Test create_next_triggers raises on non-DuplicateKeyError exceptions"""
+    cron_time = datetime.now(timezone.utc)
+    trigger = MagicMock(spec=DatabaseTriggers)
+    trigger.expression = "0 9 * * *"
+    trigger.trigger_time = cron_time - timedelta(days=1)
+    trigger.graph_name = "test_graph"
+    trigger.namespace = "test_ns"
+
+    with patch('app.tasks.trigger_cron.DatabaseTriggers') as MockDatabaseTriggers:
+        mock_instance = MagicMock()
+        mock_instance.insert = AsyncMock(side_effect=ValueError("test error"))
+        MockDatabaseTriggers.return_value = mock_instance
+
+        with pytest.raises(ValueError, match="test error"):
+            await create_next_triggers(trigger, cron_time, 24)
+
+
+@pytest.mark.asyncio
+async def test_handle_trigger_success_path():
+    """Test handle_trigger processes trigger successfully"""
+    cron_time = datetime.now(timezone.utc)
+    trigger = MagicMock(spec=DatabaseTriggers)
+    trigger.id = "trigger_id"
+    trigger.expression = "0 9 * * *"
+    trigger.trigger_time = cron_time - timedelta(days=1)
+    trigger.graph_name = "test_graph"
+    trigger.namespace = "test_ns"
+
+    with patch('app.tasks.trigger_cron.get_due_triggers') as mock_get_due:
+        with patch('app.tasks.trigger_cron.call_trigger_graph') as mock_call:
+            with patch('app.tasks.trigger_cron.mark_as_triggered') as mock_mark_triggered:
+                with patch('app.tasks.trigger_cron.create_next_triggers') as mock_create_next:
+                    # Return trigger once, then None to stop loop
+                    mock_get_due.side_effect = [trigger, None]
+                    mock_call.return_value = AsyncMock()
+                    mock_mark_triggered.return_value = AsyncMock()
+                    mock_create_next.return_value = AsyncMock()
+
+                    await handle_trigger(cron_time, retention_hours=24)
+
+                    # Verify all functions were called
+                    assert mock_call.called
+                    assert mock_mark_triggered.called
+                    assert mock_create_next.called
+
+
+@pytest.mark.asyncio
+async def test_handle_trigger_failure_path():
+    """Test handle_trigger marks trigger as failed on exception"""
+    cron_time = datetime.now(timezone.utc)
+    trigger = MagicMock(spec=DatabaseTriggers)
+    trigger.id = "trigger_id"
+    trigger.expression = "0 9 * * *"
+    trigger.trigger_time = cron_time - timedelta(days=1)
+    trigger.graph_name = "test_graph"
+    trigger.namespace = "test_ns"
+
+    with patch('app.tasks.trigger_cron.get_due_triggers') as mock_get_due:
+        with patch('app.tasks.trigger_cron.call_trigger_graph') as mock_call:
+            with patch('app.tasks.trigger_cron.mark_as_failed') as mock_mark_failed:
+                with patch('app.tasks.trigger_cron.create_next_triggers') as mock_create_next:
+                    # Return trigger once, then None
+                    mock_get_due.side_effect = [trigger, None]
+                    mock_call.side_effect = Exception("Trigger failed")
+                    mock_mark_failed.return_value = AsyncMock()
+                    mock_create_next.return_value = AsyncMock()
+
+                    await handle_trigger(cron_time, retention_hours=24)
+
+                    # Verify mark_as_failed was called
+                    mock_mark_failed.assert_called_once_with(trigger, 24)
+                    # Verify create_next_triggers was still called (finally block)
+                    assert mock_create_next.called
+
+
+@pytest.mark.asyncio
+async def test_trigger_cron():
+    """Test trigger_cron orchestrates handle_trigger with settings"""
+    with patch('app.tasks.trigger_cron.get_settings') as mock_get_settings:
+        with patch('app.tasks.trigger_cron.handle_trigger') as mock_handle:
+            mock_settings = MagicMock()
+            mock_settings.trigger_retention_hours = 24
+            mock_settings.trigger_workers = 2
+            mock_get_settings.return_value = mock_settings
+            mock_handle.return_value = AsyncMock()
+
+            await trigger_cron()
+
+            # Verify handle_trigger was called correct number of times
+            assert mock_handle.call_count == 2
+            # Verify retention_hours parameter was passed
+            for call in mock_handle.call_args_list:
+                assert call[0][1] == 24  # retention_hours
diff --git a/state-manager/tests/unit/test_main.py b/state-manager/tests/unit/test_main.py
index ba6b38e4..c5b5ae95 100644
--- a/state-manager/tests/unit/test_main.py
+++ b/state-manager/tests/unit/test_main.py
@@ -116,7 +116,8 @@ class TestLifespan:
     @patch('app.main.init_beanie', new_callable=AsyncMock)
     @patch('app.main.AsyncMongoClient')
     @patch('app.main.check_database_health', new_callable=AsyncMock)
-    async def test_lifespan_startup_success(self, mock_health_check, mock_mongo_client, mock_init_beanie, mock_logs_manager):
+    @patch('app.main.init_tasks', new_callable=AsyncMock)
+    async def test_lifespan_startup_success(self, mock_init_tasks, mock_health_check, mock_mongo_client, mock_init_beanie, mock_logs_manager):
         """Test successful lifespan startup"""
         # Setup mocks
         mock_logger = MagicMock()
         mock_logs_manager.return_value.get_logger.return_value = mock_logger
@@ -154,7 +155,8 @@ async def test_lifespan_startup_success(self, mock_health_check, mock_mongo_clie
     @patch('app.main.init_beanie', new_callable=AsyncMock)
     @patch('app.main.AsyncMongoClient')
     @patch('app.main.LogsManager')
-    async def test_lifespan_empty_secret_raises_error(self, mock_logs_manager, mock_mongo_client, mock_init_beanie):
+    @patch('app.main.init_tasks', new_callable=AsyncMock)
+    async def test_lifespan_empty_secret_raises_error(self, mock_init_tasks, mock_logs_manager, mock_mongo_client, mock_init_beanie):
         """Test that empty STATE_MANAGER_SECRET raises ValueError"""
         mock_logger = MagicMock()
         mock_logs_manager.return_value.get_logger.return_value = mock_logger
@@ -181,7 +183,8 @@ async def test_lifespan_empty_secret_raises_error(self, mock_logs_manager, mock_
     @patch('app.main.check_database_health', new_callable=AsyncMock)
     @patch('app.main.LogsManager')
     @patch('app.main.scheduler')
-    async def test_lifespan_init_beanie_with_correct_models(self, mock_scheduler, mock_logs_manager, mock_health_check, mock_mongo_client, mock_init_beanie):
+    @patch('app.main.init_tasks', new_callable=AsyncMock)
+    async def test_lifespan_init_beanie_with_correct_models(self, mock_init_tasks, mock_scheduler, mock_logs_manager, mock_health_check, mock_mongo_client, mock_init_beanie):
         """Test that init_beanie is called with correct document models"""
         mock_logger = MagicMock()
         mock_logs_manager.return_value.get_logger.return_value = mock_logger