diff --git a/spp_api_v2_simulation/__init__.py b/spp_api_v2_simulation/__init__.py new file mode 100644 index 00000000..e997213a --- /dev/null +++ b/spp_api_v2_simulation/__init__.py @@ -0,0 +1,5 @@ +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. +from . import models +from . import routers +from . import schemas +from . import services diff --git a/spp_api_v2_simulation/__manifest__.py b/spp_api_v2_simulation/__manifest__.py new file mode 100644 index 00000000..d8d7f10e --- /dev/null +++ b/spp_api_v2_simulation/__manifest__.py @@ -0,0 +1,29 @@ +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. +{ + "name": "OpenSPP Simulation API", + "category": "OpenSPP/Integration", + "version": "19.0.2.0.0", + "sequence": 1, + "author": "OpenSPP.org", + "website": "https://github.com/OpenSPP/OpenSPP2", + "license": "LGPL-3", + "development_status": "Production/Stable", + "maintainers": ["jeremi", "gonzalesedwin1123", "reichie020212"], + "depends": [ + "spp_api_v2", + "spp_simulation", + "spp_aggregation", + ], + "data": [ + "security/ir.model.access.csv", + ], + "assets": {}, + "demo": [], + "images": [], + "application": False, + "installable": True, + "auto_install": False, + "summary": """ + REST API for simulation scenario management. + """, +} diff --git a/spp_api_v2_simulation/models/__init__.py b/spp_api_v2_simulation/models/__init__.py new file mode 100644 index 00000000..bbf93e58 --- /dev/null +++ b/spp_api_v2_simulation/models/__init__.py @@ -0,0 +1,3 @@ +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. +from . import api_client_scope +from . import fastapi_endpoint diff --git a/spp_api_v2_simulation/models/api_client_scope.py b/spp_api_v2_simulation/models/api_client_scope.py new file mode 100644 index 00000000..e42b0a0d --- /dev/null +++ b/spp_api_v2_simulation/models/api_client_scope.py @@ -0,0 +1,29 @@ +# Part of OpenSPP. 
# Part of OpenSPP. See LICENSE file for full copyright and licensing details.
"""Extends API client scope to support simulation and aggregation resources."""

from odoo import fields, models


class ApiClientScope(models.Model):
    """Extend API client scope to include simulation and aggregation resources.

    Adds the ``simulation`` and ``aggregation`` resources plus the
    ``execute`` and ``convert`` actions so API clients can be granted
    scopes such as ``simulation:execute`` or ``aggregation:read``.
    """

    _inherit = "spp.api.client.scope"

    # New resource choices; "cascade" removes scope rows that use these
    # values if this module is uninstalled.
    resource = fields.Selection(
        selection_add=[
            ("simulation", "Simulation"),
            ("aggregation", "Aggregation"),
        ],
        ondelete={
            "simulation": "cascade",
            "aggregation": "cascade",
        },
    )

    # New action choices used by the simulation endpoints ("execute" runs
    # a scenario, "convert" turns a scenario into a program).
    action = fields.Selection(
        selection_add=[
            ("execute", "Execute"),
            ("convert", "Convert"),
        ],
        ondelete={"execute": "cascade", "convert": "cascade"},
    )
+"""Extend FastAPI endpoint to include simulation and aggregation routers.""" + +import logging + +from odoo import models + +from fastapi import APIRouter + +_logger = logging.getLogger(__name__) + + +class SppApiV2SimulationEndpoint(models.Model): + """Extend FastAPI endpoint for Simulation and Aggregation API.""" + + _inherit = "fastapi.endpoint" + + def _get_fastapi_routers(self) -> list[APIRouter]: + """Add simulation and aggregation routers to API V2.""" + routers = super()._get_fastapi_routers() + if self.app == "api_v2": + from ..routers.aggregation import aggregation_router + from ..routers.comparison import comparison_router + from ..routers.run import run_router + from ..routers.scenario import scenario_router + from ..routers.simulation import simulation_router + + routers.extend( + [ + scenario_router, + run_router, + comparison_router, + simulation_router, + aggregation_router, + ] + ) + return routers diff --git a/spp_api_v2_simulation/pyproject.toml b/spp_api_v2_simulation/pyproject.toml new file mode 100644 index 00000000..4231d0cc --- /dev/null +++ b/spp_api_v2_simulation/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["whool"] +build-backend = "whool.buildapi" diff --git a/spp_api_v2_simulation/routers/__init__.py b/spp_api_v2_simulation/routers/__init__.py new file mode 100644 index 00000000..0655b6ab --- /dev/null +++ b/spp_api_v2_simulation/routers/__init__.py @@ -0,0 +1,16 @@ +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. +"""FastAPI routers for simulation and aggregation API.""" + +from . import aggregation +from . 
# Part of OpenSPP. See LICENSE file for full copyright and licensing details.
"""Aggregation API endpoints for population analytics."""

import logging
from typing import Annotated

from odoo.api import Environment

from odoo.addons.fastapi.dependencies import odoo_env
from odoo.addons.spp_api_v2.middleware.auth import get_authenticated_client

from fastapi import APIRouter, Depends, HTTPException, Query, status

from ..schemas.aggregation import (
    AggregationResponse,
    ComputeAggregationRequest,
    DimensionsListResponse,
)
from ..services.aggregation_api_service import AggregationApiService

_logger = logging.getLogger(__name__)

aggregation_router = APIRouter(tags=["Aggregation"], prefix="/aggregation")


@aggregation_router.post(
    "/compute",
    response_model=AggregationResponse,
    summary="Compute population aggregation",
    description="Compute population counts and statistics with optional demographic breakdowns.",
)
async def compute_aggregation(
    request: ComputeAggregationRequest,
    env: Annotated[Environment, Depends(odoo_env)],
    api_client: Annotated[dict, Depends(get_authenticated_client)],
):
    """Compute aggregation for a scope with optional breakdown.

    Requires:
        aggregation:read scope

    Response:
        AggregationResponse with total_count, statistics, and optional breakdown
    """
    # Authorization check: the authenticated client must hold the
    # aggregation:read scope.
    if not api_client.has_scope("aggregation", "read"):
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Client does not have aggregation:read scope",
        )

    try:
        # Delegate computation to the service layer; the request scope is
        # serialized to a plain dict for the service.
        service = AggregationApiService(env)
        result = service.compute_aggregation(
            scope_dict=request.scope.model_dump(),
            statistics=request.statistics,
            group_by=request.group_by,
        )
        return result

    except ValueError as e:
        # Invalid client input (e.g. bad scope/statistics) -> 400.
        _logger.warning("Invalid aggregation request: %s", str(e))
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=str(e),
        ) from e
    except Exception as e:
        # Anything else is an internal failure; details stay in the log.
        _logger.error("Aggregation computation failed: %s", str(e), exc_info=True)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to compute aggregation",
        ) from e


@aggregation_router.get(
    "/dimensions",
    response_model=DimensionsListResponse,
    summary="List available dimensions",
    description="Returns active demographic dimensions available for group_by.",
)
async def list_dimensions(
    env: Annotated[Environment, Depends(odoo_env)],
    api_client: Annotated[dict, Depends(get_authenticated_client)],
    applies_to: Annotated[
        str | None,
        Query(description="Filter: 'individuals', 'groups', or None for all"),
    ] = None,
):
    """List active demographic dimensions.

    Requires:
        aggregation:read scope

    Response:
        DimensionsListResponse with available dimensions
    """
    # Same scope requirement as /compute: aggregation:read.
    if not api_client.has_scope("aggregation", "read"):
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Client does not have aggregation:read scope",
        )

    try:
        service = AggregationApiService(env)
        dimensions = service.list_dimensions(applies_to=applies_to)
        return {"dimensions": dimensions}

    except Exception as e:
        _logger.error("Failed to list dimensions: %s", str(e), exc_info=True)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to list dimensions",
        ) from e
# Part of OpenSPP. See LICENSE file for full copyright and licensing details.
"""Simulation comparison API endpoints."""

import logging
from typing import Annotated

from odoo import Command
from odoo.api import Environment

from odoo.addons.fastapi.dependencies import odoo_env
from odoo.addons.spp_api_v2.middleware.auth import get_authenticated_client

from fastapi import APIRouter, Depends, HTTPException, status

from ..schemas.comparison import (
    ComparisonCreateRequest,
    ComparisonResponse,
    ComparisonRunData,
    OverlapData,
)

_logger = logging.getLogger(__name__)

comparison_router = APIRouter(tags=["Simulation"], prefix="/simulation")


def _optional_str(value):
    """Convert Odoo False to None for optional string fields."""
    return value if value else None


def _comparison_to_response(comparison) -> ComparisonResponse:
    """Convert an Odoo comparison record to a Pydantic response.

    Reads the pre-computed ``comparison_json`` and ``overlap_count_json``
    blobs on the record; missing keys fall back to neutral defaults so a
    partially computed comparison still serializes.
    """
    # Extract per-run metrics from comparison_json.
    runs_data = []
    if comparison.comparison_json:
        for run_data in comparison.comparison_json.get("runs", []):
            runs_data.append(
                ComparisonRunData(
                    run_id=run_data.get("run_id"),
                    scenario_name=run_data.get("scenario_name"),
                    beneficiary_count=run_data.get("beneficiary_count", 0),
                    total_cost=run_data.get("total_cost", 0.0),
                    coverage_rate=run_data.get("coverage_rate", 0.0),
                    equity_score=run_data.get("equity_score", 0.0),
                    gini_coefficient=run_data.get("gini_coefficient", 0.0),
                    has_disparity=run_data.get("has_disparity", False),
                    leakage_rate=run_data.get("leakage_rate", 0.0),
                    undercoverage_rate=run_data.get("undercoverage_rate", 0.0),
                    budget_utilization=run_data.get("budget_utilization", 0.0),
                    executed_at=run_data.get("executed_at") or None,
                )
            )

    # Extract pairwise overlap data; the mapping keys are never used, so
    # iterate the values directly (was ``.items()`` with an unused key).
    overlap_list = []
    if comparison.overlap_count_json:
        for overlap_data in comparison.overlap_count_json.values():
            overlap_list.append(
                OverlapData(
                    run_a_id=overlap_data.get("run_a_id"),
                    run_a_name=overlap_data.get("run_a_name"),
                    run_b_id=overlap_data.get("run_b_id"),
                    run_b_name=overlap_data.get("run_b_name"),
                    overlap_count=overlap_data.get("overlap_count", 0),
                    union_count=overlap_data.get("union_count", 0),
                    jaccard_index=overlap_data.get("jaccard_index", 0.0),
                )
            )

    return ComparisonResponse(
        id=comparison.id,
        name=comparison.name,
        runs=runs_data,
        overlap_data=overlap_list,
        staleness_warning=_optional_str(comparison.staleness_warning),
    )


@comparison_router.post(
    "/comparisons",
    summary="Create a comparison",
    description="Create a side-by-side comparison of multiple simulation runs",
    response_model=ComparisonResponse,
    status_code=status.HTTP_201_CREATED,
)
async def create_comparison(
    request: ComparisonCreateRequest,
    env: Annotated[Environment, Depends(odoo_env)],
    api_client: Annotated[dict, Depends(get_authenticated_client)],
) -> ComparisonResponse:
    """Create a comparison between runs.

    Requires simulation:read scope (comparison is a read-style operation).
    Raises 400 if fewer than 2 runs are given, 404 if any run is missing.
    """
    # Check simulation:read scope (comparison is a read operation)
    if not api_client.has_scope("simulation", "read"):
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Client does not have simulation:read scope",
        )

    try:
        # nosemgrep: odoo-sudo-without-context
        Comparison = env["spp.simulation.comparison"].sudo()
        # nosemgrep: odoo-sudo-without-context
        Run = env["spp.simulation.run"].sudo()

        # Validate runs exist
        runs = Run.browse(request.run_ids)
        if len(runs) < 2:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail="At least 2 runs are required for comparison",
            )

        # Check that all runs exist
        if not all(run.exists() for run in runs):
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="One or more runs not found",
            )

        # Create comparison
        comparison = Comparison.create(
            {
                "name": request.name,
                "run_ids": [Command.set(request.run_ids)],
            }
        )

        # Compute comparison data synchronously before returning it.
        comparison.action_compute_comparison()

        return _comparison_to_response(comparison)

    except HTTPException:
        raise
    except Exception:
        _logger.exception("Failed to create comparison")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to create comparison",
        ) from None


@comparison_router.get(
    "/comparisons/{comparison_id}",
    summary="Get a comparison",
    description="Returns detailed comparison data",
    response_model=ComparisonResponse,
)
async def get_comparison(
    comparison_id: int,
    env: Annotated[Environment, Depends(odoo_env)],
    api_client: Annotated[dict, Depends(get_authenticated_client)],
) -> ComparisonResponse:
    """Get comparison details; 404 if the comparison does not exist."""
    # Check simulation:read scope
    if not api_client.has_scope("simulation", "read"):
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Client does not have simulation:read scope",
        )

    try:
        # nosemgrep: odoo-sudo-without-context
        Comparison = env["spp.simulation.comparison"].sudo()
        comparison = Comparison.browse(comparison_id)

        if not comparison.exists():
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f"Comparison {comparison_id} not found",
            )

        return _comparison_to_response(comparison)

    except HTTPException:
        raise
    except Exception:
        _logger.exception("Failed to get comparison")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to get comparison",
        ) from None
# Part of OpenSPP. See LICENSE file for full copyright and licensing details.
"""Simulation run API endpoints."""

import logging
from typing import Annotated

from odoo.api import Environment

from odoo.addons.fastapi.dependencies import odoo_env
from odoo.addons.spp_api_v2.middleware.auth import get_authenticated_client

from fastapi import APIRouter, Depends, HTTPException, Query, status

from ..schemas.run import (
    DistributionData,
    FairnessData,
    GeographicData,
    MetricResult,
    RunListResponse,
    RunResponse,
    RunSummary,
    ScenarioSnapshot,
    TargetingEfficiencyData,
)

_logger = logging.getLogger(__name__)

run_router = APIRouter(tags=["Simulation"], prefix="/simulation")


def _optional_str(value):
    """Convert Odoo False to None for optional string fields."""
    return value if value else None


def _datetime_to_iso(value):
    """Convert datetime to ISO 8601 string, or None if empty."""
    if not value:
        return None
    # Handle datetime objects
    if hasattr(value, "isoformat"):
        return value.isoformat()
    # Already a string
    return str(value)


def _run_to_summary(run) -> RunSummary:
    """Convert Odoo run record to Pydantic summary (list-view fields only)."""
    return RunSummary(
        id=run.id,
        scenario_id=run.scenario_id.id,
        scenario_name=run.scenario_id.name,
        state=run.state,
        beneficiary_count=run.beneficiary_count,
        total_cost=run.total_cost,
        coverage_rate=run.coverage_rate,
        equity_score=run.equity_score,
        gini_coefficient=run.gini_coefficient,
        executed_at=_datetime_to_iso(run.executed_at),
        # Falsy duration (0 / False) is serialized as None.
        execution_duration_seconds=run.execution_duration_seconds if run.execution_duration_seconds else None,
    )


def _run_to_response(run, include_details: bool = False) -> RunResponse:
    """Convert Odoo run record to Pydantic response.

    With ``include_details`` and a completed run, the JSON blobs stored on
    the record (snapshot, distribution, fairness, targeting efficiency,
    geographic, metric results) are unpacked into structured sub-models.
    Missing keys fall back to neutral defaults.
    """
    response = RunResponse(
        id=run.id,
        scenario_id=run.scenario_id.id,
        scenario_name=run.scenario_id.name,
        state=run.state,
        beneficiary_count=run.beneficiary_count,
        total_cost=run.total_cost,
        coverage_rate=run.coverage_rate,
        equity_score=run.equity_score,
        gini_coefficient=run.gini_coefficient,
        total_registry_count=run.total_registry_count,
        budget_utilization=run.budget_utilization,
        has_disparity=run.has_disparity,
        leakage_rate=run.leakage_rate,
        undercoverage_rate=run.undercoverage_rate,
        executed_at=_datetime_to_iso(run.executed_at),
        execution_duration_seconds=run.execution_duration_seconds if run.execution_duration_seconds else None,
        error_message=_optional_str(run.error_message),
    )

    # Details are only meaningful once the run has completed.
    if include_details and run.state == "completed":
        # Add scenario snapshot
        if run.scenario_snapshot_json:
            snapshot_data = run.scenario_snapshot_json
            response.scenario_snapshot = ScenarioSnapshot(
                name=snapshot_data.get("name", ""),
                target_type=snapshot_data.get("target_type", ""),
                targeting_expression=snapshot_data.get("targeting_expression", ""),
                budget_amount=snapshot_data.get("budget_amount", 0.0),
                budget_strategy=snapshot_data.get("budget_strategy", ""),
                ideal_population_expression=snapshot_data.get("ideal_population_expression") or None,
                entitlement_rules=snapshot_data.get("entitlement_rules", []),
            )

        # Add distribution data
        if run.distribution_json:
            dist_data = run.distribution_json
            response.distribution_data = DistributionData(
                count=dist_data.get("count", 0),
                total=dist_data.get("total", 0.0),
                minimum=dist_data.get("minimum", 0.0),
                maximum=dist_data.get("maximum", 0.0),
                mean=dist_data.get("mean", 0.0),
                median=dist_data.get("median", 0.0),
                standard_deviation=dist_data.get("standard_deviation", 0.0),
                gini_coefficient=dist_data.get("gini_coefficient", 0.0),
                percentiles=dist_data.get("percentiles", {}),
            )

        # Add fairness data
        if run.fairness_json:
            fairness_data = run.fairness_json
            response.fairness_data = FairnessData(
                equity_score=fairness_data.get("equity_score", 100.0),
                has_disparity=fairness_data.get("has_disparity", False),
                demographic_breakdown=fairness_data.get("demographic_breakdown", {}),
            )

        # Add targeting efficiency data; a blob containing an "error" key
        # is treated as unavailable and skipped.
        if run.targeting_efficiency_json and "error" not in run.targeting_efficiency_json:
            eff_data = run.targeting_efficiency_json
            response.targeting_efficiency_data = TargetingEfficiencyData(
                true_positives=eff_data.get("true_positives", 0),
                false_positives=eff_data.get("false_positives", 0),
                false_negatives=eff_data.get("false_negatives", 0),
                total_simulated=eff_data.get("total_simulated", 0),
                total_ideal=eff_data.get("total_ideal", 0),
                leakage_rate=eff_data.get("leakage_rate", 0.0),
                undercoverage_rate=eff_data.get("undercoverage_rate", 0.0),
            )

        # Add geographic data
        if run.geographic_json:
            response.geographic_data = GeographicData(areas=run.geographic_json)

        # Add metric results
        if run.metric_results_json:
            metrics = {}
            for metric_name, metric_data in run.metric_results_json.items():
                metrics[metric_name] = MetricResult(
                    type=metric_data.get("type", "unknown"),
                    value=metric_data.get("value", 0),
                )
            response.metric_results = metrics

    return response


@run_router.get(
    "/runs",
    summary="List simulation runs",
    description="Returns all simulation runs with optional filters",
    response_model=RunListResponse,
)
async def list_runs(
    env: Annotated[Environment, Depends(odoo_env)],
    api_client: Annotated[dict, Depends(get_authenticated_client)],
    scenario_id: int | None = Query(default=None, description="Filter by scenario ID"),
    state: str | None = Query(default=None, description="Filter by state (running, completed, failed)"),
    limit: int = Query(default=100, le=500, description="Maximum number of results"),
    offset: int = Query(default=0, ge=0, description="Number of results to skip"),
) -> RunListResponse:
    """List all runs with optional filters.

    Requires simulation:read scope. Results are paginated via
    limit/offset and ordered by most recent execution first.
    """
    # Check simulation:read scope
    if not api_client.has_scope("simulation", "read"):
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Client does not have simulation:read scope",
        )

    try:
        Run = env["spp.simulation.run"].sudo()  # nosemgrep: odoo-sudo-without-context

        # Build domain
        domain = []
        if scenario_id:
            domain.append(("scenario_id", "=", scenario_id))
        if state:
            domain.append(("state", "=", state))

        # Query runs
        total_count = Run.search_count(domain)
        runs = Run.search(domain, limit=limit, offset=offset, order="executed_at desc")

        # Convert to responses
        run_summaries = [_run_to_summary(run) for run in runs]

        return RunListResponse(
            runs=run_summaries,
            total_count=total_count,
        )

    except Exception:
        _logger.exception("Failed to list runs")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to list runs",
        ) from None


@run_router.get(
    "/runs/{run_id}",
    summary="Get a simulation run",
    description="Returns detailed information about a specific run",
    response_model=RunResponse,
)
async def get_run(
    run_id: int,
    env: Annotated[Environment, Depends(odoo_env)],
    api_client: Annotated[dict, Depends(get_authenticated_client)],
    include_details: bool = Query(default=False, description="Include detailed distribution, fairness, etc."),
) -> RunResponse:
    """Get run details; 404 if the run does not exist."""
    # Check simulation:read scope
    if not api_client.has_scope("simulation", "read"):
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Client does not have simulation:read scope",
        )

    try:
        Run = env["spp.simulation.run"].sudo()  # nosemgrep: odoo-sudo-without-context
        run = Run.browse(run_id)

        if not run.exists():
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f"Run {run_id} not found",
            )

        return _run_to_response(run, include_details=include_details)

    except HTTPException:
        raise
    except Exception:
        _logger.exception("Failed to get run")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to get run",
        ) from None
detail="Client does not have simulation:read scope", + ) + + try: + Run = env["spp.simulation.run"].sudo() # nosemgrep: odoo-sudo-without-context + run = Run.browse(run_id) + + if not run.exists(): + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Run {run_id} not found", + ) + + return _run_to_response(run, include_details=include_details) + + except HTTPException: + raise + except Exception: + _logger.exception("Failed to get run") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Failed to get run", + ) from None diff --git a/spp_api_v2_simulation/routers/scenario.py b/spp_api_v2_simulation/routers/scenario.py new file mode 100644 index 00000000..98611f7c --- /dev/null +++ b/spp_api_v2_simulation/routers/scenario.py @@ -0,0 +1,598 @@ +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. +"""Simulation scenario API endpoints.""" + +import logging +from typing import Annotated + +from odoo.api import Environment + +from odoo.addons.fastapi.dependencies import odoo_env +from odoo.addons.spp_api_v2.middleware.auth import get_authenticated_client + +from fastapi import APIRouter, Depends, HTTPException, Query, status + +from ..schemas.run import RunSimulationResponse +from ..schemas.scenario import ( + ConvertToProgramRequest, + ConvertToProgramResponse, + ScenarioCreateRequest, + ScenarioListResponse, + ScenarioResponse, + ScenarioUpdateRequest, +) + +_logger = logging.getLogger(__name__) + +scenario_router = APIRouter(tags=["Simulation"], prefix="/simulation") + + +def _optional_str(value): + """Convert Odoo False to None for optional string fields.""" + return value if value else None + + +def _scenario_to_response(scenario) -> ScenarioResponse: + """Convert Odoo scenario record to Pydantic response.""" + from ..schemas.scenario import EntitlementRuleResponse + + rules = [] + for rule in scenario.entitlement_rule_ids: + rules.append( + EntitlementRuleResponse( + id=rule.id, + 
amount_mode=rule.amount_mode, + amount=rule.amount, + multiplier_field=_optional_str(rule.multiplier_field), + max_multiplier=rule.max_multiplier if rule.max_multiplier else None, + amount_cel_expression=_optional_str(rule.amount_cel_expression), + condition_cel_expression=_optional_str(rule.condition_cel_expression), + ) + ) + + return ScenarioResponse( + id=scenario.id, + name=scenario.name, + description=_optional_str(scenario.description), + category=_optional_str(scenario.category), + target_type=scenario.target_type, + targeting_expression=_optional_str(scenario.targeting_expression), + targeting_expression_explanation=_optional_str(scenario.targeting_expression_explanation), + budget_amount=scenario.budget_amount, + budget_strategy=scenario.budget_strategy, + ideal_population_expression=_optional_str(scenario.ideal_population_expression), + state=scenario.state, + targeting_preview_count=scenario.targeting_preview_count, + targeting_preview_error=_optional_str(scenario.targeting_preview_error), + run_count=scenario.run_count, + latest_beneficiary_count=scenario.latest_beneficiary_count, + latest_equity_score=scenario.latest_equity_score, + entitlement_rules=rules, + program_id=scenario.program_id.id if scenario.program_id else None, + ) + + +@scenario_router.get( + "/scenarios", + summary="List simulation scenarios", + description="Returns all simulation scenarios with optional filters", + response_model=ScenarioListResponse, +) +async def list_scenarios( + env: Annotated[Environment, Depends(odoo_env)], + api_client: Annotated[dict, Depends(get_authenticated_client)], + state: str | None = Query(default=None, description="Filter by state (draft, ready, archived)"), + category: str | None = Query(default=None, description="Filter by category"), + limit: int = Query(default=100, le=500, description="Maximum number of results"), + offset: int = Query(default=0, ge=0, description="Number of results to skip"), +) -> ScenarioListResponse: + """List all scenarios 
with optional filters.""" + # Check simulation:read scope + if not api_client.has_scope("simulation", "read"): + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Client does not have simulation:read scope", + ) + + try: + Scenario = env["spp.simulation.scenario"].sudo() # nosemgrep: odoo-sudo-without-context + + # Build domain + domain = [] + if state: + domain.append(("state", "=", state)) + if category: + domain.append(("category", "=", category)) + + # Query scenarios + total_count = Scenario.search_count(domain) + scenarios = Scenario.search(domain, limit=limit, offset=offset, order="write_date desc") + + # Convert to responses + scenario_responses = [_scenario_to_response(scenario) for scenario in scenarios] + + return ScenarioListResponse( + scenarios=scenario_responses, + total_count=total_count, + ) + + except Exception: + _logger.exception("Failed to list scenarios") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Failed to list scenarios", + ) from None + + +@scenario_router.post( + "/scenarios", + summary="Create a simulation scenario", + description="Create a new simulation scenario with entitlement rules", + response_model=ScenarioResponse, + status_code=status.HTTP_201_CREATED, +) +async def create_scenario( + request: ScenarioCreateRequest, + env: Annotated[Environment, Depends(odoo_env)], + api_client: Annotated[dict, Depends(get_authenticated_client)], +) -> ScenarioResponse: + """Create a new scenario.""" + # Check simulation:write scope + if not api_client.has_scope("simulation", "write"): + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Client does not have simulation:write scope", + ) + + try: + Scenario = env["spp.simulation.scenario"].sudo() # nosemgrep: odoo-sudo-without-context + + # Build scenario values + vals = { + "name": request.name, + "description": request.description, + "category": request.category, + "target_type": request.target_type, + 
"targeting_expression": request.targeting_expression, + "targeting_expression_explanation": request.targeting_expression_explanation, + "budget_amount": request.budget_amount, + "budget_strategy": request.budget_strategy, + "ideal_population_expression": request.ideal_population_expression, + } + + if request.program_id: + vals["program_id"] = request.program_id + + # Create scenario + scenario = Scenario.create(vals) + + # Create entitlement rules + EntitlementRule = env["spp.simulation.entitlement.rule"].sudo() # nosemgrep: odoo-sudo-without-context + for rule_data in request.entitlement_rules: + EntitlementRule.create( + { + "scenario_id": scenario.id, + "amount_mode": rule_data.amount_mode, + "amount": rule_data.amount, + "multiplier_field": rule_data.multiplier_field, + "max_multiplier": rule_data.max_multiplier, + "amount_cel_expression": rule_data.amount_cel_expression, + "condition_cel_expression": rule_data.condition_cel_expression, + } + ) + + # Refresh to get computed fields + scenario.invalidate_recordset() + + return _scenario_to_response(scenario) + + except Exception: + _logger.exception("Failed to create scenario") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Failed to create scenario", + ) from None + + +@scenario_router.get( + "/scenarios/{scenario_id}", + summary="Get a simulation scenario", + description="Returns detailed information about a specific scenario", + response_model=ScenarioResponse, +) +async def get_scenario( + scenario_id: int, + env: Annotated[Environment, Depends(odoo_env)], + api_client: Annotated[dict, Depends(get_authenticated_client)], +) -> ScenarioResponse: + """Get scenario details.""" + # Check simulation:read scope + if not api_client.has_scope("simulation", "read"): + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Client does not have simulation:read scope", + ) + + try: + Scenario = env["spp.simulation.scenario"].sudo() # nosemgrep: 
odoo-sudo-without-context + scenario = Scenario.browse(scenario_id) + + if not scenario.exists(): + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Scenario {scenario_id} not found", + ) + + return _scenario_to_response(scenario) + + except HTTPException: + raise + except Exception: + _logger.exception("Failed to get scenario") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Failed to get scenario", + ) from None + + +@scenario_router.put( + "/scenarios/{scenario_id}", + summary="Update a simulation scenario", + description="Update an existing scenario (only allowed in draft state)", + response_model=ScenarioResponse, +) +async def update_scenario( + scenario_id: int, + request: ScenarioUpdateRequest, + env: Annotated[Environment, Depends(odoo_env)], + api_client: Annotated[dict, Depends(get_authenticated_client)], +) -> ScenarioResponse: + """Update a scenario.""" + # Check simulation:write scope + if not api_client.has_scope("simulation", "write"): + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Client does not have simulation:write scope", + ) + + try: + Scenario = env["spp.simulation.scenario"].sudo() # nosemgrep: odoo-sudo-without-context + scenario = Scenario.browse(scenario_id) + + if not scenario.exists(): + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Scenario {scenario_id} not found", + ) + + if scenario.state != "draft": + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Only draft scenarios can be updated", + ) + + # Build update values + vals = {} + if request.name is not None: + vals["name"] = request.name + if request.description is not None: + vals["description"] = request.description + if request.category is not None: + vals["category"] = request.category + if request.targeting_expression is not None: + vals["targeting_expression"] = request.targeting_expression + if request.targeting_expression_explanation 
@scenario_router.delete(
    "/scenarios/{scenario_id}",
    summary="Archive a simulation scenario",
    description="Archive a scenario (soft delete)",
    status_code=status.HTTP_204_NO_CONTENT,
)
async def archive_scenario(
    scenario_id: int,
    env: Annotated[Environment, Depends(odoo_env)],
    api_client: Annotated[dict, Depends(get_authenticated_client)],
) -> None:
    """Archive a scenario (soft delete via Odoo's archive action).

    Requires simulation:write scope; 404 if the scenario is missing.
    """
    # Check simulation:write scope
    if not api_client.has_scope("simulation", "write"):
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Client does not have simulation:write scope",
        )

    try:
        Scenario = env["spp.simulation.scenario"].sudo()  # nosemgrep: odoo-sudo-without-context
        scenario = Scenario.browse(scenario_id)

        if not scenario.exists():
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f"Scenario {scenario_id} not found",
            )

        scenario.action_archive()

    except HTTPException:
        raise
    except Exception:
        _logger.exception("Failed to archive scenario")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to archive scenario",
        ) from None


@scenario_router.post(
    "/scenarios/{scenario_id}/ready",
    summary="Mark scenario as ready",
    description="Transition scenario from draft to ready state",
    response_model=ScenarioResponse,
)
async def mark_scenario_ready(
    scenario_id: int,
    env: Annotated[Environment, Depends(odoo_env)],
    api_client: Annotated[dict, Depends(get_authenticated_client)],
) -> ScenarioResponse:
    """Mark scenario as ready.

    Requires simulation:write scope. A failure inside the state
    transition itself is reported as 400 (client-fixable), while any
    other failure is a 500.
    """
    # Check simulation:write scope
    if not api_client.has_scope("simulation", "write"):
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Client does not have simulation:write scope",
        )

    try:
        Scenario = env["spp.simulation.scenario"].sudo()  # nosemgrep: odoo-sudo-without-context
        scenario = Scenario.browse(scenario_id)

        if not scenario.exists():
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f"Scenario {scenario_id} not found",
            )

        # Inner try: a failed transition (e.g. validation error) maps to
        # 400 rather than the generic 500 of the outer handler.
        try:
            scenario.action_set_ready()
        except Exception:
            _logger.exception("Failed to mark scenario as ready")
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail="Failed to mark scenario as ready",
            ) from None

        # Refresh to get updated state
        scenario.invalidate_recordset()

        return _scenario_to_response(scenario)

    except HTTPException:
        raise
    except Exception:
        _logger.exception("Failed to mark scenario as ready")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to mark scenario as ready",
        ) from None


@scenario_router.post(
    "/scenarios/{scenario_id}/run",
    summary="Run a simulation",
    description="Execute the simulation for this scenario",
    response_model=RunSimulationResponse,
)
async def run_simulation(
    scenario_id: int,
    env: Annotated[Environment, Depends(odoo_env)],
    api_client: Annotated[dict, Depends(get_authenticated_client)],
) -> RunSimulationResponse:
    """Run a simulation.

    Requires the dedicated simulation:execute scope (not just write).
    Only scenarios in the 'ready' state can be executed; execution is
    synchronous via the simulation service.
    """
    # Check simulation:execute scope (special scope for running)
    if not api_client.has_scope("simulation", "execute"):
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Client does not have simulation:execute scope",
        )

    try:
        Scenario = env["spp.simulation.scenario"].sudo()  # nosemgrep: odoo-sudo-without-context
        scenario = Scenario.browse(scenario_id)

        if not scenario.exists():
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f"Scenario {scenario_id} not found",
            )

        if scenario.state != "ready":
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail="Only scenarios in 'ready' state can be run",
            )

        # Execute simulation via service
        service = env["spp.simulation.service"]
        run = service.execute_simulation(scenario)

        return RunSimulationResponse(
            run_id=run.id,
            scenario_id=scenario.id,
            state=run.state,
            message=f"Simulation completed with {run.beneficiary_count} beneficiaries"
            if run.state == "completed"
            else run.error_message or "Simulation failed",
        )

    except HTTPException:
        raise
    except Exception:
        _logger.exception("Failed to run simulation")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to run simulation",
        ) from None
detail="Client does not have simulation:write scope", + ) + + # Check simulation:convert scope + if not api_client.has_scope("simulation", "convert"): + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Client does not have simulation:convert scope", + ) + + try: + Scenario = env["spp.simulation.scenario"].sudo() # nosemgrep: odoo-sudo-without-context + scenario = Scenario.browse(scenario_id) + + if not scenario.exists(): + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Scenario {scenario_id} not found", + ) + + if scenario.state != "ready": + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Only scenarios in 'ready' state can be converted", + ) + + if scenario.converted_program_id: + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail=f"Scenario already converted to program '{scenario.converted_program_id.name}'", + ) + + if not scenario.entitlement_rule_ids: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Scenario must have at least one entitlement rule", + ) + + # Build options from request + options = _build_convert_options(request) + + # Validate currency code if provided + if request.currency_code: + # nosemgrep: odoo-sudo-without-context + currency = env["res.currency"].sudo().search([("name", "=", request.currency_code)], limit=1) + if not currency: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail=f"Currency code '{request.currency_code}' not found", + ) + + # Check for duplicate program name + program_name = request.name or scenario.name + existing = ( + env["spp.program"] # nosemgrep: odoo-sudo-without-context + .sudo() + .search([("name", "=", program_name)], limit=1) + ) + if existing: + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail="A program with this name already exists", + ) + + # Execute conversion via service + service = env["spp.simulation.service"].sudo() # nosemgrep: 
odoo-sudo-without-context + result = service.convert_to_program(scenario, options) + program = result["program"] + warnings = [str(w) for w in result.get("warnings", [])] + + return ConvertToProgramResponse( + program_id=program.id, + program_name=program.name, + scenario_id=scenario.id, + warnings=warnings, + ) + + except HTTPException: + raise + except Exception: + _logger.exception("Failed to convert scenario to program") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Failed to convert scenario to program", + ) from None + + +def _build_convert_options(request: ConvertToProgramRequest) -> dict: + """Build service options dict from request schema.""" + options = {} + if request.name: + options["name"] = request.name + if request.currency_code: + options["currency_code"] = request.currency_code + if request.is_one_time_distribution: + options["is_one_time_distribution"] = True + if request.import_beneficiaries: + options["import_beneficiaries"] = True + if request.rrule_type: + options["rrule_type"] = request.rrule_type + if request.cycle_duration is not None: + options["cycle_duration"] = request.cycle_duration + if request.day is not None: + options["day"] = request.day + if request.month_by: + options["month_by"] = request.month_by + if request.weekday: + options["weekday"] = request.weekday + if request.byday: + options["byday"] = request.byday + + # Weekly day flags + for day_field in ("mon", "tue", "wed", "thu", "fri", "sat", "sun"): + if getattr(request, day_field, False): + options[day_field] = True + + return options diff --git a/spp_api_v2_simulation/routers/simulation.py b/spp_api_v2_simulation/routers/simulation.py new file mode 100644 index 00000000..1c025b34 --- /dev/null +++ b/spp_api_v2_simulation/routers/simulation.py @@ -0,0 +1,53 @@ +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. 
+"""Simulation template API endpoints.""" + +import logging +from typing import Annotated + +from odoo.api import Environment + +from odoo.addons.fastapi.dependencies import odoo_env +from odoo.addons.spp_api_v2.middleware.auth import get_authenticated_client + +from fastapi import APIRouter, Depends, HTTPException, status + +from ..schemas.simulation import TemplateListResponse +from ..services.simulation_api_service import SimulationApiService + +_logger = logging.getLogger(__name__) + +simulation_router = APIRouter(tags=["Simulation"], prefix="/simulation") + + +@simulation_router.get( + "/templates", + response_model=TemplateListResponse, + summary="List scenario templates", + description="Returns active pre-built scenario templates.", +) +async def list_templates( + env: Annotated[Environment, Depends(odoo_env)], + api_client: Annotated[dict, Depends(get_authenticated_client)], +): + """List active scenario templates. + + Requires: + simulation:read scope + """ + if not api_client.has_scope("simulation", "read"): + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Client does not have simulation:read scope", + ) + + try: + service = SimulationApiService(env) + templates = service.list_templates() + return {"templates": templates} + + except Exception as e: + _logger.error("Failed to list templates: %s", str(e), exc_info=True) + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Failed to list templates", + ) from e diff --git a/spp_api_v2_simulation/schemas/__init__.py b/spp_api_v2_simulation/schemas/__init__.py new file mode 100644 index 00000000..b6ac6759 --- /dev/null +++ b/spp_api_v2_simulation/schemas/__init__.py @@ -0,0 +1,61 @@ +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. +"""Pydantic schemas for simulation and aggregation API.""" + +from . import aggregation +from . 
import simulation +from .comparison import ( + ComparisonCreateRequest, + ComparisonResponse, + ComparisonRunData, + OverlapData, +) +from .run import ( + DistributionData, + FairnessData, + GeographicData, + MetricResult, + RunListResponse, + RunResponse, + RunSimulationRequest, + RunSimulationResponse, + RunSummary, + ScenarioSnapshot, + TargetingEfficiencyData, +) +from .scenario import ( + EntitlementRuleResponse, + EntitlementRuleSchema, + ScenarioCreateRequest, + ScenarioListResponse, + ScenarioResponse, + ScenarioUpdateRequest, +) + +__all__ = [ + "aggregation", + "simulation", + # Scenario schemas + "EntitlementRuleSchema", + "EntitlementRuleResponse", + "ScenarioCreateRequest", + "ScenarioUpdateRequest", + "ScenarioResponse", + "ScenarioListResponse", + # Run schemas + "RunResponse", + "RunListResponse", + "RunSimulationRequest", + "RunSimulationResponse", + "RunSummary", + "DistributionData", + "FairnessData", + "GeographicData", + "MetricResult", + "TargetingEfficiencyData", + "ScenarioSnapshot", + # Comparison schemas + "ComparisonCreateRequest", + "ComparisonRunData", + "OverlapData", + "ComparisonResponse", +] diff --git a/spp_api_v2_simulation/schemas/aggregation.py b/spp_api_v2_simulation/schemas/aggregation.py new file mode 100644 index 00000000..d2f5b48d --- /dev/null +++ b/spp_api_v2_simulation/schemas/aggregation.py @@ -0,0 +1,78 @@ +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. 
+"""Pydantic schemas for Aggregation API.""" + +from pydantic import BaseModel, Field + + +class AggregationScopeRequest(BaseModel): + """Inline scope definition for aggregation queries.""" + + target_type: str = Field( + default="group", + description="Target type: 'group' or 'individual'", + ) + cel_expression: str | None = Field( + default=None, + description="CEL expression to filter the target population", + ) + area_id: int | None = Field( + default=None, + description="Area ID to restrict scope geographically", + ) + + +class ComputeAggregationRequest(BaseModel): + """Request body for POST /aggregation/compute.""" + + scope: AggregationScopeRequest = Field( + ..., + description="Scope definition for the aggregation query", + ) + statistics: list[str] | None = Field( + default=None, + description="List of statistic names to compute (None for defaults)", + ) + group_by: list[str] | None = Field( + default=None, + description="List of dimension names for breakdown (max 3)", + ) + + +class AggregationResponse(BaseModel): + """Response from POST /aggregation/compute.""" + + total_count: int = Field(..., description="Total registrants matching the scope") + statistics: dict = Field( + default_factory=dict, + description="Computed statistics keyed by name", + ) + breakdown: dict | None = Field( + default=None, + description="Breakdown by group_by dimensions (if requested)", + ) + from_cache: bool = Field(..., description="Whether result was served from cache") + computed_at: str = Field(..., description="ISO 8601 timestamp of computation") + access_level: str = Field(..., description="Access level: 'aggregate' or 'individual'") + + +class DimensionInfo(BaseModel): + """Information about a demographic dimension.""" + + name: str = Field(..., description="Technical name of the dimension") + label: str = Field(..., description="Human-readable label") + description: str | None = Field(default=None, description="Dimension description") + dimension_type: str = Field(..., 
description="Type: 'field' or 'expression'") + applies_to: str = Field(..., description="Applies to: 'all', 'individuals', or 'groups'") + value_labels: dict | None = Field( + default=None, + description="Mapping of raw values to display labels", + ) + + +class DimensionsListResponse(BaseModel): + """Response from GET /aggregation/dimensions.""" + + dimensions: list[DimensionInfo] = Field( + ..., + description="Active demographic dimensions available for group_by", + ) diff --git a/spp_api_v2_simulation/schemas/comparison.py b/spp_api_v2_simulation/schemas/comparison.py new file mode 100644 index 00000000..00212446 --- /dev/null +++ b/spp_api_v2_simulation/schemas/comparison.py @@ -0,0 +1,50 @@ +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. +"""Pydantic schemas for simulation run comparisons.""" + +from pydantic import BaseModel, Field + + +class ComparisonCreateRequest(BaseModel): + """Request to create a comparison between runs.""" + + name: str = Field(default="Comparison", description="Name for this comparison") + run_ids: list[int] = Field(..., min_length=2, description="IDs of runs to compare (minimum 2)") + + +class ComparisonRunData(BaseModel): + """Data for a single run in a comparison.""" + + run_id: int = Field(..., description="Run ID") + scenario_name: str = Field(..., description="Scenario name") + beneficiary_count: int = Field(..., description="Number of beneficiaries selected") + total_cost: float = Field(..., description="Total cost of entitlements") + coverage_rate: float = Field(..., description="Coverage rate (beneficiaries / total registry)") + equity_score: float = Field(..., description="Equity score (1 - Gini coefficient)") + gini_coefficient: float = Field(..., description="Gini coefficient measuring inequality") + has_disparity: bool = Field(..., description="Whether significant disparity was detected") + leakage_rate: float = Field(..., description="Leakage rate (beneficiaries outside ideal population)") + 
undercoverage_rate: float = Field(..., description="Undercoverage rate (ideal population not covered)") + budget_utilization: float = Field(..., description="Budget utilization rate") + executed_at: str | None = Field(default=None, description="Execution timestamp (ISO 8601)") + + +class OverlapData(BaseModel): + """Overlap data between two runs.""" + + run_a_id: int = Field(..., description="First run ID") + run_a_name: str = Field(..., description="First run scenario name") + run_b_id: int = Field(..., description="Second run ID") + run_b_name: str = Field(..., description="Second run scenario name") + overlap_count: int = Field(..., description="Number of beneficiaries in both runs") + union_count: int = Field(..., description="Total unique beneficiaries across both runs") + jaccard_index: float = Field(..., description="Jaccard similarity index (overlap / union)") + + +class ComparisonResponse(BaseModel): + """Comparison between simulation runs.""" + + id: int = Field(..., description="Comparison ID") + name: str = Field(..., description="Comparison name") + runs: list[ComparisonRunData] = Field(..., description="Run data being compared") + overlap_data: list[OverlapData] = Field(default_factory=list, description="Overlap analysis between runs") + staleness_warning: str | None = Field(default=None, description="Warning if scenarios were modified after runs") diff --git a/spp_api_v2_simulation/schemas/run.py b/spp_api_v2_simulation/schemas/run.py new file mode 100644 index 00000000..c370ec1c --- /dev/null +++ b/spp_api_v2_simulation/schemas/run.py @@ -0,0 +1,137 @@ +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. 
+"""Pydantic schemas for simulation runs.""" + +from pydantic import BaseModel, Field + + +class DistributionData(BaseModel): + """Distribution analysis data for entitlements.""" + + count: int = Field(..., description="Number of beneficiaries") + total: float = Field(..., description="Total amount distributed") + minimum: float = Field(..., description="Minimum entitlement amount") + maximum: float = Field(..., description="Maximum entitlement amount") + mean: float = Field(..., description="Mean entitlement amount") + median: float = Field(..., description="Median entitlement amount") + standard_deviation: float = Field(..., description="Standard deviation") + gini_coefficient: float = Field(..., description="Gini coefficient") + percentiles: dict = Field(..., description="Percentile values (p25, p50, p75, etc.)") + + +class FairnessData(BaseModel): + """Fairness metrics by demographic groups.""" + + equity_score: float = Field(..., description="Overall equity score") + has_disparity: bool = Field(..., description="Whether significant disparity was detected") + demographic_breakdown: dict = Field(..., description="Breakdown by demographic groups") + + +class GeographicData(BaseModel): + """Geographic distribution data.""" + + areas: list = Field(..., description="Area-level statistics") + + +class MetricResult(BaseModel): + """Result of a custom metric calculation.""" + + type: str = Field(..., description="Metric type (count, sum, average, etc.)") + value: float | int = Field(..., description="Metric value") + + +class TargetingEfficiencyData(BaseModel): + """Targeting efficiency metrics (requires ideal population).""" + + true_positives: int = Field(..., description="Beneficiaries in ideal population") + false_positives: int = Field(..., description="Beneficiaries outside ideal population") + false_negatives: int = Field(..., description="Ideal population not covered") + total_simulated: int = Field(..., description="Total simulated beneficiaries") + 
total_ideal: int = Field(..., description="Total ideal population") + leakage_rate: float = Field(..., description="Leakage rate (false positives / total simulated)") + undercoverage_rate: float = Field(..., description="Undercoverage rate (false negatives / total ideal)") + + +class ScenarioSnapshot(BaseModel): + """Snapshot of scenario configuration at time of run.""" + + name: str = Field(..., description="Scenario name") + target_type: str = Field(..., description="Target type (group/individual)") + targeting_expression: str = Field(..., description="Targeting CEL expression") + budget_amount: float = Field(..., description="Budget amount") + budget_strategy: str = Field(..., description="Budget strategy") + ideal_population_expression: str | None = Field(default=None, description="Ideal population CEL expression") + entitlement_rules: list[dict] = Field(..., description="Entitlement rules") + + +class RunSummary(BaseModel): + """Summary data for a simulation run (list view).""" + + id: int = Field(..., description="Run ID") + scenario_id: int = Field(..., description="Scenario ID") + scenario_name: str = Field(..., description="Scenario name") + state: str = Field(..., description="Run state") + beneficiary_count: int = Field(..., description="Number of beneficiaries") + total_cost: float = Field(..., description="Total cost") + coverage_rate: float = Field(..., description="Coverage rate") + equity_score: float = Field(..., description="Equity score") + gini_coefficient: float = Field(..., description="Gini coefficient") + executed_at: str | None = Field(default=None, description="Execution timestamp") + execution_duration_seconds: float | None = Field(default=None, description="Execution duration in seconds") + + +class RunResponse(BaseModel): + """Simulation run details.""" + + id: int = Field(..., description="Run ID") + scenario_id: int = Field(..., description="Scenario ID") + scenario_name: str = Field(..., description="Scenario name") + state: str = 
Field(..., description="Run state: 'pending', 'running', 'completed', 'failed'") + executed_at: str | None = Field(default=None, description="Execution timestamp (ISO 8601)") + execution_duration_seconds: float | None = Field(default=None, description="Execution duration in seconds") + # Headline metrics + beneficiary_count: int = Field(..., description="Number of beneficiaries selected") + total_registry_count: int = Field(..., description="Total number of registrants in registry") + coverage_rate: float = Field(..., description="Coverage rate (beneficiaries / total registry)") + total_cost: float = Field(..., description="Total cost of entitlements") + budget_utilization: float = Field(..., description="Budget utilization rate (total cost / budget)") + gini_coefficient: float = Field(..., description="Gini coefficient measuring inequality") + equity_score: float = Field(..., description="Equity score (1 - Gini coefficient)") + has_disparity: bool = Field(..., description="Whether significant disparity was detected") + leakage_rate: float = Field(..., description="Leakage rate (beneficiaries outside ideal population)") + undercoverage_rate: float = Field(..., description="Undercoverage rate (ideal population not covered)") + error_message: str | None = Field(default=None, description="Error message if run failed") + # Optional detailed data + distribution_data: DistributionData | None = Field(default=None, description="Distribution analysis data") + fairness_data: FairnessData | None = Field(default=None, description="Fairness metrics by demographic group") + geographic_data: GeographicData | None = Field(default=None, description="Geographic distribution data") + targeting_efficiency_data: TargetingEfficiencyData | None = Field( + default=None, description="Targeting efficiency metrics" + ) + metric_results: dict[str, MetricResult] | None = Field( + default=None, description="Detailed metric calculation results" + ) + scenario_snapshot: ScenarioSnapshot | None = 
Field( + default=None, description="Snapshot of scenario configuration at time of run" + ) + + +class RunListResponse(BaseModel): + """List of simulation runs.""" + + runs: list[RunSummary] = Field(..., description="List of runs") + total_count: int = Field(..., description="Total number of runs") + + +class RunSimulationRequest(BaseModel): + """Request to run a simulation (no body needed, just POST to endpoint).""" + + pass + + +class RunSimulationResponse(BaseModel): + """Response after starting a simulation run.""" + + run_id: int = Field(..., description="ID of the created run") + scenario_id: int = Field(..., description="Scenario ID") + state: str = Field(..., description="Run state: 'running', 'completed', 'failed'") + message: str = Field(..., description="Human-readable status message") diff --git a/spp_api_v2_simulation/schemas/scenario.py b/spp_api_v2_simulation/schemas/scenario.py new file mode 100644 index 00000000..1ad9b0b4 --- /dev/null +++ b/spp_api_v2_simulation/schemas/scenario.py @@ -0,0 +1,141 @@ +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. 
+"""Pydantic schemas for simulation scenarios.""" + +from pydantic import BaseModel, Field + + +class EntitlementRuleSchema(BaseModel): + """Entitlement rule for simulation scenario (create/update).""" + + amount_mode: str = Field(..., description="Mode: 'fixed', 'multiplier', or 'cel'") + amount: float = Field(..., description="Base amount") + multiplier_field: str | None = Field(default=None, description="Field for multiplier mode") + max_multiplier: int | None = Field(default=None, description="Maximum multiplier value") + amount_cel_expression: str | None = Field(default=None, description="CEL expression for amount calculation") + condition_cel_expression: str | None = Field(default=None, description="CEL condition for rule application") + + +class EntitlementRuleResponse(BaseModel): + """Entitlement rule in response.""" + + id: int = Field(..., description="Rule ID") + amount_mode: str = Field(..., description="Mode: 'fixed', 'multiplier', or 'cel'") + amount: float = Field(..., description="Base amount") + multiplier_field: str | None = Field(default=None, description="Field for multiplier mode") + max_multiplier: int | None = Field(default=None, description="Maximum multiplier value") + amount_cel_expression: str | None = Field(default=None, description="CEL expression for amount calculation") + condition_cel_expression: str | None = Field(default=None, description="CEL condition for rule application") + + +class ScenarioCreateRequest(BaseModel): + """Request to create a simulation scenario.""" + + name: str = Field(..., description="Scenario name") + description: str | None = Field(default=None, description="Scenario description") + category: str | None = Field(default=None, description="Scenario category") + target_type: str = Field(default="group", description="Target type: 'group' or 'individual'") + targeting_expression: str | None = Field(default=None, description="CEL expression for targeting beneficiaries") + targeting_expression_explanation: str | 
None = Field( + default=None, description="Human-readable explanation of targeting expression" + ) + ideal_population_expression: str | None = Field( + default=None, description="CEL expression for ideal population calculation" + ) + budget_amount: float = Field(default=0.0, description="Total budget amount") + budget_strategy: str = Field( + default="none", + description="Budget strategy: 'none', 'cap_total', or 'proportional_reduction'", + ) + entitlement_rules: list[EntitlementRuleSchema] = Field( + default_factory=list, description="List of entitlement rules" + ) + program_id: int | None = Field(default=None, description="Reference program ID") + + +class ScenarioUpdateRequest(BaseModel): + """Request to update a simulation scenario.""" + + name: str | None = Field(default=None, description="Scenario name") + description: str | None = Field(default=None, description="Scenario description") + category: str | None = Field(default=None, description="Scenario category") + target_type: str | None = Field(default=None, description="Target type: 'group' or 'individual'") + targeting_expression: str | None = Field(default=None, description="CEL expression for targeting beneficiaries") + ideal_population_expression: str | None = Field( + default=None, description="CEL expression for ideal population calculation" + ) + budget_amount: float | None = Field(default=None, description="Total budget amount") + budget_strategy: str | None = Field( + default=None, + description="Budget strategy: 'none', 'cap_total', or 'proportional_reduction'", + ) + entitlement_rules: list[EntitlementRuleSchema] | None = Field(default=None, description="List of entitlement rules") + + +class ScenarioResponse(BaseModel): + """Simulation scenario details.""" + + id: int = Field(..., description="Scenario ID") + name: str = Field(..., description="Scenario name") + description: str | None = Field(default=None, description="Scenario description") + category: str | None = Field(default=None, 
description="Scenario category") + target_type: str = Field(..., description="Target type: 'group' or 'individual'") + state: str = Field(..., description="Scenario state: 'draft', 'ready', 'archived'") + targeting_expression: str | None = Field(default=None, description="CEL expression for targeting beneficiaries") + targeting_expression_explanation: str | None = Field( + default=None, description="Human-readable explanation of targeting expression" + ) + ideal_population_expression: str | None = Field( + default=None, description="CEL expression for ideal population calculation" + ) + budget_amount: float = Field(..., description="Total budget amount") + budget_strategy: str = Field( + ..., + description="Budget strategy: 'none', 'cap_total', or 'proportional_reduction'", + ) + targeting_preview_count: int = Field(..., description="Number of beneficiaries in targeting preview") + targeting_preview_error: str | None = Field( + default=None, description="Error message from targeting preview, if any" + ) + run_count: int = Field(..., description="Total number of runs for this scenario") + latest_beneficiary_count: int = Field(..., description="Beneficiary count from latest run") + latest_equity_score: float = Field(..., description="Equity score from latest run") + entitlement_rules: list[EntitlementRuleResponse] = Field(..., description="List of entitlement rules") + program_id: int | None = Field(default=None, description="Reference program ID") + + +class ScenarioListResponse(BaseModel): + """List of simulation scenarios.""" + + scenarios: list[ScenarioResponse] = Field(..., description="List of scenarios") + total_count: int = Field(..., description="Total number of scenarios") + + +class ConvertToProgramRequest(BaseModel): + """Request to convert a simulation scenario to a program.""" + + name: str | None = Field(default=None, description="Override program name (defaults to scenario name)") + currency_code: str | None = Field(default=None, description="ISO 
currency code (e.g., 'USD', 'PHP')") + is_one_time_distribution: bool = Field(default=False, description="One-time program (auto-creates first cycle)") + import_beneficiaries: bool = Field(default=False, description="Auto-enroll matching registrants") + rrule_type: str | None = Field(default=None, description="Recurrence: daily/weekly/monthly/yearly") + cycle_duration: int | None = Field(default=None, description="Number of recurrence units per cycle") + day: int | None = Field(default=None, description="Day of month (for monthly by date)") + month_by: str | None = Field(default=None, description="Monthly mode: 'date' or 'day'") + weekday: str | None = Field(default=None, description="Day of week (MON-SUN, for monthly by day)") + byday: str | None = Field(default=None, description="Which week (1-4, -1=last, for monthly by day)") + mon: bool = Field(default=False, description="Monday (for weekly recurrence)") + tue: bool = Field(default=False, description="Tuesday (for weekly recurrence)") + wed: bool = Field(default=False, description="Wednesday (for weekly recurrence)") + thu: bool = Field(default=False, description="Thursday (for weekly recurrence)") + fri: bool = Field(default=False, description="Friday (for weekly recurrence)") + sat: bool = Field(default=False, description="Saturday (for weekly recurrence)") + sun: bool = Field(default=False, description="Sunday (for weekly recurrence)") + + +class ConvertToProgramResponse(BaseModel): + """Response from converting a scenario to a program.""" + + program_id: int = Field(..., description="Created program ID") + program_name: str = Field(..., description="Created program name") + scenario_id: int = Field(..., description="Source scenario ID") + warnings: list[str] = Field(default_factory=list, description="Lossy conversion warnings") diff --git a/spp_api_v2_simulation/schemas/simulation.py b/spp_api_v2_simulation/schemas/simulation.py new file mode 100644 index 00000000..e3f5ac11 --- /dev/null +++ 
b/spp_api_v2_simulation/schemas/simulation.py @@ -0,0 +1,267 @@ +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. +"""Pydantic schemas for Simulation API.""" + +from pydantic import BaseModel, Field + +# --- Entitlement Rule Schemas --- + + +class EntitlementRuleRequest(BaseModel): + """Entitlement rule definition for scenario creation/update.""" + + name: str | None = Field(default=None, description="Rule name") + sequence: int = Field(default=10, description="Rule evaluation order") + amount_mode: str = Field( + default="fixed", + description="Amount mode: 'fixed', 'multiplier', or 'cel'", + ) + amount: float = Field(default=0.0, description="Fixed amount per beneficiary") + multiplier_field: str | None = Field( + default=None, + description="Field name to multiply (for multiplier mode)", + ) + max_multiplier: float = Field( + default=0.0, + description="Maximum multiplier cap (0 = no limit)", + ) + amount_cel_expression: str | None = Field( + default=None, + description="CEL expression returning amount (for cel mode)", + ) + condition_cel_expression: str | None = Field( + default=None, + description="Optional CEL sub-filter within targeted population", + ) + + +class EntitlementRuleResponse(BaseModel): + """Entitlement rule in API responses.""" + + id: int = Field(..., description="Rule ID") + name: str | None = Field(default=None, description="Rule name") + sequence: int = Field(..., description="Rule evaluation order") + amount_mode: str = Field(..., description="Amount mode") + amount: float = Field(..., description="Fixed amount") + multiplier_field: str | None = Field(default=None, description="Multiplier field") + max_multiplier: float = Field(..., description="Max multiplier cap") + amount_cel_expression: str | None = Field(default=None, description="CEL amount expression") + condition_cel_expression: str | None = Field(default=None, description="CEL condition expression") + + +# --- Template Schemas --- + + +class 
TemplateResponse(BaseModel): + """Simulation scenario template.""" + + id: int = Field(..., description="Template ID") + name: str = Field(..., description="Template name") + description: str | None = Field(default=None, description="Template description") + category: str = Field(..., description="Template category") + target_type: str = Field(..., description="Target type: 'group' or 'individual'") + targeting_expression: str = Field(..., description="CEL targeting expression") + default_amount: float = Field(..., description="Default entitlement amount") + default_amount_mode: str = Field(..., description="Default amount mode") + icon: str = Field(..., description="FontAwesome icon class") + + +class TemplateListResponse(BaseModel): + """Response from GET /simulation/templates.""" + + templates: list[TemplateResponse] = Field(..., description="Active scenario templates") + + +# --- Scenario Schemas --- + + +class CreateScenarioRequest(BaseModel): + """Request body for POST /simulation/scenarios.""" + + name: str = Field(..., description="Scenario name") + description: str | None = Field(default=None, description="Scenario description") + category: str | None = Field(default=None, description="Scenario category") + template_id: int | None = Field( + default=None, + description="Template ID to create from (overrides manual fields)", + ) + target_type: str = Field(default="group", description="Target type: 'group' or 'individual'") + targeting_expression: str | None = Field( + default=None, + description="CEL targeting expression (required if no template_id)", + ) + budget_amount: float = Field(default=0.0, description="Total budget amount") + budget_strategy: str = Field( + default="none", + description="Budget strategy: 'none', 'cap_total', or 'proportional_reduction'", + ) + ideal_population_expression: str | None = Field( + default=None, + description="CEL expression for ideal population (leakage/undercoverage measurement)", + ) + entitlement_rules: 
list[EntitlementRuleRequest] | None = Field( + default=None, + description="Entitlement rules (ignored if template_id is provided)", + ) + + +class UpdateScenarioRequest(BaseModel): + """Request body for PATCH /simulation/scenarios/{id}.""" + + name: str | None = Field(default=None, description="Updated scenario name") + description: str | None = Field(default=None, description="Updated description") + category: str | None = Field(default=None, description="Updated category") + targeting_expression: str | None = Field(default=None, description="Updated CEL expression") + budget_amount: float | None = Field(default=None, description="Updated budget amount") + budget_strategy: str | None = Field(default=None, description="Updated budget strategy") + ideal_population_expression: str | None = Field( + default=None, + description="Updated ideal population expression", + ) + entitlement_rules: list[EntitlementRuleRequest] | None = Field( + default=None, + description="Replacement entitlement rules (replaces all existing rules)", + ) + + +class ScenarioResponse(BaseModel): + """Simulation scenario in API responses.""" + + id: int = Field(..., description="Scenario ID") + name: str = Field(..., description="Scenario name") + description: str | None = Field(default=None, description="Scenario description") + category: str | None = Field(default=None, description="Scenario category") + template_id: int | None = Field(default=None, description="Source template ID") + target_type: str = Field(..., description="Target type") + targeting_expression: str | None = Field(default=None, description="CEL targeting expression") + budget_amount: float = Field(..., description="Budget amount") + budget_strategy: str = Field(..., description="Budget strategy") + ideal_population_expression: str | None = Field(default=None, description="Ideal population expression") + state: str = Field(..., description="Scenario state: 'draft', 'ready', or 'archived'") + targeting_preview_count: int = 
Field(..., description="Live count of matching registrants") + entitlement_rules: list[EntitlementRuleResponse] = Field( + default_factory=list, + description="Entitlement rules", + ) + run_count: int = Field(default=0, description="Number of simulation runs") + + +class ScenarioListResponse(BaseModel): + """Response from GET /simulation/scenarios.""" + + scenarios: list[ScenarioResponse] = Field(..., description="Matching scenarios") + + +# --- Run Schemas --- + + +class RunHeadlineResponse(BaseModel): + """Headline metrics from a simulation run (returned after execution).""" + + id: int = Field(..., description="Run ID") + scenario_id: int = Field(..., description="Parent scenario ID") + scenario_name: str = Field(..., description="Scenario name at time of run") + state: str = Field(..., description="Run state: 'running', 'completed', or 'failed'") + beneficiary_count: int = Field(..., description="Number of beneficiaries targeted") + total_registry_count: int = Field(..., description="Total registry population") + coverage_rate: float = Field(..., description="Coverage rate as percentage") + total_cost: float = Field(..., description="Total entitlement cost") + budget_utilization: float = Field(..., description="Budget utilization percentage") + gini_coefficient: float = Field(..., description="Gini coefficient (0=equal, 1=max inequality)") + equity_score: float = Field(..., description="Equity score 0-100") + has_disparity: bool = Field(..., description="Whether significant disparity was detected") + executed_at: str | None = Field(default=None, description="Execution timestamp (ISO 8601)") + execution_duration_seconds: float = Field(default=0.0, description="Execution duration in seconds") + error_message: str | None = Field(default=None, description="Error message if failed") + + +class RunDetailResponse(RunHeadlineResponse): + """Full detail of a simulation run including JSON breakdowns.""" + + leakage_rate: float = Field(default=0.0, description="Leakage 
rate percentage") + undercoverage_rate: float = Field(default=0.0, description="Undercoverage rate percentage") + distribution_json: dict | None = Field( + default=None, + description="Distribution statistics (min, max, mean, median, percentiles)", + ) + fairness_json: dict | None = Field( + default=None, + description="Fairness analysis data", + ) + targeting_efficiency_json: dict | None = Field( + default=None, + description="Targeting efficiency (true/false positives/negatives)", + ) + geographic_json: dict | None = Field( + default=None, + description="Geographic breakdown by area", + ) + metric_results_json: dict | None = Field( + default=None, + description="Custom metric results", + ) + scenario_snapshot_json: dict | None = Field( + default=None, + description="Scenario parameters at time of execution", + ) + + +# --- Comparison Schemas --- + + +class CompareRunsRequest(BaseModel): + """Request body for POST /simulation/runs/compare.""" + + run_ids: list[int] = Field( + ..., + min_length=2, + description="List of run IDs to compare (minimum 2)", + ) + name: str | None = Field( + default=None, + description="Comparison name (auto-generated if not provided)", + ) + + +class ComparisonRunMetrics(BaseModel): + """Metrics for a single run within a comparison.""" + + run_id: int = Field(..., description="Run ID") + scenario_name: str = Field(..., description="Scenario name") + beneficiary_count: int = Field(..., description="Number of beneficiaries") + total_cost: float = Field(..., description="Total cost") + coverage_rate: float = Field(..., description="Coverage rate percentage") + equity_score: float = Field(..., description="Equity score 0-100") + gini_coefficient: float = Field(..., description="Gini coefficient") + has_disparity: bool = Field(..., description="Disparity detected") + leakage_rate: float = Field(default=0.0, description="Leakage rate") + undercoverage_rate: float = Field(default=0.0, description="Undercoverage rate") + budget_utilization: 
float = Field(default=0.0, description="Budget utilization") + executed_at: str | None = Field(default=None, description="Execution timestamp") + + +class ComparisonOverlap(BaseModel): + """Overlap analysis between two runs.""" + + run_a_id: int = Field(..., description="First run ID") + run_a_name: str = Field(..., description="First run scenario name") + run_b_id: int = Field(..., description="Second run ID") + run_b_name: str = Field(..., description="Second run scenario name") + overlap_count: int = Field(..., description="Number of shared beneficiaries") + union_count: int = Field(..., description="Total unique beneficiaries across both") + jaccard_index: float = Field(..., description="Jaccard similarity index (0-1)") + + +class ComparisonResponse(BaseModel): + """Response from POST /simulation/runs/compare.""" + + id: int = Field(..., description="Comparison ID") + name: str = Field(..., description="Comparison name") + runs: list[ComparisonRunMetrics] = Field(..., description="Per-run metrics") + overlap: list[ComparisonOverlap] = Field( + default_factory=list, + description="Pairwise overlap analysis", + ) + staleness_warning: str | None = Field( + default=None, + description="Warning if runs were executed far apart", + ) diff --git a/spp_api_v2_simulation/security/ir.model.access.csv b/spp_api_v2_simulation/security/ir.model.access.csv new file mode 100644 index 00000000..97dd8b91 --- /dev/null +++ b/spp_api_v2_simulation/security/ir.model.access.csv @@ -0,0 +1 @@ +id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink diff --git a/spp_api_v2_simulation/services/__init__.py b/spp_api_v2_simulation/services/__init__.py new file mode 100644 index 00000000..93b1bfa1 --- /dev/null +++ b/spp_api_v2_simulation/services/__init__.py @@ -0,0 +1,8 @@ +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. +from . import aggregation_api_service +from . 
import simulation_api_service + +__all__ = [ + "aggregation_api_service", + "simulation_api_service", +] diff --git a/spp_api_v2_simulation/services/aggregation_api_service.py b/spp_api_v2_simulation/services/aggregation_api_service.py new file mode 100644 index 00000000..dd6fbe71 --- /dev/null +++ b/spp_api_v2_simulation/services/aggregation_api_service.py @@ -0,0 +1,147 @@ +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. +"""Service for delegating to the aggregation engine.""" + +import json +import logging + +from odoo.addons.spp_aggregation.services import build_explicit_scope + +_logger = logging.getLogger(__name__) + + +class AggregationApiService: + """Thin adapter between API layer and spp.aggregation.service.""" + + def __init__(self, env): + """Initialize aggregation API service. + + Args: + env: Odoo environment + """ + self.env = env + + def compute_aggregation(self, scope_dict, statistics=None, group_by=None): + """Compute aggregation for a scope with optional breakdown. + + Translates the API-level scope (target_type, cel_expression, area_id) + into the aggregation engine's scope format (scope_type, cel_profile). + + Args: + scope_dict: Inline scope definition dict with target_type, cel_expression, area_id + statistics: List of statistic names to compute (or None for defaults) + group_by: List of dimension names for breakdown (max 3) + + Returns: + dict: Aggregation result with total_count, statistics, breakdown, etc. 
+ """ + engine_scope = self._build_engine_scope(scope_dict) + # nosemgrep: odoo-sudo-without-context + aggregation_service = self.env["spp.aggregation.service"].sudo() + result = aggregation_service.compute_aggregation( + scope=engine_scope, + statistics=statistics, + group_by=group_by, + context="api", + ) + return result + + @staticmethod + def _parse_value_labels(value): + """Parse value_labels_json which may be a dict, JSON string, or None.""" + if not value: + return None + if isinstance(value, dict): + return value + if isinstance(value, str): + try: + return json.loads(value) + except (json.JSONDecodeError, TypeError): + return None + return None + + def _build_engine_scope(self, scope_dict): + """Translate API scope dict to aggregation engine scope dict. + + Uses explicit scope with Odoo ORM search to resolve registrant IDs, + since the CEL scope resolver has issues with certain expressions. + If a cel_expression is provided, uses the CEL executor's + compile_for_batch to filter the base registrant set. 
+ + Args: + scope_dict: API scope with target_type, cel_expression, area_id + + Returns: + dict: Engine-compatible scope dict with scope_type="explicit" + """ + target_type = scope_dict.get("target_type", "group") + cel_expression = scope_dict.get("cel_expression") + area_id = scope_dict.get("area_id") + + is_group = target_type == "group" + + # Build Odoo domain for base registrant filtering + domain = [ + ("is_registrant", "=", True), + ("is_group", "=", is_group), + ("active", "=", True), + ] + if area_id is not None: + domain.append(("area_id", "=", area_id)) + + # API service requires sudo to search all registrants regardless of user access rules + # nosemgrep: odoo-sudo-without-context, odoo-sudo-on-sensitive-models + partner_ids = self.env["res.partner"].sudo().search(domain).ids + + # Apply CEL expression filter if provided + if cel_expression and partner_ids: + try: + executor = self.env["spp.cel.executor"].sudo() # nosemgrep: odoo-sudo-without-context + filtered_ids = [] + for batch_ids in executor.compile_for_batch( + "res.partner", + cel_expression, + batch_size=5000, + ): + filtered_ids.extend(batch_ids) + # Intersect with our base set + partner_id_set = set(partner_ids) + partner_ids = [pid for pid in filtered_ids if pid in partner_id_set] + except Exception: + _logger.warning( + "CEL expression filter failed, using base registrant set", + exc_info=True, + ) + + return build_explicit_scope(partner_ids) + + def list_dimensions(self, applies_to=None): + """List active demographic dimensions. 
+ + Args: + applies_to: Optional filter: 'individuals', 'groups', or None for all + + Returns: + list[dict]: List of dimension info dicts + """ + dimensions = ( + self.env["spp.demographic.dimension"] # nosemgrep: odoo-sudo-without-context + .sudo() + .get_active_dimensions( + applies_to=applies_to, + ) + ) + + result = [] + for dimension in dimensions: + result.append( + { + "name": dimension.name, + "label": dimension.label, + "description": dimension.description or None, + "dimension_type": dimension.dimension_type, + "applies_to": dimension.applies_to, + "value_labels": self._parse_value_labels(dimension.value_labels_json), + } + ) + + return result diff --git a/spp_api_v2_simulation/services/simulation_api_service.py b/spp_api_v2_simulation/services/simulation_api_service.py new file mode 100644 index 00000000..4895be45 --- /dev/null +++ b/spp_api_v2_simulation/services/simulation_api_service.py @@ -0,0 +1,528 @@ +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. +"""Service for simulation scenario management and execution.""" + +import logging + +from odoo import Command + +_logger = logging.getLogger(__name__) + + +class SimulationApiService: + """Adapter between API layer and spp.simulation.* models.""" + + def __init__(self, env): + """Initialize simulation API service. + + Args: + env: Odoo environment + """ + self.env = env + + # --- Templates --- + + def list_templates(self): + """List active simulation scenario templates. 
+ + Returns: + list[dict]: List of template info dicts + """ + templates = ( + self.env["spp.simulation.scenario.template"] # nosemgrep: odoo-sudo-without-context + .sudo() + .search( + [("active", "=", True)], + order="sequence, name", + ) + ) + + result = [] + for template in templates: + result.append( + { + "id": template.id, + "name": template.name, + "description": template.description or None, + "category": template.category, + "target_type": template.target_type, + "targeting_expression": template.targeting_expression, + "default_amount": template.default_amount, + "default_amount_mode": template.default_amount_mode, + "icon": template.icon or "fa-users", + } + ) + + return result + + # --- Scenario CRUD --- + + def create_scenario(self, data): + """Create a new simulation scenario. + + If template_id is provided, creates from template with pre-populated + fields. Otherwise creates with the provided fields and entitlement rules. + + Args: + data: Dict with scenario creation fields + + Returns: + dict: Created scenario as serialized dict + """ + template_id = data.get("template_id") + + if template_id: + return self._create_from_template(data) + return self._create_custom(data) + + def _create_from_template(self, data): + """Create scenario from a template. 
+ + Args: + data: Dict with at least name and template_id + + Returns: + dict: Created scenario dict + """ + template = self.env["spp.simulation.scenario.template"].browse(data["template_id"]) + if not template.exists(): + raise ValueError(f"Template {data['template_id']} not found") + + # Create scenario with template fields + scenario_vals = { + "name": data["name"], + "template_id": template.id, + "target_type": template.target_type, + "targeting_expression": template.targeting_expression, + "ideal_population_expression": template.ideal_population_expression or False, + } + + if data.get("description"): + scenario_vals["description"] = data["description"] + if data.get("category"): + scenario_vals["category"] = data["category"] + if data.get("budget_amount"): + scenario_vals["budget_amount"] = data["budget_amount"] + if data.get("budget_strategy"): + scenario_vals["budget_strategy"] = data["budget_strategy"] + + scenario = self.env["spp.simulation.scenario"].create(scenario_vals) + + # Create default entitlement rule from template if it has a default amount + if template.default_amount: + self.env["spp.simulation.entitlement.rule"].create( + { + "scenario_id": scenario.id, + "name": "Default entitlement", + "amount_mode": template.default_amount_mode or "fixed", + "amount": template.default_amount, + } + ) + + return self._serialize_scenario(scenario) + + def _create_custom(self, data): + """Create a custom scenario without template. 
+ + Args: + data: Dict with scenario fields and optional entitlement_rules + + Returns: + dict: Created scenario dict + """ + scenario_vals = { + "name": data["name"], + "target_type": data.get("target_type", "group"), + "targeting_expression": data.get("targeting_expression", ""), + "budget_amount": data.get("budget_amount", 0.0), + "budget_strategy": data.get("budget_strategy", "none"), + } + + if data.get("description"): + scenario_vals["description"] = data["description"] + if data.get("category"): + scenario_vals["category"] = data["category"] + if data.get("ideal_population_expression"): + scenario_vals["ideal_population_expression"] = data["ideal_population_expression"] + + scenario = self.env["spp.simulation.scenario"].create(scenario_vals) + + # Create entitlement rules + rules_data = data.get("entitlement_rules") or [] + for rule_data in rules_data: + rule_vals = { + "scenario_id": scenario.id, + "name": rule_data.get("name", ""), + "sequence": rule_data.get("sequence", 10), + "amount_mode": rule_data.get("amount_mode", "fixed"), + "amount": rule_data.get("amount", 0.0), + } + if rule_data.get("multiplier_field"): + rule_vals["multiplier_field"] = rule_data["multiplier_field"] + if rule_data.get("max_multiplier"): + rule_vals["max_multiplier"] = rule_data["max_multiplier"] + if rule_data.get("amount_cel_expression"): + rule_vals["amount_cel_expression"] = rule_data["amount_cel_expression"] + if rule_data.get("condition_cel_expression"): + rule_vals["condition_cel_expression"] = rule_data["condition_cel_expression"] + + self.env["spp.simulation.entitlement.rule"].create(rule_vals) + + return self._serialize_scenario(scenario) + + def list_scenarios(self, state=None): + """List simulation scenarios with optional state filter. 
+ + Args: + state: Optional state filter ('draft', 'ready', 'archived') + + Returns: + list[dict]: List of scenario dicts + """ + domain = [] + if state: + domain.append(("state", "=", state)) + + scenarios = self.env["spp.simulation.scenario"].search( + domain, + order="write_date desc", + ) + + return [self._serialize_scenario(s) for s in scenarios] + + def get_scenario(self, scenario_id): + """Get a single scenario by ID. + + Args: + scenario_id: Database ID of the scenario + + Returns: + dict: Scenario dict with full details + + Raises: + ValueError: If scenario not found + """ + scenario = self.env["spp.simulation.scenario"].browse(scenario_id) + if not scenario.exists(): + raise ValueError(f"Scenario {scenario_id} not found") + + return self._serialize_scenario(scenario) + + def update_scenario(self, scenario_id, data): + """Update a scenario with partial data. + + If entitlement_rules is provided, replaces all existing rules. + + Args: + scenario_id: Database ID of the scenario + data: Dict with fields to update + + Returns: + dict: Updated scenario dict + + Raises: + ValueError: If scenario not found + """ + scenario = self.env["spp.simulation.scenario"].browse(scenario_id) + if not scenario.exists(): + raise ValueError(f"Scenario {scenario_id} not found") + + # Build write vals from non-None fields + write_vals = {} + field_mapping = { + "name": "name", + "description": "description", + "category": "category", + "targeting_expression": "targeting_expression", + "budget_amount": "budget_amount", + "budget_strategy": "budget_strategy", + "ideal_population_expression": "ideal_population_expression", + } + + for api_field, model_field in field_mapping.items(): + if api_field in data and data[api_field] is not None: + write_vals[model_field] = data[api_field] + + if write_vals: + scenario.write(write_vals) + + # Replace entitlement rules if provided + if "entitlement_rules" in data and data["entitlement_rules"] is not None: + # Delete existing rules + 
scenario.entitlement_rule_ids.unlink() + + # Create new rules + for rule_data in data["entitlement_rules"]: + rule_vals = { + "scenario_id": scenario.id, + "name": rule_data.get("name", ""), + "sequence": rule_data.get("sequence", 10), + "amount_mode": rule_data.get("amount_mode", "fixed"), + "amount": rule_data.get("amount", 0.0), + } + if rule_data.get("multiplier_field"): + rule_vals["multiplier_field"] = rule_data["multiplier_field"] + if rule_data.get("max_multiplier"): + rule_vals["max_multiplier"] = rule_data["max_multiplier"] + if rule_data.get("amount_cel_expression"): + rule_vals["amount_cel_expression"] = rule_data["amount_cel_expression"] + if rule_data.get("condition_cel_expression"): + rule_vals["condition_cel_expression"] = rule_data["condition_cel_expression"] + + self.env["spp.simulation.entitlement.rule"].create(rule_vals) + + # Refresh to get updated computed fields + scenario.invalidate_recordset() + return self._serialize_scenario(scenario) + + # --- Simulation Execution --- + + def run_simulation(self, scenario_id): + """Execute a simulation on a scenario. + + Auto-transitions draft -> ready if targeting expression is set. + Then delegates to spp.simulation.service.execute_simulation(). + + Args: + scenario_id: Database ID of the scenario + + Returns: + dict: Run headline metrics + + Raises: + ValueError: If scenario not found or cannot be run + """ + scenario = self.env["spp.simulation.scenario"].browse(scenario_id) + if not scenario.exists(): + raise ValueError(f"Scenario {scenario_id} not found") + + # Auto-transition draft -> ready if possible + if scenario.state == "draft" and scenario.targeting_expression: + scenario.action_set_ready() + + if scenario.state != "ready": + raise ValueError(f"Scenario must be in 'ready' state to run. 
Current state: {scenario.state}") + + # Execute simulation + simulation_service = self.env["spp.simulation.service"] + run = simulation_service.execute_simulation(scenario) + + return self._serialize_run_headline(run) + + def get_run(self, run_id): + """Get full details of a simulation run. + + Args: + run_id: Database ID of the run + + Returns: + dict: Full run details including JSON breakdowns + + Raises: + ValueError: If run not found + """ + run = self.env["spp.simulation.run"].browse(run_id) + if not run.exists(): + raise ValueError(f"Run {run_id} not found") + + return self._serialize_run_detail(run) + + # --- Comparison --- + + def compare_runs(self, run_ids, name=None): + """Compare multiple simulation runs. + + Creates an spp.simulation.comparison record, computes comparison + data, and returns structured result. + + Args: + run_ids: List of run IDs to compare (minimum 2) + name: Optional comparison name (auto-generated if None) + + Returns: + dict: Comparison result with per-run metrics and overlap + + Raises: + ValueError: If fewer than 2 run IDs provided + """ + if len(run_ids) < 2: + raise ValueError("At least 2 run IDs are required for comparison") + + # Verify runs exist + runs = self.env["spp.simulation.run"].browse(run_ids) + if len(runs.exists()) != len(run_ids): + missing = set(run_ids) - set(runs.exists().ids) + raise ValueError(f"Runs not found: {missing}") + + # Auto-generate name if not provided + if not name: + scenario_names = [r.scenario_id.name for r in runs] + name = f"Comparison: {' vs '.join(scenario_names[:3])}" + if len(scenario_names) > 3: + name += f" (+{len(scenario_names) - 3} more)" + + # Create comparison record + comparison = self.env["spp.simulation.comparison"].create( + { + "name": name, + "run_ids": [Command.set(run_ids)], + } + ) + + # Compute comparison data + comparison.action_compute_comparison() + + return self._serialize_comparison(comparison) + + # --- Serializers --- + + def _serialize_scenario(self, scenario): + 
"""Serialize a scenario record to API dict. + + Args: + scenario: spp.simulation.scenario record + + Returns: + dict: Serialized scenario + """ + return { # nosemgrep: odoo-expose-database-id + "id": scenario.id, + "name": scenario.name, + "description": scenario.description or None, + "category": scenario.category or None, + "template_id": scenario.template_id.id if scenario.template_id else None, + "target_type": scenario.target_type, + "targeting_expression": scenario.targeting_expression or None, + "budget_amount": scenario.budget_amount, + "budget_strategy": scenario.budget_strategy, + "ideal_population_expression": scenario.ideal_population_expression or None, + "state": scenario.state, + "targeting_preview_count": scenario.targeting_preview_count, + "entitlement_rules": [self._serialize_entitlement_rule(rule) for rule in scenario.entitlement_rule_ids], + "run_count": scenario.run_count, + } + + def _serialize_entitlement_rule(self, rule): + """Serialize an entitlement rule record to API dict. + + Args: + rule: spp.simulation.entitlement.rule record + + Returns: + dict: Serialized rule + """ + return { # nosemgrep: odoo-expose-database-id + "id": rule.id, + "name": rule.name or None, + "sequence": rule.sequence, + "amount_mode": rule.amount_mode, + "amount": rule.amount, + "multiplier_field": rule.multiplier_field or None, + "max_multiplier": rule.max_multiplier, + "amount_cel_expression": rule.amount_cel_expression or None, + "condition_cel_expression": rule.condition_cel_expression or None, + } + + def _serialize_run_headline(self, run): + """Serialize a run record to headline metrics dict. 
+ + Args: + run: spp.simulation.run record + + Returns: + dict: Headline run metrics + """ + return { # nosemgrep: odoo-expose-database-id + "id": run.id, + "scenario_id": run.scenario_id.id, + "scenario_name": run.scenario_id.name, + "state": run.state, + "beneficiary_count": run.beneficiary_count, + "total_registry_count": run.total_registry_count, + "coverage_rate": run.coverage_rate, + "total_cost": run.total_cost, + "budget_utilization": run.budget_utilization, + "gini_coefficient": run.gini_coefficient, + "equity_score": run.equity_score, + "has_disparity": run.has_disparity, + "executed_at": run.executed_at.isoformat() if run.executed_at else None, + "execution_duration_seconds": run.execution_duration_seconds, + "error_message": run.error_message or None, + } + + def _serialize_run_detail(self, run): + """Serialize a run record to full detail dict. + + Args: + run: spp.simulation.run record + + Returns: + dict: Full run details including JSON breakdowns + """ + result = self._serialize_run_headline(run) + result.update( + { + "leakage_rate": run.leakage_rate, + "undercoverage_rate": run.undercoverage_rate, + "distribution_json": run.distribution_json or None, + "fairness_json": run.fairness_json or None, + "targeting_efficiency_json": run.targeting_efficiency_json or None, + "geographic_json": run.geographic_json or None, + "metric_results_json": run.metric_results_json or None, + "scenario_snapshot_json": run.scenario_snapshot_json or None, + } + ) + return result + + def _serialize_comparison(self, comparison): + """Serialize a comparison record to API dict. 
+ + Args: + comparison: spp.simulation.comparison record + + Returns: + dict: Comparison result + """ + # Extract per-run metrics from comparison_json + runs_data = [] + comparison_data = comparison.comparison_json or {} + for run_info in comparison_data.get("runs", []): + runs_data.append( + { + "run_id": run_info.get("run_id"), + "scenario_name": run_info.get("scenario_name", ""), + "beneficiary_count": run_info.get("beneficiary_count", 0), + "total_cost": run_info.get("total_cost", 0.0), + "coverage_rate": run_info.get("coverage_rate", 0.0), + "equity_score": run_info.get("equity_score", 0.0), + "gini_coefficient": run_info.get("gini_coefficient", 0.0), + "has_disparity": run_info.get("has_disparity", False), + "leakage_rate": run_info.get("leakage_rate", 0.0), + "undercoverage_rate": run_info.get("undercoverage_rate", 0.0), + "budget_utilization": run_info.get("budget_utilization", 0.0), + "executed_at": run_info.get("executed_at"), + } + ) + + # Extract overlap data from overlap_count_json + overlap_data = [] + overlap_json = comparison.overlap_count_json or {} + for _key, overlap_info in overlap_json.items(): + overlap_data.append( + { + "run_a_id": overlap_info.get("run_a_id"), + "run_a_name": overlap_info.get("run_a_name", ""), + "run_b_id": overlap_info.get("run_b_id"), + "run_b_name": overlap_info.get("run_b_name", ""), + "overlap_count": overlap_info.get("overlap_count", 0), + "union_count": overlap_info.get("union_count", 0), + "jaccard_index": overlap_info.get("jaccard_index", 0.0), + } + ) + + return { # nosemgrep: odoo-expose-database-id + "id": comparison.id, + "name": comparison.name, + "runs": runs_data, + "overlap": overlap_data, + "staleness_warning": comparison.staleness_warning or None, + } diff --git a/spp_api_v2_simulation/tests/__init__.py b/spp_api_v2_simulation/tests/__init__.py new file mode 100644 index 00000000..a225b580 --- /dev/null +++ b/spp_api_v2_simulation/tests/__init__.py @@ -0,0 +1,6 @@ +# Part of OpenSPP. 
# Part of OpenSPP. See LICENSE file for full copyright and licensing details.
# --- spp_api_v2_simulation/tests/__init__.py ---
from . import test_comparison_api, test_convert_to_program_api, test_run_api, test_scenario_api
from . import test_scope_registration
from . import test_aggregation_service
from . import test_simulation_service

# --- spp_api_v2_simulation/tests/common.py ---
# Part of OpenSPP. See LICENSE file for full copyright and licensing details.
"""Shared test fixtures for spp_api_v2_simulation tests."""

import logging
from datetime import datetime

from odoo.tests import tagged
from odoo.tests.common import TransactionCase

_logger = logging.getLogger(__name__)


class SimulationApiTestCase(TransactionCase):
    """Shared test fixtures for simulation API tests (REST endpoints).

    Provides: an API client with a ``simulation:all`` scope, a test area,
    group and individual registrants, draft/ready scenarios with entitlement
    rules, and completed/failed simulation runs.
    """

    @classmethod
    def setUpClass(cls):
        """Set up test data."""
        super().setUpClass()

        # Get or create organization type for API client.
        cls.org_type = cls.env.ref(
            "spp_consent.org_type_government",
            raise_if_not_found=False,
        )
        if not cls.org_type:
            cls.org_type = cls.env["spp.consent.org.type"].search([("code", "=", "government")], limit=1)
        if not cls.org_type:
            # FIX: fallback create, mirroring SimulationApiTestCommon below.
            # Without this, cls.org_type.id on an empty recordset raises when
            # neither the XML-id nor the search yields a record.
            cls.org_type = cls.env["spp.consent.org.type"].create({"name": "Government", "code": "government"})

        # Create partner for API client
        cls.api_partner = cls.env["res.partner"].create(
            {
                "name": "Test API Partner",
                "is_registrant": False,
                "is_group": False,
            }
        )

        # Create test API client with simulation scopes
        cls.api_client = cls.env["spp.api.client"].create(
            {
                "name": "Test Simulation Client",
                "partner_id": cls.api_partner.id,
                "organization_type_id": cls.org_type.id,
            }
        )

        # Create simulation:all scope (gives all permissions)
        cls.scope_all = cls.env["spp.api.client.scope"].create(
            {
                "client_id": cls.api_client.id,
                "resource": "simulation",
                "action": "all",
            }
        )

        # Create test area
        cls.area = cls.env["spp.area"].create(
            {
                "draft_name": "Test Area",
            }
        )

        # Create test registrants (groups)
        cls.group_registrants = cls.env["res.partner"]
        for i in range(5):
            partner = cls.env["res.partner"].create(
                {
                    "name": f"Test Household {i + 1}",
                    "is_registrant": True,
                    "is_group": True,
                    "area_id": cls.area.id,
                }
            )
            cls.group_registrants |= partner

        # Create test registrants (individuals)
        cls.individual_registrants = cls.env["res.partner"]
        for i in range(10):
            partner = cls.env["res.partner"].create(
                {
                    "name": f"Test Individual {i + 1}",
                    "is_registrant": True,
                    "is_group": False,
                }
            )
            cls.individual_registrants |= partner

        # Create a basic scenario in draft state
        cls.scenario_draft = cls.env["spp.simulation.scenario"].create(
            {
                "name": "Draft Test Scenario",
                "target_type": "group",
                "targeting_expression": "true",
                "state": "draft",
            }
        )

        # Create entitlement rule for the draft scenario
        cls.rule_draft = cls.env["spp.simulation.entitlement.rule"].create(
            {
                "scenario_id": cls.scenario_draft.id,
                "amount_mode": "fixed",
                "amount": 1000.0,
            }
        )

        # Create a scenario in ready state
        cls.scenario_ready = cls.env["spp.simulation.scenario"].create(
            {
                "name": "Ready Test Scenario",
                "target_type": "group",
                "targeting_expression": "true",
                "state": "ready",
                "budget_amount": 5000.0,
                "budget_strategy": "none",
            }
        )

        # Create entitlement rule for the ready scenario
        cls.rule_ready = cls.env["spp.simulation.entitlement.rule"].create(
            {
                "scenario_id": cls.scenario_ready.id,
                "amount_mode": "fixed",
                "amount": 500.0,
            }
        )

        # Create a completed simulation run
        cls.run_completed = cls.env["spp.simulation.run"].create(
            {
                "scenario_id": cls.scenario_ready.id,
                "state": "completed",
                "beneficiary_count": 5,
                "total_cost": 2500.0,
                "coverage_rate": 50.0,
                "equity_score": 85.0,
                "gini_coefficient": 0.15,
                "total_registry_count": 10,
                "budget_utilization": 50.0,
                "has_disparity": False,
                "leakage_rate": 0.0,
                "undercoverage_rate": 0.0,
                "executed_at": datetime(2024, 1, 1, 0, 0, 0),
                "execution_duration_seconds": 1.5,
            }
        )

        # Create a second completed run for comparison tests
        cls.run_completed_2 = cls.env["spp.simulation.run"].create(
            {
                "scenario_id": cls.scenario_ready.id,
                "state": "completed",
                "beneficiary_count": 4,
                "total_cost": 2000.0,
                "coverage_rate": 40.0,
                "equity_score": 90.0,
                "gini_coefficient": 0.10,
                "total_registry_count": 10,
                "budget_utilization": 40.0,
                "has_disparity": False,
                "leakage_rate": 0.0,
                "undercoverage_rate": 0.0,
                "executed_at": datetime(2024, 1, 2, 0, 0, 0),
                "execution_duration_seconds": 1.2,
            }
        )

        # Create a failed run
        cls.run_failed = cls.env["spp.simulation.run"].create(
            {
                "scenario_id": cls.scenario_ready.id,
                "state": "failed",
                "beneficiary_count": 0,
                "total_cost": 0.0,
                "error_message": "Test error message",
                "executed_at": datetime(2024, 1, 3, 0, 0, 0),
            }
        )


@tagged("post_install", "-at_install")
class SimulationApiTestCommon(TransactionCase):
    """Base test class with shared test data for service-level tests."""

    @classmethod
    def setUpClass(cls):
        """Set up shared test data."""
        super().setUpClass()

        # Create partner and org type for API client
        cls.api_partner = cls.env["res.partner"].create({"name": "Test Simulation Organization"})
        cls.org_type = cls.env.ref(
            "spp_consent.org_type_government",
            raise_if_not_found=False,
        )
        if not cls.org_type:
            cls.org_type = cls.env["spp.consent.org.type"].search([("code", "=", "government")], limit=1)
        if not cls.org_type:
            cls.org_type = cls.env["spp.consent.org.type"].create({"name": "Government", "code": "government"})

        # Create an API client for scope testing
        cls.api_client = cls.env["spp.api.client"].create(
            {
                "name": "Test Simulation API Client",
                "partner_id": cls.api_partner.id,
                "organization_type_id": cls.org_type.id,
            }
        )

        # Create test registrants (groups)
        cls.group1 = cls.env["res.partner"].create(
            {
                "name": "Test Group 1",
                "is_registrant": True,
                "is_group": True,
            }
        )
        cls.group2 = cls.env["res.partner"].create(
            {
                "name": "Test Group 2",
                "is_registrant": True,
                "is_group": True,
            }
        )

        # Create test simulation template
        cls.template = cls.env["spp.simulation.scenario.template"].create(
            {
                "name": "Test Template",
                "description": "A test scenario template",
                "category": "age",
                "target_type": "group",
                "targeting_expression": "age_years(r.birthdate) >= 60",
                "default_amount": 1000.0,
                "default_amount_mode": "fixed",
                "active": True,
            }
        )

        # Create test scenario
        cls.scenario = cls.env["spp.simulation.scenario"].create(
            {
                "name": "Test Scenario",
                "description": "A test scenario",
                "target_type": "group",
                "targeting_expression": "true",
                "budget_amount": 50000.0,
                "budget_strategy": "none",
                "state": "draft",
            }
        )

        # Create entitlement rule for the scenario
        cls.entitlement_rule = cls.env["spp.simulation.entitlement.rule"].create(
            {
                "scenario_id": cls.scenario.id,
                "name": "Base entitlement",
                "amount_mode": "fixed",
                "amount": 500.0,
                "sequence": 10,
            }
        )
# --- spp_api_v2_simulation/tests/test_aggregation_service.py ---
# Part of OpenSPP. See LICENSE file for full copyright and licensing details.
"""Tests for aggregation API service."""

import logging

from odoo.tests import tagged

from .common import SimulationApiTestCommon

_logger = logging.getLogger(__name__)


@tagged("post_install", "-at_install")
class TestAggregationApiService(SimulationApiTestCommon):
    """Test aggregation API service functionality."""

    def _get_service(self):
        """Instantiate the aggregation service bound to the test env."""
        from ..services.aggregation_api_service import AggregationApiService

        return AggregationApiService(self.env)

    def test_compute_aggregation_basic(self):
        """A minimal inline scope yields a count plus metadata."""
        service = self._get_service()
        scope_dict = {
            "target_type": "group",
        }
        result = service.compute_aggregation(scope_dict)

        # Response must expose the headline count and audit metadata.
        self.assertIn("total_count", result)
        self.assertIn("computed_at", result)
        self.assertIn("access_level", result)
        self.assertIsInstance(result["total_count"], int)

    def test_compute_aggregation_with_statistics(self):
        """Requesting statistics adds a statistics payload to the result."""
        service = self._get_service()
        scope_dict = {
            "target_type": "group",
        }
        result = service.compute_aggregation(
            scope_dict,
            statistics=["total_count"],
        )

        self.assertIn("total_count", result)
        self.assertIn("statistics", result)

    def test_compute_aggregation_with_group_by(self):
        """Grouping by a demographic dimension still returns a total count."""
        service = self._get_service()
        scope_dict = {
            "target_type": "group",
        }

        # Guarantee at least one dimension exists to group by.
        if not self.env["spp.demographic.dimension"].search([], limit=1):
            self.env["spp.demographic.dimension"].create(
                {
                    "name": "test_gender",
                    "label": "Test Gender",
                    "dimension_type": "field",
                    "field_path": "gender",
                    "applies_to": "all",
                    "active": True,
                }
            )

        dimensions = self.env["spp.demographic.dimension"].search([("active", "=", True)], limit=1)
        if dimensions:
            result = service.compute_aggregation(
                scope_dict,
                group_by=[dimensions[0].name],
            )
            self.assertIn("total_count", result)

    def test_list_dimensions(self):
        """list_dimensions returns structured entries for active dimensions."""
        service = self._get_service()

        # Seed one dimension so the listing is never empty.
        self.env["spp.demographic.dimension"].create(
            {
                "name": "test_dim_for_listing",
                "label": "Test Dimension",
                "dimension_type": "field",
                "field_path": "name",
                "applies_to": "all",
                "active": True,
            }
        )

        result = service.list_dimensions()

        self.assertIsInstance(result, list)
        self.assertGreaterEqual(len(result), 1)

        # Every entry carries the identifying keys.
        dimension = result[0]
        self.assertIn("name", dimension)
        self.assertIn("label", dimension)
        self.assertIn("dimension_type", dimension)
        self.assertIn("applies_to", dimension)

    def test_list_dimensions_filtered_by_applies_to(self):
        """Filtering by applies_to never yields more than the unfiltered list."""
        service = self._get_service()

        # Seed an individual-only dimension.
        self.env["spp.demographic.dimension"].create(
            {
                "name": "test_individual_dim",
                "label": "Individual Only",
                "dimension_type": "field",
                "field_path": "name",
                "applies_to": "individuals",
                "active": True,
            }
        )

        result_all = service.list_dimensions()
        result_individuals = service.list_dimensions(applies_to="individuals")

        # The individual-filtered list is a subset of the full list.
        self.assertIsInstance(result_individuals, list)
        self.assertGreaterEqual(len(result_all), len(result_individuals))
# --- spp_api_v2_simulation/tests/test_comparison_api.py ---
# Part of OpenSPP. See LICENSE file for full copyright and licensing details.
"""Tests for simulation comparison API logic."""

from odoo import Command
from odoo.tests import tagged

from .common import SimulationApiTestCase


@tagged("-at_install", "post_install")
class TestComparisonApi(SimulationApiTestCase):
    """Test comparison endpoint logic and schemas."""

    def test_comparison_to_response_conversion(self):
        """An Odoo comparison record converts cleanly to the Pydantic response."""
        comparison = self.env["spp.simulation.comparison"].create(
            {
                "name": "Test Comparison",
                "run_ids": [Command.set([self.run_completed.id, self.run_completed_2.id])],
            }
        )
        comparison.action_compute_comparison()

        from ..routers.comparison import _comparison_to_response

        response = _comparison_to_response(comparison)

        # Identity fields round-trip.
        self.assertEqual(response.id, comparison.id)
        self.assertEqual(response.name, "Test Comparison")

        # Both runs appear, in order.
        self.assertEqual(len(response.runs), 2)
        self.assertEqual(response.runs[0].run_id, self.run_completed.id)
        self.assertEqual(response.runs[1].run_id, self.run_completed_2.id)

        # Overlap data was computed.
        self.assertIsNotNone(response.overlap_data)
        self.assertGreater(len(response.overlap_data), 0)

    def test_comparison_response_schema(self):
        """ComparisonResponse validates a fully populated payload."""
        from ..schemas.comparison import (
            ComparisonResponse,
            ComparisonRunData,
            OverlapData,
        )

        runs = [
            ComparisonRunData(
                run_id=1,
                scenario_name="Scenario 1",
                beneficiary_count=100,
                total_cost=50000.0,
                coverage_rate=10.0,
                equity_score=85.0,
                gini_coefficient=0.15,
                has_disparity=False,
                leakage_rate=5.0,
                undercoverage_rate=10.0,
                budget_utilization=50.0,
                executed_at="2024-01-01T00:00:00Z",
            ),
            ComparisonRunData(
                run_id=2,
                scenario_name="Scenario 2",
                beneficiary_count=90,
                total_cost=45000.0,
                coverage_rate=9.0,
                equity_score=88.0,
                gini_coefficient=0.12,
                has_disparity=False,
                leakage_rate=3.0,
                undercoverage_rate=8.0,
                budget_utilization=45.0,
                executed_at="2024-01-02T00:00:00Z",
            ),
        ]

        overlap = [
            OverlapData(
                run_a_id=1,
                run_a_name="Scenario 1",
                run_b_id=2,
                run_b_name="Scenario 2",
                overlap_count=80,
                union_count=110,
                jaccard_index=0.727,
            )
        ]

        response = ComparisonResponse(
            id=1,
            name="Test Comparison",
            runs=runs,
            overlap_data=overlap,
            staleness_warning=None,
        )

        self.assertEqual(response.id, 1)
        self.assertEqual(response.name, "Test Comparison")
        self.assertEqual(len(response.runs), 2)
        self.assertEqual(len(response.overlap_data), 1)
        self.assertEqual(response.overlap_data[0].overlap_count, 80)
        self.assertAlmostEqual(response.overlap_data[0].jaccard_index, 0.727, places=3)

    def test_comparison_create_request_schema(self):
        """ComparisonCreateRequest supplies a default name when omitted."""
        from ..schemas.comparison import ComparisonCreateRequest

        # Omitted name -> a non-None default is generated.
        request = ComparisonCreateRequest(
            run_ids=[1, 2, 3],
        )

        self.assertEqual(len(request.run_ids), 3)
        self.assertIsNotNone(request.name)

        # Explicit name is preserved.
        custom_request = ComparisonCreateRequest(
            name="Custom Comparison",
            run_ids=[1, 2],
        )

        self.assertEqual(custom_request.name, "Custom Comparison")
        self.assertEqual(len(custom_request.run_ids), 2)

    def test_comparison_run_data_schema(self):
        """ComparisonRunData accepts and retains all per-run metrics."""
        from ..schemas.comparison import ComparisonRunData

        run_data = ComparisonRunData(
            run_id=1,
            scenario_name="Test Scenario",
            beneficiary_count=50,
            total_cost=25000.0,
            coverage_rate=5.0,
            equity_score=90.0,
            gini_coefficient=0.10,
            has_disparity=False,
            leakage_rate=2.0,
            undercoverage_rate=5.0,
            budget_utilization=50.0,
            executed_at="2024-01-01T00:00:00Z",
        )

        self.assertEqual(run_data.run_id, 1)
        self.assertEqual(run_data.scenario_name, "Test Scenario")
        self.assertEqual(run_data.beneficiary_count, 50)
        self.assertEqual(run_data.equity_score, 90.0)

    def test_overlap_data_schema(self):
        """OverlapData accepts and retains pairwise overlap metrics."""
        from ..schemas.comparison import OverlapData

        overlap_data = OverlapData(
            run_a_id=1,
            run_a_name="Scenario A",
            run_b_id=2,
            run_b_name="Scenario B",
            overlap_count=75,
            union_count=125,
            jaccard_index=0.6,
        )

        self.assertEqual(overlap_data.run_a_id, 1)
        self.assertEqual(overlap_data.run_b_id, 2)
        self.assertEqual(overlap_data.overlap_count, 75)
        self.assertEqual(overlap_data.union_count, 125)
        self.assertEqual(overlap_data.jaccard_index, 0.6)

    def test_comparison_with_two_runs(self):
        """The minimum of two runs produces a two-run response."""
        comparison = self.env["spp.simulation.comparison"].create(
            {
                "name": "Two Runs Comparison",
                "run_ids": [Command.set([self.run_completed.id, self.run_completed_2.id])],
            }
        )

        comparison.action_compute_comparison()

        from ..routers.comparison import _comparison_to_response

        response = _comparison_to_response(comparison)

        self.assertEqual(len(response.runs), 2)

    def test_comparison_with_three_runs(self):
        """Three runs yield three runs and all three pairwise overlaps."""
        from datetime import datetime

        # Third completed run on the same scenario.
        run_3 = self.env["spp.simulation.run"].create(
            {
                "scenario_id": self.scenario_ready.id,
                "state": "completed",
                "beneficiary_count": 6,
                "total_cost": 3000.0,
                "coverage_rate": 60.0,
                "equity_score": 82.0,
                "gini_coefficient": 0.18,
                "total_registry_count": 10,
                "budget_utilization": 60.0,
                "has_disparity": False,
                "leakage_rate": 0.0,
                "undercoverage_rate": 0.0,
                "executed_at": datetime(2024, 1, 4, 0, 0, 0),
                "execution_duration_seconds": 1.8,
            }
        )

        comparison = self.env["spp.simulation.comparison"].create(
            {
                "name": "Three Runs Comparison",
                "run_ids": [Command.set([self.run_completed.id, self.run_completed_2.id, run_3.id])],
            }
        )

        comparison.action_compute_comparison()

        from ..routers.comparison import _comparison_to_response

        response = _comparison_to_response(comparison)

        self.assertEqual(len(response.runs), 3)

        # Pairs A-B, A-C, B-C at minimum.
        self.assertGreaterEqual(len(response.overlap_data), 3)

    def test_comparison_staleness_warning(self):
        """The staleness_warning field is either a string or None."""
        from datetime import datetime

        # Make the scenario newer than the runs it produced.
        self.scenario_ready.write_date = datetime.now()

        comparison = self.env["spp.simulation.comparison"].create(
            {
                "name": "Stale Comparison",
                "run_ids": [Command.set([self.run_completed.id, self.run_completed_2.id])],
            }
        )

        comparison.action_compute_comparison()

        from ..routers.comparison import _comparison_to_response

        response = _comparison_to_response(comparison)

        # staleness_warning is computed from scenario write_date vs run
        # executed_at; we cannot deterministically force it here, so we only
        # assert the response tolerates both the set and unset cases.
        self.assertIsInstance(response.staleness_warning, (str, type(None)))

    def test_comparison_empty_overlap(self):
        """An empty overlap JSON converts to an empty list, never None."""
        comparison = self.env["spp.simulation.comparison"].create(
            {
                "name": "Empty Overlap Comparison",
                "run_ids": [Command.set([self.run_completed.id, self.run_completed_2.id])],
                "overlap_count_json": {},
            }
        )

        from ..routers.comparison import _comparison_to_response

        response = _comparison_to_response(comparison)

        self.assertEqual(len(response.overlap_data), 0)

    def test_comparison_with_failed_run(self):
        """A failed run is handled gracefully and still listed in the response."""
        comparison = self.env["spp.simulation.comparison"].create(
            {
                "name": "Comparison With Failed Run",
                "run_ids": [Command.set([self.run_completed.id, self.run_failed.id])],
            }
        )

        comparison.action_compute_comparison()

        from ..routers.comparison import _comparison_to_response

        response = _comparison_to_response(comparison)

        self.assertEqual(len(response.runs), 2)

    def test_comparison_jaccard_index_calculation(self):
        """Pre-seeded overlap JSON surfaces verbatim in overlap_data."""
        comparison = self.env["spp.simulation.comparison"].create(
            {
                "name": "Jaccard Test",
                "run_ids": [Command.set([self.run_completed.id, self.run_completed_2.id])],
            }
        )

        # Inject a known overlap record keyed by the run-id pair.
        comparison.overlap_count_json = {
            f"{self.run_completed.id}_{self.run_completed_2.id}": {
                "run_a_id": self.run_completed.id,
                "run_a_name": "Ready Test Scenario",
                "run_b_id": self.run_completed_2.id,
                "run_b_name": "Ready Test Scenario",
                "overlap_count": 50,
                "union_count": 100,
                "jaccard_index": 0.5,
            }
        }

        from ..routers.comparison import _comparison_to_response

        response = _comparison_to_response(comparison)

        self.assertEqual(len(response.overlap_data), 1)
        self.assertEqual(response.overlap_data[0].overlap_count, 50)
        self.assertEqual(response.overlap_data[0].union_count, 100)
        self.assertEqual(response.overlap_data[0].jaccard_index, 0.5)

    def test_comparison_different_scenarios(self):
        """Runs from distinct scenarios compare and retain their own names."""
        from datetime import datetime

        scenario_2 = self.env["spp.simulation.scenario"].create(
            {
                "name": "Second Test Scenario",
                "target_type": "individual",
                "targeting_expression": "true",
                "state": "ready",
            }
        )

        run_scenario_2 = self.env["spp.simulation.run"].create(
            {
                "scenario_id": scenario_2.id,
                "state": "completed",
                "beneficiary_count": 8,
                "total_cost": 4000.0,
                "coverage_rate": 80.0,
                "equity_score": 88.0,
                "gini_coefficient": 0.12,
                "total_registry_count": 10,
                "budget_utilization": 80.0,
                "has_disparity": False,
                "leakage_rate": 0.0,
                "undercoverage_rate": 0.0,
                "executed_at": datetime(2024, 1, 5, 0, 0, 0),
                "execution_duration_seconds": 2.0,
            }
        )

        comparison = self.env["spp.simulation.comparison"].create(
            {
                "name": "Cross-Scenario Comparison",
                "run_ids": [Command.set([self.run_completed.id, run_scenario_2.id])],
            }
        )

        comparison.action_compute_comparison()

        from ..routers.comparison import _comparison_to_response

        response = _comparison_to_response(comparison)

        self.assertEqual(len(response.runs), 2)

        # Two distinct scenario names come through.
        scenario_names = {run.scenario_name for run in response.runs}
        self.assertEqual(len(scenario_names), 2)
        self.assertIn("Ready Test Scenario", scenario_names)
        self.assertIn("Second Test Scenario", scenario_names)

    def test_comparison_run_data_completeness(self):
        """Every per-run metric stored in comparison_json surfaces unchanged."""
        comparison = self.env["spp.simulation.comparison"].create(
            {
                "name": "Complete Metrics Comparison",
                "run_ids": [Command.set([self.run_completed.id, self.run_completed_2.id])],
            }
        )

        comparison.comparison_json = {
            "runs": [
                {
                    "run_id": self.run_completed.id,
                    "scenario_name": "Ready Test Scenario",
                    "beneficiary_count": 5,
                    "total_cost": 2500.0,
                    "coverage_rate": 50.0,
                    "equity_score": 85.0,
                    "gini_coefficient": 0.15,
                    "has_disparity": False,
                    "leakage_rate": 0.0,
                    "undercoverage_rate": 0.0,
                    "budget_utilization": 50.0,
                    "executed_at": "2024-01-01T00:00:00Z",
                }
            ]
        }

        from ..routers.comparison import _comparison_to_response

        response = _comparison_to_response(comparison)

        run_data = response.runs[0]
        self.assertEqual(run_data.beneficiary_count, 5)
        self.assertEqual(run_data.total_cost, 2500.0)
        self.assertEqual(run_data.coverage_rate, 50.0)
        self.assertEqual(run_data.equity_score, 85.0)
        self.assertEqual(run_data.gini_coefficient, 0.15)
        self.assertFalse(run_data.has_disparity)
        self.assertEqual(run_data.leakage_rate, 0.0)
        self.assertEqual(run_data.undercoverage_rate, 0.0)
        self.assertEqual(run_data.budget_utilization, 50.0)
        self.assertEqual(run_data.executed_at, "2024-01-01T00:00:00Z")
# --- spp_api_v2_simulation/tests/test_convert_to_program_api.py ---
# Part of OpenSPP. See LICENSE file for full copyright and licensing details.
"""Tests for the convert-to-program API endpoint."""

from odoo.tests import tagged

from .common import SimulationApiTestCase


@tagged("-at_install", "post_install")
class TestConvertToProgramApi(SimulationApiTestCase):
    """Test convert-to-program endpoint logic and schemas."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()

        # Client with only simulation:read scope.
        cls.read_only_client = cls.env["spp.api.client"].create(
            {
                "name": "Read Only Client",
                "partner_id": cls.api_partner.id,
                "organization_type_id": cls.org_type.id,
            }
        )
        cls.env["spp.api.client.scope"].create(
            {
                "client_id": cls.read_only_client.id,
                "resource": "simulation",
                "action": "read",
            }
        )

        # Client with only simulation:convert (not "all"): verifies that
        # convert alone is insufficient — write is also required.
        cls.convert_only_client = cls.env["spp.api.client"].create(
            {
                "name": "Convert Only Client",
                "partner_id": cls.api_partner.id,
                "organization_type_id": cls.org_type.id,
            }
        )
        cls.env["spp.api.client.scope"].create(
            {
                "client_id": cls.convert_only_client.id,
                "resource": "simulation",
                "action": "convert",
            }
        )

    def test_convert_to_program_request_schema(self):
        """An empty ConvertToProgramRequest exposes the documented defaults."""
        from ..schemas.scenario import ConvertToProgramRequest

        request = ConvertToProgramRequest()
        self.assertIsNone(request.name)
        self.assertIsNone(request.currency_code)
        self.assertFalse(request.is_one_time_distribution)
        self.assertFalse(request.import_beneficiaries)
        self.assertIsNone(request.rrule_type)

    def test_convert_to_program_request_with_overrides(self):
        """Explicit fields on ConvertToProgramRequest are all retained."""
        from ..schemas.scenario import ConvertToProgramRequest

        request = ConvertToProgramRequest(
            name="Custom Program",
            currency_code="USD",
            is_one_time_distribution=True,
            import_beneficiaries=True,
            rrule_type="weekly",
            mon=True,
            wed=True,
            fri=True,
        )
        self.assertEqual(request.name, "Custom Program")
        self.assertEqual(request.currency_code, "USD")
        self.assertTrue(request.is_one_time_distribution)
        self.assertTrue(request.import_beneficiaries)
        self.assertEqual(request.rrule_type, "weekly")
        self.assertTrue(request.mon)
        self.assertTrue(request.wed)
        self.assertTrue(request.fri)
        self.assertFalse(request.tue)

    def test_convert_to_program_response_schema(self):
        """ConvertToProgramResponse carries program info and warnings."""
        from ..schemas.scenario import ConvertToProgramResponse

        response = ConvertToProgramResponse(
            program_id=1,
            program_name="Test Program",
            scenario_id=5,
            warnings=["Some warning"],
        )
        self.assertEqual(response.program_id, 1)
        self.assertEqual(response.program_name, "Test Program")
        self.assertEqual(response.scenario_id, 5)
        self.assertEqual(len(response.warnings), 1)

    def test_convert_to_program_response_empty_warnings(self):
        """warnings defaults to an empty list when omitted."""
        from ..schemas.scenario import ConvertToProgramResponse

        response = ConvertToProgramResponse(
            program_id=1,
            program_name="Test Program",
            scenario_id=5,
        )
        self.assertEqual(len(response.warnings), 0)

    def test_scope_model_has_convert_action(self):
        """The convert action is recognized on the scope model."""
        self.assertTrue(
            self.convert_only_client.has_scope("simulation", "convert"),
            "Convert client should have simulation:convert scope",
        )

    def test_all_scope_includes_convert(self):
        """simulation:all implies the convert permission."""
        self.assertTrue(
            self.api_client.has_scope("simulation", "convert"),
            "Client with simulation:all should have convert scope",
        )

    def test_read_only_client_lacks_convert(self):
        """simulation:read alone does not grant convert."""
        self.assertFalse(
            self.read_only_client.has_scope("simulation", "convert"),
            "Read-only client should not have simulation:convert scope",
        )

    def test_convert_only_client_lacks_write(self):
        """simulation:convert alone does not imply write (both are needed)."""
        self.assertTrue(
            self.convert_only_client.has_scope("simulation", "convert"),
            "Convert client should have simulation:convert scope",
        )
        self.assertFalse(
            self.convert_only_client.has_scope("simulation", "write"),
            "Convert-only client should not have simulation:write scope",
        )

    def test_convert_options_building(self):
        """_build_convert_options maps request fields into the options dict."""
        from ..schemas.scenario import ConvertToProgramRequest

        request = ConvertToProgramRequest(
            name="Override Name",
            currency_code="EUR",
            rrule_type="monthly",
            cycle_duration=2,
            day=15,
            month_by="date",
        )

        # Build the options dict exactly as the endpoint helper does.
        from ..routers.scenario import _build_convert_options

        options = _build_convert_options(request)

        self.assertEqual(options["name"], "Override Name")
        self.assertEqual(options["currency_code"], "EUR")
        self.assertEqual(options["rrule_type"], "monthly")
        self.assertEqual(options["cycle_duration"], 2)
        self.assertEqual(options["day"], 15)
        self.assertEqual(options["month_by"], "date")

    def test_convert_options_weekly_flags(self):
        """Only the weekday flags that were set appear in the options."""
        from ..routers.scenario import _build_convert_options
        from ..schemas.scenario import ConvertToProgramRequest

        request = ConvertToProgramRequest(
            rrule_type="weekly",
            mon=True,
            wed=True,
            fri=True,
        )
        options = _build_convert_options(request)

        self.assertTrue(options.get("mon"))
        self.assertTrue(options.get("wed"))
        self.assertTrue(options.get("fri"))
        self.assertNotIn("tue", options)
        self.assertNotIn("thu", options)

    def test_convert_options_defaults(self):
        """A default request produces an empty options dict."""
        from ..routers.scenario import _build_convert_options
        from ..schemas.scenario import ConvertToProgramRequest

        request = ConvertToProgramRequest()
        options = _build_convert_options(request)

        self.assertEqual(options, {})

    def test_convert_endpoint_integration(self):
        """End-to-end: a ready scenario converts into a program via the service."""
        scenario = self.env["spp.simulation.scenario"].create(
            {
                "name": "API Integration Test Scenario",
                "target_type": "group",
                "targeting_expression": "true",
                "state": "ready",
            }
        )
        self.env["spp.simulation.entitlement.rule"].create(
            {
                "scenario_id": scenario.id,
                "amount_mode": "fixed",
                "amount": 500.0,
            }
        )

        service = self.env["spp.simulation.service"]
        result = service.convert_to_program(scenario, {})

        self.assertTrue(result["program"].exists())
        self.assertEqual(result["program"].name, "API Integration Test Scenario")
        self.assertEqual(scenario.converted_program_id, result["program"])

    def test_convert_non_ready_scenario_error(self):
        """Converting a draft scenario raises UserError."""
        from odoo.exceptions import UserError

        service = self.env["spp.simulation.service"]
        with self.assertRaises(UserError):
            service.convert_to_program(self.scenario_draft, {})

    def test_convert_already_converted_error(self):
        """Converting a scenario twice raises UserError the second time."""
        from odoo.exceptions import UserError

        scenario = self.env["spp.simulation.scenario"].create(
            {
                "name": "Already Converted Test",
                "target_type": "group",
                "targeting_expression": "true",
                "state": "ready",
            }
        )
        self.env["spp.simulation.entitlement.rule"].create(
            {
                "scenario_id": scenario.id,
                "amount_mode": "fixed",
                "amount": 100.0,
            }
        )

        service = self.env["spp.simulation.service"]
        service.convert_to_program(scenario, {})

        with self.assertRaises(UserError):
            service.convert_to_program(scenario, {})
# --- spp_api_v2_simulation/tests/test_run_api.py ---
# Part of OpenSPP. See LICENSE file for full copyright and licensing details.
"""Tests for simulation run API logic."""

from odoo.tests import tagged

from .common import SimulationApiTestCase


@tagged("-at_install", "post_install")
class TestRunApi(SimulationApiTestCase):
    """Test run endpoint logic and schemas."""

    def test_run_to_summary_conversion(self):
        """A completed run converts to a RunSummary with all headline fields."""
        from ..routers.run import _run_to_summary

        summary = _run_to_summary(self.run_completed)

        # Identity fields.
        self.assertEqual(summary.id, self.run_completed.id)
        self.assertEqual(summary.scenario_id, self.scenario_ready.id)
        self.assertEqual(summary.scenario_name, "Ready Test Scenario")
        self.assertEqual(summary.state, "completed")

        # Headline metrics.
        self.assertEqual(summary.beneficiary_count, 5)
        self.assertEqual(summary.total_cost, 2500.0)
        self.assertEqual(summary.coverage_rate, 50.0)
        self.assertEqual(summary.equity_score, 85.0)
        self.assertEqual(summary.gini_coefficient, 0.15)

        # Execution metadata.
        self.assertEqual(summary.executed_at, "2024-01-01T00:00:00")
        self.assertEqual(summary.execution_duration_seconds, 1.5)

    def test_run_to_response_conversion_without_details(self):
        """Without details, the response carries metrics but no detail blocks."""
        from ..routers.run import _run_to_response

        response = _run_to_response(self.run_completed, include_details=False)

        # Identity fields.
        self.assertEqual(response.id, self.run_completed.id)
        self.assertEqual(response.scenario_id, self.scenario_ready.id)
        self.assertEqual(response.scenario_name, "Ready Test Scenario")
        self.assertEqual(response.state, "completed")

        # Headline metrics.
        self.assertEqual(response.beneficiary_count, 5)
        self.assertEqual(response.total_cost, 2500.0)
        self.assertEqual(response.coverage_rate, 50.0)
        self.assertEqual(response.equity_score, 85.0)
        self.assertEqual(response.gini_coefficient, 0.15)
        self.assertEqual(response.total_registry_count, 10)
        self.assertEqual(response.budget_utilization, 50.0)
        self.assertFalse(response.has_disparity)
        self.assertEqual(response.leakage_rate, 0.0)
        self.assertEqual(response.undercoverage_rate, 0.0)

        # Execution metadata.
        self.assertEqual(response.executed_at, "2024-01-01T00:00:00")
        self.assertEqual(response.execution_duration_seconds, 1.5)

        # Detail sections must be absent.
        self.assertIsNone(response.distribution_data)
        self.assertIsNone(response.fairness_data)
        self.assertIsNone(response.geographic_data)
        self.assertIsNone(response.metric_results)

    def test_run_to_response_conversion_with_details(self):
        """With details requested, every JSON detail block is materialized."""
        # Seed distribution data on the run.
        self.run_completed.distribution_json = {
            "count": 5,
            "total": 2500.0,
            "minimum": 500.0,
            "maximum": 500.0,
            "mean": 500.0,
            "median": 500.0,
            "standard_deviation": 0.0,
            "gini_coefficient": 0.0,
            "percentiles": {"p25": 500.0, "p50": 500.0, "p75": 500.0},
        }

        # Seed fairness data.
        self.run_completed.fairness_json = {
            "equity_score": 85.0,
            "has_disparity": False,
            "demographic_breakdown": {"age_group": {"youth": 50, "adult": 50}},
        }

        # Seed geographic data.
        self.run_completed.geographic_json = [
            {
                "area_id": self.area.id,
                "area_name": "Test Area",
                "beneficiary_count": 5,
                "total_cost": 2500.0,
            }
        ]

        # Seed metric results.
        self.run_completed.metric_results_json = {
            "total_beneficiaries": {
                "type": "count",
                "value": 5,
            },
            "total_cost": {
                "type": "sum",
                "value": 2500.0,
            },
        }

        # Seed the scenario snapshot taken at execution time.
        self.run_completed.scenario_snapshot_json = {
            "name": "Ready Test Scenario",
            "target_type": "group",
            "targeting_expression": "true",
            "budget_amount": 5000.0,
            "budget_strategy": "none",
            "entitlement_rules": [
                {
                    "amount_mode": "fixed",
                    "amount": 500.0,
                }
            ],
        }

        from ..routers.run import _run_to_response

        response = _run_to_response(self.run_completed, include_details=True)

        # All detail sections are present.
        self.assertIsNotNone(response.distribution_data)
        self.assertIsNotNone(response.fairness_data)
        self.assertIsNotNone(response.geographic_data)
        self.assertIsNotNone(response.metric_results)
        self.assertIsNotNone(response.scenario_snapshot)

        # Distribution data round-trips.
        self.assertEqual(response.distribution_data.count, 5)
        self.assertEqual(response.distribution_data.total, 2500.0)
        self.assertEqual(response.distribution_data.mean, 500.0)

        # Fairness data round-trips.
        self.assertEqual(response.fairness_data.equity_score, 85.0)
        self.assertFalse(response.fairness_data.has_disparity)

        # Geographic data round-trips.
        self.assertEqual(len(response.geographic_data.areas), 1)

        # Metric results round-trip.
        self.assertIn("total_beneficiaries", response.metric_results)
        self.assertEqual(response.metric_results["total_beneficiaries"].type, "count")
        self.assertEqual(response.metric_results["total_beneficiaries"].value, 5)

        # Scenario snapshot round-trips.
        self.assertEqual(response.scenario_snapshot.name, "Ready Test Scenario")
        self.assertEqual(response.scenario_snapshot.target_type, "group")
        self.assertEqual(response.scenario_snapshot.budget_amount, 5000.0)

    def test_run_response_schema_validation(self):
        """Test RunResponse schema validation."""
        from ..schemas.run import RunResponse

        # Minimal response: required metrics only; details default to None.
        response = RunResponse(
            id=1,
            scenario_id=1,
            scenario_name="Test Scenario",
            state="completed",
            beneficiary_count=10,
            total_registry_count=100,
            coverage_rate=10.0,
            total_cost=5000.0,
            budget_utilization=50.0,
            gini_coefficient=0.2,
            equity_score=80.0,
            has_disparity=False,
            leakage_rate=0.0,
            undercoverage_rate=0.0,
        )

        self.assertEqual(response.id, 1)
        self.assertEqual(response.state, "completed")
        self.assertEqual(response.beneficiary_count, 10)
        self.assertIsNone(response.distribution_data)

    def test_run_summary_schema(self):
        """Test RunSummary schema."""
        from ..schemas.run import RunSummary

        summary = RunSummary(
            id=1,
            scenario_id=1,
            scenario_name="Test Scenario",
            state="completed",
            beneficiary_count=10,
            total_cost=5000.0,
            coverage_rate=10.0,
            equity_score=85.0,
            gini_coefficient=0.15,
            executed_at="2024-01-01T00:00:00Z",
            execution_duration_seconds=2.5,
        )

        self.assertEqual(summary.id, 1)
        self.assertEqual(summary.beneficiary_count, 10)
        self.assertEqual(summary.executed_at, "2024-01-01T00:00:00Z")

    def test_run_list_response_schema(self):
        """Test RunListResponse schema."""
        from ..schemas.run import RunListResponse, RunSummary

        runs = [
            RunSummary(
                id=1,
                scenario_id=1,
                scenario_name="Scenario 1",
                state="completed",
                beneficiary_count=10,
                total_cost=5000.0,
                coverage_rate=10.0,
                equity_score=85.0,
                gini_coefficient=0.15,
                executed_at="2024-01-01T00:00:00Z",
                execution_duration_seconds=1.5,
            ),
            RunSummary(
                id=2,
                scenario_id=1,
                scenario_name="Scenario 1",
                state="completed",
                beneficiary_count=8,
                total_cost=4000.0,
                coverage_rate=8.0,
                equity_score=90.0,
                gini_coefficient=0.10,
                executed_at="2024-01-02T00:00:00Z",
                execution_duration_seconds=1.2,
            ),
        ]

        response = RunListResponse(
            runs=runs,
            total_count=2,
        )
        # NOTE(review): the original method continues past this chunk; its
        # closing assertions lie beyond the visible source and are not
        # reproduced here — confirm against the full patch.
self.assertEqual(len(response.runs), 2) + self.assertEqual(response.total_count, 2) + self.assertEqual(response.runs[0].beneficiary_count, 10) + self.assertEqual(response.runs[1].beneficiary_count, 8) + + def test_run_simulation_response_schema(self): + """Test RunSimulationResponse schema.""" + from ..schemas.run import RunSimulationResponse + + # Success response + success_response = RunSimulationResponse( + run_id=1, + scenario_id=1, + state="completed", + message="Simulation completed with 10 beneficiaries", + ) + + self.assertEqual(success_response.run_id, 1) + self.assertEqual(success_response.state, "completed") + self.assertIn("completed", success_response.message) + + # Failure response + failure_response = RunSimulationResponse( + run_id=2, + scenario_id=1, + state="failed", + message="Simulation failed: Invalid targeting expression", + ) + + self.assertEqual(failure_response.state, "failed") + self.assertIn("failed", failure_response.message) + + def test_distribution_data_schema(self): + """Test DistributionData schema.""" + from ..schemas.run import DistributionData + + dist_data = DistributionData( + count=100, + total=50000.0, + minimum=100.0, + maximum=1000.0, + mean=500.0, + median=450.0, + standard_deviation=150.0, + gini_coefficient=0.25, + percentiles={"p25": 300.0, "p50": 450.0, "p75": 600.0}, + ) + + self.assertEqual(dist_data.count, 100) + self.assertEqual(dist_data.mean, 500.0) + self.assertEqual(dist_data.gini_coefficient, 0.25) + self.assertIn("p50", dist_data.percentiles) + self.assertEqual(dist_data.percentiles["p50"], 450.0) + + def test_fairness_data_schema(self): + """Test FairnessData schema.""" + from ..schemas.run import FairnessData + + fairness_data = FairnessData( + equity_score=85.0, + has_disparity=True, + demographic_breakdown={ + "gender": {"male": 45, "female": 55}, + "age_group": {"youth": 30, "adult": 50, "senior": 20}, + }, + ) + + self.assertEqual(fairness_data.equity_score, 85.0) + 
self.assertTrue(fairness_data.has_disparity) + self.assertIn("gender", fairness_data.demographic_breakdown) + self.assertIn("age_group", fairness_data.demographic_breakdown) + + def test_targeting_efficiency_data_schema(self): + """Test TargetingEfficiencyData schema.""" + from ..schemas.run import TargetingEfficiencyData + + eff_data = TargetingEfficiencyData( + true_positives=80, + false_positives=20, + false_negatives=10, + total_simulated=100, + total_ideal=90, + leakage_rate=20.0, + undercoverage_rate=11.1, + ) + + self.assertEqual(eff_data.true_positives, 80) + self.assertEqual(eff_data.false_positives, 20) + self.assertEqual(eff_data.false_negatives, 10) + self.assertEqual(eff_data.leakage_rate, 20.0) + self.assertEqual(eff_data.undercoverage_rate, 11.1) + + def test_geographic_data_schema(self): + """Test GeographicData schema.""" + from ..schemas.run import GeographicData + + geo_data = GeographicData( + areas=[ + { + "area_id": 1, + "area_name": "Area 1", + "beneficiary_count": 50, + "total_cost": 25000.0, + }, + { + "area_id": 2, + "area_name": "Area 2", + "beneficiary_count": 30, + "total_cost": 15000.0, + }, + ] + ) + + self.assertEqual(len(geo_data.areas), 2) + self.assertEqual(geo_data.areas[0]["area_name"], "Area 1") + self.assertEqual(geo_data.areas[1]["beneficiary_count"], 30) + + def test_metric_result_schema(self): + """Test MetricResult schema.""" + from ..schemas.run import MetricResult + + # Count metric + count_metric = MetricResult(type="count", value=100) + self.assertEqual(count_metric.type, "count") + self.assertEqual(count_metric.value, 100) + + # Sum metric + sum_metric = MetricResult(type="sum", value=50000.0) + self.assertEqual(sum_metric.type, "sum") + self.assertEqual(sum_metric.value, 50000.0) + + # Average metric + avg_metric = MetricResult(type="average", value=500.0) + self.assertEqual(avg_metric.type, "average") + self.assertEqual(avg_metric.value, 500.0) + + def test_scenario_snapshot_schema(self): + """Test ScenarioSnapshot 
schema.""" + from ..schemas.run import ScenarioSnapshot + + snapshot = ScenarioSnapshot( + name="Test Scenario", + target_type="group", + targeting_expression="true", + budget_amount=10000.0, + budget_strategy="cap_total", + ideal_population_expression="r.is_vulnerable == true", + entitlement_rules=[ + { + "amount_mode": "fixed", + "amount": 500.0, + } + ], + ) + + self.assertEqual(snapshot.name, "Test Scenario") + self.assertEqual(snapshot.target_type, "group") + self.assertEqual(snapshot.budget_amount, 10000.0) + self.assertEqual(len(snapshot.entitlement_rules), 1) + self.assertEqual(snapshot.entitlement_rules[0]["amount"], 500.0) + + def test_failed_run_conversion(self): + """Test conversion of failed run.""" + from ..routers.run import _run_to_response + + response = _run_to_response(self.run_failed, include_details=False) + + self.assertEqual(response.state, "failed") + self.assertEqual(response.beneficiary_count, 0) + self.assertEqual(response.total_cost, 0.0) + self.assertEqual(response.error_message, "Test error message") + + def test_run_with_targeting_efficiency(self): + """Test run with targeting efficiency data.""" + # Add targeting efficiency data (success case) + self.run_completed.targeting_efficiency_json = { + "true_positives": 4, + "false_positives": 1, + "false_negatives": 2, + "total_simulated": 5, + "total_ideal": 6, + "leakage_rate": 20.0, + "undercoverage_rate": 33.3, + } + + from ..routers.run import _run_to_response + + response = _run_to_response(self.run_completed, include_details=True) + + self.assertIsNotNone(response.targeting_efficiency_data) + self.assertEqual(response.targeting_efficiency_data.true_positives, 4) + self.assertEqual(response.targeting_efficiency_data.false_positives, 1) + self.assertEqual(response.targeting_efficiency_data.false_negatives, 2) + self.assertEqual(response.targeting_efficiency_data.leakage_rate, 20.0) + + def test_run_with_targeting_efficiency_error(self): + """Test run with targeting efficiency error is 
excluded.""" + # Add targeting efficiency data with error + run_with_error = self.env["spp.simulation.run"].create( + { + "scenario_id": self.scenario_ready.id, + "state": "completed", + "beneficiary_count": 5, + "total_cost": 2500.0, + "targeting_efficiency_json": { + "error": "Ideal population expression not defined", + }, + } + ) + + from ..routers.run import _run_to_response + + response = _run_to_response(run_with_error, include_details=True) + + # Should not include targeting efficiency data when there's an error + self.assertIsNone(response.targeting_efficiency_data) diff --git a/spp_api_v2_simulation/tests/test_scenario_api.py b/spp_api_v2_simulation/tests/test_scenario_api.py new file mode 100644 index 00000000..8a9e66a6 --- /dev/null +++ b/spp_api_v2_simulation/tests/test_scenario_api.py @@ -0,0 +1,359 @@ +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. +"""Tests for simulation scenario API logic.""" + +from odoo.tests import tagged + +from .common import SimulationApiTestCase + + +@tagged("-at_install", "post_install") +class TestScenarioApi(SimulationApiTestCase): + """Test scenario endpoint logic and schemas.""" + + def test_scenario_to_response_conversion(self): + """Test conversion of Odoo scenario to Pydantic response.""" + from ..routers.scenario import _scenario_to_response + + response = _scenario_to_response(self.scenario_draft) + + # Verify basic fields + self.assertEqual(response.id, self.scenario_draft.id) + self.assertEqual(response.name, "Draft Test Scenario") + self.assertEqual(response.target_type, "group") + self.assertEqual(response.state, "draft") + self.assertEqual(response.targeting_expression, "true") + + # Verify entitlement rules + self.assertEqual(len(response.entitlement_rules), 1) + self.assertEqual(response.entitlement_rules[0].amount_mode, "fixed") + self.assertEqual(response.entitlement_rules[0].amount, 1000.0) + + # Verify computed fields + self.assertIsNotNone(response.targeting_preview_count) 
+ self.assertIsNotNone(response.run_count) + + def test_scenario_response_schema_validation(self): + """Test ScenarioResponse schema validation.""" + from ..schemas.scenario import EntitlementRuleResponse, ScenarioResponse + + # Test with minimal fields + response = ScenarioResponse( + id=1, + name="Test Scenario", + target_type="group", + state="draft", + targeting_expression="true", + budget_amount=0.0, + budget_strategy="none", + targeting_preview_count=0, + run_count=0, + latest_beneficiary_count=0, + latest_equity_score=0.0, + entitlement_rules=[], + ) + + self.assertEqual(response.id, 1) + self.assertEqual(response.name, "Test Scenario") + self.assertEqual(len(response.entitlement_rules), 0) + + # Test with entitlement rules + response_with_rules = ScenarioResponse( + id=2, + name="Test Scenario With Rules", + target_type="individual", + state="ready", + targeting_expression="r.age > 18", + budget_amount=10000.0, + budget_strategy="cap_total", + targeting_preview_count=100, + run_count=5, + latest_beneficiary_count=95, + latest_equity_score=87.5, + entitlement_rules=[ + EntitlementRuleResponse( + id=1, + amount_mode="fixed", + amount=500.0, + multiplier_field=None, + max_multiplier=None, + amount_cel_expression=None, + condition_cel_expression=None, + ) + ], + ) + + self.assertEqual(len(response_with_rules.entitlement_rules), 1) + self.assertEqual(response_with_rules.entitlement_rules[0].amount, 500.0) + + def test_scenario_create_request_schema(self): + """Test ScenarioCreateRequest schema.""" + from ..schemas.scenario import EntitlementRuleSchema, ScenarioCreateRequest + + # Test minimal request + request = ScenarioCreateRequest( + name="New Scenario", + target_type="group", + ) + + self.assertEqual(request.name, "New Scenario") + self.assertEqual(request.target_type, "group") + self.assertEqual(len(request.entitlement_rules), 0) + self.assertIsNone(request.description) + + # Test full request + full_request = ScenarioCreateRequest( + name="Full Scenario", 
+ description="A complete scenario", + category="age", + target_type="individual", + targeting_expression="r.age >= 60", + targeting_expression_explanation="Senior citizens", + ideal_population_expression="r.is_vulnerable == true", + budget_amount=50000.0, + budget_strategy="proportional_reduction", + entitlement_rules=[ + EntitlementRuleSchema( + amount_mode="fixed", + amount=1000.0, + multiplier_field=None, + max_multiplier=None, + amount_cel_expression=None, + condition_cel_expression=None, + ) + ], + program_id=1, + ) + + self.assertEqual(full_request.name, "Full Scenario") + self.assertEqual(full_request.description, "A complete scenario") + self.assertEqual(full_request.budget_amount, 50000.0) + self.assertEqual(len(full_request.entitlement_rules), 1) + self.assertEqual(full_request.program_id, 1) + + def test_scenario_update_request_schema(self): + """Test ScenarioUpdateRequest schema.""" + from ..schemas.scenario import ScenarioUpdateRequest + + # All fields are optional + request = ScenarioUpdateRequest() + self.assertIsNone(request.name) + self.assertIsNone(request.description) + + # Test partial update + partial_update = ScenarioUpdateRequest( + name="Updated Name", + budget_amount=20000.0, + ) + + self.assertEqual(partial_update.name, "Updated Name") + self.assertEqual(partial_update.budget_amount, 20000.0) + self.assertIsNone(partial_update.description) + + def test_scenario_list_response_schema(self): + """Test ScenarioListResponse schema.""" + from ..schemas.scenario import ScenarioListResponse, ScenarioResponse + + scenarios = [ + ScenarioResponse( + id=1, + name="Scenario 1", + target_type="group", + state="draft", + targeting_expression="true", + budget_amount=0.0, + budget_strategy="none", + targeting_preview_count=0, + run_count=0, + latest_beneficiary_count=0, + latest_equity_score=0.0, + entitlement_rules=[], + ), + ScenarioResponse( + id=2, + name="Scenario 2", + target_type="individual", + state="ready", + targeting_expression="true", + 
budget_amount=10000.0, + budget_strategy="cap_total", + targeting_preview_count=50, + run_count=2, + latest_beneficiary_count=45, + latest_equity_score=88.0, + entitlement_rules=[], + ), + ] + + response = ScenarioListResponse( + scenarios=scenarios, + total_count=2, + ) + + self.assertEqual(len(response.scenarios), 2) + self.assertEqual(response.total_count, 2) + self.assertEqual(response.scenarios[0].name, "Scenario 1") + self.assertEqual(response.scenarios[1].name, "Scenario 2") + + def test_entitlement_rule_schema(self): + """Test EntitlementRuleSchema validation.""" + from ..schemas.scenario import EntitlementRuleSchema + + # Test fixed amount mode + rule_fixed = EntitlementRuleSchema( + amount_mode="fixed", + amount=500.0, + ) + self.assertEqual(rule_fixed.amount_mode, "fixed") + self.assertEqual(rule_fixed.amount, 500.0) + self.assertIsNone(rule_fixed.multiplier_field) + + # Test multiplier mode + rule_multiplier = EntitlementRuleSchema( + amount_mode="multiplier", + amount=100.0, + multiplier_field="household_size", + max_multiplier=10, + ) + self.assertEqual(rule_multiplier.amount_mode, "multiplier") + self.assertEqual(rule_multiplier.multiplier_field, "household_size") + self.assertEqual(rule_multiplier.max_multiplier, 10) + + # Test CEL mode + rule_cel = EntitlementRuleSchema( + amount_mode="cel", + amount=0.0, + amount_cel_expression="r.income * 0.1", + condition_cel_expression="r.income < 10000", + ) + self.assertEqual(rule_cel.amount_mode, "cel") + self.assertEqual(rule_cel.amount_cel_expression, "r.income * 0.1") + self.assertEqual(rule_cel.condition_cel_expression, "r.income < 10000") + + def test_api_client_has_simulation_all_scope(self): + """Test API client with simulation:all scope.""" + self.assertTrue(self.api_client.has_scope("simulation", "all")) + + def test_scenario_with_multiple_rules(self): + """Test scenario response with multiple entitlement rules.""" + # Create additional rules + self.env["spp.simulation.entitlement.rule"].create( + { 
+ "scenario_id": self.scenario_draft.id, + "amount_mode": "multiplier", + "amount": 100.0, + "multiplier_field": "household_size", + "max_multiplier": 5, + } + ) + + from ..routers.scenario import _scenario_to_response + + response = _scenario_to_response(self.scenario_draft) + + # Should have 2 rules now + self.assertEqual(len(response.entitlement_rules), 2) + + # Verify first rule (original) + self.assertEqual(response.entitlement_rules[0].amount_mode, "fixed") + self.assertEqual(response.entitlement_rules[0].amount, 1000.0) + + # Verify second rule (new) + self.assertEqual(response.entitlement_rules[1].amount_mode, "multiplier") + self.assertEqual(response.entitlement_rules[1].amount, 100.0) + self.assertEqual(response.entitlement_rules[1].multiplier_field, "household_size") + self.assertEqual(response.entitlement_rules[1].max_multiplier, 5) + + def test_scenario_with_program_reference(self): + """Test scenario response with program reference.""" + # Create a program + program = self.env["spp.program"].create( + { + "name": "Test Program", + } + ) + + # Create scenario with program reference + scenario_with_program = self.env["spp.simulation.scenario"].create( + { + "name": "Scenario With Program", + "target_type": "group", + "targeting_expression": "true", + "program_id": program.id, + } + ) + + from ..routers.scenario import _scenario_to_response + + response = _scenario_to_response(scenario_with_program) + + self.assertIsNotNone(response.program_id) + self.assertEqual(response.program_id, program.id) + + def test_scenario_states(self): + """Test scenarios in different states.""" + from ..routers.scenario import _scenario_to_response + + # Draft scenario + draft_response = _scenario_to_response(self.scenario_draft) + self.assertEqual(draft_response.state, "draft") + + # Ready scenario + ready_response = _scenario_to_response(self.scenario_ready) + self.assertEqual(ready_response.state, "ready") + + # Archived scenario + archived_scenario = 
self.env["spp.simulation.scenario"].create( + { + "name": "Archived Scenario", + "target_type": "group", + "targeting_expression": "true", + "state": "archived", + } + ) + archived_response = _scenario_to_response(archived_scenario) + self.assertEqual(archived_response.state, "archived") + + def test_scenario_with_budget_strategies(self): + """Test scenarios with different budget strategies.""" + from ..routers.scenario import _scenario_to_response + + # None strategy + scenario_none = self.env["spp.simulation.scenario"].create( + { + "name": "No Budget Strategy", + "target_type": "group", + "targeting_expression": "true", + "budget_strategy": "none", + "budget_amount": 0.0, + } + ) + response_none = _scenario_to_response(scenario_none) + self.assertEqual(response_none.budget_strategy, "none") + + # Cap total strategy + scenario_cap = self.env["spp.simulation.scenario"].create( + { + "name": "Cap Total Strategy", + "target_type": "group", + "targeting_expression": "true", + "budget_strategy": "cap_total", + "budget_amount": 10000.0, + } + ) + response_cap = _scenario_to_response(scenario_cap) + self.assertEqual(response_cap.budget_strategy, "cap_total") + self.assertEqual(response_cap.budget_amount, 10000.0) + + # Proportional reduction strategy + scenario_prop = self.env["spp.simulation.scenario"].create( + { + "name": "Proportional Reduction Strategy", + "target_type": "group", + "targeting_expression": "true", + "budget_strategy": "proportional_reduction", + "budget_amount": 15000.0, + } + ) + response_prop = _scenario_to_response(scenario_prop) + self.assertEqual(response_prop.budget_strategy, "proportional_reduction") + self.assertEqual(response_prop.budget_amount, 15000.0) diff --git a/spp_api_v2_simulation/tests/test_scope_registration.py b/spp_api_v2_simulation/tests/test_scope_registration.py new file mode 100644 index 00000000..2f1981f4 --- /dev/null +++ b/spp_api_v2_simulation/tests/test_scope_registration.py @@ -0,0 +1,131 @@ +# Part of OpenSPP. 
See LICENSE file for full copyright and licensing details. +"""Tests for simulation and aggregation scope registration.""" + +import logging + +from odoo.tests import tagged +from odoo.tests.common import TransactionCase + +_logger = logging.getLogger(__name__) + + +@tagged("post_install", "-at_install") +class TestScopeRegistration(TransactionCase): + """Test that simulation and aggregation scopes are properly registered.""" + + @classmethod + def setUpClass(cls): + """Set up test data.""" + super().setUpClass() + + # Create partner and org type for API client + partner = cls.env["res.partner"].create({"name": "Test Scope Organization"}) + org_type = cls.env.ref( + "spp_consent.org_type_government", + raise_if_not_found=False, + ) + if not org_type: + org_type = cls.env["spp.consent.org.type"].search([("code", "=", "government")], limit=1) + if not org_type: + org_type = cls.env["spp.consent.org.type"].create({"name": "Government", "code": "government"}) + + cls.api_client = cls.env["spp.api.client"].create( + { + "name": "Test Scope Client", + "partner_id": partner.id, + "organization_type_id": org_type.id, + } + ) + + def test_simulation_scope_available(self): + """Test that 'simulation' is available as a scope resource.""" + scope = self.env["spp.api.client.scope"].create( + { + "client_id": self.api_client.id, + "resource": "simulation", + "action": "read", + } + ) + self.assertEqual(scope.resource, "simulation") + self.assertEqual(scope.action, "read") + + def test_aggregation_scope_available(self): + """Test that 'aggregation' is available as a scope resource.""" + scope = self.env["spp.api.client.scope"].create( + { + "client_id": self.api_client.id, + "resource": "aggregation", + "action": "read", + } + ) + self.assertEqual(scope.resource, "aggregation") + self.assertEqual(scope.action, "read") + + def test_simulation_has_scope_read(self): + """Test that has_scope works for simulation:read.""" + self.env["spp.api.client.scope"].create( + { + "client_id": 
self.api_client.id, + "resource": "simulation", + "action": "read", + } + ) + self.assertTrue(self.api_client.has_scope("simulation", "read")) + self.assertFalse(self.api_client.has_scope("simulation", "create")) + + def test_simulation_has_scope_create(self): + """Test that has_scope works for simulation:create.""" + self.env["spp.api.client.scope"].create( + { + "client_id": self.api_client.id, + "resource": "simulation", + "action": "create", + } + ) + self.assertTrue(self.api_client.has_scope("simulation", "create")) + + def test_simulation_has_scope_update(self): + """Test that has_scope works for simulation:update.""" + self.env["spp.api.client.scope"].create( + { + "client_id": self.api_client.id, + "resource": "simulation", + "action": "update", + } + ) + self.assertTrue(self.api_client.has_scope("simulation", "update")) + + def test_aggregation_has_scope_read(self): + """Test that has_scope works for aggregation:read.""" + self.env["spp.api.client.scope"].create( + { + "client_id": self.api_client.id, + "resource": "aggregation", + "action": "read", + } + ) + self.assertTrue(self.api_client.has_scope("aggregation", "read")) + + def test_simulation_scope_all_action(self): + """Test that 'all' action grants access to any simulation action.""" + self.env["spp.api.client.scope"].create( + { + "client_id": self.api_client.id, + "resource": "simulation", + "action": "all", + } + ) + self.assertTrue(self.api_client.has_scope("simulation", "read")) + self.assertTrue(self.api_client.has_scope("simulation", "create")) + self.assertTrue(self.api_client.has_scope("simulation", "update")) + + def test_no_cross_resource_access(self): + """Test that simulation scope doesn't grant aggregation access.""" + self.env["spp.api.client.scope"].create( + { + "client_id": self.api_client.id, + "resource": "simulation", + "action": "all", + } + ) + self.assertFalse(self.api_client.has_scope("aggregation", "read")) diff --git a/spp_api_v2_simulation/tests/test_simulation_service.py 
b/spp_api_v2_simulation/tests/test_simulation_service.py new file mode 100644 index 00000000..eeacd8bc --- /dev/null +++ b/spp_api_v2_simulation/tests/test_simulation_service.py @@ -0,0 +1,307 @@ +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. +"""Tests for simulation API service.""" + +import logging + +from odoo.tests import tagged + +from .common import SimulationApiTestCommon + +_logger = logging.getLogger(__name__) + + +@tagged("post_install", "-at_install") +class TestSimulationApiService(SimulationApiTestCommon): + """Test simulation API service functionality.""" + + def _get_service(self): + """Import and create the service.""" + from ..services.simulation_api_service import SimulationApiService + + return SimulationApiService(self.env) + + # --- Template Tests --- + + def test_list_templates(self): + """Test listing active templates.""" + service = self._get_service() + result = service.list_templates() + + self.assertIsInstance(result, list) + self.assertGreaterEqual(len(result), 1, "Should have at least the test template") + + template = next((t for t in result if t["id"] == self.template.id), None) + self.assertIsNotNone(template, "Test template should be in results") + self.assertEqual(template["name"], "Test Template") + self.assertEqual(template["category"], "age") + self.assertEqual(template["target_type"], "group") + self.assertEqual(template["default_amount"], 1000.0) + + def test_list_templates_excludes_inactive(self): + """Test that inactive templates are excluded.""" + service = self._get_service() + + inactive_template = self.env["spp.simulation.scenario.template"].create( + { + "name": "Inactive Template", + "category": "age", + "target_type": "group", + "targeting_expression": "true", + "active": False, + } + ) + + result = service.list_templates() + template_ids = [t["id"] for t in result] + self.assertNotIn(inactive_template.id, template_ids) + + # --- Scenario CRUD Tests --- + + def 
test_create_scenario_from_template(self): + """Test creating a scenario from a template.""" + service = self._get_service() + result = service.create_scenario( + { + "name": "From Template", + "template_id": self.template.id, + } + ) + + self.assertIn("id", result) + self.assertEqual(result["name"], "From Template") + self.assertEqual(result["target_type"], "group") + self.assertEqual( + result["targeting_expression"], + "age_years(r.birthdate) >= 60", + ) + + def test_create_scenario_custom(self): + """Test creating a custom scenario without template.""" + service = self._get_service() + result = service.create_scenario( + { + "name": "Custom Scenario", + "target_type": "individual", + "targeting_expression": "is_female(r)", + "budget_amount": 25000.0, + "budget_strategy": "cap_total", + "entitlement_rules": [ + { + "name": "Base payment", + "amount_mode": "fixed", + "amount": 200.0, + } + ], + } + ) + + self.assertIn("id", result) + self.assertEqual(result["name"], "Custom Scenario") + self.assertEqual(result["target_type"], "individual") + self.assertEqual(result["budget_amount"], 25000.0) + self.assertEqual(result["budget_strategy"], "cap_total") + self.assertGreaterEqual(len(result["entitlement_rules"]), 1) + + def test_list_scenarios(self): + """Test listing scenarios.""" + service = self._get_service() + result = service.list_scenarios() + + self.assertIsInstance(result, list) + self.assertGreaterEqual(len(result), 1) + + scenario_ids = [s["id"] for s in result] + self.assertIn(self.scenario.id, scenario_ids) + + def test_list_scenarios_filter_by_state(self): + """Test listing scenarios with state filter.""" + service = self._get_service() + + result_draft = service.list_scenarios(state="draft") + result_ready = service.list_scenarios(state="ready") + + # Our test scenario is draft + draft_ids = [s["id"] for s in result_draft] + self.assertIn(self.scenario.id, draft_ids) + + ready_ids = [s["id"] for s in result_ready] + self.assertNotIn(self.scenario.id, 
ready_ids) + + def test_get_scenario(self): + """Test getting a single scenario by ID.""" + service = self._get_service() + result = service.get_scenario(self.scenario.id) + + self.assertEqual(result["id"], self.scenario.id) + self.assertEqual(result["name"], "Test Scenario") + self.assertEqual(result["state"], "draft") + self.assertIn("entitlement_rules", result) + self.assertGreaterEqual(len(result["entitlement_rules"]), 1) + + def test_get_scenario_not_found(self): + """Test getting a non-existent scenario raises ValueError.""" + service = self._get_service() + + with self.assertRaises(ValueError): + service.get_scenario(99999999) + + def test_update_scenario(self): + """Test updating scenario fields.""" + service = self._get_service() + result = service.update_scenario( + self.scenario.id, + { + "name": "Updated Scenario", + "budget_amount": 75000.0, + }, + ) + + self.assertEqual(result["name"], "Updated Scenario") + self.assertEqual(result["budget_amount"], 75000.0) + + def test_update_scenario_replace_rules(self): + """Test updating scenario replaces entitlement rules.""" + service = self._get_service() + result = service.update_scenario( + self.scenario.id, + { + "entitlement_rules": [ + { + "name": "New rule A", + "amount_mode": "fixed", + "amount": 300.0, + }, + { + "name": "New rule B", + "amount_mode": "fixed", + "amount": 150.0, + }, + ], + }, + ) + + self.assertEqual(len(result["entitlement_rules"]), 2) + + # --- Simulation Run Tests --- + + def test_run_simulation(self): + """Test running a simulation on a scenario.""" + service = self._get_service() + + # Scenario needs a targeting expression to transition to ready + self.scenario.write({"targeting_expression": "true"}) + + result = service.run_simulation(self.scenario.id) + + self.assertIn("id", result) + self.assertIn("state", result) + self.assertIn(result["state"], ("completed", "failed")) + self.assertIn("beneficiary_count", result) + self.assertIn("total_cost", result) + + def 
test_run_simulation_auto_transitions_draft_to_ready(self): + """Test that run_simulation auto-transitions draft scenarios to ready.""" + service = self._get_service() + + # Scenario is in draft with targeting expression + self.assertEqual(self.scenario.state, "draft") + self.scenario.write({"targeting_expression": "true"}) + + service.run_simulation(self.scenario.id) + + # Should have transitioned to ready + self.scenario.invalidate_recordset() + self.assertEqual(self.scenario.state, "ready") + + def test_run_simulation_not_found(self): + """Test running simulation for non-existent scenario.""" + service = self._get_service() + + with self.assertRaises(ValueError): + service.run_simulation(99999999) + + # --- Run Detail Tests --- + + def test_get_run(self): + """Test getting full run details.""" + service = self._get_service() + + # Run a simulation first + self.scenario.write({"targeting_expression": "true"}) + run_result = service.run_simulation(self.scenario.id) + run_id = run_result["id"] + + result = service.get_run(run_id) + + self.assertEqual(result["id"], run_id) + self.assertIn("distribution_json", result) + self.assertIn("fairness_json", result) + self.assertIn("targeting_efficiency_json", result) + self.assertIn("geographic_json", result) + self.assertIn("scenario_snapshot_json", result) + + def test_get_run_not_found(self): + """Test getting a non-existent run raises ValueError.""" + service = self._get_service() + + with self.assertRaises(ValueError): + service.get_run(99999999) + + # --- Comparison Tests --- + + def test_compare_runs(self): + """Test comparing two simulation runs.""" + service = self._get_service() + + # Create two scenarios with different expressions and run them + scenario_a = self.env["spp.simulation.scenario"].create( + { + "name": "Comparison A", + "target_type": "group", + "targeting_expression": "true", + "budget_amount": 10000.0, + "state": "draft", + } + ) + self.env["spp.simulation.entitlement.rule"].create( + { + 
"scenario_id": scenario_a.id, + "name": "Rule A", + "amount_mode": "fixed", + "amount": 100.0, + } + ) + run_a = service.run_simulation(scenario_a.id) + + scenario_b = self.env["spp.simulation.scenario"].create( + { + "name": "Comparison B", + "target_type": "group", + "targeting_expression": "true", + "budget_amount": 20000.0, + "state": "draft", + } + ) + self.env["spp.simulation.entitlement.rule"].create( + { + "scenario_id": scenario_b.id, + "name": "Rule B", + "amount_mode": "fixed", + "amount": 200.0, + } + ) + run_b = service.run_simulation(scenario_b.id) + + result = service.compare_runs([run_a["id"], run_b["id"]]) + + self.assertIn("id", result) + self.assertIn("runs", result) + self.assertEqual(len(result["runs"]), 2) + self.assertIn("overlap", result) + + def test_compare_runs_requires_minimum_two(self): + """Test that compare_runs requires at least two run IDs.""" + service = self._get_service() + + with self.assertRaises(ValueError): + service.compare_runs([1]) diff --git a/spp_simulation/services/simulation_service.py b/spp_simulation/services/simulation_service.py index 786419a7..3d9b40b3 100644 --- a/spp_simulation/services/simulation_service.py +++ b/spp_simulation/services/simulation_service.py @@ -497,7 +497,7 @@ def convert_to_program(self, scenario, options): # Execute wizard to create program action = wizard.create_program() - program = self.env["spp.program"].browse(action["res_id"]) + program = self.env["spp.program"].browse(action["params"]["program_id"]) # Link scenario to converted program scenario.converted_program_id = program.id