diff --git a/.gitignore b/.gitignore index e69de29b..b0e8167f 100644 --- a/.gitignore +++ b/.gitignore @@ -0,0 +1,5 @@ +fraud-alert-service/fraud_alerts.db +__pycache__/ +*.pyc +.pytest_cache/ +.claude/ diff --git a/README.md b/README.md index 494f1c75..95e41cbc 100644 --- a/README.md +++ b/README.md @@ -1,43 +1,73 @@ -# Candidate Assessment: Spec-Driven Development With Codegen Tools - -This assessment evaluates how you use modern code generation tools (for example `5.2-Codex`, `Claude`, `Copilot`, and similar) to design, build, and test a software application using a spec-driven development pattern. You may build a frontend, a backend, or both. - -## Goals -- Build a working application with at least one meaningful feature. -- Create a testing framework to validate the application. -- Demonstrate effective use of code generation tools to accelerate delivery. -- Show clear, maintainable engineering practices. - -## Deliverables -- Application source code in this repository. -- A test suite and test harness that can be run locally. -- Documentation that explains how to run the app and the tests. - -## Scope Options -Pick one: -- Frontend-only application. -- Backend-only application. -- Full-stack application. - -Your solution should include at least one real workflow, for example: -- Create and view a resource. -- Search or filter data. -- Persist data in memory or storage. - -## Rules -- You must use a code generation tool (for example `5.2-Codex`, `Claude`, or similar). You can use multiple tools. -- You must build the application and a testing framework for it. -- The application and tests must run locally. -- Do not include secrets or credentials in this repository. - -## Evaluation Criteria -- Working product: Does the app do what it claims? -- Test coverage: Do tests cover key workflows and edge cases? -- Engineering quality: Clarity, structure, and maintainability. -- Use of codegen: How effectively you used tools to accelerate work. 
-- Documentation: Clear setup and run instructions. - -## What to Submit -- When you are complete, put up a Pull Request against this repository with your changes. -- A short summary of your approach and tools used in your PR submission -- Any additional information or approach that helped you. +# Fraud Alert Validation Service + +This project is essentially a REST API for storing fraud alerts, enforcing lifecycle transitions, and properly handling customer data to comply with Ally's PII sensitivity principles. + +I put everything in fraud-alert-service to keep the project separated from given spec-driven-dev environment. + +To test this, just follow the steps below and test out the interactive docs at `http://localhost:8000/docs`. + +## Tech Stack + +- **FastAPI** — REST framework with automatic OpenAPI/Swagger docs +- **Pydantic v2** — request validation and response serialization +- **SQLite** — lightweight persistent storage via Python's built-in `sqlite3` +- **pytest** + **httpx** — integration tests using `TestClient` with per-test isolated databases + +## Setup + +```bash +cd fraud-alert-service +pip install -r requirements.txt +``` + +## Run + +```bash +uvicorn src.main:app --reload +``` + +The API will be available at `http://localhost:8000`. Interactive docs at `http://localhost:8000/docs`. + +## Test + +```bash +python -m pytest tests/ -v +``` + +## API Overview + +- POST | `/transactions` | Create a transaction +- GET | `/transactions/{id}` | Get a transaction by ID +- POST | `/alerts` | Create an alert for a transaction +- GET | `/alerts` | List and filter alerts +- GET | `/alerts/summary` | Aggregated stats by status, risk level, and resolution time +- GET | `/alerts/{id}` | Get a single alert +- PATCH | `/alerts/{id}/assign` | Assign an analyst to an alert +- PATCH | `/alerts/{id}/status` | Transition alert status + +## PII Masking + +`card_id` and `account_id` are masked by default in all responses (e.g. `****1234`). 
Append `?show_pii=true` to any endpoint to reveal full values. + +## Test Coverage + +142 tests across 6 test files, each corresponding to a spec: + +- `test_transactions.py` | 23 | Transaction creation, validation, field storage +- `test_alerts.py` | 30 | Alert creation, risk level derivation, boundary values +- `test_state_machine.py` | 33 | Status transitions, analyst assignment, audit trail +- `test_pii_masking.py` | 22 | Masking behavior, `show_pii` parameter, edge cases +- `test_filtering.py` | 21 | Filter parameters, combined filters, sort order +- `test_summary_stats.py` | 13 | Counts by status/risk level, resolution time calculation + +Each test uses an isolated SQLite database via `tmp_path`, so tests are fully independent and leave no state behind. + +## Spec Driven Dev + +I have experience with spec driven development, so I essentially just used my standard workflow where I broke down my project into specs and wrote these out myself, used Claude to review these and enhance them if necessary (it's good at finding edge cases and such that I may have missed), then I added actionable TODO tasks before implementation. + +## Code Generation Tools Used + +I used Claude as my primary code generation tool. + + diff --git a/SPECS/alerts.md b/SPECS/alerts.md new file mode 100644 index 00000000..4b6305ec --- /dev/null +++ b/SPECS/alerts.md @@ -0,0 +1,48 @@ +# Feature Spec: Alerts + +## Goal +- Provide endpoints for creating and retrieving fraud alerts that wrap a flagged transaction with risk assessment data and lifecycle tracking. 
+ +## Scope +- In: Creating alerts linked to transactions, retrieving alerts by ID, risk score validation, automatic risk level derivation, timestamp generation +- Out: Alert status transitions (see state-machine spec), alert filtering/listing (see filtering spec), summary statistics (see summary-stats spec) + +## Requirements +- Each alert must have a unique `id` (UUID, server-generated) +- Required fields on creation: `transaction_id`, `risk_score` +- `transaction_id` must reference an existing transaction +- A transaction can only have one alert (no duplicate alerts for the same transaction) +- `risk_score` must be a float between 0.0 and 1.0 inclusive +- `risk_level` is automatically derived from `risk_score` — never provided by the client: + - `low`: 0.0 <= score < 0.3 + - `medium`: 0.3 <= score < 0.6 + - `high`: 0.6 <= score < 0.8 + - `critical`: 0.8 <= score <= 1.0 +- `status` is initialized to `pending` on creation +- `analyst_id` is null on creation +- `contains_pii` is set to `true` by default (since the linked transaction contains card_id and account_id) +- `created_at` is server-generated at creation time +- `updated_at` is server-generated and updated on any modification +- `status_history` is initialized with one entry: `{status: "pending", timestamp: , changed_by: "system"}` +- GET response returns the alert with its linked transaction's PII fields masked by default + +## Acceptance Criteria +- [x] POST /alerts creates an alert and returns it with generated UUID, pending status, and derived risk_level +- [x] POST /alerts returns 422 for missing transaction_id or risk_score +- [x] POST /alerts returns 404 if transaction_id does not reference an existing transaction +- [x] POST /alerts returns 409 if an alert already exists for the given transaction_id +- [x] POST /alerts returns 422 for risk_score < 0.0 +- [x] POST /alerts returns 422 for risk_score > 1.0 +- [x] POST /alerts returns 422 for non-numeric risk_score +- [x] Risk level is correctly derived 
at boundary: score 0.0 → low +- [x] Risk level is correctly derived at boundary: score 0.29 → low +- [x] Risk level is correctly derived at boundary: score 0.3 → medium +- [x] Risk level is correctly derived at boundary: score 0.59 → medium +- [x] Risk level is correctly derived at boundary: score 0.6 → high +- [x] Risk level is correctly derived at boundary: score 0.79 → high +- [x] Risk level is correctly derived at boundary: score 0.8 → critical +- [x] Risk level is correctly derived at boundary: score 1.0 → critical +- [x] Alert is created with status "pending" and a single status_history entry +- [x] GET /alerts/{id} returns the alert with all fields populated +- [x] GET /alerts/{id} returns 404 for nonexistent alert ID +- [x] Client cannot override risk_level, status, or created_at on creation \ No newline at end of file diff --git a/SPECS/feature-template.md b/SPECS/feature-template.md deleted file mode 100644 index 7dbc70a5..00000000 --- a/SPECS/feature-template.md +++ /dev/null @@ -1,14 +0,0 @@ -# Feature Spec: - -## Goal -- - -## Scope -- In: -- Out: - -## Requirements -- - -## Acceptance Criteria -- [ ] \ No newline at end of file diff --git a/SPECS/filtering.md b/SPECS/filtering.md new file mode 100644 index 00000000..4a3af92a --- /dev/null +++ b/SPECS/filtering.md @@ -0,0 +1,48 @@ +# Feature Spec: Alert Filtering and Listing + +## Goal +- Provide a flexible query interface for listing and filtering fraud alerts, enabling analysts to efficiently triage their workload by status, risk level, assignment, and time range. + +## Scope +- In: GET /alerts with query parameters for filtering and sorting +- Out: Individual alert retrieval (see alerts spec), summary aggregation (see summary-stats spec) + +## Requirements + +### Filter Parameters (all optional, combinable) +- `status` — filter by alert status (e.g., `?status=pending`). Accepts a single value. +- `risk_level` — filter by risk level (e.g., `?risk_level=critical`). Accepts a single value. 
+- `analyst_id` — filter by assigned analyst (e.g., `?analyst_id=analyst_42`). Use `unassigned` as a special value to find alerts with no analyst. +- `created_after` — ISO 8601 datetime, return alerts created on or after this time +- `created_before` — ISO 8601 datetime, return alerts created on or before this time +- When multiple filters are provided, they are combined with AND logic + +### Response Format +- Returns an object with `alerts` (array) and `total` (integer) +- Results are sorted by `created_at` descending (newest first) +- Each alert in the array includes all fields (with PII masked by default) +- Empty results return `{"alerts": [], "total": 0}` — not 404 + +## Acceptance Criteria + +### Single Filters +- [x] GET /alerts with no filters returns all alerts +- [x] GET /alerts?status=pending returns only pending alerts +- [x] GET /alerts?risk_level=critical returns only critical alerts +- [x] GET /alerts?analyst_id=analyst_1 returns only alerts assigned to analyst_1 +- [x] GET /alerts?analyst_id=unassigned returns only unassigned alerts +- [x] GET /alerts?created_after= returns alerts created on or after that time +- [x] GET /alerts?created_before= returns alerts created on or before that time + +### Combined Filters +- [x] GET /alerts?status=pending&risk_level=high returns alerts matching both conditions +- [x] GET /alerts?status=under_review&analyst_id=analyst_1 returns correct intersection +- [x] GET /alerts?created_after=&created_before= returns alerts within the date range + +### Edge Cases +- [x] Invalid status value returns 422 +- [x] Invalid risk_level value returns 422 +- [x] Invalid datetime format for created_after or created_before returns 422 +- [x] Filters that match zero alerts return {"alerts": [], "total": 0} with 200 status +- [x] Date range where created_after > created_before returns empty results (not an error) +- [x] Results are sorted by created_at descending by default \ No newline at end of file diff --git 
a/SPECS/pii-masking.md b/SPECS/pii-masking.md new file mode 100644 index 00000000..82d3d1b5 --- /dev/null +++ b/SPECS/pii-masking.md @@ -0,0 +1,61 @@ +# Feature Spec: PII Masking + +## Goal +- Ensure PII-sensitive fields are masked by default in all API responses, reflecting Ally Financial's commitment to responsible data handling. Ally contributed a PII Masking module to LangChain's open-source ecosystem specifically because customer interactions always involve PII — this feature demonstrates awareness of that principle. + +## Scope +- In: Masking card_id and account_id in API responses, an authorized access flag to reveal full values, consistent masking across all endpoints that return transaction data +- Out: Encryption at rest, role-based access control, authentication/authorization (this is a demonstration of the concept, not a production auth system) + +## Requirements + +### Masked Fields +- `card_id` and `account_id` are the PII-sensitive fields +- When masked, only the last 4 characters are visible, prefixed with asterisks: `****1234` +- If the value is 4 characters or fewer, mask the entire value: `****` +- Masking is applied at the API response layer — storage retains the full value + +### Default Behavior +- All API responses that include transaction data mask PII fields by default +- This applies to: + - GET /transactions/{id} + - POST /transactions (response body) + - GET /alerts/{id} (embedded transaction data) + - GET /alerts (embedded transaction data in each alert) + +### Authorized Access +- A query parameter `show_pii=true` reveals the unmasked values +- This simulates an authorized analyst session — in production this would be gated by role-based auth +- When `show_pii=true`, card_id and account_id are returned in full +- When `show_pii` is absent or `false`, fields are masked + +### contains_pii Flag +- Each alert has a `contains_pii` boolean +- Set to `true` by default since linked transactions always contain card_id and account_id +- This 
flag is informational — it does not control masking behavior (masking always applies regardless) + +## Acceptance Criteria + +### Default Masking +- [x] GET /transactions/{id} returns card_id and account_id masked (e.g., "****5678") +- [x] POST /transactions response body returns masked PII fields +- [x] GET /alerts/{id} returns embedded transaction data with masked PII +- [x] GET /alerts list returns all embedded transaction data with masked PII +- [x] Masking shows last 4 characters: "1234567890" → "****7890" +- [x] Values with 4 or fewer characters are fully masked: "1234" → "****" + +### Authorized Access +- [x] GET /transactions/{id}?show_pii=true returns full card_id and account_id +- [x] GET /alerts/{id}?show_pii=true returns full PII in embedded transaction +- [x] GET /alerts?show_pii=true returns full PII across all results +- [x] show_pii=false behaves the same as omitting the parameter (masked) + +### Consistency +- [x] PII masking is applied consistently across all endpoints — no endpoint leaks unmasked data by default +- [x] Masking does not affect stored data — full values are preserved in the database +- [x] The contains_pii flag on alerts is set to true by default + +### Edge Cases +- [x] Empty string card_id or account_id is masked as "****" +- [x] Very long PII values are correctly masked (only last 4 shown) +- [x] show_pii parameter with non-boolean values (e.g., "yes", "1") is handled gracefully (treat as false or return 422) \ No newline at end of file diff --git a/SPECS/state-machine.md b/SPECS/state-machine.md new file mode 100644 index 00000000..566469ff --- /dev/null +++ b/SPECS/state-machine.md @@ -0,0 +1,73 @@ +# Feature Spec: Alert Lifecycle State Machine + +## Goal +- Enforce a strict state machine governing how fraud alerts move through the analyst review pipeline, with full audit trail for every transition. This is the core business logic of the service. 
+ +## Scope +- In: Status transitions via PATCH endpoint, analyst assignment, transition validation, status_history tracking, business rules around assignment and resolution +- Out: Alert creation (see alerts spec), filtering by status (see filtering spec) + +## Requirements + +### Status Transitions +- Valid transitions: + - `pending` → `under_review` (requires analyst_id to be assigned first) + - `under_review` → `confirmed_fraud` + - `under_review` → `false_positive` + - `under_review` → `escalated` +- All other transitions are invalid and must be rejected with 409 Conflict +- Terminal states: `confirmed_fraud`, `false_positive` (no further transitions allowed) +- `escalated` is also terminal for the scope of this service + +### Analyst Assignment +- PATCH /alerts/{id}/assign accepts `analyst_id` (string) +- Assignment is only allowed when status is `pending` or `under_review` +- Cannot assign to an alert in a terminal state (confirmed_fraud, false_positive, escalated) +- Assigning updates `updated_at` +- Re-assignment is allowed (analyst_id can be changed while alert is pending or under_review) + +### Transition Rules +- PATCH /alerts/{id}/status accepts `status` (the target status) and `changed_by` (string identifier) +- Transitioning to `under_review` requires that `analyst_id` is not null (someone must own it) +- `changed_by` is recorded in the status_history entry for the transition +- Each transition appends to `status_history`: `{status: , timestamp: , changed_by: }` +- `updated_at` is refreshed on every transition + +### Audit Trail +- `status_history` is append-only — entries are never modified or deleted +- The full history is returned on GET /alerts/{id} +- History entries are ordered chronologically + +## Acceptance Criteria + +### Valid Transitions +- [x] pending → under_review succeeds when analyst_id is assigned +- [x] under_review → confirmed_fraud succeeds +- [x] under_review → false_positive succeeds +- [x] under_review → escalated succeeds +- 
[x] Each successful transition appends to status_history with correct status, timestamp, and changed_by + +### Invalid Transitions +- [x] pending → confirmed_fraud returns 409 +- [x] pending → false_positive returns 409 +- [x] pending → escalated returns 409 +- [x] under_review → pending returns 409 +- [x] confirmed_fraud → any status returns 409 +- [x] false_positive → any status returns 409 +- [x] escalated → any status returns 409 +- [x] pending → under_review without analyst_id assigned returns 409 (or 422) + +### Analyst Assignment +- [x] Assigning analyst to a pending alert succeeds +- [x] Assigning analyst to an under_review alert succeeds (re-assignment) +- [x] Assigning analyst to a confirmed_fraud alert returns 409 +- [x] Assigning analyst to a false_positive alert returns 409 +- [x] Assigning analyst to an escalated alert returns 409 +- [x] Assignment updates the updated_at timestamp + +### Audit Trail +- [x] A newly created alert has exactly one status_history entry (pending) +- [x] After transitioning pending → under_review → confirmed_fraud, status_history has 3 entries +- [x] Status history entries are in chronological order +- [x] Each entry contains the correct changed_by value from the request +- [x] Status history is immutable — previous entries are unchanged after new transitions \ No newline at end of file diff --git a/SPECS/summary-stats.md b/SPECS/summary-stats.md new file mode 100644 index 00000000..6b6e561e --- /dev/null +++ b/SPECS/summary-stats.md @@ -0,0 +1,46 @@ +# Feature Spec: Summary Statistics + +## Goal +- Provide an aggregation endpoint that gives analysts and team leads a dashboard-level view of fraud alert volume, distribution, and resolution performance. 
+ +## Scope +- In: GET /alerts/summary endpoint returning counts by status, counts by risk level, and average resolution time +- Out: Historical trend data, per-analyst performance metrics, real-time streaming updates + +## Requirements + +### Response Structure +- `total_alerts` — total number of alerts in the system +- `by_status` — object with counts for each status: `pending`, `under_review`, `confirmed_fraud`, `false_positive`, `escalated` +- `by_risk_level` — object with counts for each risk level: `low`, `medium`, `high`, `critical` +- `avg_resolution_time_seconds` — average time from alert creation to reaching a terminal state (confirmed_fraud, false_positive, or escalated), in seconds. Null if no alerts have been resolved. +- All counts default to 0 if no alerts match that category + +### Resolution Time Calculation +- Resolution time = timestamp of terminal status transition minus `created_at` +- Only alerts in terminal states (confirmed_fraud, false_positive, escalated) are included in the average +- If no alerts are in a terminal state, `avg_resolution_time_seconds` is null (not 0) +- Resolution time is calculated from the `status_history` entries, using the timestamp of the terminal transition + +## Acceptance Criteria + +### Basic Stats +- [x] GET /alerts/summary returns 200 with the correct response structure +- [x] total_alerts matches the actual number of alerts +- [x] by_status counts are accurate for each status category +- [x] by_risk_level counts are accurate for each risk level category +- [x] All status keys are present even when count is 0 +- [x] All risk_level keys are present even when count is 0 + +### Resolution Time +- [x] avg_resolution_time_seconds is calculated correctly for resolved alerts +- [x] avg_resolution_time_seconds is null when no alerts have been resolved +- [x] Resolution time uses the terminal status_history entry timestamp minus created_at +- [x] Average is computed across all terminal states (confirmed_fraud, 
false_positive, escalated) + +### Edge Cases +- [x] Summary with zero alerts returns all counts as 0 and avg_resolution_time as null +- [x] Summary with one resolved alert returns that alert's resolution time as the average +- [x] Summary with multiple resolved alerts returns the correct arithmetic mean +- [x] Alerts in non-terminal states (pending, under_review) do not affect avg_resolution_time +- [x] by_status and by_risk_level always include all possible keys regardless of data present \ No newline at end of file diff --git a/SPECS/transactions.md b/SPECS/transactions.md new file mode 100644 index 00000000..1d5e8c64 --- /dev/null +++ b/SPECS/transactions.md @@ -0,0 +1,31 @@ +# Feature Spec: Transactions + +## Goal +- Provide CRUD endpoints for transaction records that represent raw financial transaction data flagged upstream by Ally's fraud detection AI. + +## Scope +- In: Creating transactions, retrieving transactions by ID, field validation, PII-sensitive field storage +- Out: Updating or deleting transactions (transactions are immutable records of what occurred), bulk import, transaction search/listing (transactions are accessed individually or via their linked alerts) + +## Requirements +- Each transaction must have a unique `id` (UUID, server-generated) +- Required fields on creation: `amount`, `merchant_name`, `merchant_category`, `location`, `timestamp`, `card_id`, `account_id` +- `amount` must be a positive number greater than 0 +- `merchant_category` must be one of a defined enum: `electronics`, `travel`, `groceries`, `gas_station`, `restaurant`, `entertainment`, `healthcare`, `utilities`, `cash_advance`, `other` +- `location` is a string (e.g., "Charlotte, NC") +- `timestamp` must be a valid ISO 8601 datetime string +- `card_id` and `account_id` are stored in full but treated as PII-sensitive (see pii-masking spec) +- Transactions are immutable after creation, so no update or delete endpoints +- GET response returns the transaction with PII fields 
masked by default + +## Acceptance Criteria +- [x] POST /transactions creates a transaction and returns it with a generated UUID +- [x] POST /transactions returns 422 for missing required fields +- [x] POST /transactions returns 422 for amount <= 0 +- [x] POST /transactions returns 422 for invalid merchant_category +- [x] POST /transactions returns 422 for invalid timestamp format +- [x] GET /transactions/{id} returns the transaction with PII fields masked +- [x] GET /transactions/{id} returns 404 for nonexistent ID +- [x] POST /transactions accepts and stores all valid merchant_category values +- [x] Response includes server-generated `id` that was not provided in the request body +- [x] Extra/unknown fields in the request body are ignored or rejected (pick one, document it) \ No newline at end of file diff --git a/TODO.md b/TODO.md index b5d82042..125112d1 100644 --- a/TODO.md +++ b/TODO.md @@ -1,7 +1,12 @@ # TODO ## Refactor Proposals -- +- ## New Feature Proposals -- \ No newline at end of file +- + + + + + diff --git a/fraud-alert-service/pytest.ini b/fraud-alert-service/pytest.ini new file mode 100644 index 00000000..2f4c80e3 --- /dev/null +++ b/fraud-alert-service/pytest.ini @@ -0,0 +1,2 @@ +[pytest] +asyncio_mode = auto diff --git a/fraud-alert-service/requirements.txt b/fraud-alert-service/requirements.txt new file mode 100644 index 00000000..89e5cf77 --- /dev/null +++ b/fraud-alert-service/requirements.txt @@ -0,0 +1,7 @@ +fastapi==0.115.0 +uvicorn==0.30.6 +pydantic==2.9.2 +httpx==0.27.2 +pytest==8.3.3 +pytest-asyncio==0.24.0 +aiosqlite==0.20.0 diff --git a/fraud-alert-service/src/__init__.py b/fraud-alert-service/src/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/fraud-alert-service/src/database.py b/fraud-alert-service/src/database.py new file mode 100644 index 00000000..e0f91a2d --- /dev/null +++ b/fraud-alert-service/src/database.py @@ -0,0 +1,55 @@ +import sqlite3 +from contextlib import contextmanager +from pathlib import 
Path + +DB_PATH = Path(__file__).parent.parent / "fraud_alerts.db" + + +def get_connection(db_path: Path | None = None) -> sqlite3.Connection: + conn = sqlite3.connect(db_path or DB_PATH) + conn.row_factory = sqlite3.Row + return conn + + +@contextmanager +def db(db_path: Path | None = None): + conn = get_connection(db_path or DB_PATH) + try: + yield conn + conn.commit() + except Exception: + conn.rollback() + raise + finally: + conn.close() + + +def init_db(db_path: Path | None = None) -> None: + with db(db_path) as conn: + conn.execute(""" + CREATE TABLE IF NOT EXISTS transactions ( + id TEXT PRIMARY KEY, + amount REAL NOT NULL, + merchant_name TEXT NOT NULL, + merchant_category TEXT NOT NULL, + location TEXT NOT NULL, + timestamp TEXT NOT NULL, + card_id TEXT NOT NULL, + account_id TEXT NOT NULL + ) + """) + conn.execute(""" + CREATE TABLE IF NOT EXISTS alerts ( + id TEXT PRIMARY KEY, + transaction_id TEXT NOT NULL UNIQUE, + risk_score REAL NOT NULL, + risk_level TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'pending', + analyst_id TEXT, + contains_pii INTEGER NOT NULL DEFAULT 1, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + status_history TEXT NOT NULL, + FOREIGN KEY (transaction_id) REFERENCES transactions(id) + ) + """) diff --git a/fraud-alert-service/src/main.py b/fraud-alert-service/src/main.py new file mode 100644 index 00000000..7e890895 --- /dev/null +++ b/fraud-alert-service/src/main.py @@ -0,0 +1,19 @@ +from contextlib import asynccontextmanager + +from fastapi import FastAPI + +from src.database import init_db +from src.routes.alerts import router as alerts_router +from src.routes.transactions import router as transactions_router + + +@asynccontextmanager +async def lifespan(app: FastAPI): + init_db() + yield + + +app = FastAPI(title="Fraud Alert Validation Service", lifespan=lifespan) + +app.include_router(transactions_router) +app.include_router(alerts_router) diff --git a/fraud-alert-service/src/models.py 
b/fraud-alert-service/src/models.py new file mode 100644 index 00000000..49fcdf15 --- /dev/null +++ b/fraud-alert-service/src/models.py @@ -0,0 +1,160 @@ +from datetime import datetime +from enum import Enum +from typing import Optional +from uuid import UUID + +from pydantic import BaseModel, ConfigDict, Field, field_validator + + +class MerchantCategory(str, Enum): + electronics = "electronics" + travel = "travel" + groceries = "groceries" + gas_station = "gas_station" + restaurant = "restaurant" + entertainment = "entertainment" + healthcare = "healthcare" + utilities = "utilities" + cash_advance = "cash_advance" + other = "other" + + +class TransactionCreate(BaseModel): + model_config = ConfigDict( + extra="forbid", + json_schema_extra={ + "example": { + "amount": 149.99, + "merchant_name": "Best Buy", + "merchant_category": "electronics", + "location": "Charlotte, NC", + "timestamp": "2024-05-01T14:30:00Z", + "card_id": "4111111111111234", + "account_id": "ACC0000000001", + } + }, + ) + + amount: float + merchant_name: str + merchant_category: MerchantCategory + location: str + timestamp: datetime + card_id: str + account_id: str + + @field_validator("amount") + @classmethod + def amount_must_be_positive(cls, v: float) -> float: + if v <= 0: + raise ValueError("amount must be greater than 0") + return v + + +class TransactionResponse(BaseModel): + id: UUID + amount: float + merchant_name: str + merchant_category: MerchantCategory + location: str + timestamp: datetime + card_id: str + account_id: str + + +class RiskLevel(str, Enum): + low = "low" + medium = "medium" + high = "high" + critical = "critical" + + +class AlertStatus(str, Enum): + pending = "pending" + under_review = "under_review" + confirmed_fraud = "confirmed_fraud" + false_positive = "false_positive" + escalated = "escalated" + + +class StatusHistoryEntry(BaseModel): + status: AlertStatus + timestamp: datetime + changed_by: str + + +class AlertCreate(BaseModel): + model_config = ConfigDict( + 
extra="forbid", + json_schema_extra={ + "example": { + "transaction_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6", + "risk_score": 0.85, + } + }, + ) + + transaction_id: UUID + risk_score: float + + @field_validator("risk_score") + @classmethod + def risk_score_in_range(cls, v: float) -> float: + if v < 0.0 or v > 1.0: + raise ValueError("risk_score must be between 0.0 and 1.0 inclusive") + return v + + +class AlertResponse(BaseModel): + id: UUID + transaction_id: UUID + transaction: TransactionResponse + risk_score: float + risk_level: RiskLevel + status: AlertStatus + analyst_id: Optional[str] + contains_pii: bool + created_at: datetime + updated_at: datetime + status_history: list[StatusHistoryEntry] + + +TERMINAL_STATUSES = {AlertStatus.confirmed_fraud, AlertStatus.false_positive, AlertStatus.escalated} + +VALID_TRANSITIONS = { + AlertStatus.pending: {AlertStatus.under_review}, + AlertStatus.under_review: {AlertStatus.confirmed_fraud, AlertStatus.false_positive, AlertStatus.escalated}, +} + + +class AssignRequest(BaseModel): + model_config = ConfigDict(extra="forbid") + analyst_id: str + + +class StatusUpdateRequest(BaseModel): + model_config = ConfigDict(extra="forbid") + status: AlertStatus + changed_by: str + + +class SummaryResponse(BaseModel): + total_alerts: int + by_status: dict[str, int] + by_risk_level: dict[str, int] + avg_resolution_time_seconds: Optional[float] + + +class AlertListResponse(BaseModel): + alerts: list[AlertResponse] + total: int + + +def derive_risk_level(score: float) -> RiskLevel: + if score < 0.3: + return RiskLevel.low + if score < 0.6: + return RiskLevel.medium + if score < 0.8: + return RiskLevel.high + return RiskLevel.critical diff --git a/fraud-alert-service/src/pii.py b/fraud-alert-service/src/pii.py new file mode 100644 index 00000000..39de14c3 --- /dev/null +++ b/fraud-alert-service/src/pii.py @@ -0,0 +1,14 @@ +from src.models import TransactionResponse + + +def mask_value(value: str) -> str: + if len(value) <= 4: + return 
"****" + return f"****{value[-4:]}" + + +def mask_transaction(tx: TransactionResponse) -> TransactionResponse: + return tx.model_copy(update={ + "card_id": mask_value(tx.card_id), + "account_id": mask_value(tx.account_id), + }) diff --git a/fraud-alert-service/src/routes/__init__.py b/fraud-alert-service/src/routes/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/fraud-alert-service/src/routes/__pycache__/__init__.cpython-310.pyc b/fraud-alert-service/src/routes/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 00000000..637d4a30 Binary files /dev/null and b/fraud-alert-service/src/routes/__pycache__/__init__.cpython-310.pyc differ diff --git a/fraud-alert-service/src/routes/__pycache__/alerts.cpython-310.pyc b/fraud-alert-service/src/routes/__pycache__/alerts.cpython-310.pyc new file mode 100644 index 00000000..83c38def Binary files /dev/null and b/fraud-alert-service/src/routes/__pycache__/alerts.cpython-310.pyc differ diff --git a/fraud-alert-service/src/routes/__pycache__/transactions.cpython-310.pyc b/fraud-alert-service/src/routes/__pycache__/transactions.cpython-310.pyc new file mode 100644 index 00000000..890aeb9d Binary files /dev/null and b/fraud-alert-service/src/routes/__pycache__/transactions.cpython-310.pyc differ diff --git a/fraud-alert-service/src/routes/alerts.py b/fraud-alert-service/src/routes/alerts.py new file mode 100644 index 00000000..bc4e4aa9 --- /dev/null +++ b/fraud-alert-service/src/routes/alerts.py @@ -0,0 +1,287 @@ +import json +import uuid +from datetime import datetime, timezone + +from typing import Literal + +from fastapi import APIRouter, HTTPException, Query + +from src.database import db +from src.models import ( + AlertCreate, + AlertListResponse, + AlertResponse, + AlertStatus, + AssignRequest, + RiskLevel, + StatusHistoryEntry, + StatusUpdateRequest, + SummaryResponse, + TERMINAL_STATUSES, + VALID_TRANSITIONS, + TransactionResponse, + derive_risk_level, +) +from src.pii import 
mask_transaction + +router = APIRouter(prefix="/alerts", tags=["alerts"]) + + +def _build_alert_response(alert_row, tx_row, show_pii: bool = False) -> AlertResponse: + transaction = TransactionResponse( + id=tx_row["id"], + amount=tx_row["amount"], + merchant_name=tx_row["merchant_name"], + merchant_category=tx_row["merchant_category"], + location=tx_row["location"], + timestamp=tx_row["timestamp"], + card_id=tx_row["card_id"], + account_id=tx_row["account_id"], + ) + if not show_pii: + transaction = mask_transaction(transaction) + history = [StatusHistoryEntry(**entry) for entry in json.loads(alert_row["status_history"])] + return AlertResponse( + id=alert_row["id"], + transaction_id=alert_row["transaction_id"], + transaction=transaction, + risk_score=alert_row["risk_score"], + risk_level=alert_row["risk_level"], + status=alert_row["status"], + analyst_id=alert_row["analyst_id"], + contains_pii=bool(alert_row["contains_pii"]), + created_at=alert_row["created_at"], + updated_at=alert_row["updated_at"], + status_history=history, + ) + + +@router.post("", response_model=AlertResponse, status_code=201) +def create_alert(body: AlertCreate): + alert_id = str(uuid.uuid4()) + now = datetime.now(timezone.utc) + risk_level = derive_risk_level(body.risk_score) + initial_history = json.dumps([ + {"status": "pending", "timestamp": now.isoformat(), "changed_by": "system"} + ]) + + with db() as conn: + tx_row = conn.execute( + "SELECT * FROM transactions WHERE id = ?", (str(body.transaction_id),) + ).fetchone() + if tx_row is None: + raise HTTPException(status_code=404, detail="Transaction not found") + + existing = conn.execute( + "SELECT id FROM alerts WHERE transaction_id = ?", (str(body.transaction_id),) + ).fetchone() + if existing is not None: + raise HTTPException(status_code=409, detail="Alert already exists for this transaction") + + conn.execute( + """ + INSERT INTO alerts + (id, transaction_id, risk_score, risk_level, status, analyst_id, + contains_pii, created_at, 
updated_at, status_history) + VALUES (?, ?, ?, ?, 'pending', NULL, 1, ?, ?, ?) + """, + ( + alert_id, + str(body.transaction_id), + body.risk_score, + risk_level.value, + now.isoformat(), + now.isoformat(), + initial_history, + ), + ) + alert_row = conn.execute("SELECT * FROM alerts WHERE id = ?", (alert_id,)).fetchone() + + return _build_alert_response(alert_row, tx_row) + + +@router.get("", response_model=AlertListResponse) +def list_alerts( + status: AlertStatus | None = Query(default=None), + risk_level: RiskLevel | None = Query(default=None), + analyst_id: str | None = Query(default=None), + created_after: datetime | None = Query(default=None), + created_before: datetime | None = Query(default=None), + show_pii: Literal["true", "false"] | None = Query(default=None), +): + conditions = [] + params = [] + + if status is not None: + conditions.append("a.status = ?") + params.append(status.value) + + if risk_level is not None: + conditions.append("a.risk_level = ?") + params.append(risk_level.value) + + if analyst_id is not None: + if analyst_id == "unassigned": + conditions.append("a.analyst_id IS NULL") + else: + conditions.append("a.analyst_id = ?") + params.append(analyst_id) + + if created_after is not None: + conditions.append("a.created_at >= ?") + params.append(created_after.isoformat()) + + if created_before is not None: + conditions.append("a.created_at <= ?") + params.append(created_before.isoformat()) + + where = ("WHERE " + " AND ".join(conditions)) if conditions else "" + query = f""" + SELECT a.*, t.id AS t_id, t.amount, t.merchant_name, t.merchant_category, + t.location, t.timestamp, t.card_id, t.account_id + FROM alerts a + JOIN transactions t ON a.transaction_id = t.id + {where} + ORDER BY a.created_at DESC + """ + + with db() as conn: + rows = conn.execute(query, params).fetchall() + + reveal_pii = show_pii == "true" + alerts = [] + for row in rows: + tx = TransactionResponse( + id=row["t_id"], + amount=row["amount"], + 
merchant_name=row["merchant_name"], + merchant_category=row["merchant_category"], + location=row["location"], + timestamp=row["timestamp"], + card_id=row["card_id"], + account_id=row["account_id"], + ) + if not reveal_pii: + tx = mask_transaction(tx) + history = [StatusHistoryEntry(**e) for e in json.loads(row["status_history"])] + alerts.append(AlertResponse( + id=row["id"], + transaction_id=row["transaction_id"], + transaction=tx, + risk_score=row["risk_score"], + risk_level=row["risk_level"], + status=row["status"], + analyst_id=row["analyst_id"], + contains_pii=bool(row["contains_pii"]), + created_at=row["created_at"], + updated_at=row["updated_at"], + status_history=history, + )) + + return AlertListResponse(alerts=alerts, total=len(alerts)) + + +@router.get("/summary", response_model=SummaryResponse) +def get_summary(): + by_status = {s.value: 0 for s in AlertStatus} + by_risk_level = {r.value: 0 for r in RiskLevel} + total_alerts = 0 + resolution_times = [] + + with db() as conn: + for row in conn.execute("SELECT status, risk_level, created_at, status_history FROM alerts").fetchall(): + total_alerts += 1 + by_status[row["status"]] += 1 + by_risk_level[row["risk_level"]] += 1 + + status = AlertStatus(row["status"]) + if status in TERMINAL_STATUSES: + history = json.loads(row["status_history"]) + terminal_entry = next( + (e for e in reversed(history) if AlertStatus(e["status"]) in TERMINAL_STATUSES), + None, + ) + if terminal_entry: + created = datetime.fromisoformat(row["created_at"]) + resolved = datetime.fromisoformat(terminal_entry["timestamp"]) + resolution_times.append((resolved - created).total_seconds()) + + avg = sum(resolution_times) / len(resolution_times) if resolution_times else None + + return SummaryResponse( + total_alerts=total_alerts, + by_status=by_status, + by_risk_level=by_risk_level, + avg_resolution_time_seconds=avg, + ) + + +@router.get("/{alert_id}", response_model=AlertResponse) +def get_alert(alert_id: str, show_pii: Literal["true", 
"false"] | None = Query(default=None)): + with db() as conn: + alert_row = conn.execute( + "SELECT * FROM alerts WHERE id = ?", (alert_id,) + ).fetchone() + if alert_row is None: + raise HTTPException(status_code=404, detail="Alert not found") + tx_row = conn.execute( + "SELECT * FROM transactions WHERE id = ?", (alert_row["transaction_id"],) + ).fetchone() + return _build_alert_response(alert_row, tx_row, show_pii=(show_pii == "true")) + + +@router.patch("/{alert_id}/assign", response_model=AlertResponse) +def assign_analyst(alert_id: str, body: AssignRequest): + now = datetime.now(timezone.utc) + with db() as conn: + alert_row = conn.execute( + "SELECT * FROM alerts WHERE id = ?", (alert_id,) + ).fetchone() + if alert_row is None: + raise HTTPException(status_code=404, detail="Alert not found") + + current_status = AlertStatus(alert_row["status"]) + if current_status in TERMINAL_STATUSES: + raise HTTPException(status_code=409, detail="Cannot assign analyst to a terminal alert") + + conn.execute( + "UPDATE alerts SET analyst_id = ?, updated_at = ? 
WHERE id = ?", + (body.analyst_id, now.isoformat(), alert_id), + ) + alert_row = conn.execute("SELECT * FROM alerts WHERE id = ?", (alert_id,)).fetchone() + tx_row = conn.execute( + "SELECT * FROM transactions WHERE id = ?", (alert_row["transaction_id"],) + ).fetchone() + return _build_alert_response(alert_row, tx_row) + + +@router.patch("/{alert_id}/status", response_model=AlertResponse) +def update_status(alert_id: str, body: StatusUpdateRequest): + now = datetime.now(timezone.utc) + with db() as conn: + alert_row = conn.execute( + "SELECT * FROM alerts WHERE id = ?", (alert_id,) + ).fetchone() + if alert_row is None: + raise HTTPException(status_code=404, detail="Alert not found") + + current_status = AlertStatus(alert_row["status"]) + allowed = VALID_TRANSITIONS.get(current_status, set()) + if body.status not in allowed: + raise HTTPException(status_code=409, detail=f"Invalid transition: {current_status} → {body.status}") + + if body.status == AlertStatus.under_review and alert_row["analyst_id"] is None: + raise HTTPException(status_code=409, detail="Cannot move to under_review without an assigned analyst") + + history = json.loads(alert_row["status_history"]) + history.append({"status": body.status.value, "timestamp": now.isoformat(), "changed_by": body.changed_by}) + + conn.execute( + "UPDATE alerts SET status = ?, status_history = ?, updated_at = ? 
WHERE id = ?", + (body.status.value, json.dumps(history), now.isoformat(), alert_id), + ) + alert_row = conn.execute("SELECT * FROM alerts WHERE id = ?", (alert_id,)).fetchone() + tx_row = conn.execute( + "SELECT * FROM transactions WHERE id = ?", (alert_row["transaction_id"],) + ).fetchone() + return _build_alert_response(alert_row, tx_row) diff --git a/fraud-alert-service/src/routes/transactions.py b/fraud-alert-service/src/routes/transactions.py new file mode 100644 index 00000000..f3c4a0eb --- /dev/null +++ b/fraud-alert-service/src/routes/transactions.py @@ -0,0 +1,65 @@ +import uuid + +from typing import Literal + +from fastapi import APIRouter, HTTPException, Query + +from src.database import db +from src.models import TransactionCreate, TransactionResponse +from src.pii import mask_transaction + +router = APIRouter(prefix="/transactions", tags=["transactions"]) + + +def _row_to_response(row) -> TransactionResponse: + return TransactionResponse( + id=row["id"], + amount=row["amount"], + merchant_name=row["merchant_name"], + merchant_category=row["merchant_category"], + location=row["location"], + timestamp=row["timestamp"], + card_id=row["card_id"], + account_id=row["account_id"], + ) + + +@router.post("", response_model=TransactionResponse, status_code=201) +def create_transaction(body: TransactionCreate, show_pii: Literal["true", "false"] | None = Query(default=None)): + transaction_id = str(uuid.uuid4()) + with db() as conn: + conn.execute( + """ + INSERT INTO transactions + (id, amount, merchant_name, merchant_category, location, timestamp, card_id, account_id) + VALUES + (?, ?, ?, ?, ?, ?, ?, ?) 
+ """, + ( + transaction_id, + body.amount, + body.merchant_name, + body.merchant_category.value, + body.location, + body.timestamp.isoformat(), + body.card_id, + body.account_id, + ), + ) + row = conn.execute( + "SELECT * FROM transactions WHERE id = ?", (transaction_id,) + ).fetchone() + tx = _row_to_response(row) + return tx if show_pii == "true" else mask_transaction(tx) + + +@router.get("/{transaction_id}", response_model=TransactionResponse) +def get_transaction(transaction_id: str, show_pii: Literal["true", "false"] | None = Query(default=None)): + with db() as conn: + row = conn.execute( + "SELECT * FROM transactions WHERE id = ?", (transaction_id,) + ).fetchone() + if row is None: + raise HTTPException(status_code=404, detail="Transaction not found") + tx = _row_to_response(row) + return tx if show_pii == "true" else mask_transaction(tx) diff --git a/fraud-alert-service/tests/__init__.py b/fraud-alert-service/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/fraud-alert-service/tests/conftest.py b/fraud-alert-service/tests/conftest.py new file mode 100644 index 00000000..c6e9e62f --- /dev/null +++ b/fraud-alert-service/tests/conftest.py @@ -0,0 +1,23 @@ +import tempfile +from pathlib import Path + +import pytest +from fastapi.testclient import TestClient + +import src.database as database_module +from src.database import init_db +from src.main import app + + +@pytest.fixture +def client(tmp_path): + db_path = tmp_path / "test.db" + # Point the module-level DB_PATH to the temp file + original = database_module.DB_PATH + database_module.DB_PATH = db_path + init_db(db_path) + + with TestClient(app) as c: + yield c + + database_module.DB_PATH = original diff --git a/fraud-alert-service/tests/test_alerts.py b/fraud-alert-service/tests/test_alerts.py new file mode 100644 index 00000000..af7112f5 --- /dev/null +++ b/fraud-alert-service/tests/test_alerts.py @@ -0,0 +1,211 @@ +import pytest + +VALID_TRANSACTION = { + "amount": 249.99, + 
"merchant_name": "Apple Store", + "merchant_category": "electronics", + "location": "Charlotte, NC", + "timestamp": "2024-01-15T10:30:00Z", + "card_id": "4111111111111234", + "account_id": "ACC9876543210", +} + + +def create_transaction(client): + return client.post("/transactions", json=VALID_TRANSACTION).json() + + +def create_alert(client, transaction_id, risk_score=0.75): + return client.post("/alerts", json={"transaction_id": transaction_id, "risk_score": risk_score}) + + +# --------------------------------------------------------------------------- +# POST /alerts +# --------------------------------------------------------------------------- + +def test_create_alert_returns_201(client): + tx = create_transaction(client) + response = create_alert(client, tx["id"]) + assert response.status_code == 201 + + +def test_create_alert_returns_generated_id(client): + tx = create_transaction(client) + data = create_alert(client, tx["id"]).json() + assert "id" in data + assert data["id"] != tx["id"] + + +def test_create_alert_status_is_pending(client): + tx = create_transaction(client) + data = create_alert(client, tx["id"]).json() + assert data["status"] == "pending" + + +def test_create_alert_analyst_id_is_null(client): + tx = create_transaction(client) + data = create_alert(client, tx["id"]).json() + assert data["analyst_id"] is None + + +def test_create_alert_contains_pii_true(client): + tx = create_transaction(client) + data = create_alert(client, tx["id"]).json() + assert data["contains_pii"] is True + + +def test_create_alert_initial_status_history(client): + tx = create_transaction(client) + data = create_alert(client, tx["id"]).json() + assert len(data["status_history"]) == 1 + entry = data["status_history"][0] + assert entry["status"] == "pending" + assert entry["changed_by"] == "system" + assert "timestamp" in entry + + +def test_create_alert_has_created_at_and_updated_at(client): + from datetime import datetime + tx = create_transaction(client) + data = 
create_alert(client, tx["id"]).json() + # Both timestamps must be valid ISO datetimes + created = datetime.fromisoformat(data["created_at"].replace("Z", "+00:00")) + updated = datetime.fromisoformat(data["updated_at"].replace("Z", "+00:00")) + # On creation they should be equal + assert created == updated + + +def test_create_alert_embeds_transaction(client): + tx = create_transaction(client) + data = create_alert(client, tx["id"]).json() + assert "transaction" in data + assert data["transaction"]["id"] == tx["id"] + + +def test_create_alert_missing_transaction_id(client): + response = client.post("/alerts", json={"risk_score": 0.5}) + assert response.status_code == 422 + + +def test_create_alert_missing_risk_score(client): + tx = create_transaction(client) + response = client.post("/alerts", json={"transaction_id": tx["id"]}) + assert response.status_code == 422 + + +def test_create_alert_transaction_not_found(client): + response = create_alert(client, "00000000-0000-0000-0000-000000000000") + assert response.status_code == 404 + + +def test_create_alert_duplicate_transaction_returns_409(client): + tx = create_transaction(client) + create_alert(client, tx["id"]) + response = create_alert(client, tx["id"]) + assert response.status_code == 409 + + +def test_create_alert_risk_score_below_zero(client): + tx = create_transaction(client) + response = create_alert(client, tx["id"], risk_score=-0.1) + assert response.status_code == 422 + + +def test_create_alert_risk_score_above_one(client): + tx = create_transaction(client) + response = create_alert(client, tx["id"], risk_score=1.1) + assert response.status_code == 422 + + +def test_create_alert_non_numeric_risk_score(client): + tx = create_transaction(client) + response = client.post("/alerts", json={"transaction_id": tx["id"], "risk_score": "high"}) + assert response.status_code == 422 + + +def test_create_alert_extra_fields_rejected(client): + tx = create_transaction(client) + response = client.post("/alerts", json={ 
+ "transaction_id": tx["id"], + "risk_score": 0.5, + "status": "confirmed_fraud", + }) + assert response.status_code == 422 + + +def test_create_alert_cannot_override_risk_level(client): + tx = create_transaction(client) + response = client.post("/alerts", json={ + "transaction_id": tx["id"], + "risk_score": 0.5, + "risk_level": "critical", + }) + assert response.status_code == 422 + + +def test_create_alert_cannot_override_created_at(client): + tx = create_transaction(client) + response = client.post("/alerts", json={ + "transaction_id": tx["id"], + "risk_score": 0.5, + "created_at": "2020-01-01T00:00:00Z", + }) + assert response.status_code == 422 + + +# --------------------------------------------------------------------------- +# Risk level derivation boundaries +# --------------------------------------------------------------------------- + +@pytest.mark.parametrize("score,expected_level", [ + (0.0, "low"), + (0.29, "low"), + (0.3, "medium"), + (0.59, "medium"), + (0.6, "high"), + (0.79, "high"), + (0.8, "critical"), + (1.0, "critical"), +]) +def test_risk_level_boundary(client, score, expected_level): + tx = create_transaction(client) + data = create_alert(client, tx["id"], risk_score=score).json() + assert data["risk_level"] == expected_level, ( + f"score={score} expected {expected_level}, got {data['risk_level']}" + ) + + +# --------------------------------------------------------------------------- +# GET /alerts/{id} +# --------------------------------------------------------------------------- + +def test_get_alert_returns_200(client): + tx = create_transaction(client) + created = create_alert(client, tx["id"]).json() + response = client.get(f"/alerts/{created['id']}") + assert response.status_code == 200 + + +def test_get_alert_returns_all_fields(client): + tx = create_transaction(client) + created = create_alert(client, tx["id"]).json() + data = client.get(f"/alerts/{created['id']}").json() + for field in ("id", "transaction_id", "transaction", 
"risk_score", "risk_level", + "status", "analyst_id", "contains_pii", "created_at", "updated_at", + "status_history"): + assert field in data, f"Missing field: {field}" + + +def test_get_alert_not_found(client): + response = client.get("/alerts/00000000-0000-0000-0000-000000000000") + assert response.status_code == 404 + + +def test_get_alert_matches_created(client): + tx = create_transaction(client) + created = create_alert(client, tx["id"], risk_score=0.65).json() + fetched = client.get(f"/alerts/{created['id']}").json() + assert fetched["id"] == created["id"] + assert fetched["risk_score"] == 0.65 + assert fetched["risk_level"] == "high" + assert fetched["transaction"]["id"] == tx["id"] diff --git a/fraud-alert-service/tests/test_filtering.py b/fraud-alert-service/tests/test_filtering.py new file mode 100644 index 00000000..4e86eaee --- /dev/null +++ b/fraud-alert-service/tests/test_filtering.py @@ -0,0 +1,276 @@ +import time + +import pytest + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +TX_BASE = { + "amount": 150.00, + "merchant_name": "Test Merchant", + "merchant_category": "electronics", + "location": "Charlotte, NC", + "timestamp": "2024-04-01T10:00:00Z", + "card_id": "4111111111111234", + "account_id": "ACC0000000001", +} + + +def make_transaction(client, **overrides): + return client.post("/transactions", json={**TX_BASE, **overrides}).json() + + +def make_alert(client, transaction_id, risk_score=0.5): + return client.post("/alerts", json={"transaction_id": transaction_id, "risk_score": risk_score}).json() + + +def assign(client, alert_id, analyst_id="analyst-1"): + return client.patch(f"/alerts/{alert_id}/assign", json={"analyst_id": analyst_id}).json() + + +def transition(client, alert_id, status, changed_by="analyst-1"): + return client.patch(f"/alerts/{alert_id}/status", json={"status": status, "changed_by": 
changed_by}).json() + + +# --------------------------------------------------------------------------- +# No filters — returns all alerts +# --------------------------------------------------------------------------- + +def test_list_alerts_no_filters_returns_all(client): + for _ in range(3): + tx = make_transaction(client) + make_alert(client, tx["id"]) + response = client.get("/alerts") + assert response.status_code == 200 + data = response.json() + assert data["total"] == 3 + assert len(data["alerts"]) == 3 + + +def test_list_alerts_empty_returns_200(client): + response = client.get("/alerts") + assert response.status_code == 200 + assert response.json() == {"alerts": [], "total": 0} + + +# --------------------------------------------------------------------------- +# Single filters +# --------------------------------------------------------------------------- + +def test_filter_by_status_pending(client): + tx1 = make_transaction(client) + tx2 = make_transaction(client) + a1 = make_alert(client, tx1["id"]) + a2 = make_alert(client, tx2["id"]) + # Move a2 to under_review + assign(client, a2["id"]) + transition(client, a2["id"], "under_review") + + data = client.get("/alerts?status=pending").json() + assert data["total"] == 1 + assert data["alerts"][0]["id"] == a1["id"] + + +def test_filter_by_status_under_review(client): + tx1 = make_transaction(client) + tx2 = make_transaction(client) + make_alert(client, tx1["id"]) + a2 = make_alert(client, tx2["id"]) + assign(client, a2["id"]) + transition(client, a2["id"], "under_review") + + data = client.get("/alerts?status=under_review").json() + assert data["total"] == 1 + assert data["alerts"][0]["id"] == a2["id"] + + +def test_filter_by_risk_level_critical(client): + tx1 = make_transaction(client) + tx2 = make_transaction(client) + make_alert(client, tx1["id"], risk_score=0.9) # critical + make_alert(client, tx2["id"], risk_score=0.3) # medium + + data = client.get("/alerts?risk_level=critical").json() + assert 
data["total"] == 1 + assert data["alerts"][0]["risk_level"] == "critical" + + +def test_filter_by_analyst_id(client): + tx1 = make_transaction(client) + tx2 = make_transaction(client) + a1 = make_alert(client, tx1["id"]) + a2 = make_alert(client, tx2["id"]) + assign(client, a1["id"], "analyst-42") + assign(client, a2["id"], "analyst-99") + + data = client.get("/alerts?analyst_id=analyst-42").json() + assert data["total"] == 1 + assert data["alerts"][0]["analyst_id"] == "analyst-42" + + +def test_filter_by_analyst_id_unassigned(client): + tx1 = make_transaction(client) + tx2 = make_transaction(client) + a1 = make_alert(client, tx1["id"]) + a2 = make_alert(client, tx2["id"]) + assign(client, a2["id"], "analyst-1") + + data = client.get("/alerts?analyst_id=unassigned").json() + assert data["total"] == 1 + assert data["alerts"][0]["id"] == a1["id"] + + +def test_filter_by_created_after(client): + tx1 = make_transaction(client) + a1 = make_alert(client, tx1["id"]) + after_ts = a1["created_at"] + + time.sleep(0.05) + + tx2 = make_transaction(client) + a2 = make_alert(client, tx2["id"]) + + data = client.get(f"/alerts?created_after={after_ts}").json() + ids = [a["id"] for a in data["alerts"]] + assert a2["id"] in ids + assert a1["id"] in ids # created_at >= boundary is inclusive + + +def test_filter_by_created_before(client): + tx1 = make_transaction(client) + a1 = make_alert(client, tx1["id"]) + before_ts = a1["created_at"] + + time.sleep(0.05) + + tx2 = make_transaction(client) + make_alert(client, tx2["id"]) + + data = client.get(f"/alerts?created_before={before_ts}").json() + ids = [a["id"] for a in data["alerts"]] + assert a1["id"] in ids + assert len(ids) == 1 + + +# --------------------------------------------------------------------------- +# Combined filters +# --------------------------------------------------------------------------- + +def test_combined_status_and_risk_level(client): + tx1 = make_transaction(client) + tx2 = make_transaction(client) + tx3 = 
make_transaction(client) + a1 = make_alert(client, tx1["id"], risk_score=0.9) # critical, pending + a2 = make_alert(client, tx2["id"], risk_score=0.9) # critical, under_review + make_alert(client, tx3["id"], risk_score=0.3) # medium, pending + + assign(client, a2["id"]) + transition(client, a2["id"], "under_review") + + data = client.get("/alerts?status=pending&risk_level=critical").json() + assert data["total"] == 1 + assert data["alerts"][0]["id"] == a1["id"] + + +def test_combined_status_and_analyst_id(client): + tx1 = make_transaction(client) + tx2 = make_transaction(client) + a1 = make_alert(client, tx1["id"]) + a2 = make_alert(client, tx2["id"]) + assign(client, a1["id"], "analyst-1") + assign(client, a2["id"], "analyst-1") + transition(client, a1["id"], "under_review") + + data = client.get("/alerts?status=under_review&analyst_id=analyst-1").json() + assert data["total"] == 1 + assert data["alerts"][0]["id"] == a1["id"] + + +def test_combined_date_range(client): + tx1 = make_transaction(client) + a1 = make_alert(client, tx1["id"]) + time.sleep(0.05) + tx2 = make_transaction(client) + a2 = make_alert(client, tx2["id"]) + time.sleep(0.05) + tx3 = make_transaction(client) + make_alert(client, tx3["id"]) + + after = a1["created_at"] + before = a2["created_at"] + data = client.get(f"/alerts?created_after={after}&created_before={before}").json() + ids = [a["id"] for a in data["alerts"]] + assert a1["id"] in ids + assert a2["id"] in ids + + +# --------------------------------------------------------------------------- +# Edge cases +# --------------------------------------------------------------------------- + +def test_invalid_status_returns_422(client): + response = client.get("/alerts?status=not_a_status") + assert response.status_code == 422 + + +def test_invalid_risk_level_returns_422(client): + response = client.get("/alerts?risk_level=extreme") + assert response.status_code == 422 + + +def test_invalid_created_after_returns_422(client): + response = 
client.get("/alerts?created_after=not-a-date") + assert response.status_code == 422 + + +def test_invalid_created_before_returns_422(client): + response = client.get("/alerts?created_before=not-a-date") + assert response.status_code == 422 + + +def test_filters_matching_zero_returns_empty(client): + tx = make_transaction(client) + make_alert(client, tx["id"]) + data = client.get("/alerts?status=confirmed_fraud").json() + assert data == {"alerts": [], "total": 0} + + +def test_created_after_greater_than_before_returns_empty(client): + tx = make_transaction(client) + a = make_alert(client, tx["id"]) + data = client.get(f"/alerts?created_after={a['created_at']}&created_before=2020-01-01T00:00:00Z").json() + assert data == {"alerts": [], "total": 0} + + +def test_results_sorted_by_created_at_descending(client): + for _ in range(3): + tx = make_transaction(client) + make_alert(client, tx["id"]) + time.sleep(0.02) + + data = client.get("/alerts").json() + timestamps = [a["created_at"] for a in data["alerts"]] + assert timestamps == sorted(timestamps, reverse=True) + + +# --------------------------------------------------------------------------- +# PII masking on list endpoint +# --------------------------------------------------------------------------- + +def test_list_alerts_masks_pii_by_default(client): + tx = make_transaction(client) + make_alert(client, tx["id"]) + data = client.get("/alerts").json() + alert = data["alerts"][0] + assert alert["transaction"]["card_id"] == "****1234" + assert alert["transaction"]["account_id"] == "****0001" + + +def test_list_alerts_show_pii_true_reveals_values(client): + tx = make_transaction(client) + make_alert(client, tx["id"]) + data = client.get("/alerts?show_pii=true").json() + alert = data["alerts"][0] + assert alert["transaction"]["card_id"] == TX_BASE["card_id"] + assert alert["transaction"]["account_id"] == TX_BASE["account_id"] diff --git a/fraud-alert-service/tests/test_pii_masking.py 
b/fraud-alert-service/tests/test_pii_masking.py new file mode 100644 index 00000000..59cbf6fd --- /dev/null +++ b/fraud-alert-service/tests/test_pii_masking.py @@ -0,0 +1,169 @@ +import pytest + +from src.pii import mask_value + +VALID_TRANSACTION = { + "amount": 99.99, + "merchant_name": "Shell", + "merchant_category": "gas_station", + "location": "Charlotte, NC", + "timestamp": "2024-03-01T12:00:00Z", + "card_id": "4111111111117890", + "account_id": "ACC1234567890", +} + + +def create_transaction(client, card_id="4111111111117890", account_id="ACC1234567890"): + payload = {**VALID_TRANSACTION, "card_id": card_id, "account_id": account_id} + return client.post("/transactions", json=payload).json() + + +def create_alert(client, transaction_id): + return client.post("/alerts", json={"transaction_id": transaction_id, "risk_score": 0.5}).json() + + +# --------------------------------------------------------------------------- +# Unit: mask_value +# --------------------------------------------------------------------------- + +def test_mask_value_shows_last_4(): + assert mask_value("1234567890") == "****7890" + + +def test_mask_value_exactly_4_chars(): + assert mask_value("1234") == "****" + + +def test_mask_value_fewer_than_4_chars(): + assert mask_value("AB") == "****" + + +def test_mask_value_empty_string(): + assert mask_value("") == "****" + + +def test_mask_value_long_string(): + assert mask_value("A" * 50 + "1234") == "****1234" + + +# --------------------------------------------------------------------------- +# Default masking — POST /transactions +# --------------------------------------------------------------------------- + +def test_post_transaction_masks_card_id_by_default(client): + data = create_transaction(client) + assert data["card_id"] == "****7890" + assert data["account_id"] != "ACC1234567890" + + +def test_post_transaction_masks_account_id_by_default(client): + data = create_transaction(client) + assert data["account_id"] == "****7890" + + +# 
--------------------------------------------------------------------------- +# Default masking — GET /transactions/{id} +# --------------------------------------------------------------------------- + +def test_get_transaction_masks_by_default(client): + tx = create_transaction(client) + fetched = client.get(f"/transactions/{tx['id']}").json() + assert fetched["card_id"] == "****7890" + assert fetched["account_id"] == "****7890" + + +def test_get_transaction_masks_last_4(client): + tx = create_transaction(client, card_id="1234567890", account_id="9876543210") + fetched = client.get(f"/transactions/{tx['id']}").json() + assert fetched["card_id"] == "****7890" + assert fetched["account_id"] == "****3210" + + +# --------------------------------------------------------------------------- +# Default masking — GET /alerts/{id} +# --------------------------------------------------------------------------- + +def test_get_alert_masks_embedded_transaction_by_default(client): + tx = create_transaction(client) + alert = create_alert(client, tx["id"]) + fetched = client.get(f"/alerts/{alert['id']}").json() + assert fetched["transaction"]["card_id"] == "****7890" + assert fetched["transaction"]["account_id"] == "****7890" + + +# --------------------------------------------------------------------------- +# Authorized access — show_pii=true +# --------------------------------------------------------------------------- + +def test_get_transaction_show_pii_returns_full_values(client): + tx = create_transaction(client) + fetched = client.get(f"/transactions/{tx['id']}?show_pii=true").json() + assert fetched["card_id"] == "4111111111117890" + assert fetched["account_id"] == "ACC1234567890" + + +def test_post_transaction_show_pii_returns_full_values(client): + data = client.post("/transactions?show_pii=true", json=VALID_TRANSACTION).json() + assert data["card_id"] == "4111111111117890" + assert data["account_id"] == "ACC1234567890" + + +def 
test_get_alert_show_pii_returns_full_values(client): + tx = create_transaction(client) + alert = create_alert(client, tx["id"]) + fetched = client.get(f"/alerts/{alert['id']}?show_pii=true").json() + assert fetched["transaction"]["card_id"] == "4111111111117890" + assert fetched["transaction"]["account_id"] == "ACC1234567890" + + +def test_show_pii_false_same_as_omitting(client): + tx = create_transaction(client) + masked = client.get(f"/transactions/{tx['id']}?show_pii=false").json() + assert masked["card_id"] == "****7890" + assert masked["account_id"] == "****7890" + + +# --------------------------------------------------------------------------- +# Consistency +# --------------------------------------------------------------------------- + +def test_masking_does_not_affect_stored_data(client): + tx = create_transaction(client) + # Default response is masked + masked = client.get(f"/transactions/{tx['id']}").json() + assert masked["card_id"] == "****7890" + # show_pii=true still returns the full original value from storage + full = client.get(f"/transactions/{tx['id']}?show_pii=true").json() + assert full["card_id"] == "4111111111117890" + + +def test_contains_pii_flag_is_true(client): + tx = create_transaction(client) + alert = create_alert(client, tx["id"]) + assert alert["contains_pii"] is True + + +# --------------------------------------------------------------------------- +# Edge cases +# --------------------------------------------------------------------------- + +def test_short_card_id_fully_masked(client): + tx = create_transaction(client, card_id="1234", account_id="5678") + fetched = client.get(f"/transactions/{tx['id']}").json() + assert fetched["card_id"] == "****" + assert fetched["account_id"] == "****" + + +def test_very_long_pii_only_shows_last_4(client): + long_id = "X" * 40 + "9999" + tx = create_transaction(client, card_id=long_id, account_id=long_id) + fetched = client.get(f"/transactions/{tx['id']}").json() + assert fetched["card_id"] == 
"****9999" + assert fetched["account_id"] == "****9999" + + +@pytest.mark.parametrize("bad_value", ["yes", "1", "TRUE", "on"]) +def test_show_pii_non_boolean_returns_422(client, bad_value): + tx = create_transaction(client) + response = client.get(f"/transactions/{tx['id']}?show_pii={bad_value}") + assert response.status_code == 422 diff --git a/fraud-alert-service/tests/test_state_machine.py b/fraud-alert-service/tests/test_state_machine.py new file mode 100644 index 00000000..4d752dcd --- /dev/null +++ b/fraud-alert-service/tests/test_state_machine.py @@ -0,0 +1,236 @@ +import time + +import pytest + +VALID_TRANSACTION = { + "amount": 199.99, + "merchant_name": "Delta Airlines", + "merchant_category": "travel", + "location": "Atlanta, GA", + "timestamp": "2024-02-01T08:00:00Z", + "card_id": "4111111111115678", + "account_id": "ACC1234567890", +} + + +def create_transaction(client): + return client.post("/transactions", json=VALID_TRANSACTION).json() + + +def create_alert(client, risk_score=0.75): + tx = create_transaction(client) + return client.post("/alerts", json={"transaction_id": tx["id"], "risk_score": risk_score}).json() + + +def assign(client, alert_id, analyst_id="analyst-1"): + return client.patch(f"/alerts/{alert_id}/assign", json={"analyst_id": analyst_id}) + + +def transition(client, alert_id, status, changed_by="analyst-1"): + return client.patch(f"/alerts/{alert_id}/status", json={"status": status, "changed_by": changed_by}) + + +# --------------------------------------------------------------------------- +# Valid transitions +# --------------------------------------------------------------------------- + +def test_pending_to_under_review(client): + alert = create_alert(client) + assign(client, alert["id"]) + response = transition(client, alert["id"], "under_review") + assert response.status_code == 200 + assert response.json()["status"] == "under_review" + + +def test_under_review_to_confirmed_fraud(client): + alert = create_alert(client) + 
assign(client, alert["id"]) + transition(client, alert["id"], "under_review") + response = transition(client, alert["id"], "confirmed_fraud") + assert response.status_code == 200 + assert response.json()["status"] == "confirmed_fraud" + + +def test_under_review_to_false_positive(client): + alert = create_alert(client) + assign(client, alert["id"]) + transition(client, alert["id"], "under_review") + response = transition(client, alert["id"], "false_positive") + assert response.status_code == 200 + assert response.json()["status"] == "false_positive" + + +def test_under_review_to_escalated(client): + alert = create_alert(client) + assign(client, alert["id"]) + transition(client, alert["id"], "under_review") + response = transition(client, alert["id"], "escalated") + assert response.status_code == 200 + assert response.json()["status"] == "escalated" + + +def test_transition_appends_status_history(client): + alert = create_alert(client) + assign(client, alert["id"]) + data = transition(client, alert["id"], "under_review", changed_by="reviewer-99").json() + assert len(data["status_history"]) == 2 + assert data["status_history"][1]["status"] == "under_review" + assert data["status_history"][1]["changed_by"] == "reviewer-99" + assert "timestamp" in data["status_history"][1] + + +# --------------------------------------------------------------------------- +# Invalid transitions +# --------------------------------------------------------------------------- + +@pytest.mark.parametrize("target", ["confirmed_fraud", "false_positive", "escalated"]) +def test_pending_to_terminal_returns_409(client, target): + alert = create_alert(client) + response = transition(client, alert["id"], target) + assert response.status_code == 409 + + +def test_pending_to_under_review_without_analyst_returns_409(client): + alert = create_alert(client) + response = transition(client, alert["id"], "under_review") + assert response.status_code == 409 + + +def 
test_under_review_to_pending_returns_409(client): + alert = create_alert(client) + assign(client, alert["id"]) + transition(client, alert["id"], "under_review") + response = transition(client, alert["id"], "pending") + assert response.status_code == 409 + + +@pytest.mark.parametrize("target", ["pending", "under_review", "false_positive", "escalated"]) +def test_confirmed_fraud_to_any_returns_409(client, target): + alert = create_alert(client) + assign(client, alert["id"]) + transition(client, alert["id"], "under_review") + transition(client, alert["id"], "confirmed_fraud") + response = transition(client, alert["id"], target) + assert response.status_code == 409 + + +@pytest.mark.parametrize("target", ["pending", "under_review", "confirmed_fraud", "escalated"]) +def test_false_positive_to_any_returns_409(client, target): + alert = create_alert(client) + assign(client, alert["id"]) + transition(client, alert["id"], "under_review") + transition(client, alert["id"], "false_positive") + response = transition(client, alert["id"], target) + assert response.status_code == 409 + + +@pytest.mark.parametrize("target", ["pending", "under_review", "confirmed_fraud", "false_positive"]) +def test_escalated_to_any_returns_409(client, target): + alert = create_alert(client) + assign(client, alert["id"]) + transition(client, alert["id"], "under_review") + transition(client, alert["id"], "escalated") + response = transition(client, alert["id"], target) + assert response.status_code == 409 + + +# --------------------------------------------------------------------------- +# Analyst assignment +# --------------------------------------------------------------------------- + +def test_assign_to_pending_alert_succeeds(client): + alert = create_alert(client) + response = assign(client, alert["id"], "analyst-42") + assert response.status_code == 200 + assert response.json()["analyst_id"] == "analyst-42" + + +def test_assign_to_under_review_alert_succeeds(client): + alert = 
create_alert(client) + assign(client, alert["id"]) + transition(client, alert["id"], "under_review") + response = assign(client, alert["id"], "analyst-99") + assert response.status_code == 200 + assert response.json()["analyst_id"] == "analyst-99" + + +def test_assign_to_confirmed_fraud_returns_409(client): + alert = create_alert(client) + assign(client, alert["id"]) + transition(client, alert["id"], "under_review") + transition(client, alert["id"], "confirmed_fraud") + response = assign(client, alert["id"], "analyst-99") + assert response.status_code == 409 + + +def test_assign_to_false_positive_returns_409(client): + alert = create_alert(client) + assign(client, alert["id"]) + transition(client, alert["id"], "under_review") + transition(client, alert["id"], "false_positive") + response = assign(client, alert["id"], "analyst-99") + assert response.status_code == 409 + + +def test_assign_to_escalated_returns_409(client): + alert = create_alert(client) + assign(client, alert["id"]) + transition(client, alert["id"], "under_review") + transition(client, alert["id"], "escalated") + response = assign(client, alert["id"], "analyst-99") + assert response.status_code == 409 + + +def test_assign_updates_updated_at(client): + alert = create_alert(client) + original_updated_at = alert["updated_at"] + time.sleep(0.01) + data = assign(client, alert["id"]).json() + assert data["updated_at"] != original_updated_at + + +# --------------------------------------------------------------------------- +# Audit trail +# --------------------------------------------------------------------------- + +def test_new_alert_has_one_history_entry(client): + alert = create_alert(client) + assert len(alert["status_history"]) == 1 + assert alert["status_history"][0]["status"] == "pending" + + +def test_full_workflow_history_has_three_entries(client): + alert = create_alert(client) + assign(client, alert["id"]) + transition(client, alert["id"], "under_review", changed_by="analyst-1") + data = 
transition(client, alert["id"], "confirmed_fraud", changed_by="analyst-1").json() + assert len(data["status_history"]) == 3 + assert data["status_history"][0]["status"] == "pending" + assert data["status_history"][1]["status"] == "under_review" + assert data["status_history"][2]["status"] == "confirmed_fraud" + + +def test_history_is_chronological(client): + alert = create_alert(client) + assign(client, alert["id"]) + transition(client, alert["id"], "under_review") + data = transition(client, alert["id"], "confirmed_fraud").json() + timestamps = [e["timestamp"] for e in data["status_history"]] + assert timestamps == sorted(timestamps) + + +def test_history_records_changed_by(client): + alert = create_alert(client) + assign(client, alert["id"]) + data = transition(client, alert["id"], "under_review", changed_by="supervisor-7").json() + assert data["status_history"][1]["changed_by"] == "supervisor-7" + + +def test_previous_history_entries_unchanged(client): + alert = create_alert(client) + assign(client, alert["id"]) + transition(client, alert["id"], "under_review", changed_by="analyst-1") + data = transition(client, alert["id"], "confirmed_fraud", changed_by="analyst-1").json() + # First entry must still be the original pending/system entry + assert data["status_history"][0]["status"] == "pending" + assert data["status_history"][0]["changed_by"] == "system" diff --git a/fraud-alert-service/tests/test_summary_stats.py b/fraud-alert-service/tests/test_summary_stats.py new file mode 100644 index 00000000..a603a3b8 --- /dev/null +++ b/fraud-alert-service/tests/test_summary_stats.py @@ -0,0 +1,204 @@ +import time + +VALID_TX = { + "amount": 75.00, + "merchant_name": "Amazon", + "merchant_category": "other", + "location": "Seattle, WA", + "timestamp": "2024-05-01T09:00:00Z", + "card_id": "4111111111119999", + "account_id": "ACC5555555555", +} + + +def make_transaction(client): + return client.post("/transactions", json=VALID_TX).json() + + +def make_alert(client, 
risk_score=0.5): + tx = make_transaction(client) + return client.post("/alerts", json={"transaction_id": tx["id"], "risk_score": risk_score}).json() + + +def resolve_alert(client, alert_id, terminal_status="confirmed_fraud"): + client.patch(f"/alerts/{alert_id}/assign", json={"analyst_id": "analyst-1"}) + client.patch(f"/alerts/{alert_id}/status", json={"status": "under_review", "changed_by": "analyst-1"}) + client.patch(f"/alerts/{alert_id}/status", json={"status": terminal_status, "changed_by": "analyst-1"}) + + +def get_summary(client): + response = client.get("/alerts/summary") + assert response.status_code == 200 + return response.json() + + +# --------------------------------------------------------------------------- +# Basic structure +# --------------------------------------------------------------------------- + +def test_summary_returns_200(client): + assert client.get("/alerts/summary").status_code == 200 + + +def test_summary_has_required_keys(client): + data = get_summary(client) + assert "total_alerts" in data + assert "by_status" in data + assert "by_risk_level" in data + assert "avg_resolution_time_seconds" in data + + +def test_summary_all_status_keys_present(client): + data = get_summary(client) + for key in ("pending", "under_review", "confirmed_fraud", "false_positive", "escalated"): + assert key in data["by_status"], f"Missing status key: {key}" + + +def test_summary_all_risk_level_keys_present(client): + data = get_summary(client) + for key in ("low", "medium", "high", "critical"): + assert key in data["by_risk_level"], f"Missing risk_level key: {key}" + + +# --------------------------------------------------------------------------- +# Zero-alert state +# --------------------------------------------------------------------------- + +def test_summary_zero_alerts(client): + data = get_summary(client) + assert data["total_alerts"] == 0 + assert all(v == 0 for v in data["by_status"].values()) + assert all(v == 0 for v in 
data["by_risk_level"].values()) + assert data["avg_resolution_time_seconds"] is None + + +# --------------------------------------------------------------------------- +# total_alerts +# --------------------------------------------------------------------------- + +def test_total_alerts_count(client): + for _ in range(4): + make_alert(client) + data = get_summary(client) + assert data["total_alerts"] == 4 + + +# --------------------------------------------------------------------------- +# by_status counts +# --------------------------------------------------------------------------- + +def test_by_status_counts(client): + a1 = make_alert(client) + a2 = make_alert(client) + a3 = make_alert(client) + + # Move a2 to under_review + client.patch(f"/alerts/{a2['id']}/assign", json={"analyst_id": "analyst-1"}) + client.patch(f"/alerts/{a2['id']}/status", json={"status": "under_review", "changed_by": "analyst-1"}) + + # Resolve a3 + resolve_alert(client, a3["id"]) + + data = get_summary(client) + assert data["by_status"]["pending"] == 1 + assert data["by_status"]["under_review"] == 1 + assert data["by_status"]["confirmed_fraud"] == 1 + assert data["by_status"]["false_positive"] == 0 + assert data["by_status"]["escalated"] == 0 + + +# --------------------------------------------------------------------------- +# by_risk_level counts +# --------------------------------------------------------------------------- + +def test_by_risk_level_counts(client): + tx1 = make_transaction(client) + tx2 = make_transaction(client) + tx3 = make_transaction(client) + client.post("/alerts", json={"transaction_id": tx1["id"], "risk_score": 0.1}) # low + client.post("/alerts", json={"transaction_id": tx2["id"], "risk_score": 0.7}) # high + client.post("/alerts", json={"transaction_id": tx3["id"], "risk_score": 0.9}) # critical + + data = get_summary(client) + assert data["by_risk_level"]["low"] == 1 + assert data["by_risk_level"]["medium"] == 0 + assert data["by_risk_level"]["high"] == 1 + 
assert data["by_risk_level"]["critical"] == 1 + + +# --------------------------------------------------------------------------- +# avg_resolution_time_seconds +# --------------------------------------------------------------------------- + +def test_avg_resolution_time_null_when_no_resolved(client): + make_alert(client) + data = get_summary(client) + assert data["avg_resolution_time_seconds"] is None + + +def test_avg_resolution_time_single_alert(client): + a = make_alert(client) + time.sleep(0.1) + resolve_alert(client, a["id"]) + data = get_summary(client) + assert data["avg_resolution_time_seconds"] is not None + assert data["avg_resolution_time_seconds"] > 0 + + +def test_avg_resolution_time_multiple_alerts(client): + from datetime import datetime + + def resolution_seconds(alert_id): + alert = client.get(f"/alerts/{alert_id}?show_pii=true").json() + created = datetime.fromisoformat(alert["created_at"].replace("Z", "+00:00")) + terminal = next( + e for e in reversed(alert["status_history"]) + if e["status"] in ("confirmed_fraud", "false_positive", "escalated") + ) + resolved = datetime.fromisoformat(terminal["timestamp"].replace("Z", "+00:00")) + return (resolved - created).total_seconds() + + a1 = make_alert(client) + time.sleep(0.05) + resolve_alert(client, a1["id"]) + + a2 = make_alert(client) + time.sleep(0.1) + resolve_alert(client, a2["id"], terminal_status="false_positive") + + a3 = make_alert(client) + time.sleep(0.15) + resolve_alert(client, a3["id"], terminal_status="escalated") + + expected_avg = ( + resolution_seconds(a1["id"]) + + resolution_seconds(a2["id"]) + + resolution_seconds(a3["id"]) + ) / 3 + + data = get_summary(client) + assert data["avg_resolution_time_seconds"] is not None + assert abs(data["avg_resolution_time_seconds"] - expected_avg) < 0.001 + + +def test_non_terminal_alerts_excluded_from_avg(client): + # One resolved, one still pending + a1 = make_alert(client) + time.sleep(0.05) + resolve_alert(client, a1["id"]) + + 
make_alert(client) # stays pending + + data = get_summary(client) + # Should still compute avg from only the resolved one + assert data["avg_resolution_time_seconds"] is not None + assert data["total_alerts"] == 2 + + +def test_avg_uses_terminal_history_timestamp(client): + """Resolution time should be > 0, confirming it uses transition timestamp not updated_at.""" + a = make_alert(client) + time.sleep(0.05) + resolve_alert(client, a["id"]) + data = get_summary(client) + assert data["avg_resolution_time_seconds"] > 0 diff --git a/fraud-alert-service/tests/test_transactions.py b/fraud-alert-service/tests/test_transactions.py new file mode 100644 index 00000000..706e0a5a --- /dev/null +++ b/fraud-alert-service/tests/test_transactions.py @@ -0,0 +1,120 @@ +import pytest + +VALID_PAYLOAD = { + "amount": 49.99, + "merchant_name": "Best Buy", + "merchant_category": "electronics", + "location": "Charlotte, NC", + "timestamp": "2024-01-15T10:30:00Z", + "card_id": "4111111111111234", + "account_id": "ACC9876543210", +} + + +# --------------------------------------------------------------------------- +# POST /transactions +# --------------------------------------------------------------------------- + +def test_create_transaction_returns_201(client): + response = client.post("/transactions", json=VALID_PAYLOAD) + assert response.status_code == 201 + + +def test_create_transaction_returns_generated_id(client): + response = client.post("/transactions", json=VALID_PAYLOAD) + data = response.json() + assert "id" in data + # id was not in the request body + assert data["id"] not in VALID_PAYLOAD.values() + + +def test_create_transaction_returns_all_fields(client): + response = client.post("/transactions", json=VALID_PAYLOAD) + data = response.json() + assert data["amount"] == VALID_PAYLOAD["amount"] + assert data["merchant_name"] == VALID_PAYLOAD["merchant_name"] + assert data["merchant_category"] == VALID_PAYLOAD["merchant_category"] + assert data["location"] == 
VALID_PAYLOAD["location"] + + +def test_create_transaction_missing_required_field(client): + for field in VALID_PAYLOAD: + payload = {k: v for k, v in VALID_PAYLOAD.items() if k != field} + response = client.post("/transactions", json=payload) + assert response.status_code == 422, f"Expected 422 when '{field}' is missing" + + +def test_create_transaction_amount_zero_returns_422(client): + payload = {**VALID_PAYLOAD, "amount": 0} + response = client.post("/transactions", json=payload) + assert response.status_code == 422 + + +def test_create_transaction_amount_negative_returns_422(client): + payload = {**VALID_PAYLOAD, "amount": -10.00} + response = client.post("/transactions", json=payload) + assert response.status_code == 422 + + +def test_create_transaction_invalid_merchant_category(client): + payload = {**VALID_PAYLOAD, "merchant_category": "invalid_category"} + response = client.post("/transactions", json=payload) + assert response.status_code == 422 + + +def test_create_transaction_invalid_timestamp(client): + payload = {**VALID_PAYLOAD, "timestamp": "not-a-date"} + response = client.post("/transactions", json=payload) + assert response.status_code == 422 + + +def test_create_transaction_extra_fields_rejected(client): + payload = {**VALID_PAYLOAD, "unexpected_field": "value"} + response = client.post("/transactions", json=payload) + assert response.status_code == 422 + + +@pytest.mark.parametrize("category", [ + "electronics", "travel", "groceries", "gas_station", "restaurant", + "entertainment", "healthcare", "utilities", "cash_advance", "other", +]) +def test_create_transaction_all_valid_categories(client, category): + payload = {**VALID_PAYLOAD, "merchant_category": category} + response = client.post("/transactions", json=payload) + assert response.status_code == 201 + assert response.json()["merchant_category"] == category + + +# --------------------------------------------------------------------------- +# GET /transactions/{id} +# 
--------------------------------------------------------------------------- + +def test_get_transaction_returns_200(client): + created = client.post("/transactions", json=VALID_PAYLOAD).json() + response = client.get(f"/transactions/{created['id']}") + assert response.status_code == 200 + + +def test_get_transaction_returns_correct_data(client): + created = client.post("/transactions", json=VALID_PAYLOAD).json() + fetched = client.get(f"/transactions/{created['id']}").json() + assert fetched["id"] == created["id"] + assert fetched["amount"] == VALID_PAYLOAD["amount"] + assert fetched["merchant_name"] == VALID_PAYLOAD["merchant_name"] + + +def test_get_transaction_not_found(client): + response = client.get("/transactions/00000000-0000-0000-0000-000000000000") + assert response.status_code == 404 + + +def test_get_transaction_pii_masked_by_default(client): + """PII fields are masked in responses but full values are preserved in storage.""" + created = client.post("/transactions", json=VALID_PAYLOAD).json() + fetched = client.get(f"/transactions/{created['id']}").json() + assert fetched["card_id"] == "****1234" + assert fetched["account_id"] != VALID_PAYLOAD["account_id"] + # show_pii=true reveals the stored values + full = client.get(f"/transactions/{created['id']}?show_pii=true").json() + assert full["card_id"] == VALID_PAYLOAD["card_id"] + assert full["account_id"] == VALID_PAYLOAD["account_id"]