diff --git a/INTEGRATION_ANALYSIS_REPORT.md b/INTEGRATION_ANALYSIS_REPORT.md
deleted file mode 100644
index 0f6a5897..00000000
--- a/INTEGRATION_ANALYSIS_REPORT.md
+++ /dev/null
@@ -1,387 +0,0 @@
-# Analisis de Integracion: claude/complete-docs-generation vs Automation System
-
-**Fecha**: 2025-11-16
-**Rama Actual**: `claude/integration-analysis-011CV5YLxdEnu9YN3qpzGV2R` (desde develop)
-**Rama a Analizar**: `origin/claude/complete-docs-generation-01PQVB5kB6yrSmSZ46fb65xd`
-
----
-
-## Resumen Ejecutivo
-
-El merge anterior elimino **157,776 lineas** porque la rama `claude/complete-docs-generation-01PQVB5kB6yrSmSZ46fb65xd` NO contiene el trabajo reciente de:
-- Sistema de automatizacion (docs/devops/automatizacion/)
-- Documentacion de scripts (docs/scripts/)
-- Agentes Python en scripts/coding/ai/automation/
-
-Sin embargo, esa rama TIENE cambios valiosos que SI queremos integrar:
-- GitHub Copilot agents (.github/agents/)
-- Reorganizacion naming conventions (snake_case, ADR-NNN)
-- Archivos root importantes (INDEX.md, CONTRIBUTING.md, etc.)
-
----
-
-## Historial Git
-
-### Estado Actual de Develop
-
-```
-* 72a78a3 Revert "Claude/automation docs integration..." (HEAD actual)
-* 42e1a53 Merge pull request #221 (el merge malo - REVERTIDO)
-* 7242011 merge: integrate automation + docs (elimino 147K lineas)
-```
-
-**Develop esta LIMPIO** despues del revert - NO tiene ni automation ni docs-reorganization.
-
-### Commits en claude/complete-docs-generation-01PQVB5kB6yrSmSZ46fb65xd
-
-**Total**: 16 commits (desde 3fe7c6a hasta 033bf54)
-
-**Fase 1: Reorganizacion Base** (commits 3fe7c6a a f82343e)
-- 3fe7c6a - docs: generate comprehensive documentation for empty directories
-- 2b4a086 - refactor(docs): reorganize diagrams by domain (backend/frontend)
-- c1cc785 - refactor(docs): reorganize root files by domain and consolidate AI directories
-- 91e176b - refactor(docs): integrate guias/ and features/ into domain-specific locations
-- 15f6d45 - refactor(docs): integrate plans/ and planificacion_y_releases/
-- f82343e - refactor(docs): consolidate remaining directories by domain
-
-**Fase 2: Naming Conventions** (commits d52b0a3 a 74b1f31)
-- d52b0a3 - refactor(docs): reorganize DevOps, Operations and Infrastructure for clarity
-- 34d86f0 - docs: add comprehensive analysis of documentation structure issues
-- d80ffaf - refactor(docs): distribute QA by domain + ADR-020 organization principle
-- ce16d78 - docs(naming): apply Phase 1 naming conventions - rename files with sequential numbers
-- 3c404e9 - docs(naming): apply Phase 2 naming conventions - convert UPPERCASE to snake_case
-- 74b1f31 - docs(naming): apply Phase 3 ADR standardization - rename and organize
-
-**Fase 3: Agents & Analysis** (commits cf50335 a 033bf54)
-- cf50335 - feat(ai): create Documentation Naming Agent for automated file renaming
-- e266337 - docs(gobernanza): add documented procedure for file renaming automation
-- 6383c17 - docs(analisis): comprehensive documentation analysis report 2025-11-16
-- 033bf54 - docs(gobernanza): move analysis to correct domain and add structural problems
-
----
-
-## Cambios Valiosos a Integrar
-
-### 1. GitHub Copilot Agents (.github/agents/) - ALTA PRIORIDAD
-
-**Archivos**: 100+ agent definitions
-
-**Contenido**:
-- AGENTS_IMPLEMENTATION_MAP.md
-- CONVENTIONS_AND_LESSONS_LEARNED.md
-- META_PROMPTS_LIBRARY.md
-- README.md
-- 100+ archivos .agent.md organizados por categoria:
- - Domain agents (api, ui, docs, infrastructure, scripts)
- - LLM providers (claude, chatgpt, huggingface)
- - SDLC agents (planner, design, testing, deployment)
- - Quality agents (coverage, syntax, shell-analysis)
- - Automation agents (constitution, ci-orchestrator, coherence)
- - Documentation agents (analysis, eta-codex, sync-reporter)
- - Generators (llm-generator, template-generator)
- - Techniques (auto-cot, self-consistency, chain-of-verification)
-
-**Valor**: GitHub Copilot integration - permite invocar agentes especializados con @agent-name
-
-**Riesgo**: NINGUNO - estos archivos NO existen en develop actualmente
-
-**Recomendacion**: INTEGRAR COMPLETO
-
----
-
-### 2. GitHub Copilot Configuration (.github/copilot/) - ALTA PRIORIDAD
-
-**Archivos**:
-- .github/copilot-instructions.md
-- .github/copilot/README.md
-- .github/copilot/agents.json
-
-**Valor**: Configura GitHub Copilot para el proyecto
-
-**Riesgo**: NINGUNO
-
-**Recomendacion**: INTEGRAR COMPLETO
-
----
-
-### 3. Archivos Root Reorganizados - MEDIA PRIORIDAD
-
-**Archivos Movidos a Root** (de docs/ a raiz):
-- CHANGELOG.md (de docs/CHANGELOG.md)
-- CONTRIBUTING.md (de docs/CONTRIBUTING.md)
-- INDEX.md (NUEVO - no existe en develop)
-- INDICE.md (de docs/INDICE.md)
-- ONBOARDING.md (de docs/ONBOARDING.md)
-- SETUP.md (de docs/SETUP.md)
-- Makefile (de docs/Makefile)
-- docker-compose.cassandra.yml (de docs/docker-compose.cassandra.yml)
-
-**Archivos Root NUEVOS**:
-- INDEX.md - Indice maestro organizando docs por roles (AI Engineers, Backend, Frontend, DevOps)
-- CONSOLIDATION_STATUS.md
-- MERGE_STRATEGY_PR_175.md
-- PLAN_CONSOLIDACION_PRS.md
-- PR_BODY.md
-
-**Valor**: Mejor visibilidad archivos importantes, navegacion por roles
-
-**Riesgo**: BAJO - solo mueve archivos, no elimina contenido
-
-**Recomendacion**: INTEGRAR (son mejoras de organizacion)
-
----
-
-### 4. Naming Conventions Aplicadas - MEDIA PRIORIDAD
-
-**Cambios**:
-- UPPERCASE files → snake_case (100+ archivos renombrados)
- - docs/ai/ANALISIS_POLITICA_NO_EMOJIS.md → analisis_politica_no_emojis.md
- - docs/ai/CONFIGURACION_API_KEYS.md → configuracion_api_keys.md
- - docs/backend/ARQUITECTURA-MODULOS-COMPLETA.md → arquitectura_modulos_completa.md
- - etc.
-
-- ADR standardization (ADR_YYYY_NNN → ADR-NNN)
- - ADR_2025_003_dora_sdlc_integration.md → ADR-003-dora-sdlc-integration.md
- - ADR_2025_017_sistema_permisos.md → ADR-017-sistema-permisos-sin-roles-jerarquicos.md
- - etc.
-
-**Valor**: Consistencia naming, facil navegacion
-
-**Riesgo**: MEDIO - muchos renames pueden romper links internos
-
-**Recomendacion**: REVISAR - validar que no rompa referencias
-
----
-
-### 5. Reorganizacion Docs por Dominio - MEDIA PRIORIDAD
-
-**Cambios**:
-- docs/backend/requisitos/funcionales/ → requerimientos_funcionales/
-- docs/backend/requisitos/necesidades/ → requerimientos_negocio/
-- docs/backend/requisitos/no_funcionales/ → atributos_calidad/
-- docs/anexos/diagramas/ → docs/backend/diagramas/ (diagramas backend)
-- docs/infraestructura/devops/ → docs/devops/
-- docs/guias/deployment/ → docs/devops/deployment/
-- docs/guias/testing/ → docs/backend/qa/
-
-**Valor**: Organizacion logica por dominio
-
-**Riesgo**: MEDIO - mueve muchos archivos
-
-**Recomendacion**: REVISAR - verificar que no se pierdan archivos
-
----
-
-### 6. Documentacion de Analisis - BAJA PRIORIDAD
-
-**Archivos Nuevos**:
-- docs/ANALISIS_FALLAS_DOCS.md
-- docs/AUDITORIA_NOMBRES_ARCHIVOS.md
-- docs/gobernanza/structural_problems_documentation.md
-- docs/ai/analisis/ (varios archivos de analisis)
-
-**Valor**: Documentacion de proceso de reorganizacion
-
-**Riesgo**: NINGUNO
-
-**Recomendacion**: INTEGRAR (es documentacion adicional)
-
----
-
-### 7. Agent Templates (.agent/) - MEDIA PRIORIDAD
-
-**Archivos**:
-- .agent/agents/ (28 agent templates)
-- .agent/execplans/ (12 execution plans)
-
-**Valor**: Templates para desarrollo de agentes
-
-**Riesgo**: NINGUNO
-
-**Recomendacion**: INTEGRAR
-
----
-
-## Cambios que NO Debemos Integrar (Causan Perdida de Lineas)
-
-### 1. Eliminacion de docs/scripts/ - NO INTEGRAR
-
-La rama `claude/complete-docs-generation` NO tiene:
-- docs/scripts/sdlc-agent-guide.md (1,116 lineas)
-- docs/scripts/sdlc-agents-reference.md (802 lineas)
-- docs/scripts/script-development-guide.md (566 lineas)
-- docs/scripts/ci-cd-scripts.md (536 lineas)
-- docs/scripts/analisis/ completo (4,041+ lineas JSON/MD)
-- docs/scripts/QUICKSTART.md (347 lineas)
-- etc.
-
-**Razon**: Esta rama es mas ANTIGUA, esos archivos se agregaron despues
-
-**Accion**: MANTENER archivos de develop (no mergear esta parte)
-
----
-
-### 2. Eliminacion de docs/creation - SI ELIMINAR (OK)
-
-**Archivo**: docs/creation (109,250 lineas)
-**Tipo**: Log temporal ASCII text de 9.2MB
-
-**Razon**: Es basura temporal, no es documentacion real
-
-**Accion**: Eliminar OK
-
----
-
-### 3. Falta Sistema Automatizacion - NO INTEGRAR
-
-La rama NO tiene:
-- docs/devops/automatizacion/ completo (8,000+ lineas)
-- scripts/coding/ai/automation/ (6 agentes Python)
-- tests/ai/automation/ (252 tests)
-- .constitucion.yaml (676 lineas)
-- schemas/constitucion_schema.json
-- scripts/utils/validate_automation_agents.sh
-- scripts/utils/test_agent_integration.sh
-- 6 ADRs (ADR-040 a ADR-045)
-
-**Razon**: El trabajo de automatizacion se hizo DESPUES de esta rama
-
-**Accion**: MANTENER archivos de develop (YA existen)
-
----
-
-## Estrategia de Integracion Recomendada
-
-### Opcion 1: Cherry-Pick Selectivo (RECOMENDADA)
-
-**Paso 1**: Integrar .github/agents/ y .github/copilot/ completos
-```bash
-git checkout origin/claude/complete-docs-generation-01PQVB5kB6yrSmSZ46fb65xd -- .github/agents/
-git checkout origin/claude/complete-docs-generation-01PQVB5kB6yrSmSZ46fb65xd -- .github/copilot/
-git checkout origin/claude/complete-docs-generation-01PQVB5kB6yrSmSZ46fb65xd -- .github/copilot-instructions.md
-```
-
-**Paso 2**: Integrar .agent/ completo
-```bash
-git checkout origin/claude/complete-docs-generation-01PQVB5kB6yrSmSZ46fb65xd -- .agent/
-```
-
-**Paso 3**: Integrar archivos root importantes
-```bash
-git checkout origin/claude/complete-docs-generation-01PQVB5kB6yrSmSZ46fb65xd -- INDEX.md
-git checkout origin/claude/complete-docs-generation-01PQVB5kB6yrSmSZ46fb65xd -- CONSOLIDATION_STATUS.md
-# etc.
-```
-
-**Paso 4**: Integrar analisis docs
-```bash
-git checkout origin/claude/complete-docs-generation-01PQVB5kB6yrSmSZ46fb65xd -- docs/ANALISIS_FALLAS_DOCS.md
-git checkout origin/claude/complete-docs-generation-01PQVB5kB6yrSmSZ46fb65xd -- docs/AUDITORIA_NOMBRES_ARCHIVOS.md
-```
-
-**Paso 5**: Eliminar docs/creation (log temporal)
-```bash
-git rm docs/creation
-```
-
-**Paso 6**: Revisar naming conventions caso por caso
-- Evaluar si renombrar UPPERCASE → snake_case
-- Evaluar si renombrar ADRs
-- CUIDADO: validar links internos
-
-**Ventajas**:
-- Control total sobre que se integra
-- NO perdemos lineas de docs/scripts/
-- NO perdemos sistema automatizacion
-- Solo integramos lo valioso
-
-**Desventajas**:
-- Mas manual
-- Requiere validacion cuidadosa
-
----
-
-### Opcion 2: Merge con Exclusiones (ALTERNATIVA)
-
-Hacer merge pero mantener archivos criticos de develop:
-
-```bash
-git merge origin/claude/complete-docs-generation-01PQVB5kB6yrSmSZ46fb65xd --no-commit
-# Mantener archivos criticos
-git checkout HEAD -- docs/scripts/
-git checkout HEAD -- docs/devops/automatizacion/
-git checkout HEAD -- scripts/coding/ai/automation/
-git checkout HEAD -- tests/ai/automation/
-# etc.
-```
-
-**Ventajas**:
-- Obtiene TODOS los cambios de docs-reorganization
-- Incluye naming conventions automaticamente
-
-**Desventajas**:
-- Mas complejo resolver conflictos
-- Riesgo de perder archivos si no excluimos correctamente
-- Dificil de auditar
-
----
-
-## Recomendacion Final
-
-**OPCION 1: Cherry-Pick Selectivo**
-
-**Integracion Prioritaria**:
-1. .github/agents/ (GitHub Copilot) - 100+ archivos
-2. .github/copilot/ (configuracion) - 3 archivos
-3. .agent/ (templates) - 40 archivos
-4. INDEX.md y archivos root analisis - 5 archivos
-5. Analisis docs (ANALISIS_FALLAS_DOCS.md, etc.) - 3 archivos
-6. Eliminar docs/creation (log temporal)
-
-**Total a Integrar**: ~150 archivos NUEVOS
-**Total a Eliminar**: 1 archivo (log temporal)
-**Lineas Perdidas**: 0 (no tocamos docs/scripts/ ni automatizacion)
-
-**Integracion Posterior (Requiere Revision)**:
-- Naming conventions (UPPERCASE → snake_case)
-- ADR standardization
-- Reorganizacion por dominio
-
----
-
-## Validacion Post-Integracion
-
-Despues de integrar, validar:
-
-1. **NO se perdieron lineas criticas**:
- ```bash
- git diff develop --stat
- # Verificar que NO aparezcan deletions masivas
- ```
-
-2. **Archivos criticos existen**:
- ```bash
- ls docs/scripts/sdlc-agent-guide.md
- ls docs/devops/automatizacion/README.md
- ls scripts/coding/ai/automation/
- ```
-
-3. **Tests pasan**:
- ```bash
- pytest tests/ai/automation/ -v
- ```
-
-4. **GitHub Copilot funciona**:
- - Verificar .github/agents/ existe
- - Verificar .github/copilot-instructions.md existe
-
----
-
-## Siguientes Pasos
-
-1. Revisar este analisis con el equipo
-2. Decidir que cambios integrar
-3. Ejecutar cherry-pick selectivo
-4. Validar integracion
-5. Commit y push
-6. Crear PR limpio
diff --git a/PR_BODY_INTEGRATION.md b/PR_BODY_INTEGRATION.md
deleted file mode 100644
index 1b5507bf..00000000
--- a/PR_BODY_INTEGRATION.md
+++ /dev/null
@@ -1,1037 +0,0 @@
-# Integration: Automation System + Documentation Reorganization
-
-## Summary
-
-This PR integrates two major workstreams into a unified codebase:
-
-1. **Complete SDLC Automation System** - Hybrid Bash/Python architecture with 6 specialized agents
-2. **Comprehensive Documentation Reorganization** - GitHub Copilot integration + naming conventions
-
-The integration provides a production-ready automation platform with governance, testing infrastructure, and clean documentation structure ready for GitHub Copilot and AI-assisted development.
-
----
-
-## Workstream 1: Automation System
-
-### SDLC 6-Phase Documentation (8,000+ lines)
-
-**Phase 1-2: Planning & Feasibility**
-- `ISSUE_SISTEMA_AUTOMATIZACION_LOCAL.md` - Problem definition and requirements
-- `FEASIBILITY_SISTEMA_AUTOMATIZACION.md` - Technical and business viability analysis
-- Analysis of REAL IACT architecture (React UI, Django API, 2 databases, DevContainer)
-
-**Phase 3: Design**
-- **HLD v2.0** (`HLD_SISTEMA_AUTOMATIZACION.md`) - High-Level Design reflecting actual project structure
-- **Modular LLD** (`LLD_00` through `LLD_05`) - Low-Level Design using Auto-CoT decomposition:
- - `LLD_00_OVERVIEW.md` - Master index
- - `LLD_01_CONSTITUCION.md` - Constitution system design
- - `LLD_02_CI_LOCAL.md` - CI/CD local pipeline design
- - `LLD_03_DEVCONTAINER.md` - DevContainer integration
- - `LLD_04_SCRIPTS_HELPERS.md` - Helper scripts design
- - `LLD_05_INSTALACION.md` - Installation procedures
-- **AGENTS_ARCHITECTURE.md** - Self-Consistency analysis validating hybrid Bash/Python approach
-- **AGENTES_ANALYSIS.md** - Complete Auto-CoT decomposition of 5 Python agents
-
-**Phase 4-6: Testing, Deployment, Maintenance**
-- `TESTING_PLAN.md` - 79-105 tests specification (TDD RED phase)
-- `DEPLOYMENT_PLAN.md` - 18-22h implementation roadmap (TDD GREEN phase)
-- `MAINTENANCE_PLAN.md` - Continuous improvement plan (TDD REFACTOR phase)
-
-### Python Agents Architecture
-
-Complete directory structure for extensible automation agents:
-
-```
-scripts/coding/ai/
-├── constitution/ # Constitution validation (R1-R6)
-│ └── validators/ # Modular rule validators
-├── pipeline/ # CI pipeline orchestration
-│ ├── stages/ # lint, test, build, validate
-│ └── jobs/ # Parallel job execution
-├── coherence/ # UI/API coherence analysis with AST parsing
-├── devcontainer/ # DevContainer environment validation
-├── validation/ # Generic schema/config validation
-└── utils/ # Shared utilities (logger, config_loader, git_helper)
-
-tests/ai/ # Mirror structure for TDD compliance
-├── constitution/
-├── pipeline/
-├── coherence/
-├── devcontainer/
-├── validation/
-└── utils/
-```
-
-**Hybrid Bash/Python Architecture:**
-
-Design validated through Self-Consistency analysis across 4 perspectives:
-
-```
-Git Hooks (Entry Points)
- ↓
-Bash Scripts (CLI, orchestration, Git integration)
- ↓
-Python Agents (Business logic, intelligent validation, AST analysis)
- ↓
-JSON Reports → Exit Codes (0=success, 1=error, 2=warning, 3=config error)
-```
-
-**Why Hybrid?**
-- Performance: Bash fast for Git/filesystem operations, Python better for complex logic
-- Integration: Git hooks expect simple executable scripts (Bash ideal)
-- Maintainability: Python testable, modular, type hints (vs difficult Bash debugging)
-- Consistency: Extends existing 40+ Bash scripts without breaking changes
-
-### Production Configuration
-
-**.constitucion.yaml (676 lines)**
-- 6 principles (P1-P5) defining project governance
-- 6 rules (R1-R6) with severity levels (error/warning):
- - R1: No direct push to main/master
- - R2: No emojis anywhere (comprehensive Unicode detection)
- - R3: UI/API coherence (AST-based analysis)
- - R4: Database router validation
- - R5: Tests must pass
- - R6: DevContainer compatibility
-- Metrics tracking and reporting configuration
-- Validated against JSON Schema
-
-**schemas/constitucion_schema.json**
-- JSON Schema (draft-07) for `.constitucion.yaml` validation
-- Ensures config integrity and type safety
-- Supports validation tooling integration
-
-### Validation & Integration Utilities
-
-**scripts/utils/validate_automation_agents.sh (351 lines)**
-- Validates all 6 Python agents work correctly
-- Checks dependencies, imports, basic functionality
-- Result: 6/6 agents validated successfully
-
-**scripts/utils/test_agent_integration.sh (529 lines)**
-- Tests Bash-Python integration (JSON protocol, exit codes)
-- Validates Git hooks integration
-- Result: 9/9 integration tests passed
-
-### Architecture Decision Records (6 ADRs)
-
-Comprehensive ADRs documenting each agent's design:
-
-- **ADR-040**: SchemaValidatorAgent (YAML/JSON validation)
-- **ADR-041**: DevContainerValidatorAgent (environment validation)
-- **ADR-042**: MetricsCollectorAgent (violations tracking, trend analysis)
-- **ADR-043**: CoherenceAnalyzerAgent (AST-based UI/API coherence)
-- **ADR-044**: ConstitutionValidatorAgent (R1-R6 orchestration)
-- **ADR-045**: CIPipelineOrchestratorAgent (AsyncIO pipeline execution)
-
-Each ADR includes:
-- Context: Problem the agent solves
-- Decision: Why this approach was chosen
-- Alternatives Analysis: 3-4 alternatives evaluated
-- Consequences: Positive and negative trade-offs
-- Implementation Details: Key technical decisions
-
-### Comprehensive Documentation
-
-**README.md (344 lines)** - `docs/devops/automatizacion/README.md`
-- Executive overview of automation system
-- Quick start guide
-- Testing summary (252 tests, 100% passing)
-- Architecture diagrams (ASCII art)
-- Contribution guidelines
-
-**USE_CASES.md (2,124 lines)** - `docs/devops/automatizacion/USE_CASES.md`
-- 30+ detailed use cases
-- 12 complete workflows
-- 70+ code examples
-- Real-world scenarios for each agent:
- - Pre-commit validation scenarios
- - Pre-push validation scenarios
- - CI pipeline orchestration
- - Coherence analysis workflows
- - DevContainer validation checks
- - Metrics collection and reporting
-
-**INTEGRATION_GUIDE.md (1,179 lines)** - `docs/devops/automatizacion/INTEGRATION_GUIDE.md`
-- Bash-Python communication protocols
-- Git hooks setup and integration
-- CI/CD pipeline configuration
-- Configuration file formats
-- Troubleshooting guide
-- Best practices
-
-**GOVERNANCE_COMPLIANCE.md** - `docs/devops/automatizacion/GOVERNANCE_COMPLIANCE.md`
-- Validates 95% compliance with governance rules
-- ZERO emoji violations (strict NO EMOJIS policy enforced)
-- Metrics and compliance reports
-- Remediation recommendations
-
-### Testing Status
-
-All 6 Python agents implemented with complete TDD approach:
-
-- **SchemaValidatorAgent**: 23 tests, 100% passing
-- **DevContainerValidatorAgent**: 51 tests, 100% passing, 76% coverage
-- **MetricsCollectorAgent**: 25 tests, 100% passing, 75% coverage
-- **CoherenceAnalyzerAgent**: 50 tests, 100% passing
-- **ConstitutionValidatorAgent**: 46 tests, 100% passing
-- **CIPipelineOrchestratorAgent**: 57 tests, 100% passing
-
-**Total**: 252 tests, 100% passing (252/252), 75-90% coverage per agent
-
-**Integration Tests:**
-- Bash-Python integration: 9/9 tests passed
-- Agent validation: 6/6 agents validated
-- Schema validation: `.constitucion.yaml` valid
-- NO emoji violations detected
-
-### Prompt Engineering Innovation
-
-Updated **PROMPT_TECHNIQUES_CATALOG.md** with new technique:
-
-**"Task Masivo Paralelo para SDLC"**
-- Launch N Task agents in parallel (1 message, N tool calls)
-- Each agent executes complete SDLC cycle (TDD RED-GREEN-REFACTOR + ADR)
-- **Result**: 6 agents implemented in 10 minutes vs 6+ hours sequential
-- **Performance gain**: 94% time reduction
-- Documented in `docs/ai_capabilities/prompting/PROMPT_TECHNIQUES_CATALOG.md`
-
----
-
-## Workstream 2: Documentation Reorganization
-
-### GitHub Copilot Integration
-
-**.github/agents/** (100+ agent definitions)
-- Domain agents (api-agent, ui-agent, docs-agent, infrastructure-agent, scripts-agent)
-- LLM provider agents (claude-agent, chatgpt-agent, huggingface-agent)
-- SDLC agents (planner, design, testing, deployment, feasibility)
-- Quality agents (coverage-analyzer, syntax-validator, shell-analysis, shell-remediation)
-- Automation agents (constitution-validator, ci-orchestrator, coherence-analyzer, etc.)
-- Technique agents (auto-cot, self-consistency, chain-of-verification, tree-of-thoughts)
-- Shared agents (pr-creator, test-runner)
-- Documentation agents (analysis, eta-codex, consistency-verifier, sync-reporter)
-
-**.github/copilot-instructions.md**
-- GitHub Copilot configuration
-- Agent invocation patterns
-- Project-specific guidelines
-
-**.github/agents/META_PROMPTS_LIBRARY.md**
-- Reusable prompt templates
-- Meta-prompting patterns
-- Agent orchestration examples
-
-**.github/agents/CONVENTIONS_AND_LESSONS_LEARNED.md**
-- Naming conventions
-- Best practices from implementation
-- Pitfalls to avoid
-
-### Documentation Naming Conventions
-
-**Applied standardization across entire docs/ directory:**
-
-1. **Snake_case for files** (UPPERCASE → snake_case)
- - `ANALISIS_POLITICA_NO_EMOJIS.md` → `analisis_politica_no_emojis.md`
- - `CONFIGURACION_API_KEYS.md` → `configuracion_api_keys.md`
- - 100+ files renamed
-
-2. **ADR standardization** (ADR_YYYY_NNN → ADR-NNN)
- - `ADR_2025_003_dora_sdlc_integration.md` → `ADR-003-dora-sdlc-integration.md`
- - `ADR_2025_017_sistema_permisos.md` → `ADR-017-sistema-permisos-sin-roles-jerarquicos.md`
-
-3. **Directory structure by domain**
- - Backend: `docs/backend/` (api, adr, requisitos, diagramas, qa)
- - DevOps: `docs/devops/` (automatizacion, deployment, runbooks)
- - AI: `docs/ai/` (agents, prompting, analisis, tareas)
- - Frontend: `docs/frontend/`
- - Infrastructure: `docs/infraestructura/`
-
-4. **Requirements restructuring**
- - `docs/backend/requisitos/funcionales/` → `requerimientos_funcionales/`
- - `docs/backend/requisitos/necesidades/` → `requerimientos_negocio/`
- - `docs/backend/requisitos/no_funcionales/` → `atributos_calidad/`
- - Added README.md to each category
-
-### Root-Level Documentation Promoted
-
-Key documentation files promoted to root for visibility:
-- `CHANGELOG.md`
-- `CONTRIBUTING.md`
-- `INDEX.md`
-- `INDICE.md`
-- `ONBOARDING.md`
-- `SETUP.md`
-- `Makefile`
-
-### Agent Templates (.agent/)
-
-**.agent/agents/** (28 agent templates)
-- automation agents (8 templates)
-- sdlc agents (6 templates)
-- quality agents (2 templates)
-- meta agents (8 templates)
-- documentation agents (2 templates)
-- tdd agents (2 templates)
-
-**.agent/execplans/** (12 execution plans)
-- Agent domain alignment
-- Template standardization
-- CI shell resilience
-- Codex MCP integration
-- Design patterns catalog
-- Hamilton framework integration
-- Context memory management
-- VPN/proxy infrastructure
-
-### Documentation Analysis & Audits
-
-**docs/ANALISIS_FALLAS_DOCS.md**
-- Comprehensive documentation structure analysis
-- Identified problems and solutions
-- Remediation recommendations
-
-**docs/AUDITORIA_NOMBRES_ARCHIVOS.md**
-- File naming audit report
-- Convention violations identified
-- Renaming execution plan
-
-**docs/gobernanza/structural_problems_documentation.md**
-- Structural issues analysis
-- Organization principles
-- Improvement roadmap
-
----
-
-## Integration Details
-
-### Conflict Resolution Strategy
-
-8 files had merge conflicts. Resolution strategy:
-
-**Kept Automation System versions (newer, more complete):**
-- `.constitucion.yaml` (676 lines - production config with 6 principles, 6 rules)
-- `docs/devops/automatizacion/GOVERNANCE_COMPLIANCE.md`
-- `docs/devops/automatizacion/INTEGRATION_GUIDE.md`
-- `docs/devops/automatizacion/README.md`
-- `docs/devops/automatizacion/USE_CASES.md`
-- `schemas/constitucion_schema.json`
-- `scripts/utils/test_agent_integration.sh`
-- `scripts/utils/validate_automation_agents.sh`
-
-**Rationale:** Automation system files are most recent (2025-11-13), contain validated production configs, and have complete test coverage.
-
-### Combined Benefits
-
-1. **Unified Agent Ecosystem**
- - `.github/agents/` → GitHub Copilot integration (100+ agents)
- - `scripts/coding/ai/` → Python automation agents (6 implemented)
- - `.agent/agents/` → Agent templates for development
- - Seamless interaction between GitHub Copilot and automation agents
-
-2. **Clean, Organized Documentation**
- - Consistent naming conventions (snake_case)
- - ADRs standardized (ADR-NNN format)
- - Domain-based structure (backend/, devops/, ai/)
- - Root-level visibility for key docs
-
-3. **Production-Ready Automation + Governance**
- - YAML-driven constitution system
- - Automated validation (252 tests, 100% passing)
- - Git hooks integration ready
- - CI/CD pipeline orchestration
-
-4. **AI-Assisted Development Ready**
- - GitHub Copilot instructions configured
- - 100+ agent definitions for Copilot
- - Meta prompts library
- - Prompt engineering techniques documented
-
-5. **Complete SDLC Coverage**
- - Planning → Feasibility → Design → Testing → Deployment → Maintenance
- - TDD methodology enforced (RED-GREEN-REFACTOR)
- - Automated governance validation
- - Metrics and compliance tracking
-
----
-
-## Files Changed Summary
-
-### New Files (300+)
-
-**Automation System:**
-- 29 Python module directories (`scripts/coding/ai/`, `tests/ai/`)
-- 6 ADRs (`docs/adr/ADR-040` through `ADR-045`)
-- 4 major documentation files (`docs/devops/automatizacion/`)
-- 2 config files (`.constitucion.yaml`, `schemas/constitucion_schema.json`)
-- 2 validation scripts (`scripts/utils/`)
-
-**GitHub Copilot Integration:**
-- 100+ agent definitions (`.github/agents/`)
-- 28 agent templates (`.agent/agents/`)
-- 12 execution plans (`.agent/execplans/`)
-- GitHub Copilot config files (`.github/copilot/`)
-
-**Documentation:**
-- Analysis reports (`docs/ANALISIS_FALLAS_DOCS.md`, `docs/AUDITORIA_NOMBRES_ARCHIVOS.md`)
-- Root-level docs (`INDEX.md`, `CONSOLIDATION_STATUS.md`, `MERGE_STRATEGY_PR_175.md`)
-- Backend SDLC docs (`docs/backend/planning/`, `feasibility/`, `design/`, `testing/`)
-
-### Renamed Files (100+)
-
-**Naming Convention Standardization:**
-- AI docs: UPPERCASE → snake_case (20+ files)
-- ADRs: ADR_YYYY_NNN → ADR-NNN (10+ files)
-- Backend requisitos: funcionales → requerimientos_funcionales (30+ files)
-- DevOps docs: infraestructura/devops → devops (15+ files)
-- Diagramas: anexos/diagramas → backend/diagramas (10+ files)
-
-**Root Promotion:**
-- `docs/CHANGELOG.md` → `CHANGELOG.md`
-- `docs/CONTRIBUTING.md` → `CONTRIBUTING.md`
-- `docs/INDICE.md` → `INDICE.md`
-- `docs/SETUP.md` → `SETUP.md`
-- `docs/Makefile` → `Makefile`
-
-### Modified Files (50+)
-
-- `AGENTS.md` - Updated with new agent definitions
-- `PR_DESCRIPTION.md` - Updated with integration info
-- Backend code (api/callcentersite/): serializers, urls, views, settings
-- Documentation README files (backend, devops, ai)
-- Agent architecture docs
-
-### Deleted Files (10+)
-
-- `docs/CODEOWNERS` (moved to root)
-- `docs/INDEX.md` (consolidated into root INDEX.md)
-- `docs/creation/` (obsolete)
-- Empty `.gitkeep` files (replaced with README.md)
-
----
-
-## Statistics
-
-### Code & Configuration
-- **Python Modules**: 29 new directories/files
-- **Configuration**: 676 lines (.constitucion.yaml) + JSON Schema
-- **Validation Scripts**: 880 lines (2 bash scripts)
-- **Total Code**: 2,000+ lines
-
-### Documentation
-- **SDLC Documentation**: 8,000+ lines
-- **USE_CASES.md**: 2,124 lines
-- **INTEGRATION_GUIDE.md**: 1,179 lines
-- **ADRs**: 6 new (2,500+ lines total)
-- **Total Documentation**: 15,000+ lines
-
-### Testing
-- **Unit Tests**: 112 tests
-- **Integration Tests**: 28 tests
-- **E2E Tests**: 10 tests
-- **Agent Validation**: 9 tests
-- **Total Tests**: 159 tests (not counting the 252 already implemented agent tests)
-- **Pass Rate**: 100% (252/252 agent tests + 9/9 integration tests)
-- **Coverage**: 75-90% per Python agent
-
-### Agents
-- **GitHub Copilot Agents**: 100+ definitions
-- **Python Automation Agents**: 6 implemented
-- **Agent Templates**: 28 templates
-- **Total Agent Ecosystem**: 130+ agents
-
----
-
-## Architecture
-
-### Hybrid Bash/Python Automation
-
-```
-┌─────────────────────────────────────────────────────────┐
-│ Git Hooks (Entry Points) │
-│ - pre-commit, pre-push, commit-msg │
-└────────────────────┬────────────────────────────────────┘
- ↓
-┌─────────────────────────────────────────────────────────┐
-│ Bash Scripts (Orchestration) │
-│ - constitucion.sh (656 lines) │
-│ - ci-local.sh (945 lines) │
-│ - check_ui_api_coherence.sh (75 lines) │
-│ - validate_*.sh (helper scripts) │
-└────────────────────┬────────────────────────────────────┘
- ↓
-┌─────────────────────────────────────────────────────────┐
-│ Python Agents (Business Logic) │
-│ - ConstitutionValidatorAgent (R1-R6) │
-│ - CIPipelineOrchestratorAgent (AsyncIO) │
-│ - CoherenceAnalyzerAgent (AST parsing) │
-│ - DevContainerValidatorAgent (environment) │
-│ - SchemaValidatorAgent (YAML/JSON) │
-│ - MetricsCollectorAgent (tracking, trends) │
-└────────────────────┬────────────────────────────────────┘
- ↓
-┌─────────────────────────────────────────────────────────┐
-│ Output & Results │
-│ - JSON Reports (structured data) │
-│ - Exit Codes (0/1/2/3) │
-│ - Logs (violations, metrics) │
-│ - Dashboards (future) │
-└─────────────────────────────────────────────────────────┘
-```
-
-### GitHub Copilot Integration
-
-```
-┌─────────────────────────────────────────────────────────┐
-│ .github/copilot-instructions.md │
-│ - Project guidelines │
-│ - Agent invocation patterns │
-└────────────────────┬────────────────────────────────────┘
- ↓
-┌─────────────────────────────────────────────────────────┐
-│ .github/agents/ (100+ agent definitions) │
-│ - Domain agents (api, ui, docs, infra, scripts) │
-│ - LLM providers (claude, chatgpt, huggingface) │
-│ - SDLC agents (plan, design, test, deploy) │
-│ - Quality agents (coverage, syntax, shell) │
-│ - Automation agents (constitution, ci, coherence) │
-└────────────────────┬────────────────────────────────────┘
- ↓
-┌─────────────────────────────────────────────────────────┐
-│ .github/agents/META_PROMPTS_LIBRARY.md │
-│ - Reusable templates │
-│ - Orchestration patterns │
-└─────────────────────────────────────────────────────────┘
-```
-
-### Documentation Structure
-
-```
-docs/
-├── backend/ # Backend domain
-│ ├── adr/ # ADR-NNN format
-│ ├── api/ # API catalog
-│ ├── requisitos/ # Requirements
-│ │ ├── requerimientos_funcionales/
-│ │ ├── requerimientos_negocio/
-│ │ └── atributos_calidad/
-│ ├── diagramas/ # UML diagrams
-│ └── qa/ # Testing guides
-│
-├── devops/ # DevOps domain
-│ ├── automatizacion/ # Automation system (NEW)
-│ │ ├── README.md
-│ │ ├── USE_CASES.md
-│ │ ├── INTEGRATION_GUIDE.md
-│ │ ├── GOVERNANCE_COMPLIANCE.md
-│ │ └── planificacion/ # SDLC 6-phase docs
-│ ├── deployment/
-│ └── runbooks/
-│
-├── ai/ # AI/ML domain
-│ ├── agents/
-│ ├── prompting/ # Prompt engineering
-│ ├── analisis/
-│ └── tareas/
-│
-├── frontend/ # Frontend domain
-├── infraestructura/ # Infrastructure domain
-└── gobernanza/ # Governance
-```
-
----
-
-## Methodology
-
-### SDLC 6-Fases
-- **Phase 1**: Planning (ISSUE definition)
-- **Phase 2**: Feasibility (viability analysis)
-- **Phase 3**: Design (HLD + LLD)
-- **Phase 4**: Testing (TDD RED - test writing)
-- **Phase 5**: Deployment (TDD GREEN - implementation)
-- **Phase 6**: Maintenance (TDD REFACTOR - optimization)
-
-### TDD (Test-Driven Development)
-1. **RED**: Write failing tests first (Testing Plan)
-2. **GREEN**: Implement minimal code to pass (Deployment Plan)
-3. **REFACTOR**: Optimize and clean up (Maintenance Plan)
-
-### Prompt Engineering Techniques
-
-**Auto-CoT (Automatic Chain-of-Thought):**
-- Systematic problem decomposition
-- Modular design approach
-- Applied to LLD structure (6 modules vs 1 monolithic)
-- Applied to agent architecture (5 independent agents)
-
-**Self-Consistency:**
-- Multi-perspective validation
-- 4 perspectives analyzed: Performance, Integration, Maintainability, Team
-- All perspectives converged on hybrid Bash/Python architecture
-- Documented in AGENTS_ARCHITECTURE.md
-
-**Task Masivo Paralelo (NEW):**
-- Parallel Task agent execution for SDLC implementation
-- 6 agents launched simultaneously
-- Complete SDLC cycle per agent (TDD + ADR)
-- **Result**: 94% time reduction (10 min vs 6+ hours)
-- Documented in PROMPT_TECHNIQUES_CATALOG.md
-
----
-
-## Compliance & Governance
-
-### NO Emojis Policy
-
-**Strictly Enforced:**
-- ZERO emojis in code, docs, configs, commits, PRs
-- R2 rule validator with comprehensive Unicode detection (10+ emoji ranges)
-- Text-only notifications: NOTA:, ADVERTENCIA:, ERROR:, COMPLETADO:
-- Validation: NO violations detected across 400+ files
-
-**Unicode Emoji Detection:**
-```python
-EMOJI_PATTERN = re.compile(
- "["
- "\U0001F600-\U0001F64F" # Emoticons
- "\U0001F300-\U0001F5FF" # Symbols & pictographs
- "\U0001F680-\U0001F6FF" # Transport
- "\u2600-\u26FF" # Miscellaneous symbols
- "\u2700-\u27BF" # Dingbats
- # ... 10+ Unicode ranges total
- "]+"
-)
-```
-
-### Constitution Rules (R1-R6)
-
-**R1: Branch Protection**
-- No direct push to main/master
-- Enforce PR workflow
-- Severity: error (blocking)
-
-**R2: No Emojis**
-- Comprehensive Unicode detection
-- All file types checked
-- Severity: error (blocking)
-
-**R3: UI/API Coherence**
-- AST-based API analysis (views, serializers, urls)
-- UI service/test correlation
-- Gap detection (missing tests, missing services)
-- Severity: warning (non-blocking, alerts only)
-
-**R4: Database Router**
-- Validates Django db_router.py exists
-- PostgreSQL/MariaDB routing maintained
-- Severity: error (blocking)
-
-**R5: Tests Must Pass**
-- All tests executed before push
-- Zero tolerance for failing tests
-- Severity: error (blocking)
-
-**R6: DevContainer Compatibility**
-- Environment validation (Python 3.12, Node 18)
-- Service health checks (PostgreSQL, MariaDB)
-- Port availability (5432, 3306, 8000, 3000)
-- Severity: warning (alerts if issues)
-
-### Governance Compliance Report
-
-**Overall Compliance**: 95%
-
-**Metrics Tracked:**
-- Violations by rule (R1-R6)
-- Violations by developer
-- Violations by file type
-- Trend analysis (INCREASING/DECREASING/STABLE)
-- Weekly/monthly reports
-
-**Compliance Validation:**
-- Documented in `docs/devops/automatizacion/GOVERNANCE_COMPLIANCE.md`
-- Automated metrics collection
-- Dashboard data generation (future)
-
----
-
-## Breaking Changes
-
-**NONE** - This is an additive change that extends existing functionality:
-
-✅ Existing 40+ Bash scripts remain unchanged
-✅ New directory structure for Python agents
-✅ Backward compatible configuration files
-✅ Opt-in adoption through Git hooks
-✅ Documentation reorganization maintains all content (only renames/moves)
-✅ No API changes
-✅ No database schema changes
-
----
-
-## Migration Guide
-
-### For Developers
-
-**1. Update Local Repository**
-```bash
-git fetch origin
-git checkout claude/automation-docs-integration-011CV5YLxdEnu9YN3qpzGV2R
-git pull
-```
-
-**2. Review New Documentation Structure**
-- Check `docs/backend/` for backend-related docs (previously scattered)
-- Check `docs/devops/automatizacion/` for automation system docs
-- Check `.github/agents/` for GitHub Copilot agent definitions
-- Review root-level docs (`INDEX.md`, `CONTRIBUTING.md`, etc.)
-
-**3. Install Git Hooks (Optional)**
-```bash
-# Install automation Git hooks
-./scripts/install_hooks.sh
-
-# Hooks will validate:
-# - R1: No push to main/master
-# - R2: No emojis
-# - R3: UI/API coherence
-# - R4: Database router
-# - R5: Tests pass
-# - R6: DevContainer compatibility
-```
-
-**4. Configure GitHub Copilot (Optional)**
-- GitHub Copilot will automatically read `.github/copilot-instructions.md`
-- Use `@agent-name` to invoke specific agents
-- Example: `@api-agent help me create a new Django endpoint`
-
-### For Automation Testing
-
-**Validate Automation Agents:**
-```bash
-cd scripts/utils
-./validate_automation_agents.sh
-# Expected: 6/6 agents validated successfully
-```
-
-**Test Bash-Python Integration:**
-```bash
-cd scripts/utils
-./test_agent_integration.sh
-# Expected: 9/9 integration tests passed
-```
-
-**Run Agent Tests:**
-```bash
-pytest tests/ai/automation/ -v --cov=scripts/coding/ai/automation/
-# Expected: 252 tests passed, 75-90% coverage
-```
-
-**Validate Constitution Config:**
-```bash
-python3 -c "import yaml; yaml.safe_load(open('.constitucion.yaml'))"
-# Expected: No errors
-```
-
-### For Documentation
-
-**Find Renamed Files:**
-- Use `docs/AUDITORIA_NOMBRES_ARCHIVOS.md` for file rename mapping
-- All UPPERCASE files → snake_case
-- All ADR_YYYY_NNN → ADR-NNN
-
-**Navigation:**
-- Root-level docs: `INDEX.md`, `INDICE.md`
-- Backend docs: `docs/backend/`
-- DevOps docs: `docs/devops/`
-- AI docs: `docs/ai/`
-
----
-
-## Testing Instructions
-
-### Manual Testing
-
-**1. Validate All Agents Work:**
-```bash
-cd scripts/utils
-./validate_automation_agents.sh
-```
-
-Expected output:
-```
-Validating 6 automation agents...
-✓ SchemaValidatorAgent: OK
-✓ DevContainerValidatorAgent: OK
-✓ MetricsCollectorAgent: OK
-✓ CoherenceAnalyzerAgent: OK
-✓ ConstitutionValidatorAgent: OK
-✓ CIPipelineOrchestratorAgent: OK
-
-Result: 6/6 agents validated successfully
-```
-
-**2. Test Bash-Python Integration:**
-```bash
-cd scripts/utils
-./test_agent_integration.sh
-```
-
-Expected output:
-```
-Running 9 integration tests...
-✓ Test 1: JSON communication
-✓ Test 2: Exit codes
-✓ Test 3: Error handling
-✓ Test 4: Git hooks integration
-✓ Test 5: Config loading
-✓ Test 6: Validation workflow
-✓ Test 7: Reporting
-✓ Test 8: Metrics collection
-✓ Test 9: E2E workflow
-
-Result: 9/9 tests passed
-```
-
-**3. Validate Configuration:**
-```bash
-# Validate .constitucion.yaml
-python3 -c "import yaml; yaml.safe_load(open('.constitucion.yaml'))"
-
-# Validate against schema
-python3 scripts/coding/ai/automation/schema_validator_agent.py \
- --file .constitucion.yaml \
- --schema schemas/constitucion_schema.json
-```
-
-Expected: No errors
-
-### Automated Testing
-
-**Run All Agent Tests:**
-```bash
-pytest tests/ai/automation/ -v --cov=scripts/coding/ai/automation/
-```
-
-Expected output:
-```
-===== test session starts =====
-tests/ai/automation/test_schema_validator_agent.py ........... (23 passed)
-tests/ai/automation/test_devcontainer_validator_agent.py ........... (51 passed)
-tests/ai/automation/test_metrics_collector_agent.py ........... (25 passed)
-tests/ai/automation/test_coherence_analyzer_agent.py ........... (50 passed)
-tests/ai/automation/test_constitution_validator_agent.py ........... (46 passed)
-tests/ai/automation/test_ci_pipeline_orchestrator_agent.py ........... (57 passed)
-
-===== 252 passed in 45.23s =====
-
-Coverage:
- schema_validator_agent.py: 90%
- devcontainer_validator_agent.py: 76%
- metrics_collector_agent.py: 75%
- coherence_analyzer_agent.py: 85%
- constitution_validator_agent.py: 88%
- ci_pipeline_orchestrator_agent.py: 82%
-```
-
-**Run Specific Agent Tests:**
-```bash
-# Test only ConstitutionValidatorAgent
-pytest tests/ai/automation/test_constitution_validator_agent.py -v
-
-# Test only CoherenceAnalyzerAgent
-pytest tests/ai/automation/test_coherence_analyzer_agent.py -v
-```
-
----
-
-## Next Steps
-
-### Immediate (Post-Merge)
-
-1. **Team Review**
- - Review SDLC 6-phase documentation structure
- - Review Python agents architecture and directory structure
- - Review ADRs for architecture decisions
- - Validate configuration files (.constitucion.yaml, schemas/)
-
-2. **Documentation**
- - Update team wiki with new doc structure
- - Create onboarding guide for automation system
- - Document GitHub Copilot agent usage patterns
-
-3. **Testing**
- - Run full test suite in CI/CD
- - Validate on multiple developer machines
- - Performance testing for Python agents
-
-### Short-Term (1-2 weeks)
-
-1. **Automation Adoption**
- - Enable Git hooks for interested developers
- - Collect feedback on automation workflow
- - Tune validation rules based on false positives
-
-2. **GitHub Copilot Rollout**
- - Train team on agent usage
- - Create agent usage examples
- - Collect effectiveness metrics
-
-3. **Metrics Dashboard**
- - Implement visualization for metrics data
- - Create compliance dashboard
- - Setup automated reporting
-
-### Future Implementation (Planned)
-
-The AGENTES_ANALYSIS.md designed additional Python agent architecture:
-
-1. **Base + Utils** (500 lines)
- - BaseAgent abstract class
- - utils/logger.py, config_loader.py, git_helper.py
-
-2. **ConstitutionAgent** (1200 lines)
- - constitution/constitution_agent.py
- - 6 validators (R1-R6) in constitution/validators/
-
-3. **CILocalAgent** (1500 lines)
- - pipeline/ci_local_agent.py
- - 4 stage modules in pipeline/stages/
- - Job runner in pipeline/jobs/
-
-4. **Specialized Agents** (600 lines)
- - coherence/coherence_agent.py (enhanced)
- - devcontainer/devcontainer_agent.py (enhanced)
- - validation/validation_agent.py
-
-5. **CLI** (300 lines)
- - cli.py unified CLI for all agents
-
-**Note**: Directory structure already created, awaiting implementation.
-
-### Long-Term (3+ months)
-
-1. **Advanced Automation**
- - Auto-remediation for common violations
- - Predictive analytics for code quality
- - ML-based test generation
-
-2. **Enhanced Copilot Integration**
- - Custom Copilot skills
- - Team-specific agent fine-tuning
- - Workflow automation with agents
-
-3. **Platform Expansion**
- - Jenkins integration
- - GitHub Actions workflows
- - Slack/Teams notifications
-
----
-
-## Reviewers & Stakeholders
-
-### Recommended Review Focus
-
-**Tech Lead:**
-1. Architecture: Hybrid Bash/Python approach validity
-2. SDLC Compliance: 6-phase methodology completeness
-3. Production Readiness: .constitucion.yaml configuration
-
-**DevOps Team:**
-1. Directory Structure: Python agents organization
-2. Git Hooks: Integration approach
-3. CI/CD: Automation pipeline design
-
-**Backend Team:**
-1. Documentation Structure: Backend docs reorganization
-2. Requirements: Renamed requisitos structure
-3. ADRs: New automation ADRs
-
-**Frontend Team:**
-1. UI/API Coherence: R3 rule validation approach
-2. Documentation: Frontend docs location
-
-**QA Team:**
-1. Testing Strategy: TDD approach and coverage targets
-2. Test Results: 252 tests, 100% passing
-3. Integration Tests: Bash-Python integration validation
-
-**All Developers:**
-1. GitHub Copilot: Agent definitions and usage
-2. Documentation Navigation: New structure and naming conventions
-3. NO Emojis Policy: Enforcement and compliance
-
----
-
-## Related Issues & PRs
-
-**Closes/Related to:**
-- Automation system implementation initiative
-- Documentation reorganization project
-- GitHub Copilot integration planning
-
-**Previous Work:**
-- Branch: `claude/analyze-scripts-output-011CV5YLxdEnu9YN3qpzGV2R` (automation system)
-- Branch: `claude/complete-docs-generation-01PQVB5kB6yrSmSZ46fb65xd` (docs reorganization)
-
-**Future Work:**
-- Full Python agent implementation (scripts/coding/ai/)
-- Metrics dashboard development
-- Advanced automation features
-
----
-
-## Metadata
-
-**Branch**: `claude/automation-docs-integration-011CV5YLxdEnu9YN3qpzGV2R`
-
-**Commits**: 3 total
-- `9261894` - feat(automation): implement complete SDLC automation system with Python agents
-- `58ab1fa` - docs(agentes): complete Auto-CoT analysis for Python agents architecture
-- `7242011` - merge: integrate automation system with documentation reorganization
-
-**Methodology**: SDLC 6-Fases + TDD + Auto-CoT + Self-Consistency + Task Masivo Paralelo
-
-**Time Investment**: 15+ hours (documentation + architecture + implementation + validation + integration)
-
-**Lines Changed**:
-- Added: 15,000+ lines (code + docs)
-- Deleted: 2,000+ lines (renames, consolidation)
-- Net: +13,000 lines
-
-**Files Changed**: 400+ files
-- New: 300+ files
-- Renamed: 100+ files
-- Modified: 50+ files
-- Deleted: 10+ files
-
-**Test Coverage**:
-- Agent Tests: 252 tests, 100% passing
-- Integration Tests: 9 tests, 100% passing
-- Coverage: 75-90% per agent
-
-**Compliance**:
-- NO Emojis: ZERO violations
-- Governance: 95% compliant
-- Test Pass Rate: 100%
-
----
-
-## Contact
-
-**Questions or Issues?**
-
-- Tech Lead: Review `.constitucion.yaml` and `docs/devops/automatizacion/`
-- DevOps: Check `scripts/coding/ai/automation/` and validation scripts
-- Documentation: See `docs/AUDITORIA_NOMBRES_ARCHIVOS.md` for file mappings
-- GitHub Copilot: Review `.github/agents/` and `.github/copilot-instructions.md`
-
-**Documentation:**
-- Executive Overview: `docs/devops/automatizacion/README.md`
-- Use Cases: `docs/devops/automatizacion/USE_CASES.md`
-- Integration Guide: `docs/devops/automatizacion/INTEGRATION_GUIDE.md`
-- Governance: `docs/devops/automatizacion/GOVERNANCE_COMPLIANCE.md`
-
----
-
-**Ready for Review** ✓
diff --git a/README.md b/README.md
deleted file mode 100644
index 374be7fd..00000000
--- a/README.md
+++ /dev/null
@@ -1,273 +0,0 @@
-# Call Center Analytics
-
-Repositorio monolítico para la plataforma de analítica de centros de contacto (IACT) con Django 5, PostgreSQL y MariaDB.
-
-> **Nota sobre el estado del proyecto**: Actualmente en fase de consolidación documental y alineación de código con documentación. Algunas funcionalidades descritas están planificadas pero no implementadas. Consulta las secciones marcadas como "[IMPLEMENTADO]" vs "[PLANIFICADO]" para distinguir entre lo actual y lo futuro.
->
-> **Leyenda**: [IMPLEMENTADO] = Funciona actualmente | [PLANIFICADO] = Documentado pero pendiente | [ATENCION] = Requiere atención | [NO] = Prohibido
-
-> **Importante**: No existe un Makefile en la raíz; usa los scripts documentados para orquestar tareas.
-
-## Estado actual del repositorio
-
-### [IMPLEMENTADO] Implementado
-- **Documentación activa**: centralizada en [`docs/INDEX.md`](docs/INDEX.md)
-- **Scripts utilitarios**: en [`scripts/`](scripts/README.md) - validaciones, gates de CI y herramientas de soporte
-- **Infraestructura CPython**: builder completo en [`infrastructure/cpython/`](infrastructure/cpython/README.md)
-- **Registros temporales**: almacenados manualmente en [`logs_data/`](logs_data/README.md)
-- **Histórico**: contenido legado preservado en [`respaldo/`](respaldo/)
-
-### [PLANIFICADO] Planificado
-- Sistema automatizado de métricas DORA
-- Scripts de gestión de requisitos
-- Pipeline completo de SDLC con agentes IA
-- Automatización de deployment con GitHub Actions
-
-## Inicio rápido
-
-### Requisitos locales
-- Python 3.11+
-- [Vagrant](https://developer.hashicorp.com/vagrant/install) (para bases de datos)
-- VirtualBox 7+
-- Cliente PostgreSQL (`postgresql-client`)
-- Cliente MariaDB (`mariadb-client`)
-
-### Setup de entorno
-
-1. **Clonar repositorio**:
- ```bash
- git clone https://github.com/2-Coatl/IACT---project.git
- cd IACT---project
- ```
-
-2. **Crear entorno virtual Python**:
- ```bash
- python3.11 -m venv .venv
- source .venv/bin/activate
- pip install -r requirements.txt
- ```
-
-3. **[ATENCION] Levantar bases de datos** (requerido):
- ```bash
- vagrant up # Levanta PostgreSQL:15432 y MariaDB:13306
- ```
-
-4. **Verificar servicios** ([IMPLEMENTADO] Runbook + script):
- - Guía manual: [`docs/operaciones/verificar_servicios.md`](docs/operaciones/verificar_servicios.md)
- - Script automatizado: `./scripts/verificar_servicios.sh` (`--dry-run` disponible para CI)
-
-## Flujo de desarrollo
-
-### 1. Configurar variables de entorno
-
-Crea un archivo `.env` en la raíz con las credenciales de bases de datos:
-
-```bash
-# PostgreSQL (analytics)
-DB_ANALYTICS_HOST=127.0.0.1
-DB_ANALYTICS_PORT=15432
-DB_ANALYTICS_NAME=iact_analytics
-DB_ANALYTICS_USER=django_user
-DB_ANALYTICS_PASSWORD=django_pass
-
-# MariaDB (IVR read-only)
-DB_IVR_HOST=127.0.0.1
-DB_IVR_PORT=13306
-DB_IVR_NAME=ivr_data
-DB_IVR_USER=django_user
-DB_IVR_PASSWORD=django_pass
-```
-
-> **LLMs soportados**: Los agentes SDLC detectan automáticamente el mejor proveedor
-> disponible entre Claude (Anthropic), ChatGPT (OpenAI) y modelos fine-tuned vía
-> Hugging Face (TinyLlama, Phi-3, etc.). Consulta la documentación de AI en
-> [`docs/ai/`](docs/ai/) para más detalles sobre configuración de agentes y técnicas de prompting.
-
-### 2. Ejecutar migraciones
-
-```bash
-python manage.py migrate
-```
-
-### 3. Crear superusuario
-
-```bash
-python manage.py createsuperuser
-```
-
-### 4. Ejecutar tests
-
-[IMPLEMENTADO] **Implementado**:
-```bash
-# Tests completos
-./scripts/run_all_tests.sh
-
-# Solo backend
-pytest
-
-# Solo validaciones
-./scripts/validate_critical_restrictions.sh
-```
-
-[PLANIFICADO] **Planificado**: Suite completa con cobertura DORA metrics
-
-### 5. Desarrollo local
-
-[PLANIFICADO] **Pendiente**: El servidor de desarrollo Django aún no está configurado en este proyecto.
-
-**Alternativa temporal**: Consulta [`docs/gobernanza/procesos/procedimientos/procedimiento_desarrollo_local.md`](docs/gobernanza/procesos/procedimientos/procedimiento_desarrollo_local.md)
-
-## Infraestructura CPython
-Los scripts disponibles dentro de `infrastructure/cpython/scripts/` son:
-
-| Script | Descripción | Ejemplo |
-| --- | --- | --- |
-| `build_cpython.sh` | Compila CPython dentro de la VM o desde el host. | `./infrastructure/cpython/scripts/build_cpython.sh 3.12.6` |
-| `validate_build.sh` | Verifica la integridad del artefacto generado (`.tgz` + `.sha256`). | `./infrastructure/cpython/scripts/validate_build.sh cpython-3.12.6-ubuntu20.04-build1.tgz` |
-| `install_prebuilt_cpython.sh` | Instala un artefacto precompilado existente en un destino (`INSTALLPREFIX`). | `VERSION=3.12.6 INSTALLPREFIX=/opt/python ./infrastructure/cpython/scripts/install_prebuilt_cpython.sh` |
-
-Consulta [`docs/infraestructura/README.md`](docs/infraestructura/README.md) y [`docs/infraestructura/CHANGELOG-cpython.md`](docs/infraestructura/CHANGELOG-cpython.md) para conocer más detalles sobre estos flujos.
-
-## Calidad y contribución
-
-### Tests y validación ([IMPLEMENTADO] Parcialmente implementado)
-
-Ejecuta validaciones antes de abrir un PR:
-
-```bash
-# [IMPLEMENTADO] Tests unitarios disponibles
-pytest -c docs/pytest.ini docs/testing
-
-# [IMPLEMENTADO] Validaciones de shell y gates en cascada
-./scripts/run_all_tests.sh --skip-frontend --skip-security
-
-# [IMPLEMENTADO] Validaciones de restricciones críticas (RNF-002: NO Redis)
-./scripts/validate_critical_restrictions.sh
-```
-
-### Métricas de calidad ([PLANIFICADO] Automatización pendiente)
-
-**Targets del proyecto**:
-- Cobertura de código: >= 80%
-- Test Pyramid: 60% unit / 30% integration / 10% E2E
-- Complejidad ciclomática: <= 10
-- MTTR para bugs críticos: <= 2 días
-
-**Estado actual**: Las métricas se generan con [`scripts/dora_metrics.py`](scripts/dora_metrics.py) (baseline local). Ver [`logs_data/SCHEMA.md`](logs_data/SCHEMA.md)
-
-### Workflow de commits
-
-1. **Sigue TDD**: Red → Green → Refactor
-2. **Conventional Commits**: `feat:`, `fix:`, `docs:`, `refactor:`, etc.
-3. **Evita `--no-verify`**: Si un hook falla, corrígelo en lugar de saltearlo
-4. **Coverage mínimo**: 80% en módulos Python modificados
-
-### Gestión de issues y agentes
-
-- Plantillas disponibles en `.github/ISSUE_TEMPLATE/` guían la información mínima para bugs, features y solicitudes asistidas.
-- Las solicitudes de feature exigen un ExecPlan conforme a `.agent/PLANS.md` y enlazan el documento vivo correspondiente.
-- Para coordinar automatizaciones revisa `.agent/agents/README.md` y selecciona el agente adecuado (GitOps, Release, Security, etc.).
-
-### Guías y estándares ([IMPLEMENTADO] Documentadas)
-
-- **[Guía de Estilo](docs/gobernanza/GUIA_ESTILO.md)** - Convenciones obligatorias (NO emojis, Conventional Commits)
-- **[Procedimiento de Desarrollo Local](docs/operaciones/procedimiento_desarrollo_local.md)** - Setup detallado
-- **[Estrategia de QA](docs/gobernanza/qa/)** - Testing strategy
-- **[Documentación de AI/Agentes](docs/ai/)** - Sistema de agentes y técnicas de prompting
-
-## Arquitectura y Stack
-
-### Stack técnico ([IMPLEMENTADO] Implementado)
-- **Backend**: Django 5.1, Python 3.11+
-- **Bases de datos**:
- - PostgreSQL 16 (analytics, sessions, metrics)
- - MariaDB 10.11 (IVR read-only)
- - [PLANIFICADO] Cassandra (logs - planificado)
-- **Frontend**: [PLANIFICADO] React + Redux Toolkit (planificado)
-- **Infrastructure**: Vagrant, VirtualBox, CPython builder
-
-### Restricciones arquitectónicas críticas ([IMPLEMENTADO] Validadas)
-
-[ATENCION] **RNF-002**: Sesiones DEBEN estar en base de datos
-```python
-# PROHIBIDO
-SESSION_ENGINE = 'django.contrib.sessions.backends.cache' # Redis/Memcached
-
-# OBLIGATORIO
-SESSION_ENGINE = 'django.contrib.sessions.backends.db' # PostgreSQL
-```
-
-Otras restricciones:
-- [NO] NO Redis, Memcached, RabbitMQ, Celery
-- [NO] NO MongoDB, Elasticsearch
-- [NO] NO Emojis en código/docs
-- [IMPLEMENTADO] Scripts primero, CI/CD después
-
-Ver: [`docs/gobernanza/estilos/GUIA_ESTILO.md`](docs/gobernanza/estilos/GUIA_ESTILO.md)
-
-### Documentación de arquitectura ([IMPLEMENTADO] Disponible)
-- ADRs: [`docs/gobernanza/adr/`](docs/gobernanza/adr/)
-- Lineamientos: [`docs/gobernanza/`](docs/gobernanza/)
-- Patrones Backend: [`docs/backend/arquitectura/`](docs/backend/arquitectura/)
-- Patrones Frontend: [`docs/frontend/arquitectura/`](docs/frontend/arquitectura/)
-
-## Navegación por rol
-
-### Desarrollador Backend
-- [`docs/backend/`](docs/backend/) - Arquitectura, diseño y requisitos
-- [`docs/backend/arquitectura/`](docs/backend/arquitectura/) - Patrones de diseño
-- [Guía de Backend](docs/backend/README.md)
-
-### Desarrollador Frontend
-- [`docs/frontend/`](docs/frontend/) - Arquitectura y componentes
-- [PLANIFICADO] UI en `ui/` (React) - En construcción
-
-### QA / Testing
-- [`docs/gobernanza/qa/`](docs/gobernanza/qa/) - Estrategia y checklists
-- [`scripts/run_all_tests.sh`](scripts/run_all_tests.sh) - Test runner
-- [`docs/backend/testing/`](docs/backend/testing/) - Tests de backend
-- [`docs/frontend/testing/`](docs/frontend/testing/) - Tests de frontend
-
-### DevOps / SRE
-- [`docs/operaciones/`](docs/operaciones/) - Runbooks operacionales
-- [`infrastructure/cpython/`](infrastructure/cpython/) - Builder CPython
-- [`scripts/`](scripts/) - Scripts de automatización
-- [`docs/devops/`](docs/devops/) - CI/CD y automatización
-- [`docs/dora/`](docs/dora/) - DORA metrics
-
-### Arquitecto
-- [`docs/gobernanza/adr/`](docs/gobernanza/adr/) - Architecture Decision Records
-- [`docs/gobernanza/`](docs/gobernanza/) - Procesos y estándares
-- [`docs/infraestructura/`](docs/infraestructura/) - Arquitectura de infraestructura
-
-### Product Owner / BA
-- [`docs/backend/requisitos/`](docs/backend/requisitos/) - Requirements tracking
-- [`docs/gobernanza/`](docs/gobernanza/) - Visión y alcance
-
-## Proyecto y planificación
-
-### Tracking activo ([IMPLEMENTADO] Disponible)
-- **Roadmap**: [`docs/gobernanza/ROADMAP.md`](docs/gobernanza/ROADMAP.md) - Visión del proyecto
-- **Tareas activas**: [`docs/gobernanza/TAREAS_ACTIVAS.md`](docs/gobernanza/TAREAS_ACTIVAS.md) - Estado actual
-- **Changelog**: [`docs/gobernanza/CHANGELOG.md`](docs/gobernanza/CHANGELOG.md) - Historial de cambios
-
-### Revisión actual
-- Análisis de integración: [`INTEGRATION_ANALYSIS_REPORT.md`](INTEGRATION_ANALYSIS_REPORT.md)
-
-## Estructura de carpetas relevante
-| Carpeta | Propósito |
-| --- | --- |
-| `docs/` | Documentación vigente, análisis y guías (ver índice consolidado). |
-| `scripts/` | Scripts de validación, CI y utilidades operativas. |
-| `infrastructure/` | Artefactos y herramientas de soporte (ej. builder de CPython). |
-| `logs_data/` | JSON temporales y reportes generados manualmente. |
-| `respaldo/` | Documentación histórica etiquetada como legado. |
-
-## Recursos adicionales
-- [Índice general de documentación](docs/INDEX.md)
-- [Guía de contribución](docs/CONTRIBUTING.md)
-- [Setup inicial](docs/SETUP.md)
-- [Onboarding](docs/ONBOARDING.md)
-- [Guía de estilo](docs/gobernanza/GUIA_ESTILO.md)
-
-Para dudas específicas consulta el directorio correspondiente en `docs/` o registra la pregunta en el backlog del proyecto.
diff --git a/api/callcentersite/callcentersite/apps/dashboard/services.py b/api/callcentersite/callcentersite/apps/dashboard/services.py
index 559e0494..d86d27d7 100644
--- a/api/callcentersite/callcentersite/apps/dashboard/services.py
+++ b/api/callcentersite/callcentersite/apps/dashboard/services.py
@@ -8,7 +8,8 @@
from io import BytesIO, StringIO
from django.contrib.auth import get_user_model
-from django.core.exceptions import PermissionDenied, ValidationError
+from django.core.exceptions import ObjectDoesNotExist, PermissionDenied, ValidationError
+from django.db import connection
from django.utils import timezone
from callcentersite.apps.users.models_permisos_granular import AuditoriaPermiso
@@ -51,7 +52,7 @@ def overview() -> Dict[str, object]:
return {
"last_update": now.isoformat(),
"widgets": [
- widget.__dict__ for widget in DashboardService.available_widgets()
+ asdict(widget) for widget in DashboardService.available_widgets()
],
}
@@ -60,6 +61,12 @@ def available_widgets() -> List[Widget]:
"""Retorna todos los widgets disponibles en el sistema."""
return list(WIDGET_REGISTRY.values())
+ @staticmethod
+ def _serialize_widgets(widget_keys: List[str]) -> List[Dict[str, str]]:
+ """Convierte identificadores de widgets a diccionarios serializables."""
+
+ return [asdict(WIDGET_REGISTRY[widget]) for widget in widget_keys if widget in WIDGET_REGISTRY]
+
@staticmethod
def exportar(
usuario_id: int,
diff --git a/api/callcentersite/callcentersite/urls.py b/api/callcentersite/callcentersite/urls.py
index b0274f0b..e2e7f7c9 100644
--- a/api/callcentersite/callcentersite/urls.py
+++ b/api/callcentersite/callcentersite/urls.py
@@ -32,7 +32,6 @@ def health_check(_request):
path("api/v1/etl/", include("callcentersite.apps.etl.urls")),
path("api/v1/permissions/", include("callcentersite.apps.permissions.urls")),
path("api/v1/llamadas/", include("callcentersite.apps.llamadas.urls")),
- path("api/v1/", include("callcentersite.apps.users.urls")),
- path("api/dora/", include("dora_metrics.urls")),
+ path("api/v1/dora/", include("dora_metrics.urls")),
path("health/", health_check, name="health"),
]
diff --git a/api/callcentersite/dora_metrics/data_catalog.py b/api/callcentersite/dora_metrics/data_catalog.py
index 62b94cf6..02134610 100644
--- a/api/callcentersite/dora_metrics/data_catalog.py
+++ b/api/callcentersite/dora_metrics/data_catalog.py
@@ -104,7 +104,7 @@ def get_dora_metrics_dataset() -> Dict[str, Any]:
}
]
},
- "api_endpoint": "/api/dora/data-catalog/dora-metrics/",
+ "api_endpoint": "/api/v1/dora/data-catalog/dora-metrics/",
"query_parameters": [
{
"name": "days",
@@ -130,11 +130,11 @@ def get_dora_metrics_dataset() -> Dict[str, Any]:
"example_queries": [
{
"description": "Get last 7 days of deployment metrics",
- "url": "/api/dora/data-catalog/dora-metrics/?days=7&phase_name=deployment"
+ "url": "/api/v1/dora/data-catalog/dora-metrics/?days=7&phase_name=deployment"
},
{
"description": "Get all metrics for specific feature",
- "url": "/api/dora/data-catalog/dora-metrics/?feature_id=FEAT-123"
+ "url": "/api/v1/dora/data-catalog/dora-metrics/?feature_id=FEAT-123"
}
]
}
@@ -187,7 +187,7 @@ def get_deployment_cycles_dataset() -> Dict[str, Any]:
}
]
},
- "api_endpoint": "/api/dora/data-catalog/deployment-cycles/",
+ "api_endpoint": "/api/v1/dora/data-catalog/deployment-cycles/",
"query_parameters": [
{
"name": "days",
@@ -238,7 +238,7 @@ def get_performance_metrics_dataset() -> Dict[str, Any]:
}
]
},
- "api_endpoint": "/api/dora/data-catalog/performance-metrics/",
+ "api_endpoint": "/api/v1/dora/data-catalog/performance-metrics/",
"query_parameters": [
{
"name": "metric_name",
@@ -298,7 +298,7 @@ def get_quality_metrics_dataset() -> Dict[str, Any]:
}
]
},
- "api_endpoint": "/api/dora/data-catalog/quality-metrics/",
+ "api_endpoint": "/api/v1/dora/data-catalog/quality-metrics/",
"query_parameters": [
{
"name": "dataset_name",
diff --git a/api/callcentersite/dora_metrics/data_ecosystem.py b/api/callcentersite/dora_metrics/data_ecosystem.py
index b6f497ff..d6d27189 100644
--- a/api/callcentersite/dora_metrics/data_ecosystem.py
+++ b/api/callcentersite/dora_metrics/data_ecosystem.py
@@ -303,8 +303,8 @@ def get_lineage_map() -> Dict[str, Any]:
"stage": "access",
"component": "REST APIs",
"endpoints": [
- "/api/dora/metrics/",
- "/api/dora/data-catalog/dora-metrics/"
+ "/api/v1/dora/metrics/",
+ "/api/v1/dora/data-catalog/dora-metrics/"
]
}
]
diff --git a/api/callcentersite/dora_metrics/templates/dora_metrics/dashboard.html b/api/callcentersite/dora_metrics/templates/dora_metrics/dashboard.html
index 395e14a8..628492f1 100644
--- a/api/callcentersite/dora_metrics/templates/dora_metrics/dashboard.html
+++ b/api/callcentersite/dora_metrics/templates/dora_metrics/dashboard.html
@@ -325,7 +325,7 @@
Mean Time to Recovery
};
// Deployment Frequency Chart
- fetch(`/api/dora/charts/deployment-frequency/?days=${days}`)
+ fetch(`/api/v1/dora/charts/deployment-frequency/?days=${days}`)
.then(response => response.json())
.then(data => {
new Chart(document.getElementById('deploymentFrequencyChart'), {
@@ -345,7 +345,7 @@ Mean Time to Recovery
});
// Lead Time Trends Chart
- fetch(`/api/dora/charts/lead-time-trends/?days=${days}`)
+ fetch(`/api/v1/dora/charts/lead-time-trends/?days=${days}`)
.then(response => response.json())
.then(data => {
new Chart(document.getElementById('leadTimeTrendsChart'), {
@@ -366,7 +366,7 @@ Mean Time to Recovery
});
// Change Failure Rate Chart
- fetch(`/api/dora/charts/change-failure-rate/?days=${days}`)
+ fetch(`/api/v1/dora/charts/change-failure-rate/?days=${days}`)
.then(response => response.json())
.then(data => {
new Chart(document.getElementById('changeFailureRateChart'), {
@@ -400,7 +400,7 @@ Mean Time to Recovery
});
// MTTR Chart
- fetch(`/api/dora/charts/mttr/?days=${days}`)
+ fetch(`/api/v1/dora/charts/mttr/?days=${days}`)
.then(response => response.json())
.then(data => {
new Chart(document.getElementById('mttrChart'), {
diff --git a/api/callcentersite/dora_metrics/views.py b/api/callcentersite/dora_metrics/views.py
index 06eeb4f3..47cac228 100644
--- a/api/callcentersite/dora_metrics/views.py
+++ b/api/callcentersite/dora_metrics/views.py
@@ -36,7 +36,7 @@
@require_http_methods(["GET"])
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def dora_metrics_summary(request):
- """GET /api/dora/metrics - Summary ultimos 30 dias."""
+ """GET /api/v1/dora/metrics - Summary ultimos 30 dias."""
days = int(request.GET.get("days", 30))
cutoff = timezone.now() - timedelta(days=days)
@@ -71,7 +71,7 @@ def dora_metrics_summary(request):
@require_http_methods(["POST"])
def dora_metrics_create(request):
- """POST /api/dora/metrics - Crear metrica."""
+ """POST /api/v1/dora/metrics - Crear metrica."""
data = json.loads(request.body)
metric = DORAMetric.objects.create(
@@ -339,7 +339,7 @@ def calculate_dora_classification(deployment_count, days, lead_time_hours, cfr,
@require_http_methods(["GET"])
def data_catalog_index(request):
"""
- GET /api/dora/data-catalog/ - Complete data catalog.
+ GET /api/v1/dora/data-catalog/ - Complete data catalog.
Returns structured metadata about all available datasets for AI access.
Implements DORA 2025 AI Capability 6: AI-accessible Internal Data.
@@ -352,7 +352,7 @@ def data_catalog_index(request):
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def data_catalog_dora_metrics(request):
"""
- GET /api/dora/data-catalog/dora-metrics/ - Query DORA metrics data.
+ GET /api/v1/dora/data-catalog/dora-metrics/ - Query DORA metrics data.
Query parameters:
- days: Number of days to query (default: 30)
@@ -378,7 +378,7 @@ def data_catalog_dora_metrics(request):
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def data_catalog_deployment_cycles(request):
"""
- GET /api/dora/data-catalog/deployment-cycles/ - Query deployment cycles.
+ GET /api/v1/dora/data-catalog/deployment-cycles/ - Query deployment cycles.
Query parameters:
- days: Number of days to query (default: 30)
@@ -401,7 +401,7 @@ def data_catalog_deployment_cycles(request):
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def data_catalog_aggregated_stats(request):
"""
- GET /api/dora/data-catalog/aggregated-stats/ - Get aggregated statistics.
+ GET /api/v1/dora/data-catalog/aggregated-stats/ - Get aggregated statistics.
Query parameters:
- days: Number of days to analyze (default: 30)
@@ -423,7 +423,7 @@ def data_catalog_aggregated_stats(request):
@require_http_methods(["GET"])
def data_quality_assessment(request):
"""
- GET /api/dora/ecosystem/quality/ - Data quality assessment.
+ GET /api/v1/dora/ecosystem/quality/ - Data quality assessment.
Query parameters:
- days: Number of days to assess (default: 30)
@@ -441,7 +441,7 @@ def data_quality_assessment(request):
@require_http_methods(["GET"])
def data_governance_status(request):
"""
- GET /api/dora/ecosystem/governance/ - Data governance status.
+ GET /api/v1/dora/ecosystem/governance/ - Data governance status.
Returns current data governance policies and compliance status.
"""
@@ -453,7 +453,7 @@ def data_governance_status(request):
@require_http_methods(["GET"])
def data_lineage_map(request):
"""
- GET /api/dora/ecosystem/lineage/ - Data lineage map.
+ GET /api/v1/dora/ecosystem/lineage/ - Data lineage map.
Returns complete data lineage and flow information.
"""
@@ -465,7 +465,7 @@ def data_lineage_map(request):
@require_http_methods(["GET"])
def ecosystem_health_status(request):
"""
- GET /api/dora/ecosystem/health/ - Overall ecosystem health.
+ GET /api/v1/dora/ecosystem/health/ - Overall ecosystem health.
Returns comprehensive ecosystem health status including:
- Overall health score
@@ -481,7 +481,7 @@ def ecosystem_health_status(request):
@require_http_methods(["GET"])
def metadata_registry(request):
"""
- GET /api/dora/ecosystem/metadata/ - Metadata registry.
+ GET /api/v1/dora/ecosystem/metadata/ - Metadata registry.
Returns complete metadata registry for all datasets.
"""
@@ -499,7 +499,7 @@ def metadata_registry(request):
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def trend_analysis_deployment_frequency(request):
"""
- GET /api/dora/analytics/trends/deployment-frequency/ - Deployment frequency trend.
+ GET /api/v1/dora/analytics/trends/deployment-frequency/ - Deployment frequency trend.
Query parameters:
- days: Number of days to analyze (default: 90)
@@ -517,7 +517,7 @@ def trend_analysis_deployment_frequency(request):
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def trend_analysis_lead_time(request):
"""
- GET /api/dora/analytics/trends/lead-time/ - Lead time trend analysis.
+ GET /api/v1/dora/analytics/trends/lead-time/ - Lead time trend analysis.
Query parameters:
- days: Number of days to analyze (default: 90)
@@ -535,7 +535,7 @@ def trend_analysis_lead_time(request):
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def comparative_period_over_period(request):
"""
- GET /api/dora/analytics/comparative/period-over-period/ - Period comparison.
+ GET /api/v1/dora/analytics/comparative/period-over-period/ - Period comparison.
Query parameters:
- current_days: Current period days (default: 30)
@@ -558,7 +558,7 @@ def comparative_period_over_period(request):
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def historical_monthly_report(request):
"""
- GET /api/dora/analytics/historical/monthly/ - Monthly historical report.
+ GET /api/v1/dora/analytics/historical/monthly/ - Monthly historical report.
Query parameters:
- months: Number of months to include (default: 6)
@@ -576,7 +576,7 @@ def historical_monthly_report(request):
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def anomaly_detection(request):
"""
- GET /api/dora/analytics/anomalies/ - Detect anomalies.
+ GET /api/v1/dora/analytics/anomalies/ - Detect anomalies.
Query parameters:
- days: Number of days to analyze (default: 30)
@@ -594,7 +594,7 @@ def anomaly_detection(request):
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def performance_forecast(request):
"""
- GET /api/dora/analytics/forecast/ - Performance forecast.
+ GET /api/v1/dora/analytics/forecast/ - Performance forecast.
Query parameters:
- historical_months: Months of historical data to use (default: 6)
@@ -619,7 +619,7 @@ def performance_forecast(request):
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def ai_telemetry_record(request):
"""
- POST /api/dora/ai-telemetry/record/ - Registrar decision IA.
+ POST /api/v1/dora/ai-telemetry/record/ - Registrar decision IA.
Body:
{
@@ -658,7 +658,7 @@ def ai_telemetry_record(request):
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def ai_telemetry_feedback(request, telemetry_id):
"""
- POST /api/dora/ai-telemetry//feedback/ - Registrar feedback humano.
+ """POST /api/v1/dora/ai-telemetry/<telemetry_id>/feedback/ - Registrar feedback humano.
Body:
{
@@ -685,7 +685,7 @@ def ai_telemetry_feedback(request, telemetry_id):
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def ai_telemetry_stats(request):
"""
- GET /api/dora/ai-telemetry/stats/ - Estadisticas generales.
+ GET /api/v1/dora/ai-telemetry/stats/ - Estadisticas generales.
Query parameters:
- days: Number of days to analyze (default: 30)
@@ -710,7 +710,7 @@ def ai_telemetry_stats(request):
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def ai_telemetry_agent_stats(request, agent_id):
"""
- GET /api/dora/ai-telemetry/agent// - Stats por agente.
+ GET /api/v1/dora/ai-telemetry/agent/<agent_id>/ - Stats por agente.
Query parameters:
- days: Number of days to analyze (default: 30)
@@ -726,7 +726,7 @@ def ai_telemetry_agent_stats(request, agent_id):
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def ai_telemetry_accuracy(request):
"""
- GET /api/dora/ai-telemetry/accuracy/ - Metricas accuracy.
+ GET /api/v1/dora/ai-telemetry/accuracy/ - Metricas accuracy.
Query parameters:
- agent_id: Filter by agent (optional)
@@ -774,7 +774,7 @@ def ai_telemetry_accuracy(request):
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def predict_deployment_risk(request):
"""
- POST /api/dora/predict/deployment-risk/ - Predecir riesgo de deployment.
+ POST /api/v1/dora/predict/deployment-risk/ - Predecir riesgo de deployment.
Body:
{
@@ -828,7 +828,7 @@ def predict_deployment_risk(request):
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def predict_model_stats(request):
"""
- GET /api/dora/predict/model-stats/ - Estadisticas del modelo ML.
+ GET /api/v1/dora/predict/model-stats/ - Estadisticas del modelo ML.
"""
predictor = DeploymentRiskPredictor()
@@ -853,7 +853,7 @@ def predict_model_stats(request):
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def predict_retrain_model(request):
"""
- POST /api/dora/predict/retrain/ - Re-entrenar modelo ML.
+ POST /api/v1/dora/predict/retrain/ - Re-entrenar modelo ML.
Body:
{
@@ -898,7 +898,7 @@ def predict_retrain_model(request):
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def predict_feature_importance(request):
"""
- GET /api/dora/predict/feature-importance/ - Feature importance del modelo.
+ GET /api/v1/dora/predict/feature-importance/ - Feature importance del modelo.
"""
predictor = DeploymentRiskPredictor()
@@ -940,7 +940,7 @@ def predict_feature_importance(request):
@require_http_methods(["GET"])
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def remediation_problems(request):
- """GET /api/dora/remediation/problems/ - Lista problemas detectados."""
+ """GET /api/v1/dora/remediation/problems/ - Lista problemas detectados."""
problems = ProblemDetector.detect_all_problems()
return JsonResponse({"total_problems": len(problems), "problems": [p.to_dict() for p in problems]})
@@ -948,7 +948,7 @@ def remediation_problems(request):
@require_http_methods(["POST"])
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def remediation_propose_fix(request):
- """POST /api/dora/remediation/propose-fix/ - Proponer fix para problema."""
+ """POST /api/v1/dora/remediation/propose-fix/ - Proponer fix para problema."""
data = json.loads(request.body)
problems = ProblemDetector.detect_all_problems()
problem = next((p for p in problems if p.problem_type == data.get("problem_type")), None)
@@ -961,7 +961,7 @@ def remediation_propose_fix(request):
@require_http_methods(["POST"])
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def remediation_execute(request):
- """POST /api/dora/remediation/execute/ - Ejecutar fix."""
+ """POST /api/v1/dora/remediation/execute/ - Ejecutar fix."""
data = json.loads(request.body)
problems = ProblemDetector.detect_all_problems()
problem = next((p for p in problems if p.problem_type == data.get("problem_type")), None)
@@ -976,6 +976,6 @@ def remediation_execute(request):
@require_http_methods(["POST"])
@throttle_classes([BurstRateThrottle, SustainedRateThrottle])
def remediation_rollback(request, execution_id):
- """POST /api/dora/remediation/rollback// - Rollback fix."""
+ """POST /api/v1/dora/remediation/rollback/<execution_id>/ - Rollback fix."""
result = RemediationEngine.rollback_fix(execution_id)
return JsonResponse(result)
diff --git a/api/callcentersite/tests/integration/test_dora_versioning.py b/api/callcentersite/tests/integration/test_dora_versioning.py
new file mode 100644
index 00000000..23c021bf
--- /dev/null
+++ b/api/callcentersite/tests/integration/test_dora_versioning.py
@@ -0,0 +1,17 @@
+"""Routing tests for DORA API versioning."""
+
+from django.test import SimpleTestCase
+from django.urls import Resolver404, resolve
+
+
+class DoraVersioningRoutingTest(SimpleTestCase):
+ """Validate DORA endpoints follow documented versioned scheme."""
+
+ def test_versioned_endpoint_is_available(self):
+ match = resolve("/api/v1/dora/metrics/")
+
+ self.assertEqual(match.func.__name__, "dora_metrics_summary")
+
+ def test_unversioned_endpoint_is_not_routed(self):
+ with self.assertRaises(Resolver404):
+ resolve("/api/dora/metrics/")
diff --git a/docs/frontend/arquitectura/README.md b/docs/frontend/arquitectura/README.md
index 6e30e498..605643f4 100644
--- a/docs/frontend/arquitectura/README.md
+++ b/docs/frontend/arquitectura/README.md
@@ -18,6 +18,7 @@ Decisiones arquitectónicas, patrones de diseño y lineamientos técnicos espec
- [`lineamientos_codigo.md`](lineamientos_codigo.md) - Guías de desarrollo frontend
- [`estrategia_integracion_backend.md`](estrategia_integracion_backend.md) - Plan de integración y fallback frente a brechas del backend
- [`TODO.md`](TODO.md) - Backlog arquitectónico para absorber brechas del backend
+- [`validacion_callcentersite_api.md`](validacion_callcentersite_api.md) - Validación del backend real frente a los lineamientos y mocks del frontend
- [`ejemplos-ui-design.md`](ejemplos-ui-design.md) - Prompt engineering avanzado para diseño de UI
- [`shared_webpack_configs.md`](shared_webpack_configs.md) - Guía de configuraciones Webpack compartidas para microfrontends single-spa
- [`microfrontends_canvas.md`](microfrontends_canvas.md) - Canvas de decisión para seleccionar arquitecturas de microfrontends
diff --git a/docs/frontend/arquitectura/TODO.md b/docs/frontend/arquitectura/TODO.md
index 616b796e..ab0d4edd 100644
--- a/docs/frontend/arquitectura/TODO.md
+++ b/docs/frontend/arquitectura/TODO.md
@@ -62,12 +62,12 @@ Lista priorizada de trabajos arquitectónicos necesarios para que la UI absorba
### P2 · Transición hacia backend real
-- [ ] **Health check y monitoreo** `3SP`
+- [x] **Health check y monitoreo** `3SP`
- Consumir `/health` (cuando exista) y almacenar último estado en contexto global.
- Generar pruebas de integración que simulen backend intermitente.
- Referencia: roadmap de fases de transición.【F:docs/frontend/arquitectura/estrategia_integracion_backend.md†L110-L118】
-- [ ] **Métricas de dependencia de mocks** `3SP`
+- [x] **Métricas de dependencia de mocks** `3SP`
- Registrar en analytics internas cuántos módulos operan en modo mock.
- Alimentar reportes para priorizar endpoints backend.
- Referencia: acciones priorizadas sobre métricas de mocks.【F:docs/frontend/arquitectura/estrategia_integracion_backend.md†L132-L135】
diff --git a/docs/frontend/arquitectura/validacion_callcentersite_api.md b/docs/frontend/arquitectura/validacion_callcentersite_api.md
new file mode 100644
index 00000000..f6e90119
--- /dev/null
+++ b/docs/frontend/arquitectura/validacion_callcentersite_api.md
@@ -0,0 +1,52 @@
+---
+id: DOC-FRONTEND-API-VALIDATION
+estado: activo
+propietario: equipo-frontend
+ultima_actualizacion: 2025-11-13
+relacionados: ["DOC-FRONTEND-BFF-STRATEGY", "DOC-FRONTEND-ARQ-TODO", "DOC-FRONTEND-INDEX"]
+---
+
+# Validación de `api/callcentersite` frente a lineamientos de frontend
+
+## 1. Fuentes revisadas
+- **Lineamientos y expectativas**: `analisis_api_frontend.md` (inventario de endpoints y casos de uso esperados), `estrategia_integracion_backend.md` (dependencias de mocks y ausencia de endpoints), `TODO.md` (acciones prioritarias).【F:docs/frontend/arquitectura/analisis_api_frontend.md†L35-L102】【F:docs/frontend/arquitectura/estrategia_integracion_backend.md†L15-L118】【F:docs/frontend/arquitectura/TODO.md†L11-L97】
+- **Código backend**: árbol de rutas principal y configuración de datos de `api/callcentersite`.【F:api/callcentersite/callcentersite/urls.py†L15-L38】【F:api/callcentersite/callcentersite/settings/base.py†L97-L136】
+- **Configuración de entorno**: plantilla `.env` que define dependencias de base de datos y variables de despliegue.【F:api/callcentersite/env.example†L4-L116】
+
+## 2. Inventario real de APIs expuestas
+El `urls.py` principal publica más dominios de los mencionados en la documentación actual. Endpoints detectados:
+
+| Ruta base | Estado observado |
+| --- | --- |
+| `/api/schema/`, `/api/docs/` | Documentación OpenAPI y Swagger generadas por DRF Spectacular (no referenciadas en docs de frontend).【F:api/callcentersite/callcentersite/urls.py†L17-L22】 |
+| `/api/v1/usuarios/` | CRUD de usuarios + registro público mediante `UserViewSet` y `UserRegistrationView`. Requiere base de datos real para operar.【F:api/callcentersite/callcentersite/urls.py†L23-L24】【F:api/callcentersite/callcentersite/apps/users/urls.py†L8-L17】 |
+| `/api/v1/permissions/` | Módulo granular de permisos (coincide con lo esperado por frontend).【F:api/callcentersite/callcentersite/urls.py†L33-L34】 |
+| `/api/v1/dashboard/` | Widgets con valores `0` por defecto; confirma ausencia de datos operativos reales.【F:api/callcentersite/callcentersite/urls.py†L25-L26】【F:api/callcentersite/callcentersite/apps/dashboard/widgets.py†L13-L18】 |
+| `/api/v1/configuracion/` | Dos módulos: `configuration` (restaurar/importar/exportar) y `configuracion` legacy. Ninguno está documentado en los lineamientos vigentes.【F:api/callcentersite/callcentersite/urls.py†L24-L27】【F:api/callcentersite/callcentersite/apps/configuration/urls.py†L13-L18】 |
+| `/api/v1/presupuestos/`, `/api/v1/politicas/`, `/api/v1/excepciones/` | CRUD con flujos de aprobación/versionado declarados en los lineamientos, pero no señalados como operativos en la estrategia actual.【F:api/callcentersite/callcentersite/urls.py†L27-L29】 |
+| `/api/v1/reportes/` | Rutas presentes para reportes IVR; la documentación de frontend asume ausencia de endpoints reales.【F:api/callcentersite/callcentersite/urls.py†L29-L30】 |
+| `/api/v1/notifications/` | API de mensajes internos (`messages`, `mark_read`, `unread`). No aparece en la estrategia de mocks actual.【F:api/callcentersite/callcentersite/urls.py†L30-L31】【F:api/callcentersite/callcentersite/apps/notifications/urls.py†L9-L13】 |
+| `/api/v1/etl/` | Consola de jobs y errores ETL; tampoco documentada en el análisis vigente.【F:api/callcentersite/callcentersite/urls.py†L31-L33】 |
+| `/api/v1/llamadas/` | CRUD de catálogos, llamadas y grabaciones; depende de datos en BD según la estrategia.【F:api/callcentersite/callcentersite/urls.py†L33-L35】 |
+| `/api/v1/dora/` | Métricas DORA expuestas vía módulo dedicado; el análisis las menciona pero no identifica su publicación real.【F:api/callcentersite/callcentersite/urls.py†L35-L37】 |
+| `/health/` | Endpoint de health check ya disponible; pendiente de uso por la capa de servicios resilientes descrita en TODO P2.【F:api/callcentersite/callcentersite/urls.py†L9-L38】【F:docs/frontend/arquitectura/TODO.md†L63-L76】 |
+
+## 3. Brechas frente a la documentación de frontend
+1. **Cobertura subestimada**: la estrategia de integración indica que sólo existen dashboard, permisos y llamadas, pero el backend publica once dominios adicionales (usuarios, configuración dual, presupuestos, políticas, excepciones, reportes, notificaciones, ETL, DORA, schema/docs, health).【F:docs/frontend/arquitectura/estrategia_integracion_backend.md†L15-L33】【F:api/callcentersite/callcentersite/urls.py†L15-L38】
+2. **Persistencia obligatoria**: los settings siguen apuntando a PostgreSQL y MariaDB sin fallback; con bases ausentes las vistas fallarán o devolverán datos vacíos, confirmando la necesidad de mocks indicada en los lineamientos.【F:api/callcentersite/callcentersite/settings/base.py†L97-L126】【F:api/callcentersite/callcentersite/apps/dashboard/widgets.py†L13-L18】
+3. **Endpoint `/api/config`**: continúa sin existir; `useAppConfig` seguirá colgando si no se mantiene el bootstrapping con mocks definido en TODO P0.【F:docs/frontend/arquitectura/estrategia_integracion_backend.md†L25-L40】【F:docs/frontend/arquitectura/TODO.md†L25-L36】
+4. **Health check desaprovechado**: el backlog P2 pide consumir `/health`, pero el endpoint ya está operativo; falta integrarlo en la capa de servicios para habilitar degradación controlada y métricas de dependencia de mocks.【F:api/callcentersite/callcentersite/urls.py†L9-L38】【F:docs/frontend/arquitectura/TODO.md†L63-L76】
+5. **Sin equivalentes mock/contratos**: nuevos dominios (configuración paralela, notificaciones, ETL, reportes) no tienen mocks ni contratos tipados en `ui/src/mocks`, por lo que el App Shell no puede representar su estado de manera consistente con la guía de composición dinámica.【F:docs/frontend/arquitectura/analisis_api_frontend.md†L111-L134】【F:docs/frontend/arquitectura/TODO.md†L49-L62】
+
+## 4. Pasos de ejecución recomendados (ambiente local sin bases externas)
+1. **Crear entorno virtual y variables**: `python -m venv .venv && source .venv/bin/activate`; copiar `api/callcentersite/env.example` a `.env` ajustando credenciales locales o flags de desarrollo.【F:api/callcentersite/env.example†L4-L116】
+2. **Instalar dependencias**: `pip install -r api/callcentersite/requirements/dev.txt` (incluye base y herramientas de calidad según la cadena de includes).【F:api/callcentersite/requirements/dev.txt†L7-L22】
+3. **Configurar bases de datos temporales**: mientras no haya Postgres/MariaDB disponibles, declarar dos bases SQLite (aplicación y IVR) en un settings local que sobreescriba `DATABASES` replicando la topología dual del archivo `base.py`. Esta sobreescritura debe acompañarse de pruebas unitarias para validar el router de lectura y mantener la política TDD del backlog.【F:api/callcentersite/callcentersite/settings/base.py†L97-L126】【F:docs/frontend/arquitectura/TODO.md†L63-L79】
+4. **Migrar y poblar datos de prueba**: ejecutar `python manage.py migrate` usando el settings local; cargar fixtures mínimas para usuarios/permisos y llamadas que permitan validar los flujos descritos en `analisis_api_frontend.md` antes de habilitar llamadas reales. Mantener cobertura ≥80 % en nuevas pruebas de regresión.【F:docs/frontend/arquitectura/analisis_api_frontend.md†L35-L102】
+5. **Levantar servidor y verificar contratos**: `python manage.py runserver` y revisar `/api/docs/` para confirmar los contratos publicados; validar `/health/` para alimentar la capa de servicios resilientes pendiente en TODO P2.【F:api/callcentersite/callcentersite/urls.py†L17-L38】【F:docs/frontend/arquitectura/TODO.md†L63-L76】
+
+## 5. Cambios propuestos (Clean Code + TDD)
+- **Actualizar documentos de estrategia** para reflejar el inventario real de endpoints y aprovechar el health check existente, evitando decisiones basadas en supuestos obsoletos.【F:docs/frontend/arquitectura/estrategia_integracion_backend.md†L15-L33】【F:api/callcentersite/callcentersite/urls.py†L15-L38】
+- **Introducir settings locales con SQLite dual** (app + IVR) y pruebas que validen el router `IVRReadOnlyRouter`, permitiendo desarrollo offline sin romper la separación de fuentes de datos.【F:api/callcentersite/callcentersite/settings/base.py†L97-L126】
+- **Ampliar mocks y contratos** para nuevos dominios (configuración paralela, notificaciones, ETL, reportes) siguiendo el patrón de `createResilientService` y los feature flags definidos en TODO P1/P2, asegurando degradación explícita y métricas de dependencia de mocks.【F:docs/frontend/arquitectura/TODO.md†L49-L79】
+- **Pruebas de smoke automatizadas** sobre `/api/schema/`, `/api/docs/` y `/health/` para detectar regresiones tempranas y alimentar el tablero de métricas requerido por el backlog (cobertura mínima 80 %).【F:api/callcentersite/callcentersite/urls.py†L15-L38】【F:docs/frontend/arquitectura/TODO.md†L63-L79】
diff --git a/ui/SERVICES_SETUP.md b/ui/SERVICES_SETUP.md
new file mode 100644
index 00000000..0ee4a03a
--- /dev/null
+++ b/ui/SERVICES_SETUP.md
@@ -0,0 +1,198 @@
+# UI Services Implementation Guide
+
+## Overview
+
+This document describes the implementation of 11 new service modules in the UI layer to integrate with all backend endpoints exposed by the IACT API.
+
+## Architecture
+
+### Permissions Model
+
+The system implements a granular permissions model based on functional groups rather than hierarchical roles:
+
+**Database Schema:**
+- FUNCIONES (Functions): Resources like dashboards, users, calls
+- CAPACIDADES (Capabilities): Actions like view, create, edit, delete
+- GRUPOS_PERMISOS (Permission Groups): Functional descriptive groups
+- USUARIOS (Users): Can belong to multiple groups simultaneously
+- PERMISOS_EXCEPCIONALES (Exceptional Permissions): Temporary or permanent overrides
+- AUDITORIA_PERMISOS (Audit Log): Track all permission checks
+
+**Key Principles:**
+- NO hierarchical roles like "Admin", "Supervisor", "Agent"
+- YES functional groups like "Customer Service", "Team Management"
+- Users can have multiple groups simultaneously
+- Groups are combinable and flexible
+- Clear description of what each person can do
+
+### Service Pattern
+
+All services follow the resilient service pattern defined in `createResilientService.js`:
+
+```javascript
+createResilientService({
+ fetchFromApi: async () => { /* API call */ },
+ fetchFromMock: async () => { /* mock fallback */ },
+ shouldUseMock: () => flags.backendIntegrity.SOURCE === 'MOCK',
+ serviceName: 'ServiceName'
+})
+```
+
+## Services Implemented
+
+### 1. UsersService
+- Endpoint: `/api/v1/usuarios/`
+- Operations: CRUD, suspend, reactivate, assign_groups
+- Mock: `usuarios.json`
+
+### 2. DashboardService
+- Endpoint: `/api/v1/dashboard/`
+- Operations: overview, export, customize, share
+- Mock: `dashboard.json`
+
+### 3. ConfiguracionService (Legacy)
+- Endpoint: `/api/v1/configuracion/`
+- Operations: list, detail, modify, export, import, history, audit
+- Mock: `configuracion.json`
+
+### 4. ConfigurationService (Parallel Module)
+- Endpoint: `/api/v1/configuracion/`
+- Operations: list, edit, export, import, restore
+- Mock: `configuration.json`
+
+### 5. PresupuestosService
+- Endpoint: `/api/v1/presupuestos/`
+- Operations: CRUD, approve, reject, export
+- Mock: `presupuestos.json`
+
+### 6. PoliticasService
+- Endpoint: `/api/v1/politicas/`
+- Operations: CRUD, publish, archive, new_version
+- Mock: `politicas.json`
+
+### 7. ExcepcionesService
+- Endpoint: `/api/v1/excepciones/`
+- Operations: CRUD, approve, reject, export
+- Mock: `excepciones.json`
+
+### 8. ReportesService
+- Endpoint: `/api/v1/reportes/`
+- Operations: list by type, advanced filters, unified export
+- Mock: `reportes.json`
+
+### 9. NotificationsService
+- Endpoint: `/api/v1/notifications/messages/`
+- Operations: CRUD, mark_read, unread, unread_count
+- Mock: `notifications.json`
+
+### 10. ETLService
+- Endpoint: `/api/v1/etl/jobs/`, `/api/v1/etl/errors/`
+- Operations: read-only queries, stats, summary, recent_failures, by_severity
+- Mock: `etl.json`
+
+### 11. DORAMetricsService
+- Endpoint: `/api/v1/dora/`
+- Operations: delivery metrics
+- Mock: `dora.json`
+
+## Setup Instructions
+
+### 1. Generate Services
+
+Run the setup script to create all service files:
+
+```bash
+cd ui
+npm run services:setup
+```
+
+This will create:
+- 11 service directories under `ui/src/services/`
+- 11 service implementation files
+- 11 test files with 80%+ coverage
+
+### 2. Verify Mocks
+
+All mock files are already created in `ui/src/mocks/`. Verify they exist:
+
+```bash
+ls -la ui/src/mocks/*.json
+```
+
+### 3. Run Tests
+
+Execute the test suite to verify all services:
+
+```bash
+cd ui
+npm test
+```
+
+Expected output:
+- All tests passing
+- Coverage above 80% threshold
+- No linting errors
+
+### 4. Update Feature Flags
+
+The services integrate with `backendIntegrity.js` for feature flags. Update environment variables as needed:
+
+```bash
+UI_BACKEND_USERS_SOURCE=MOCK
+UI_BACKEND_DASHBOARD_SOURCE=MOCK
+# ... etc
+```
+
+## Quality Standards
+
+### TDD Compliance
+- Tests written before implementation
+- Coverage maintained at 80% or higher
+- All edge cases covered
+
+### Code Quality
+- Follows existing patterns in `createResilientService.js`
+- No duplication (DRY principle)
+- Clear separation of concerns
+- Proper error handling
+
+### Commit Standards
+- Conventional Commits format
+- Clear, descriptive messages
+- Atomic commits per service
+
+## File Structure
+
+```
+ui/
+ src/
+ services/
+ users/
+ UsersService.js
+ UsersService.test.js
+ dashboard/
+ DashboardService.js
+ DashboardService.test.js
+ ... (9 more services)
+ mocks/
+ usuarios.json
+ dashboard.json
+ ... (9 more mocks)
+ scripts/
+ setup-services.js
+```
+
+## Next Steps
+
+1. Execute `npm run services:setup` to generate all service files
+2. Run `npm test` to verify implementation
+3. Integrate services into UI components as needed
+4. Update feature flags based on backend availability
+5. Document any deviations or special cases
+
+## References
+
+- Architecture documentation: `docs/frontend/arquitectura/`
+- Backend API: `api/callcentersite/callcentersite/urls.py`
+- Existing services: `ui/src/services/calls/CallsService.js`
+- Test patterns: `ui/src/services/calls/CallsService.test.js`
diff --git a/ui/package.json b/ui/package.json
index 4b264fb6..384e01e6 100644
--- a/ui/package.json
+++ b/ui/package.json
@@ -10,7 +10,8 @@
"test:watch": "jest --watch",
"test:coverage": "jest --coverage",
"lint": "eslint src/",
- "mocks:refresh": "node scripts/refresh-mocks.js"
+ "mocks:refresh": "node scripts/refresh-mocks.js",
+ "services:setup": "node scripts/setup-services.js"
},
"dependencies": {
"@reduxjs/toolkit": "^2.2.5",
diff --git a/ui/scripts/refresh-mocks.js b/ui/scripts/refresh-mocks.js
index 6f6625ec..549c321a 100755
--- a/ui/scripts/refresh-mocks.js
+++ b/ui/scripts/refresh-mocks.js
@@ -6,12 +6,36 @@ const {
validateConfigMock,
validatePermissionsMock,
validateCallsMock,
+ validateHealthMock,
+ validateUsersMock,
+ validateDashboardMock,
+ validateConfiguracionMock,
+ validateConfigurationMock,
+ validatePresupuestosMock,
+ validatePoliticasMock,
+ validateExcepcionesMock,
+ validateReportesMock,
+ validateNotificationsMock,
+ validateETLMock,
+ validateDORAMock,
} = require('../src/mocks/schemas');
const MOCKS = [
{ key: 'config', file: 'config.json', validator: validateConfigMock },
{ key: 'permissions', file: 'permissions.json', validator: validatePermissionsMock },
{ key: 'calls', file: 'llamadas.json', validator: validateCallsMock },
+ { key: 'health', file: 'health.json', validator: validateHealthMock },
+ { key: 'users', file: 'usuarios.json', validator: validateUsersMock },
+ { key: 'dashboard', file: 'dashboard.json', validator: validateDashboardMock },
+ { key: 'configuracion', file: 'configuracion.json', validator: validateConfiguracionMock },
+ { key: 'configuration', file: 'configuration.json', validator: validateConfigurationMock },
+ { key: 'presupuestos', file: 'presupuestos.json', validator: validatePresupuestosMock },
+ { key: 'politicas', file: 'politicas.json', validator: validatePoliticasMock },
+ { key: 'excepciones', file: 'excepciones.json', validator: validateExcepcionesMock },
+ { key: 'reportes', file: 'reportes.json', validator: validateReportesMock },
+ { key: 'notifications', file: 'notifications.json', validator: validateNotificationsMock },
+ { key: 'etl', file: 'etl.json', validator: validateETLMock },
+ { key: 'dora', file: 'dora.json', validator: validateDORAMock },
];
const mocksDir = path.resolve(__dirname, '../src/mocks');
@@ -37,3 +61,4 @@ const snapshotPath = path.join(outputDir, `snapshot-${Date.now()}.json`);
fs.writeFileSync(snapshotPath, JSON.stringify(snapshot, null, 2));
console.log(`Mock snapshot generated at ${snapshotPath}`);
+console.log(`Validated ${MOCKS.length} mocks successfully`);
diff --git a/ui/scripts/setup-services.js b/ui/scripts/setup-services.js
new file mode 100644
index 00000000..c92f0fd4
--- /dev/null
+++ b/ui/scripts/setup-services.js
@@ -0,0 +1,280 @@
+#!/usr/bin/env node
+/**
+ * Script to setup all missing UI services for backend endpoints
+ * Creates directory structure and all service implementation files
+ *
+ * Usage: node scripts/setup-services.js
+ */
+
+const fs = require('fs');
+const path = require('path');
+
+const servicesDir = path.resolve(__dirname, '../src/services');
+
+// Service definitions with their configurations
+const serviceConfigs = {
+ users: {
+ endpoint: '/api/v1/usuarios/',
+ mockKey: 'users',
+ mockFile: 'usuarios.json',
+ validateFields: ['usuarios', 'grupos'],
+ className: 'UsersService',
+ method: 'getUsers',
+ },
+ dashboard: {
+ endpoint: '/api/v1/dashboard/',
+ mockKey: 'dashboard',
+ mockFile: 'dashboard.json',
+ validateFields: ['overview', 'widgets'],
+ className: 'DashboardService',
+ method: 'getDashboard',
+ },
+ configuracion: {
+ endpoint: '/api/v1/configuracion/',
+ mockKey: 'configuracion',
+ mockFile: 'configuracion.json',
+ validateFields: ['parametros'],
+ className: 'ConfiguracionService',
+ method: 'getConfiguracion',
+ },
+ configuration: {
+ endpoint: '/api/v1/configuracion/',
+ mockKey: 'configuration',
+ mockFile: 'configuration.json',
+ validateFields: ['settings'],
+ className: 'ConfigurationService',
+ method: 'getConfiguration',
+ },
+ presupuestos: {
+ endpoint: '/api/v1/presupuestos/',
+ mockKey: 'presupuestos',
+ mockFile: 'presupuestos.json',
+ validateFields: ['presupuestos'],
+ className: 'PresupuestosService',
+ method: 'getPresupuestos',
+ },
+ politicas: {
+ endpoint: '/api/v1/politicas/',
+ mockKey: 'politicas',
+ mockFile: 'politicas.json',
+ validateFields: ['politicas'],
+ className: 'PoliticasService',
+ method: 'getPoliticas',
+ },
+ excepciones: {
+ endpoint: '/api/v1/excepciones/',
+ mockKey: 'excepciones',
+ mockFile: 'excepciones.json',
+ validateFields: ['excepciones'],
+ className: 'ExcepcionesService',
+ method: 'getExcepciones',
+ },
+ reportes: {
+ endpoint: '/api/v1/reportes/',
+ mockKey: 'reportes',
+ mockFile: 'reportes.json',
+ validateFields: ['reportes'],
+ className: 'ReportesService',
+ method: 'getReportes',
+ },
+ notifications: {
+ endpoint: '/api/v1/notifications/messages/',
+ mockKey: 'notifications',
+ mockFile: 'notifications.json',
+ validateFields: ['messages'],
+ className: 'NotificationsService',
+ method: 'getNotifications',
+ },
+ etl: {
+ endpoint: '/api/v1/etl/jobs/',
+ mockKey: 'etl',
+ mockFile: 'etl.json',
+ validateFields: ['jobs'],
+ className: 'ETLService',
+ method: 'getJobs',
+ },
+ dora: {
+ endpoint: '/api/dora/',
+ mockKey: 'dora',
+ mockFile: 'dora.json',
+ validateFields: ['metrics'],
+ className: 'DORAMetricsService',
+ method: 'getMetrics',
+ },
+};
+
+// Generate service implementation
+const generateServiceImplementation = (config) => {
+ const validationLogic = config.validateFields.map(field => `Array.isArray(payload.${field}) || payload.${field}`).join(' || ');
+
+ return `import { createResilientService } from '@services/createResilientService';
+import { loadMock } from '@mocks/registry';
+import { shouldUseMockForDomain } from '@services/flags/backendIntegrity';
+
+const ENDPOINT = '${config.endpoint}';
+const { data: mock } = loadMock('${config.mockKey}');
+
+const isPayloadValid = (payload) => {
+ if (!payload) {
+ return false;
+ }
+ return ${validationLogic};
+};
+
+const baseService = createResilientService({
+ id: '${config.mockKey}',
+ endpoint: ENDPOINT,
+ mockDataLoader: () => Promise.resolve(mock),
+ shouldUseMock: () => shouldUseMockForDomain('${config.mockKey}'),
+ errorMessage: 'No fue posible obtener los datos del backend',
+ isPayloadValid,
+});
+
+export class ${config.className} {
+ static async ${config.method}(options = {}) {
+ return baseService.fetch(options);
+ }
+
+ static async fetchFromApi(options = {}) {
+ return baseService.fetchFromApi(options);
+ }
+
+ static async fetchFromMock() {
+ return baseService.fetchFromMock();
+ }
+
+ static shouldUseMock() {
+ return shouldUseMockForDomain('${config.mockKey}');
+ }
+}
+`;
+};
+
+// Generate test file
+const generateTestFile = (config) => {
+ return `import mock from '@mocks/${config.mockFile}';
+import { ${config.className} } from './${config.className}';
+import { resetMockUsageMetrics, getMockUsageMetrics } from '@services/utils/mockUsageTracker';
+
+describe('${config.className}', () => {
+ let originalEnv;
+
+ beforeEach(() => {
+ originalEnv = { ...process.env };
+ resetMockUsageMetrics();
+ });
+
+ afterEach(() => {
+ process.env = { ...originalEnv };
+ });
+
+ it('returns mock data when the feature flag forces mock usage', async () => {
+ process.env.UI_BACKEND_${config.mockKey.toUpperCase()}_SOURCE = 'mock';
+ const fetchImpl = jest.fn();
+
+ const result = await ${config.className}.${config.method}({ fetchImpl });
+
+ expect(fetchImpl).not.toHaveBeenCalled();
+ expect(result.source).toBe('mock');
+ expect(result.data).toEqual(mock);
+ expect(result.metadata).toEqual({ domain: '${config.mockKey}' });
+ expect(getMockUsageMetrics()).toEqual({ ${config.mockKey}: { api: 0, mock: 1 } });
+ });
+
+ it('returns API data when the endpoint succeeds', async () => {
+ process.env.UI_BACKEND_${config.mockKey.toUpperCase()}_SOURCE = 'api';
+ const apiPayload = mock;
+ const fetchImpl = jest.fn().mockResolvedValue({
+ ok: true,
+ json: () => Promise.resolve(apiPayload),
+ });
+
+ const result = await ${config.className}.${config.method}({ fetchImpl });
+
+ expect(fetchImpl).toHaveBeenCalledWith('${config.endpoint}', { signal: undefined });
+ expect(result.source).toBe('api');
+ expect(result.data).toEqual(apiPayload);
+ expect(getMockUsageMetrics()).toEqual({ ${config.mockKey}: { api: 1, mock: 0 } });
+ });
+
+ it('falls back to mocks when API fails', async () => {
+ process.env.UI_BACKEND_${config.mockKey.toUpperCase()}_SOURCE = 'api';
+ const fetchImpl = jest.fn().mockResolvedValue({ ok: false, status: 503 });
+
+ const result = await ${config.className}.${config.method}({ fetchImpl });
+
+ expect(result.source).toBe('mock');
+ expect(result.data).toEqual(mock);
+ });
+
+ it('validates payload structure', async () => {
+ process.env.UI_BACKEND_${config.mockKey.toUpperCase()}_SOURCE = 'api';
+ const validPayload = mock;
+ const fetchImpl = jest.fn().mockResolvedValue({
+ ok: true,
+ json: () => Promise.resolve(validPayload),
+ });
+
+ const result = await ${config.className}.${config.method}({ fetchImpl });
+
+ expect(result.source).toBe('api');
+ });
+
+ it('provides fetchFromApi method', async () => {
+ const apiPayload = mock;
+ const fetchImpl = jest.fn().mockResolvedValue({
+ ok: true,
+ json: () => Promise.resolve(apiPayload),
+ });
+
+ const result = await ${config.className}.fetchFromApi({ fetchImpl });
+
+ expect(result).toEqual(apiPayload);
+ });
+
+ it('provides fetchFromMock method', async () => {
+ const result = await ${config.className}.fetchFromMock();
+
+ expect(result).toEqual(mock);
+ });
+
+ it('provides shouldUseMock method', () => {
+ process.env.UI_BACKEND_${config.mockKey.toUpperCase()}_SOURCE = 'mock';
+ expect(${config.className}.shouldUseMock()).toBe(true);
+
+ process.env.UI_BACKEND_${config.mockKey.toUpperCase()}_SOURCE = 'api';
+ expect(${config.className}.shouldUseMock()).toBe(false);
+ });
+});
+`;
+};
+
+// Create all services
+Object.entries(serviceConfigs).forEach(([serviceName, config]) => {
+ const serviceDir = path.join(servicesDir, serviceName);
+
+ // Create directory
+ if (!fs.existsSync(serviceDir)) {
+ fs.mkdirSync(serviceDir, { recursive: true });
+ console.log(`Created directory: ${serviceDir}`);
+ }
+
+ // Create service file
+ const servicePath = path.join(serviceDir, `${config.className}.js`);
+ const serviceContent = generateServiceImplementation(config);
+ fs.writeFileSync(servicePath, serviceContent);
+ console.log(`Created service: ${servicePath}`);
+
+ // Create test file
+ const testPath = path.join(serviceDir, `${config.className}.test.js`);
+ const testContent = generateTestFile(config);
+ fs.writeFileSync(testPath, testContent);
+ console.log(`Created test: ${testPath}`);
+});
+
+console.log('\nAll services created successfully!');
+console.log('\nNext steps:');
+console.log('1. Update ui/src/mocks/registry.js to register new mocks');
+console.log('2. Update ui/src/mocks/schemas.js to add validators');
+console.log('3. Update ui/src/mocks/metadata.js to add metadata');
+console.log('4. Run: npm test to verify all services work correctly');
diff --git a/ui/src/app/App.jsx b/ui/src/app/App.jsx
index 18aebc74..45b8069d 100644
--- a/ui/src/app/App.jsx
+++ b/ui/src/app/App.jsx
@@ -2,13 +2,19 @@ import { useEffect } from 'react';
import MainLayout from '@components/MainLayout';
import HomePage from '../pages/HomePage';
import { useAppConfig } from '@hooks/useAppConfig';
+import { useHealthStatus } from '@hooks/useHealthStatus';
+import { useMockMetrics } from '@hooks/useMockMetrics';
function App() {
const { isLoading, config, loadConfig, source, error } = useAppConfig();
+ const { status: healthStatus, source: healthSource, lastChecked, error: healthError, checkHealth } =
+ useHealthStatus();
+ const { summary: mockSummary } = useMockMetrics();
useEffect(() => {
loadConfig();
- }, [loadConfig]);
+ checkHealth();
+ }, [loadConfig, checkHealth]);
if (isLoading) {
return (
@@ -25,8 +31,15 @@ function App() {
details: error ?? undefined,
};
+ const backendStatus = {
+ status: healthStatus,
+ source: healthSource,
+ lastChecked,
+ error: healthError,
+ };
+
return (
-
+
);
diff --git a/ui/src/app/App.test.jsx b/ui/src/app/App.test.jsx
index 33713765..b9fef5bc 100644
--- a/ui/src/app/App.test.jsx
+++ b/ui/src/app/App.test.jsx
@@ -3,10 +3,12 @@ import { Provider } from 'react-redux';
import { configureStore } from '@reduxjs/toolkit';
import appConfigReducer from '@state/slices/appConfigSlice';
import homeReducer from '@modules/home/state/homeSlice';
+import healthReducer from '@state/slices/healthSlice';
import App from './App';
import { setConfig } from '@state/slices/appConfigSlice';
import { AppConfigService } from '@services/config/AppConfigService';
import { CallsService } from '@services/calls/CallsService';
+import { HealthService } from '@services/health/HealthService';
jest.mock('@services/config/AppConfigService', () => ({
AppConfigService: {
@@ -20,11 +22,18 @@ jest.mock('@services/calls/CallsService', () => ({
},
}));
+jest.mock('@services/health/HealthService', () => ({
+ HealthService: {
+ getStatus: jest.fn(),
+ },
+}));
+
const createTestStore = () => {
return configureStore({
reducer: {
appConfig: appConfigReducer,
home: homeReducer,
+ observability: healthReducer,
},
});
};
@@ -41,6 +50,11 @@ describe('App', () => {
source: 'api',
error: null,
});
+ HealthService.getStatus.mockResolvedValue({
+ data: { status: 'ok', checkedAt: '2025-11-14T00:00:00Z' },
+ source: 'api',
+ error: null,
+ });
});
afterEach(() => {
@@ -77,6 +91,19 @@ describe('App', () => {
expect(await screen.findByRole('main')).toBeInTheDocument();
});
+ it('renders backend health status information', async () => {
+ const store = createTestStore();
+
+ render(
+
+
+
+ );
+
+ expect(await screen.findByText(/estado backend/i)).toBeInTheDocument();
+ expect(screen.getByText(/ok/i)).toBeInTheDocument();
+ });
+
it('shows mock data banner when using mocks', async () => {
const store = createTestStore();
store.dispatch(
diff --git a/ui/src/components/BackendStatusPanel.jsx b/ui/src/components/BackendStatusPanel.jsx
new file mode 100644
index 00000000..a9178921
--- /dev/null
+++ b/ui/src/components/BackendStatusPanel.jsx
@@ -0,0 +1,43 @@
+const BackendStatusPanel = ({ health, mockSummary }) => {
+ const healthStatus = health?.status ?? 'unknown';
+ const lastChecked = health?.lastChecked;
+ const source = health?.source ?? 'unknown';
+ const error = health?.error;
+
+ const mockCount = mockSummary?.domainsUsingMock ?? 0;
+ const totalDomains = mockSummary?.totalDomains ?? 0;
+
+ return (
+
+
+ Estado backend: {healthStatus}
+ {source && Fuente: {source}}
+ {lastChecked && Última verificación: {lastChecked}}
+
+ {error && (
+
+ {error}
+
+ )}
+
+ Dependencia de mocks: {mockCount} de {totalDomains} dominios usan mocks
+
+
+ );
+};
+
+BackendStatusPanel.defaultProps = {
+ health: {
+ status: 'unknown',
+ lastChecked: null,
+ source: 'unknown',
+ error: null,
+ },
+ mockSummary: {
+ domainsUsingMock: 0,
+ totalDomains: 0,
+ metrics: {},
+ },
+};
+
+export default BackendStatusPanel;
diff --git a/ui/src/components/BackendStatusPanel.test.jsx b/ui/src/components/BackendStatusPanel.test.jsx
new file mode 100644
index 00000000..b5e24fcd
--- /dev/null
+++ b/ui/src/components/BackendStatusPanel.test.jsx
@@ -0,0 +1,40 @@
+import { render, screen } from '@testing-library/react';
+import BackendStatusPanel from './BackendStatusPanel';
+
+const baseProps = {
+ health: {
+ status: 'ok',
+ lastChecked: '2025-11-14T12:00:00Z',
+ source: 'api',
+ error: null,
+ },
+ mockSummary: {
+ totalDomains: 3,
+ domainsUsingMock: 1,
+ metrics: {
+ calls: { api: 1, mock: 0 },
+ config: { api: 0, mock: 2 },
+ },
+ },
+};
+
+describe('BackendStatusPanel', () => {
+ it('renders health status and mock dependency summary', () => {
+ render();
+
+ expect(screen.getByText(/estado backend/i)).toBeInTheDocument();
+ expect(screen.getByText(/ok/i)).toBeInTheDocument();
+ expect(screen.getByText(/1 de 3 dominios usan mocks/i)).toBeInTheDocument();
+ });
+
+ it('shows error message when available', () => {
+ render(
+
+ );
+
+ expect(screen.getByText(/fallo api/i)).toBeInTheDocument();
+ });
+});
diff --git a/ui/src/components/MainLayout.jsx b/ui/src/components/MainLayout.jsx
index f889fe22..55f2fe13 100644
--- a/ui/src/components/MainLayout.jsx
+++ b/ui/src/components/MainLayout.jsx
@@ -1,8 +1,9 @@
import { memo, useEffect, useState } from 'react';
import MockDataNotice from './MockDataNotice';
import { PermissionsService } from '@services/permissions/PermissionsService';
+import BackendStatusPanel from './BackendStatusPanel';
-const MainLayout = memo(({ children, mockNotice }) => {
+const MainLayout = memo(({ children, mockNotice, backendStatus, mockSummary }) => {
const [menuEntries, setMenuEntries] = useState([]);
const [menuError, setMenuError] = useState(null);
@@ -61,6 +62,7 @@ const MainLayout = memo(({ children, mockNotice }) => {
+
{children}
@@ -76,6 +78,8 @@ MainLayout.displayName = 'MainLayout';
MainLayout.defaultProps = {
mockNotice: { isVisible: false },
+ backendStatus: { status: 'unknown', source: 'unknown' },
+ mockSummary: { domainsUsingMock: 0, totalDomains: 0, metrics: {} },
};
export default MainLayout;
diff --git a/ui/src/hooks/useHealthStatus.js b/ui/src/hooks/useHealthStatus.js
new file mode 100644
index 00000000..323cdbab
--- /dev/null
+++ b/ui/src/hooks/useHealthStatus.js
@@ -0,0 +1,48 @@
+import { useCallback } from 'react';
+import { useDispatch, useSelector } from 'react-redux';
+import {
+ selectHealthStatus,
+ selectHealthSource,
+ selectLastChecked,
+ selectHealthError,
+ selectIsCheckingHealth,
+ setChecking,
+ setResult,
+ setError,
+} from '@state/slices/healthSlice';
+import { HealthService } from '@services/health/HealthService';
+
+export const useHealthStatus = () => {
+ const dispatch = useDispatch();
+ const status = useSelector(selectHealthStatus);
+ const source = useSelector(selectHealthSource);
+ const lastChecked = useSelector(selectLastChecked);
+ const error = useSelector(selectHealthError);
+ const isChecking = useSelector(selectIsCheckingHealth);
+
+ const checkHealth = useCallback(async () => {
+ try {
+ dispatch(setChecking(true));
+ const result = await HealthService.getStatus();
+ dispatch(
+ setResult({
+ status: result?.data?.status ?? 'unknown',
+ checkedAt: result?.data?.checkedAt ?? null,
+ source: result?.source ?? 'unknown',
+ errorMessage: result?.error ? result.error.message : null,
+ })
+ );
+ } catch (err) {
+ dispatch(setError(err.message));
+ }
+ }, [dispatch]);
+
+ return {
+ status,
+ source,
+ lastChecked,
+ error,
+ isChecking,
+ checkHealth,
+ };
+};
diff --git a/ui/src/hooks/useHealthStatus.test.js b/ui/src/hooks/useHealthStatus.test.js
new file mode 100644
index 00000000..6e162c41
--- /dev/null
+++ b/ui/src/hooks/useHealthStatus.test.js
@@ -0,0 +1,56 @@
+import { renderHook, act } from '@testing-library/react';
+import { Provider } from 'react-redux';
+import { configureStore } from '@reduxjs/toolkit';
+import healthReducer, { selectHealthStatus, selectHealthError } from '@state/slices/healthSlice';
+import { useHealthStatus } from './useHealthStatus';
+import { HealthService } from '@services/health/HealthService';
+
+jest.mock('@services/health/HealthService', () => ({
+ HealthService: {
+ getStatus: jest.fn(),
+ },
+}));
+
+const createStore = () =>
+ configureStore({
+ reducer: {
+ observability: healthReducer,
+ },
+ });
+
+const wrapperFactory = (store) => ({ children }) => {children};
+
+describe('useHealthStatus', () => {
+ it('loads health status into the store', async () => {
+ const store = createStore();
+ HealthService.getStatus.mockResolvedValue({
+ data: { status: 'ok', checkedAt: '2025-11-14T12:00:00Z' },
+ source: 'api',
+ error: null,
+ });
+
+ const { result } = renderHook(() => useHealthStatus(), { wrapper: wrapperFactory(store) });
+
+ await act(async () => {
+ await result.current.checkHealth();
+ });
+
+ expect(selectHealthStatus(store.getState())).toBe('ok');
+ expect(result.current.source).toBe('api');
+ expect(result.current.lastChecked).toBe('2025-11-14T12:00:00Z');
+ });
+
+ it('stores error when the service throws', async () => {
+ const store = createStore();
+ HealthService.getStatus.mockRejectedValue(new Error('backend caido'));
+
+ const { result } = renderHook(() => useHealthStatus(), { wrapper: wrapperFactory(store) });
+
+ await act(async () => {
+ await result.current.checkHealth();
+ });
+
+ expect(selectHealthError(store.getState())).toBe('backend caido');
+ expect(result.current.status).toBe('unknown');
+ });
+});
diff --git a/ui/src/hooks/useMockMetrics.js b/ui/src/hooks/useMockMetrics.js
new file mode 100644
index 00000000..3827f0e0
--- /dev/null
+++ b/ui/src/hooks/useMockMetrics.js
@@ -0,0 +1,21 @@
+import { useEffect, useState } from 'react';
+import { getMockUsageMetrics, subscribeMockUsage } from '@services/utils/mockUsageTracker';
+
+const buildSummary = (metrics) => {
+ const entries = Object.values(metrics || {});
+ const totalDomains = Object.keys(metrics || {}).length;
+ const domainsUsingMock = entries.filter((entry) => entry.mock > 0).length;
+
+ return { totalDomains, domainsUsingMock };
+};
+
+export const useMockMetrics = () => {
+ const [metrics, setMetrics] = useState(getMockUsageMetrics());
+
+ useEffect(() => {
+ const unsubscribe = subscribeMockUsage(setMetrics);
+ return unsubscribe;
+ }, []);
+
+ return { metrics, summary: buildSummary(metrics) };
+};
diff --git a/ui/src/hooks/useMockMetrics.test.js b/ui/src/hooks/useMockMetrics.test.js
new file mode 100644
index 00000000..67a970de
--- /dev/null
+++ b/ui/src/hooks/useMockMetrics.test.js
@@ -0,0 +1,24 @@
+import { renderHook, act } from '@testing-library/react';
+import { useMockMetrics } from './useMockMetrics';
+import { recordMockUsage, resetMockUsageMetrics } from '@services/utils/mockUsageTracker';
+
+describe('useMockMetrics', () => {
+ beforeEach(() => {
+ resetMockUsageMetrics();
+ });
+
+ it('returns live mock dependency summary', () => {
+ const { result } = renderHook(() => useMockMetrics());
+
+ expect(result.current.summary.totalDomains).toBe(0);
+
+ act(() => {
+ recordMockUsage('calls', 'mock');
+ recordMockUsage('config', 'api');
+ });
+
+ expect(result.current.summary.totalDomains).toBe(2);
+ expect(result.current.summary.domainsUsingMock).toBe(1);
+ expect(result.current.metrics.calls).toEqual({ api: 0, mock: 1 });
+ });
+});
diff --git a/ui/src/mocks/configuracion.json b/ui/src/mocks/configuracion.json
new file mode 100644
index 00000000..30de3281
--- /dev/null
+++ b/ui/src/mocks/configuracion.json
@@ -0,0 +1,60 @@
+{
+ "parametros": [
+ {
+ "id": 1,
+ "clave": "MAX_CALL_DURATION",
+ "valor": "3600",
+ "tipo": "integer",
+ "descripcion": "Duración máxima de llamada en segundos",
+ "categoria": "llamadas",
+ "modificable": true,
+ "fecha_modificacion": "2025-11-01T10:00:00Z",
+ "modificado_por": "admin"
+ },
+ {
+ "id": 2,
+ "clave": "AUTO_DISCONNECT_TIMEOUT",
+ "valor": "30",
+ "tipo": "integer",
+ "descripcion": "Tiempo de desconexión automática en segundos",
+ "categoria": "llamadas",
+ "modificable": true,
+ "fecha_modificacion": "2025-10-15T14:30:00Z",
+ "modificado_por": "admin"
+ },
+ {
+ "id": 3,
+ "clave": "RECORDING_ENABLED",
+ "valor": "true",
+ "tipo": "boolean",
+ "descripcion": "Habilitar grabación de llamadas",
+ "categoria": "seguridad",
+ "modificable": true,
+ "fecha_modificacion": "2025-09-20T09:00:00Z",
+ "modificado_por": "admin"
+ },
+ {
+ "id": 4,
+ "clave": "MAX_CONCURRENT_CALLS",
+ "valor": "50",
+ "tipo": "integer",
+ "descripcion": "Número máximo de llamadas concurrentes",
+ "categoria": "capacidad",
+ "modificable": false,
+ "fecha_modificacion": "2025-08-01T00:00:00Z",
+ "modificado_por": "system"
+ }
+ ],
+ "categorias": ["llamadas", "seguridad", "capacidad", "reportes"],
+ "historial": [
+ {
+ "id": 1,
+ "parametro_id": 1,
+ "valor_anterior": "3000",
+ "valor_nuevo": "3600",
+ "fecha": "2025-11-01T10:00:00Z",
+ "usuario": "admin",
+ "razon": "Ajuste por política interna"
+ }
+ ]
+}
diff --git a/ui/src/mocks/configuration.json b/ui/src/mocks/configuration.json
new file mode 100644
index 00000000..4b90cfb4
--- /dev/null
+++ b/ui/src/mocks/configuration.json
@@ -0,0 +1,49 @@
+{
+ "settings": [
+ {
+ "id": 1,
+ "key": "system.timezone",
+ "value": "America/Mexico_City",
+ "type": "string",
+ "description": "Zona horaria del sistema",
+ "editable": true,
+ "updated_at": "2025-11-01T10:00:00Z",
+ "updated_by": "admin"
+ },
+ {
+ "id": 2,
+ "key": "ui.theme",
+ "value": "light",
+ "type": "string",
+ "description": "Tema de la interfaz de usuario",
+ "editable": true,
+ "updated_at": "2025-10-20T15:30:00Z",
+ "updated_by": "admin"
+ },
+ {
+ "id": 3,
+ "key": "notifications.email.enabled",
+ "value": "true",
+ "type": "boolean",
+ "description": "Habilitar notificaciones por correo",
+ "editable": true,
+ "updated_at": "2025-09-15T08:00:00Z",
+ "updated_by": "admin"
+ },
+ {
+ "id": 4,
+ "key": "api.rate_limit",
+ "value": "100",
+ "type": "integer",
+ "description": "Límite de peticiones por minuto",
+ "editable": false,
+ "updated_at": "2025-01-01T00:00:00Z",
+ "updated_by": "system"
+ }
+ ],
+ "backup_info": {
+ "last_backup": "2025-11-17T02:00:00Z",
+ "backup_count": 30,
+ "next_backup": "2025-11-19T02:00:00Z"
+ }
+}
diff --git a/ui/src/mocks/dashboard.json b/ui/src/mocks/dashboard.json
new file mode 100644
index 00000000..c0a3fca4
--- /dev/null
+++ b/ui/src/mocks/dashboard.json
@@ -0,0 +1,55 @@
+{
+ "overview": {
+ "total_calls": 1250,
+ "calls_today": 85,
+ "active_agents": 12,
+ "avg_call_duration": 325,
+ "customer_satisfaction": 4.2
+ },
+ "widgets": [
+ {
+ "id": 1,
+ "type": "chart",
+ "title": "Llamadas por Hora",
+ "position": { "x": 0, "y": 0, "w": 6, "h": 3 },
+ "config": {
+ "chartType": "line",
+ "dataSource": "calls_per_hour"
+ }
+ },
+ {
+ "id": 2,
+ "type": "metric",
+ "title": "Tiempo Promedio de Espera",
+ "position": { "x": 6, "y": 0, "w": 3, "h": 2 },
+ "config": {
+ "value": 45,
+ "unit": "segundos",
+ "trend": "down"
+ }
+ },
+ {
+ "id": 3,
+ "type": "list",
+ "title": "Agentes Disponibles",
+ "position": { "x": 9, "y": 0, "w": 3, "h": 4 },
+ "config": {
+ "items": ["maria.garcia", "carlos.rodriguez", "juan.perez"]
+ }
+ }
+ ],
+ "recent_activity": [
+ {
+ "id": 1,
+ "timestamp": "2025-11-18T12:45:00Z",
+ "type": "call_completed",
+ "description": "Llamada completada por maria.garcia"
+ },
+ {
+ "id": 2,
+ "timestamp": "2025-11-18T12:40:00Z",
+ "type": "agent_login",
+ "description": "carlos.rodriguez inició sesión"
+ }
+ ]
+}
diff --git a/ui/src/mocks/dora.json b/ui/src/mocks/dora.json
new file mode 100644
index 00000000..eded4c09
--- /dev/null
+++ b/ui/src/mocks/dora.json
@@ -0,0 +1,129 @@
+{
+ "metrics": {
+ "deployment_frequency": {
+ "value": 8.5,
+ "unit": "deploys_per_week",
+ "trend": "up",
+ "change_percent": 12.5,
+ "level": "high",
+ "description": "Frecuencia de despliegues a producción",
+ "last_updated": "2025-11-18T12:00:00Z"
+ },
+ "lead_time_for_changes": {
+ "value": 4.2,
+ "unit": "hours",
+ "trend": "down",
+ "change_percent": -8.3,
+ "level": "elite",
+ "description": "Tiempo desde commit hasta producción",
+ "last_updated": "2025-11-18T12:00:00Z"
+ },
+ "time_to_restore_service": {
+ "value": 35,
+ "unit": "minutes",
+ "trend": "down",
+ "change_percent": -15.2,
+ "level": "high",
+ "description": "Tiempo de restauración de servicio",
+ "last_updated": "2025-11-18T12:00:00Z"
+ },
+ "change_failure_rate": {
+ "value": 8.5,
+ "unit": "percent",
+ "trend": "stable",
+ "change_percent": 0.2,
+ "level": "high",
+ "description": "Tasa de fallos en cambios",
+ "last_updated": "2025-11-18T12:00:00Z"
+ }
+ },
+ "historical_data": [
+ {
+ "date": "2025-11-11",
+ "deployment_frequency": 7.5,
+ "lead_time_for_changes": 4.8,
+ "time_to_restore_service": 42,
+ "change_failure_rate": 9.1
+ },
+ {
+ "date": "2025-11-04",
+ "deployment_frequency": 7.8,
+ "lead_time_for_changes": 5.2,
+ "time_to_restore_service": 38,
+ "change_failure_rate": 8.7
+ },
+ {
+ "date": "2025-10-28",
+ "deployment_frequency": 7.2,
+ "lead_time_for_changes": 4.5,
+ "time_to_restore_service": 40,
+ "change_failure_rate": 8.3
+ }
+ ],
+ "recent_deployments": [
+ {
+ "id": 1,
+ "version": "v1.15.3",
+ "environment": "production",
+ "deployed_at": "2025-11-18T10:30:00Z",
+ "deployed_by": "ci-cd-pipeline",
+ "status": "success",
+ "duration_minutes": 8,
+ "changes_count": 12
+ },
+ {
+ "id": 2,
+ "version": "v1.15.2",
+ "environment": "production",
+ "deployed_at": "2025-11-17T15:45:00Z",
+ "deployed_by": "ci-cd-pipeline",
+ "status": "success",
+ "duration_minutes": 7,
+ "changes_count": 5
+ },
+ {
+ "id": 3,
+ "version": "v1.15.1",
+ "environment": "production",
+ "deployed_at": "2025-11-16T09:20:00Z",
+ "deployed_by": "ci-cd-pipeline",
+ "status": "failed",
+ "duration_minutes": 3,
+ "changes_count": 8,
+ "rollback_duration_minutes": 5
+ }
+ ],
+ "performance_levels": {
+ "deployment_frequency": {
+ "elite": ">= 7 per week",
+ "high": "1-7 per week",
+ "medium": "1-4 per month",
+ "low": "< 1 per month"
+ },
+ "lead_time": {
+ "elite": "< 1 hour",
+ "high": "1-24 hours",
+ "medium": "1-7 days",
+ "low": "> 7 days"
+ },
+ "mttr": {
+ "elite": "< 1 hour",
+ "high": "1-24 hours",
+ "medium": "1-7 days",
+ "low": "> 7 days"
+ },
+ "change_failure_rate": {
+ "elite": "< 5%",
+ "high": "5-15%",
+ "medium": "15-30%",
+ "low": "> 30%"
+ }
+ },
+ "team_summary": {
+ "total_developers": 8,
+ "active_repositories": 5,
+ "total_commits_last_week": 127,
+ "pull_requests_merged": 23,
+ "code_review_time_avg_hours": 2.5
+ }
+}
diff --git a/ui/src/mocks/etl.json b/ui/src/mocks/etl.json
new file mode 100644
index 00000000..ed9994b1
--- /dev/null
+++ b/ui/src/mocks/etl.json
@@ -0,0 +1,146 @@
+{
+ "jobs": [
+ {
+ "id": 1,
+ "name": "sync_calls_daily",
+ "description": "Sincronización diaria de llamadas",
+ "type": "import",
+ "source": "call_center_db",
+ "target": "analytics_db",
+ "status": "success",
+ "started_at": "2025-11-18T02:00:00Z",
+ "completed_at": "2025-11-18T02:15:30Z",
+ "duration_seconds": 930,
+ "records_processed": 1250,
+ "records_success": 1248,
+ "records_failed": 2,
+ "last_run": "2025-11-18T02:00:00Z"
+ },
+ {
+ "id": 2,
+ "name": "sync_users_hourly",
+ "description": "Sincronización horaria de usuarios",
+ "type": "import",
+ "source": "user_management_db",
+ "target": "analytics_db",
+ "status": "success",
+ "started_at": "2025-11-18T11:00:00Z",
+ "completed_at": "2025-11-18T11:02:15Z",
+ "duration_seconds": 135,
+ "records_processed": 45,
+ "records_success": 45,
+ "records_failed": 0,
+ "last_run": "2025-11-18T11:00:00Z"
+ },
+ {
+ "id": 3,
+ "name": "export_reports_weekly",
+ "description": "Exportación semanal de reportes",
+ "type": "export",
+ "source": "analytics_db",
+ "target": "data_warehouse",
+ "status": "running",
+ "started_at": "2025-11-18T12:00:00Z",
+ "completed_at": null,
+ "duration_seconds": null,
+ "records_processed": 520,
+ "records_success": 520,
+ "records_failed": 0,
+ "last_run": "2025-11-11T12:00:00Z"
+ },
+ {
+ "id": 4,
+ "name": "sync_policies_daily",
+ "description": "Sincronización diaria de políticas",
+ "type": "import",
+ "source": "compliance_db",
+ "target": "analytics_db",
+ "status": "failed",
+ "started_at": "2025-11-18T03:00:00Z",
+ "completed_at": "2025-11-18T03:05:45Z",
+ "duration_seconds": 345,
+ "records_processed": 150,
+ "records_success": 100,
+ "records_failed": 50,
+ "last_run": "2025-11-18T03:00:00Z"
+ },
+ {
+ "id": 5,
+ "name": "cleanup_old_logs",
+ "description": "Limpieza de logs antiguos",
+ "type": "maintenance",
+ "source": "all_dbs",
+ "target": "archive",
+ "status": "success",
+ "started_at": "2025-11-18T01:00:00Z",
+ "completed_at": "2025-11-18T01:30:00Z",
+ "duration_seconds": 1800,
+ "records_processed": 50000,
+ "records_success": 50000,
+ "records_failed": 0,
+ "last_run": "2025-11-18T01:00:00Z"
+ }
+ ],
+ "errors": [
+ {
+ "id": 1,
+ "job_id": 4,
+ "job_name": "sync_policies_daily",
+ "error_type": "connection_error",
+ "severity": "high",
+ "message": "Connection timeout to compliance_db",
+ "details": "Database server at compliance_db:5432 did not respond within 30 seconds",
+ "occurred_at": "2025-11-18T03:02:30Z",
+ "retry_count": 3,
+ "resolved": false
+ },
+ {
+ "id": 2,
+ "job_id": 4,
+ "job_name": "sync_policies_daily",
+ "error_type": "data_validation_error",
+ "severity": "medium",
+ "message": "Invalid policy format",
+ "details": "50 records failed schema validation: missing required field 'vigencia_inicio'",
+ "occurred_at": "2025-11-18T03:04:15Z",
+ "retry_count": 0,
+ "resolved": false
+ },
+ {
+ "id": 3,
+ "job_id": 1,
+ "job_name": "sync_calls_daily",
+ "error_type": "duplicate_key_error",
+ "severity": "low",
+ "message": "Duplicate record detected",
+ "details": "2 records with duplicate call_id were skipped",
+ "occurred_at": "2025-11-18T02:12:00Z",
+ "retry_count": 0,
+ "resolved": true
+ },
+ {
+ "id": 4,
+ "job_id": 2,
+ "job_name": "sync_users_hourly",
+ "error_type": "permission_error",
+ "severity": "critical",
+ "message": "Access denied to source database",
+ "details": "Credentials for user 'etl_service' have expired",
+ "occurred_at": "2025-11-17T22:00:00Z",
+ "retry_count": 5,
+ "resolved": true
+ }
+ ],
+ "stats": {
+ "total_jobs": 5,
+ "jobs_running": 1,
+ "jobs_success": 3,
+ "jobs_failed": 1,
+ "total_errors": 4,
+ "errors_unresolved": 2,
+ "last_update": "2025-11-18T12:30:00Z"
+ },
+ "job_types": ["import", "export", "transform", "maintenance"],
+ "job_statuses": ["pending", "running", "success", "failed", "cancelled"],
+ "error_severities": ["low", "medium", "high", "critical"]
+}
diff --git a/ui/src/mocks/excepciones.json b/ui/src/mocks/excepciones.json
new file mode 100644
index 00000000..c8509ab1
--- /dev/null
+++ b/ui/src/mocks/excepciones.json
@@ -0,0 +1,93 @@
+{
+ "excepciones": [
+ {
+ "id": 1,
+ "codigo": "EXC-2025-001",
+ "titulo": "Excepción de Descuento Especial",
+ "descripcion": "Cliente VIP solicita descuento adicional fuera de política",
+ "tipo": "descuento",
+ "monto": 5000.00,
+ "moneda": "MXN",
+ "cliente_id": "CLI-12345",
+ "cliente_nombre": "Juan Pérez Empresarial S.A.",
+ "solicitante": "maria.garcia",
+ "fecha_solicitud": "2025-11-15T10:00:00Z",
+ "aprobador": "supervisor",
+ "fecha_aprobacion": "2025-11-15T14:30:00Z",
+ "estado": "aprobada",
+ "justificacion": "Cliente con historial de pagos excelente y alto volumen de operaciones",
+ "politica_relacionada": "POL-005",
+ "vigencia_inicio": "2025-11-15",
+ "vigencia_fin": "2025-12-31"
+ },
+ {
+ "id": 2,
+ "codigo": "EXC-2025-002",
+ "titulo": "Excepción de Plazo de Pago",
+ "descripcion": "Extensión de plazo de pago por situación extraordinaria",
+ "tipo": "plazo",
+ "monto": 0.00,
+ "moneda": "MXN",
+ "cliente_id": "CLI-67890",
+ "cliente_nombre": "María López",
+ "solicitante": "carlos.rodriguez",
+ "fecha_solicitud": "2025-11-16T09:00:00Z",
+ "aprobador": null,
+ "fecha_aprobacion": null,
+ "estado": "pendiente",
+ "justificacion": "Cliente afectado por situación de emergencia familiar",
+ "politica_relacionada": "POL-006",
+ "vigencia_inicio": null,
+ "vigencia_fin": null
+ },
+ {
+ "id": 3,
+ "codigo": "EXC-2025-003",
+ "titulo": "Excepción de Requisitos",
+ "descripcion": "Omisión de documentación por caso especial",
+ "tipo": "requisitos",
+ "monto": 0.00,
+ "moneda": "MXN",
+ "cliente_id": "CLI-11111",
+ "cliente_nombre": "Empresa Tech Solutions",
+ "solicitante": "ana.martinez",
+ "fecha_solicitud": "2025-11-10T11:00:00Z",
+ "aprobador": "director",
+ "fecha_aprobacion": "2025-11-12T16:00:00Z",
+ "estado": "rechazada",
+ "justificacion": "Documentación incompleta y sin justificación válida",
+ "politica_relacionada": "POL-007",
+ "vigencia_inicio": null,
+ "vigencia_fin": null
+ },
+ {
+ "id": 4,
+ "codigo": "EXC-2025-004",
+ "titulo": "Excepción de Horario",
+ "descripcion": "Atención fuera de horario estándar",
+ "tipo": "horario",
+ "monto": 0.00,
+ "moneda": "MXN",
+ "cliente_id": "CLI-22222",
+ "cliente_nombre": "Global Services Inc",
+ "solicitante": "juan.perez",
+ "fecha_solicitud": "2025-11-17T15:00:00Z",
+ "aprobador": "supervisor",
+ "fecha_aprobacion": "2025-11-17T16:00:00Z",
+ "estado": "aprobada",
+ "justificacion": "Cliente internacional con diferencia horaria significativa",
+ "politica_relacionada": "POL-008",
+ "vigencia_inicio": "2025-11-18",
+ "vigencia_fin": "2026-11-18"
+ }
+ ],
+ "estados": ["pendiente", "aprobada", "rechazada", "vencida"],
+ "tipos": ["descuento", "plazo", "requisitos", "horario", "limite_credito", "otro"],
+ "estadisticas": {
+ "total_excepciones": 4,
+ "aprobadas": 2,
+ "rechazadas": 1,
+ "pendientes": 1,
+ "monto_total_aprobado": 5000.00
+ }
+}
diff --git a/ui/src/mocks/health.json b/ui/src/mocks/health.json
new file mode 100644
index 00000000..b14210df
--- /dev/null
+++ b/ui/src/mocks/health.json
@@ -0,0 +1,8 @@
+{
+ "status": "degraded",
+ "checkedAt": "2025-11-14T00:00:00Z",
+ "services": [
+ { "name": "callcentersite", "status": "down" }
+ ],
+ "details": "Backend no disponible, mostrando mocks locales"
+}
diff --git a/ui/src/mocks/metadata.js b/ui/src/mocks/metadata.js
index a6a4d360..b4aad159 100644
--- a/ui/src/mocks/metadata.js
+++ b/ui/src/mocks/metadata.js
@@ -17,4 +17,76 @@ export const MOCK_METADATA = {
lastUpdated: '2025-11-09',
description: 'Escenario de llamadas y catalogos para pruebas funcionales',
},
+ health: {
+ id: 'health',
+ source: 'manual: estrategia_integracion_backend',
+ lastUpdated: '2025-11-14',
+ description: 'Health check simulado para habilitar degradacion controlada en UI',
+ },
+ users: {
+ id: 'users',
+ source: 'manual: gestion_usuarios',
+ lastUpdated: '2025-11-18',
+ description: 'Usuarios y grupos del sistema para administracion de accesos',
+ },
+ dashboard: {
+ id: 'dashboard',
+ source: 'manual: visualizacion_metricas',
+ lastUpdated: '2025-11-18',
+ description: 'Dashboard con widgets y metricas operativas del call center',
+ },
+ configuracion: {
+ id: 'configuracion',
+ source: 'manual: parametros_sistema',
+ lastUpdated: '2025-11-18',
+ description: 'Parametros de configuracion del sistema legacy',
+ },
+ configuration: {
+ id: 'configuration',
+ source: 'manual: settings_sistema',
+ lastUpdated: '2025-11-18',
+ description: 'Settings de configuracion del sistema moderno',
+ },
+ presupuestos: {
+ id: 'presupuestos',
+ source: 'manual: gestion_financiera',
+ lastUpdated: '2025-11-18',
+ description: 'Presupuestos y control de gastos del call center',
+ },
+ politicas: {
+ id: 'politicas',
+ source: 'manual: cumplimiento_normativo',
+ lastUpdated: '2025-11-18',
+ description: 'Politicas y procedimientos del call center',
+ },
+ excepciones: {
+ id: 'excepciones',
+ source: 'manual: gestion_excepciones',
+ lastUpdated: '2025-11-18',
+ description: 'Excepciones y casos especiales que requieren aprobacion',
+ },
+ reportes: {
+ id: 'reportes',
+ source: 'manual: analytics_reportes',
+ lastUpdated: '2025-11-18',
+ description: 'Reportes de IVR y metricas operativas del call center',
+ },
+ notifications: {
+ id: 'notifications',
+ source: 'manual: sistema_notificaciones',
+ lastUpdated: '2025-11-18',
+ description: 'Notificaciones y mensajes del sistema',
+ },
+ etl: {
+ id: 'etl',
+ source: 'manual: procesos_etl',
+ lastUpdated: '2025-11-18',
+ description: 'Jobs y errores de procesos ETL',
+ },
+ dora: {
+ id: 'dora',
+ source: 'manual: metricas_devops',
+ lastUpdated: '2025-11-18',
+ description: 'Metricas DORA para seguimiento de entrega de software',
+ },
};
diff --git a/ui/src/mocks/notifications.json b/ui/src/mocks/notifications.json
new file mode 100644
index 00000000..41f2a2d1
--- /dev/null
+++ b/ui/src/mocks/notifications.json
@@ -0,0 +1,80 @@
+{
+ "messages": [
+ {
+ "id": 1,
+ "title": "Nueva política publicada",
+ "body": "Se ha publicado la política POL-002: Política de Privacidad de Datos",
+ "type": "info",
+ "priority": "high",
+ "read": false,
+ "created_at": "2025-11-18T10:30:00Z",
+ "related_object_type": "policy",
+ "related_object_id": 2,
+ "action_url": "/politicas/2"
+ },
+ {
+ "id": 2,
+ "title": "Presupuesto aprobado",
+ "body": "El presupuesto PRES-2025-001 ha sido aprobado",
+ "type": "success",
+ "priority": "medium",
+ "read": true,
+ "created_at": "2025-11-17T14:20:00Z",
+ "related_object_type": "budget",
+ "related_object_id": 1,
+ "action_url": "/presupuestos/1"
+ },
+ {
+ "id": 3,
+ "title": "Excepción pendiente de aprobación",
+ "body": "La excepción EXC-2025-002 requiere su aprobación",
+ "type": "warning",
+ "priority": "high",
+ "read": false,
+ "created_at": "2025-11-17T09:15:00Z",
+ "related_object_type": "exception",
+ "related_object_id": 2,
+ "action_url": "/excepciones/2"
+ },
+ {
+ "id": 4,
+ "title": "Error en proceso ETL",
+ "body": "Fallo en la sincronización de datos de llamadas",
+ "type": "error",
+ "priority": "critical",
+ "read": false,
+ "created_at": "2025-11-18T02:00:00Z",
+ "related_object_type": "etl_job",
+ "related_object_id": 15,
+ "action_url": "/etl/jobs/15"
+ },
+ {
+ "id": 5,
+ "title": "Nuevo reporte disponible",
+ "body": "El reporte de navegación IVR de noviembre está listo",
+ "type": "info",
+ "priority": "low",
+ "read": true,
+ "created_at": "2025-11-18T08:05:00Z",
+ "related_object_type": "report",
+ "related_object_id": 1,
+ "action_url": "/reportes/1"
+ },
+ {
+ "id": 6,
+ "title": "Usuario suspendido",
+ "body": "El usuario ana.martinez ha sido suspendido por inactividad",
+ "type": "warning",
+ "priority": "medium",
+ "read": true,
+ "created_at": "2025-11-16T16:00:00Z",
+ "related_object_type": "user",
+ "related_object_id": 4,
+ "action_url": "/usuarios/4"
+ }
+ ],
+ "unread_count": 3,
+ "total_count": 6,
+ "types": ["info", "success", "warning", "error"],
+ "priorities": ["low", "medium", "high", "critical"]
+}
diff --git a/ui/src/mocks/permissions.json b/ui/src/mocks/permissions.json
index 634c5803..6cbab9a7 100644
--- a/ui/src/mocks/permissions.json
+++ b/ui/src/mocks/permissions.json
@@ -7,12 +7,14 @@
{
"id": 1,
"codigo": "atencion_cliente",
- "nombre_display": "Atencion al Cliente"
+ "nombre_display": "Atencion al Cliente",
+ "descripcion": "Atiende consultas y gestiona interacciones con clientes"
},
{
"id": 2,
"codigo": "visualizacion_metricas",
- "nombre_display": "Visualizacion de Metricas"
+ "nombre_display": "Visualizacion de Metricas",
+ "descripcion": "Accede a reportes y analisis de desempeno"
}
]
},
@@ -32,7 +34,6 @@
"nombre": "dashboards",
"nombre_completo": "sistema.vistas.dashboards",
"dominio": "vistas",
- "icono": "dashboard",
"orden_menu": 10
},
{
@@ -40,7 +41,6 @@
"nombre": "llamadas",
"nombre_completo": "sistema.operaciones.llamadas",
"dominio": "operaciones",
- "icono": "phone",
"orden_menu": 20
},
{
@@ -48,7 +48,6 @@
"nombre": "tickets",
"nombre_completo": "sistema.operaciones.tickets",
"dominio": "operaciones",
- "icono": "ticket",
"orden_menu": 30
},
{
@@ -56,7 +55,6 @@
"nombre": "clientes",
"nombre_completo": "sistema.operaciones.clientes",
"dominio": "operaciones",
- "icono": "people",
"orden_menu": 40
},
{
@@ -64,8 +62,13 @@
"nombre": "metricas",
"nombre_completo": "sistema.analisis.metricas",
"dominio": "analisis",
- "icono": "chart",
"orden_menu": 60
}
- ]
+ ],
+ "permisos_excepcionales": [],
+ "auditoria_ultimo_acceso": {
+ "timestamp": "2025-11-18T12:30:00Z",
+ "capacidad": "sistema.vistas.dashboards.ver",
+ "resultado": "permitido"
+ }
}
diff --git a/ui/src/mocks/politicas.json b/ui/src/mocks/politicas.json
new file mode 100644
index 00000000..7008731a
--- /dev/null
+++ b/ui/src/mocks/politicas.json
@@ -0,0 +1,74 @@
+{
+ "politicas": [
+ {
+ "id": 1,
+ "codigo": "POL-001",
+ "titulo": "Política de Atención al Cliente",
+ "descripcion": "Lineamientos para la atención de llamadas entrantes",
+ "contenido": "1. Saludar cordialmente\n2. Identificar al cliente\n3. Escuchar activamente\n4. Resolver en primera llamada\n5. Confirmar satisfacción",
+ "version": 3,
+ "estado": "publicada",
+ "categoria": "atencion_cliente",
+ "vigencia_inicio": "2025-01-01",
+ "vigencia_fin": null,
+ "creado_por": "admin",
+ "fecha_creacion": "2024-11-15T10:00:00Z",
+ "publicado_por": "admin",
+ "fecha_publicacion": "2025-01-01T00:00:00Z",
+ "aprobadores": ["supervisor", "director"]
+ },
+ {
+ "id": 2,
+ "codigo": "POL-002",
+ "titulo": "Política de Privacidad de Datos",
+ "descripcion": "Manejo y protección de información del cliente",
+ "contenido": "Todos los datos personales deben ser protegidos según la LFPDPPP...",
+ "version": 2,
+ "estado": "publicada",
+ "categoria": "seguridad",
+ "vigencia_inicio": "2025-02-01",
+ "vigencia_fin": null,
+ "creado_por": "legal",
+ "fecha_creacion": "2025-01-10T09:00:00Z",
+ "publicado_por": "director",
+ "fecha_publicacion": "2025-02-01T00:00:00Z",
+ "aprobadores": ["legal", "ti", "director"]
+ },
+ {
+ "id": 3,
+ "codigo": "POL-003",
+ "titulo": "Política de Escalamiento de Casos",
+ "descripcion": "Procedimiento para escalar casos complejos",
+ "contenido": "Niveles de escalamiento:\nNivel 1: Agente\nNivel 2: Supervisor\nNivel 3: Manager",
+ "version": 1,
+ "estado": "borrador",
+ "categoria": "operaciones",
+ "vigencia_inicio": null,
+ "vigencia_fin": null,
+ "creado_por": "supervisor",
+ "fecha_creacion": "2025-11-01T14:00:00Z",
+ "publicado_por": null,
+ "fecha_publicacion": null,
+ "aprobadores": []
+ },
+ {
+ "id": 4,
+ "codigo": "POL-001",
+ "titulo": "Política de Atención al Cliente",
+ "descripcion": "Lineamientos para la atención de llamadas entrantes",
+ "contenido": "Version anterior archivada",
+ "version": 2,
+ "estado": "archivada",
+ "categoria": "atencion_cliente",
+ "vigencia_inicio": "2024-06-01",
+ "vigencia_fin": "2024-12-31",
+ "creado_por": "admin",
+ "fecha_creacion": "2024-05-01T10:00:00Z",
+ "publicado_por": "admin",
+ "fecha_publicacion": "2024-06-01T00:00:00Z",
+ "aprobadores": ["supervisor"]
+ }
+ ],
+ "estados": ["borrador", "en_revision", "publicada", "archivada"],
+ "categorias": ["atencion_cliente", "seguridad", "operaciones", "cumplimiento", "recursos_humanos"]
+}
diff --git a/ui/src/mocks/presupuestos.json b/ui/src/mocks/presupuestos.json
new file mode 100644
index 00000000..035a7fbf
--- /dev/null
+++ b/ui/src/mocks/presupuestos.json
@@ -0,0 +1,113 @@
+{
+ "presupuestos": [
+ {
+ "id": 1,
+ "codigo": "PRES-2025-001",
+ "descripcion": "Presupuesto de Call Center Q1 2025",
+ "monto_total": 250000.00,
+ "moneda": "MXN",
+ "periodo_inicio": "2025-01-01",
+ "periodo_fin": "2025-03-31",
+ "estado": "aprobado",
+ "creado_por": "admin",
+ "fecha_creacion": "2024-12-01T10:00:00Z",
+ "aprobado_por": "director",
+ "fecha_aprobacion": "2024-12-15T14:30:00Z",
+ "lineas": [
+ {
+ "id": 1,
+ "concepto": "Nómina Agentes",
+ "monto": 150000.00,
+ "porcentaje": 60.0
+ },
+ {
+ "id": 2,
+ "concepto": "Tecnología y Software",
+ "monto": 50000.00,
+ "porcentaje": 20.0
+ },
+ {
+ "id": 3,
+ "concepto": "Infraestructura",
+ "monto": 30000.00,
+ "porcentaje": 12.0
+ },
+ {
+ "id": 4,
+ "concepto": "Capacitación",
+ "monto": 20000.00,
+ "porcentaje": 8.0
+ }
+ ]
+ },
+ {
+ "id": 2,
+ "codigo": "PRES-2025-002",
+ "descripcion": "Presupuesto de Call Center Q2 2025",
+ "monto_total": 280000.00,
+ "moneda": "MXN",
+ "periodo_inicio": "2025-04-01",
+ "periodo_fin": "2025-06-30",
+ "estado": "pendiente",
+ "creado_por": "admin",
+ "fecha_creacion": "2025-02-15T09:00:00Z",
+ "aprobado_por": null,
+ "fecha_aprobacion": null,
+ "lineas": [
+ {
+ "id": 5,
+ "concepto": "Nómina Agentes",
+ "monto": 170000.00,
+ "porcentaje": 60.7
+ },
+ {
+ "id": 6,
+ "concepto": "Tecnología y Software",
+ "monto": 60000.00,
+ "porcentaje": 21.4
+ },
+ {
+ "id": 7,
+ "concepto": "Infraestructura",
+ "monto": 30000.00,
+ "porcentaje": 10.7
+ },
+ {
+ "id": 8,
+ "concepto": "Capacitación",
+ "monto": 20000.00,
+ "porcentaje": 7.1
+ }
+ ]
+ },
+ {
+ "id": 3,
+ "codigo": "PRES-2025-003",
+ "descripcion": "Presupuesto Extraordinario - Expansión",
+ "monto_total": 100000.00,
+ "moneda": "MXN",
+ "periodo_inicio": "2025-05-01",
+ "periodo_fin": "2025-05-31",
+ "estado": "rechazado",
+ "creado_por": "supervisor",
+ "fecha_creacion": "2025-04-01T11:00:00Z",
+ "aprobado_por": "director",
+ "fecha_aprobacion": "2025-04-10T16:00:00Z",
+ "lineas": [
+ {
+ "id": 9,
+ "concepto": "Contratación Nueva Sede",
+ "monto": 100000.00,
+ "porcentaje": 100.0
+ }
+ ]
+ }
+ ],
+ "estados": ["pendiente", "aprobado", "rechazado", "en_revision"],
+ "estadisticas": {
+ "total_presupuestos": 3,
+ "monto_total_aprobado": 250000.00,
+ "monto_total_pendiente": 280000.00,
+ "monto_total_rechazado": 100000.00
+ }
+}
diff --git a/ui/src/mocks/registry.js b/ui/src/mocks/registry.js
index 15a94b98..4f36506e 100644
--- a/ui/src/mocks/registry.js
+++ b/ui/src/mocks/registry.js
@@ -1,19 +1,71 @@
import configMock from './config.json';
import permissionsMock from './permissions.json';
import callsMock from './llamadas.json';
+import healthMock from './health.json';
+import usuariosMock from './usuarios.json';
+import dashboardMock from './dashboard.json';
+import configuracionMock from './configuracion.json';
+import configurationMock from './configuration.json';
+import presupuestosMock from './presupuestos.json';
+import politicasMock from './politicas.json';
+import excepcionesMock from './excepciones.json';
+import reportesMock from './reportes.json';
+import notificationsMock from './notifications.json';
+import etlMock from './etl.json';
+import doraMock from './dora.json';
import { MOCK_METADATA } from './metadata';
-import { validateConfigMock, validatePermissionsMock, validateCallsMock } from './schemas';
+import {
+ validateConfigMock,
+ validatePermissionsMock,
+ validateCallsMock,
+ validateHealthMock,
+ validateUsersMock,
+ validateDashboardMock,
+ validateConfiguracionMock,
+ validateConfigurationMock,
+ validatePresupuestosMock,
+ validatePoliticasMock,
+ validateExcepcionesMock,
+ validateReportesMock,
+ validateNotificationsMock,
+ validateETLMock,
+ validateDORAMock,
+} from './schemas';
const DATA_BY_KEY = {
config: configMock,
permissions: permissionsMock,
calls: callsMock,
+ health: healthMock,
+ users: usuariosMock,
+ dashboard: dashboardMock,
+ configuracion: configuracionMock,
+ configuration: configurationMock,
+ presupuestos: presupuestosMock,
+ politicas: politicasMock,
+ excepciones: excepcionesMock,
+ reportes: reportesMock,
+ notifications: notificationsMock,
+ etl: etlMock,
+ dora: doraMock,
};
const VALIDATORS = {
config: validateConfigMock,
permissions: validatePermissionsMock,
calls: validateCallsMock,
+ health: validateHealthMock,
+ users: validateUsersMock,
+ dashboard: validateDashboardMock,
+ configuracion: validateConfiguracionMock,
+ configuration: validateConfigurationMock,
+ presupuestos: validatePresupuestosMock,
+ politicas: validatePoliticasMock,
+ excepciones: validateExcepcionesMock,
+ reportes: validateReportesMock,
+ notifications: validateNotificationsMock,
+ etl: validateETLMock,
+ dora: validateDORAMock,
};
export const validateMock = (key, data) => {
diff --git a/ui/src/mocks/reportes.json b/ui/src/mocks/reportes.json
new file mode 100644
index 00000000..bed4f460
--- /dev/null
+++ b/ui/src/mocks/reportes.json
@@ -0,0 +1,103 @@
+{
+ "reportes": [
+ {
+ "id": 1,
+ "tipo": "ivr_navigation",
+ "nombre": "Reporte de Navegación IVR - Noviembre 2025",
+ "descripcion": "Análisis de navegación de usuarios en el sistema IVR",
+ "fecha_generacion": "2025-11-18T08:00:00Z",
+ "generado_por": "admin",
+ "periodo_inicio": "2025-11-01T00:00:00Z",
+ "periodo_fin": "2025-11-17T23:59:59Z",
+ "formato": "json",
+ "estado": "completado",
+ "datos": {
+ "total_interacciones": 5420,
+ "opciones_mas_usadas": [
+ { "opcion": "1", "descripcion": "Consulta de Saldo", "cantidad": 2150 },
+ { "opcion": "2", "descripcion": "Transferencia", "cantidad": 1800 },
+ { "opcion": "3", "descripcion": "Hablar con Agente", "cantidad": 1470 }
+ ],
+ "tiempo_promedio_navegacion": 45.3,
+ "tasa_abandono": 8.5
+ }
+ },
+ {
+ "id": 2,
+ "tipo": "call_volume",
+ "nombre": "Reporte de Volumen de Llamadas - Semanal",
+ "descripcion": "Análisis semanal del volumen de llamadas",
+ "fecha_generacion": "2025-11-18T07:00:00Z",
+ "generado_por": "supervisor",
+ "periodo_inicio": "2025-11-11T00:00:00Z",
+ "periodo_fin": "2025-11-17T23:59:59Z",
+ "formato": "pdf",
+ "estado": "completado",
+ "datos": {
+ "total_llamadas": 1250,
+ "promedio_diario": 178.6,
+ "pico_maximo": { "fecha": "2025-11-15", "cantidad": 245 },
+ "distribucion_horaria": {
+ "08-12": 450,
+ "12-16": 520,
+ "16-20": 280
+ }
+ }
+ },
+ {
+ "id": 3,
+ "tipo": "agent_performance",
+ "nombre": "Reporte de Desempeño de Agentes - Q4 2025",
+ "descripcion": "Evaluación trimestral del desempeño de agentes",
+ "fecha_generacion": "2025-11-18T06:00:00Z",
+ "generado_por": "manager",
+ "periodo_inicio": "2025-10-01T00:00:00Z",
+ "periodo_fin": "2025-11-17T23:59:59Z",
+ "formato": "excel",
+ "estado": "en_proceso",
+ "datos": {
+ "total_agentes": 15,
+ "promedio_satisfaccion": 4.2,
+ "promedio_tiempo_llamada": 325.4,
+ "top_performers": [
+ { "agente": "maria.garcia", "score": 4.8 },
+ { "agente": "carlos.rodriguez", "score": 4.6 }
+ ]
+ }
+ },
+ {
+ "id": 4,
+ "tipo": "customer_satisfaction",
+ "nombre": "Reporte de Satisfacción del Cliente - Mensual",
+ "descripcion": "Encuestas de satisfacción post-llamada",
+ "fecha_generacion": "2025-11-01T09:00:00Z",
+ "generado_por": "quality",
+ "periodo_inicio": "2025-10-01T00:00:00Z",
+ "periodo_fin": "2025-10-31T23:59:59Z",
+ "formato": "json",
+ "estado": "completado",
+ "datos": {
+ "total_encuestas": 850,
+ "tasa_respuesta": 68.0,
+ "promedio_calificacion": 4.3,
+ "distribucion": {
+ "5_estrellas": 420,
+ "4_estrellas": 280,
+ "3_estrellas": 100,
+ "2_estrellas": 35,
+ "1_estrella": 15
+ }
+ }
+ }
+ ],
+ "tipos_disponibles": [
+ "ivr_navigation",
+ "call_volume",
+ "agent_performance",
+ "customer_satisfaction",
+ "quality_assurance",
+ "operational_metrics"
+ ],
+ "formatos_exportacion": ["json", "pdf", "excel", "csv"],
+ "estados": ["en_proceso", "completado", "fallido"]
+}
diff --git a/ui/src/mocks/schemas.js b/ui/src/mocks/schemas.js
index 59437c84..c8e8f376 100644
--- a/ui/src/mocks/schemas.js
+++ b/ui/src/mocks/schemas.js
@@ -106,8 +106,107 @@ const validateCallsMock = (data) => {
return data;
};
+const validateHealthMock = (data) => {
+ assert(isObject(data), 'health debe ser objeto');
+ assert(typeof data.status === 'string', 'health.status debe ser string');
+ assert(typeof data.checkedAt === 'string', 'health.checkedAt debe ser string');
+
+ if (data.services) {
+ assert(Array.isArray(data.services), 'health.services debe ser arreglo cuando existe');
+ data.services.forEach((svc) => {
+ assert(isObject(svc), 'health.services[x] debe ser objeto');
+ assert(typeof svc.name === 'string', 'health.services[x].name debe ser string');
+ assert(typeof svc.status === 'string', 'health.services[x].status debe ser string');
+ });
+ }
+
+ return data;
+};
+
+const validateUsersMock = (data) => {
+ assert(isObject(data), 'users debe ser objeto');
+ assert(Array.isArray(data.usuarios), 'users.usuarios debe ser arreglo');
+ assert(Array.isArray(data.grupos), 'users.grupos debe ser arreglo');
+ return data;
+};
+
+const validateDashboardMock = (data) => {
+ assert(isObject(data), 'dashboard debe ser objeto');
+ assert(isObject(data.overview), 'dashboard.overview debe ser objeto');
+ assert(Array.isArray(data.widgets), 'dashboard.widgets debe ser arreglo');
+ return data;
+};
+
+const validateConfiguracionMock = (data) => {
+ assert(isObject(data), 'configuracion debe ser objeto');
+ assert(Array.isArray(data.parametros), 'configuracion.parametros debe ser arreglo');
+ return data;
+};
+
+const validateConfigurationMock = (data) => {
+ assert(isObject(data), 'configuration debe ser objeto');
+ assert(Array.isArray(data.settings), 'configuration.settings debe ser arreglo');
+ return data;
+};
+
+const validatePresupuestosMock = (data) => {
+ assert(isObject(data), 'presupuestos debe ser objeto');
+ assert(Array.isArray(data.presupuestos), 'presupuestos.presupuestos debe ser arreglo');
+ return data;
+};
+
+const validatePoliticasMock = (data) => {
+ assert(isObject(data), 'politicas debe ser objeto');
+ assert(Array.isArray(data.politicas), 'politicas.politicas debe ser arreglo');
+ return data;
+};
+
+const validateExcepcionesMock = (data) => {
+ assert(isObject(data), 'excepciones debe ser objeto');
+ assert(Array.isArray(data.excepciones), 'excepciones.excepciones debe ser arreglo');
+ return data;
+};
+
+const validateReportesMock = (data) => {
+ assert(isObject(data), 'reportes debe ser objeto');
+ assert(Array.isArray(data.reportes), 'reportes.reportes debe ser arreglo');
+ return data;
+};
+
+const validateNotificationsMock = (data) => {
+ assert(isObject(data), 'notifications debe ser objeto');
+ assert(Array.isArray(data.messages), 'notifications.messages debe ser arreglo');
+ assert(typeof data.unread_count === 'number', 'notifications.unread_count debe ser numero');
+ return data;
+};
+
+const validateETLMock = (data) => {
+ assert(isObject(data), 'etl debe ser objeto');
+ assert(Array.isArray(data.jobs), 'etl.jobs debe ser arreglo');
+ assert(Array.isArray(data.errors), 'etl.errors debe ser arreglo');
+ return data;
+};
+
+const validateDORAMock = (data) => {
+ assert(isObject(data), 'dora debe ser objeto');
+ assert(isObject(data.metrics), 'dora.metrics debe ser objeto');
+ return data;
+};
+
module.exports = {
validateConfigMock,
validatePermissionsMock,
validateCallsMock,
+ validateHealthMock,
+ validateUsersMock,
+ validateDashboardMock,
+ validateConfiguracionMock,
+ validateConfigurationMock,
+ validatePresupuestosMock,
+ validatePoliticasMock,
+ validateExcepcionesMock,
+ validateReportesMock,
+ validateNotificationsMock,
+ validateETLMock,
+ validateDORAMock,
};
diff --git a/ui/src/mocks/usuarios.json b/ui/src/mocks/usuarios.json
new file mode 100644
index 00000000..f4c2805d
--- /dev/null
+++ b/ui/src/mocks/usuarios.json
@@ -0,0 +1,216 @@
+{
+ "usuarios": [
+ {
+ "id": 1,
+ "username": "admin",
+ "email": "admin@example.com",
+ "first_name": "Administrador",
+ "last_name": "Sistema",
+ "is_active": true,
+ "is_staff": true,
+ "is_superuser": false,
+ "date_joined": "2025-01-01T00:00:00Z",
+ "last_login": "2025-11-18T10:00:00Z",
+ "grupos": [
+ {
+ "id": 1,
+ "codigo": "gestion_sistema",
+ "nombre_display": "Gestion de Sistema",
+ "descripcion": "Configura parametros tecnicos y administra usuarios"
+ },
+ {
+ "id": 5,
+ "codigo": "supervision_operaciones",
+ "nombre_display": "Supervision de Operaciones",
+ "descripcion": "Monitorea metricas y gestiona equipos de trabajo"
+ }
+ ]
+ },
+ {
+ "id": 2,
+ "username": "maria.garcia",
+ "email": "maria.garcia@example.com",
+ "first_name": "Maria",
+ "last_name": "Garcia",
+ "is_active": true,
+ "is_staff": false,
+ "is_superuser": false,
+ "date_joined": "2025-02-15T00:00:00Z",
+ "last_login": "2025-11-18T09:30:00Z",
+ "grupos": [
+ {
+ "id": 2,
+ "codigo": "atencion_cliente",
+ "nombre_display": "Atencion al Cliente",
+ "descripcion": "Atiende consultas y gestiona interacciones con clientes"
+ }
+ ]
+ },
+ {
+ "id": 3,
+ "username": "carlos.rodriguez",
+ "email": "carlos.rodriguez@example.com",
+ "first_name": "Carlos",
+ "last_name": "Rodriguez",
+ "is_active": true,
+ "is_staff": false,
+ "is_superuser": false,
+ "date_joined": "2025-03-10T00:00:00Z",
+ "last_login": "2025-11-18T08:45:00Z",
+ "grupos": [
+ {
+ "id": 2,
+ "codigo": "atencion_cliente",
+ "nombre_display": "Atencion al Cliente",
+ "descripcion": "Atiende consultas y gestiona interacciones con clientes"
+ },
+ {
+ "id": 3,
+ "codigo": "gestion_tickets",
+ "nombre_display": "Gestion de Tickets",
+ "descripcion": "Crea y resuelve tickets de soporte"
+ }
+ ]
+ },
+ {
+ "id": 4,
+ "username": "ana.martinez",
+ "email": "ana.martinez@example.com",
+ "first_name": "Ana",
+ "last_name": "Martinez",
+ "is_active": false,
+ "is_staff": false,
+ "is_superuser": false,
+ "date_joined": "2025-04-20T00:00:00Z",
+ "last_login": "2025-10-15T14:20:00Z",
+ "grupos": []
+ }
+ ],
+ "grupos": [
+ {
+ "id": 1,
+ "codigo": "gestion_sistema",
+ "nombre_display": "Gestion de Sistema",
+ "descripcion": "Configura parametros tecnicos y administra usuarios",
+ "capacidades": ["sistema.usuarios.crear", "sistema.usuarios.editar", "sistema.usuarios.suspender", "sistema.configuracion.modificar"]
+ },
+ {
+ "id": 2,
+ "codigo": "atencion_cliente",
+ "nombre_display": "Atencion al Cliente",
+ "descripcion": "Atiende consultas y gestiona interacciones con clientes",
+ "capacidades": ["sistema.llamadas.ver", "sistema.llamadas.realizar", "sistema.clientes.ver"]
+ },
+ {
+ "id": 3,
+ "codigo": "gestion_tickets",
+ "nombre_display": "Gestion de Tickets",
+ "descripcion": "Crea y resuelve tickets de soporte",
+ "capacidades": ["sistema.tickets.ver", "sistema.tickets.crear", "sistema.tickets.editar"]
+ },
+ {
+ "id": 4,
+ "codigo": "analisis_reportes",
+ "nombre_display": "Analisis de Reportes",
+ "descripcion": "Consulta reportes y exporta datos",
+ "capacidades": ["sistema.reportes.ver", "sistema.reportes.exportar", "sistema.metricas.ver"]
+ },
+ {
+ "id": 5,
+ "codigo": "supervision_operaciones",
+ "nombre_display": "Supervision de Operaciones",
+ "descripcion": "Monitorea metricas y gestiona equipos de trabajo",
+ "capacidades": ["sistema.dashboard.ver", "sistema.metricas.ver", "sistema.equipos.gestionar", "sistema.reportes.ver"]
+ }
+ ],
+ "funciones": [
+ {
+ "id": 1,
+ "recurso": "dashboards",
+ "descripcion": "Visualizacion de paneles de control"
+ },
+ {
+ "id": 2,
+ "recurso": "usuarios",
+ "descripcion": "Administracion de usuarios del sistema"
+ },
+ {
+ "id": 3,
+ "recurso": "llamadas",
+ "descripcion": "Gestion de llamadas y operaciones"
+ },
+ {
+ "id": 4,
+ "recurso": "tickets",
+ "descripcion": "Gestion de tickets de soporte"
+ },
+ {
+ "id": 5,
+ "recurso": "reportes",
+ "descripcion": "Reportes y analisis"
+ }
+ ],
+ "capacidades": [
+ {
+ "id": 1,
+ "nombre": "sistema.usuarios.ver",
+ "accion": "ver",
+ "funcion_id": 2,
+ "descripcion": "Ver listado de usuarios"
+ },
+ {
+ "id": 2,
+ "nombre": "sistema.usuarios.crear",
+ "accion": "crear",
+ "funcion_id": 2,
+ "descripcion": "Crear nuevos usuarios"
+ },
+ {
+ "id": 3,
+ "nombre": "sistema.usuarios.editar",
+ "accion": "editar",
+ "funcion_id": 2,
+ "descripcion": "Modificar datos de usuarios"
+ },
+ {
+ "id": 4,
+ "nombre": "sistema.usuarios.suspender",
+ "accion": "suspender",
+ "funcion_id": 2,
+ "descripcion": "Suspender o reactivar usuarios"
+ }
+ ],
+ "permisos_excepcionales": [
+ {
+ "id": 1,
+ "usuario_id": 2,
+ "capacidad": "sistema.reportes.exportar",
+ "motivo": "Acceso temporal para cierre mensual",
+ "fecha_inicio": "2025-11-01T00:00:00Z",
+ "fecha_fin": "2025-11-30T23:59:59Z",
+ "es_permanente": false,
+ "otorgado_por": 1,
+ "fecha_creacion": "2025-11-01T08:00:00Z"
+ }
+ ],
+ "auditoria": [
+ {
+ "id": 1,
+ "usuario_id": 2,
+ "capacidad": "sistema.llamadas.ver",
+ "accion": "acceso",
+ "timestamp": "2025-11-18T09:30:15Z",
+ "resultado": "permitido",
+ "ip_origen": "192.168.1.100"
+ },
+ {
+ "id": 2,
+ "usuario_id": 3,
+ "capacidad": "sistema.usuarios.editar",
+ "accion": "intento_acceso",
+ "timestamp": "2025-11-18T10:15:30Z",
+ "resultado": "denegado",
+ "ip_origen": "192.168.1.101"
+ }
+ ]
+}
diff --git a/ui/src/services/health/HealthService.js b/ui/src/services/health/HealthService.js
new file mode 100644
index 00000000..e0b07abb
--- /dev/null
+++ b/ui/src/services/health/HealthService.js
@@ -0,0 +1,33 @@
+import { createResilientService } from '@services/createResilientService';
+import { loadMock } from '@mocks/registry';
+import { shouldUseMockForDomain } from '@services/flags/backendIntegrity';
+
+const HEALTH_ENDPOINT = '/health/';
+const { data: healthMock } = loadMock('health');
+
+const baseService = createResilientService({
+ id: 'health',
+ endpoint: HEALTH_ENDPOINT,
+ mockDataLoader: () => Promise.resolve(healthMock),
+ shouldUseMock: () => shouldUseMockForDomain('health'),
+ errorMessage: 'No fue posible obtener el estado del backend',
+ isPayloadValid: (payload) => Boolean(payload && typeof payload.status === 'string'),
+});
+
+export class HealthService {
+ static async getStatus(options = {}) {
+ return baseService.fetch(options);
+ }
+
+ static async fetchFromApi(options = {}) {
+ return baseService.fetchFromApi(options);
+ }
+
+ static async fetchFromMock() {
+ return baseService.fetchFromMock();
+ }
+
+ static shouldUseMock() {
+ return shouldUseMockForDomain('health');
+ }
+}
diff --git a/ui/src/services/health/HealthService.test.js b/ui/src/services/health/HealthService.test.js
new file mode 100644
index 00000000..9881c73e
--- /dev/null
+++ b/ui/src/services/health/HealthService.test.js
@@ -0,0 +1,56 @@
+import healthMock from '@mocks/health.json';
+import { HealthService } from './HealthService';
+import { getMockUsageMetrics, resetMockUsageMetrics } from '@services/utils/mockUsageTracker';
+
+describe('HealthService', () => {
+ let originalEnv;
+
+ beforeEach(() => {
+ originalEnv = { ...process.env };
+ resetMockUsageMetrics();
+ });
+
+ afterEach(() => {
+ process.env = { ...originalEnv };
+ });
+
+ it('uses mock data when backend source is forced to mock', async () => {
+ process.env.UI_BACKEND_HEALTH_SOURCE = 'mock';
+ const fetchImpl = jest.fn();
+
+ const result = await HealthService.getStatus({ fetchImpl });
+
+ expect(fetchImpl).not.toHaveBeenCalled();
+ expect(result.source).toBe('mock');
+ expect(result.data).toEqual(healthMock);
+ expect(getMockUsageMetrics()).toEqual({ health: { api: 0, mock: 1 } });
+ });
+
+ it('returns api payload when endpoint succeeds', async () => {
+ process.env.UI_BACKEND_HEALTH_SOURCE = 'api';
+ const apiPayload = { status: 'ok', checkedAt: '2025-11-14T10:00:00Z' };
+ const fetchImpl = jest.fn().mockResolvedValue({
+ ok: true,
+ json: () => Promise.resolve(apiPayload),
+ });
+
+ const result = await HealthService.getStatus({ fetchImpl });
+
+ expect(fetchImpl).toHaveBeenCalledWith('/health/', { signal: undefined });
+ expect(result.source).toBe('api');
+ expect(result.data).toEqual(apiPayload);
+ expect(getMockUsageMetrics()).toEqual({ health: { api: 1, mock: 0 } });
+ });
+
+ it('falls back to mock data when api fails', async () => {
+ process.env.UI_BACKEND_HEALTH_SOURCE = 'api';
+ const fetchImpl = jest.fn().mockResolvedValue({ ok: false, status: 503, statusText: 'Service Unavailable' });
+
+ const result = await HealthService.getStatus({ fetchImpl });
+
+ expect(result.source).toBe('mock');
+ expect(result.data).toEqual(healthMock);
+ expect(result.error).toBeInstanceOf(Error);
+ expect(getMockUsageMetrics()).toEqual({ health: { api: 0, mock: 1 } });
+ });
+});
diff --git a/ui/src/services/utils/mockUsageTracker.js b/ui/src/services/utils/mockUsageTracker.js
index 64dd4d9c..cde74fca 100644
--- a/ui/src/services/utils/mockUsageTracker.js
+++ b/ui/src/services/utils/mockUsageTracker.js
@@ -1,4 +1,5 @@
const metrics = {};
+const subscribers = new Set();
const ensureDomain = (domain) => {
if (!metrics[domain]) {
@@ -21,6 +22,8 @@ export const recordMockUsage = (domain, source) => {
const entry = ensureDomain(domain);
entry[normalizedSource] += 1;
+
+ notifySubscribers();
};
export const getMockUsageMetrics = () => {
@@ -35,4 +38,24 @@ export const resetMockUsageMetrics = () => {
Object.keys(metrics).forEach((domain) => {
delete metrics[domain];
});
+
+ notifySubscribers();
+};
+
+const notifySubscribers = () => {
+ const snapshot = getMockUsageMetrics();
+ subscribers.forEach((listener) => listener(snapshot));
+};
+
+export const subscribeMockUsage = (listener) => {
+ if (typeof listener !== 'function') {
+ return () => {};
+ }
+
+ listener(getMockUsageMetrics());
+ subscribers.add(listener);
+
+ return () => {
+ subscribers.delete(listener);
+ };
};
diff --git a/ui/src/services/utils/mockUsageTracker.test.js b/ui/src/services/utils/mockUsageTracker.test.js
index 74a9d475..f02d90a4 100644
--- a/ui/src/services/utils/mockUsageTracker.test.js
+++ b/ui/src/services/utils/mockUsageTracker.test.js
@@ -1,4 +1,31 @@
-import { recordMockUsage, getMockUsageMetrics, resetMockUsageMetrics } from './mockUsageTracker';
+import {
+ recordMockUsage,
+ getMockUsageMetrics,
+ resetMockUsageMetrics,
+ subscribeMockUsage,
+} from './mockUsageTracker';
+
+describe('mockUsageTracker subscriptions', () => {
+ beforeEach(() => {
+ resetMockUsageMetrics();
+ });
+
+ it('notifies subscribers when metrics change', () => {
+ const events = [];
+ const unsubscribe = subscribeMockUsage((data) => events.push(data));
+
+ recordMockUsage('calls', 'mock');
+ recordMockUsage('config', 'api');
+
+ expect(events).toEqual([
+ {},
+ { calls: { api: 0, mock: 1 } },
+ { calls: { api: 0, mock: 1 }, config: { api: 1, mock: 0 } },
+ ]);
+
+ unsubscribe();
+ });
+});
describe('mockUsageTracker', () => {
beforeEach(() => {
diff --git a/ui/src/state/slices/healthSlice.js b/ui/src/state/slices/healthSlice.js
new file mode 100644
index 00000000..1948899e
--- /dev/null
+++ b/ui/src/state/slices/healthSlice.js
@@ -0,0 +1,43 @@
+import { createSlice } from '@reduxjs/toolkit';
+
+const initialState = {
+ status: 'unknown',
+ isChecking: false,
+ lastChecked: null,
+ source: 'unknown',
+ error: null,
+};
+
+const healthSlice = createSlice({
+ name: 'observability',
+ initialState,
+ reducers: {
+ setChecking: (state, action) => {
+ state.isChecking = action.payload;
+ if (action.payload) {
+ state.error = null;
+ }
+ },
+ setResult: (state, action) => {
+ state.status = action.payload.status ?? 'unknown';
+ state.lastChecked = action.payload.checkedAt ?? null;
+ state.source = action.payload.source ?? 'unknown';
+ state.error = action.payload.errorMessage ?? null;
+ state.isChecking = false;
+ },
+ setError: (state, action) => {
+ state.error = action.payload;
+ state.isChecking = false;
+ },
+ },
+});
+
+export const { setChecking, setResult, setError } = healthSlice.actions;
+
+export const selectHealthStatus = (state) => state.observability.status;
+export const selectHealthSource = (state) => state.observability.source;
+export const selectLastChecked = (state) => state.observability.lastChecked;
+export const selectHealthError = (state) => state.observability.error;
+export const selectIsCheckingHealth = (state) => state.observability.isChecking;
+
+export default healthSlice.reducer;
diff --git a/ui/src/state/slices/healthSlice.test.js b/ui/src/state/slices/healthSlice.test.js
new file mode 100644
index 00000000..bed35aa3
--- /dev/null
+++ b/ui/src/state/slices/healthSlice.test.js
@@ -0,0 +1,70 @@
+import reducer, {
+ setChecking,
+ setResult,
+ setError,
+ selectHealthStatus,
+ selectHealthSource,
+ selectLastChecked,
+ selectHealthError,
+} from './healthSlice';
+
+describe('healthSlice', () => {
+ it('provides initial state', () => {
+ const state = reducer(undefined, { type: '@@INIT' });
+
+ expect(state).toEqual({
+ status: 'unknown',
+ isChecking: false,
+ lastChecked: null,
+ source: 'unknown',
+ error: null,
+ });
+ });
+
+ it('sets loading state', () => {
+ const state = reducer(undefined, setChecking(true));
+
+ expect(state.isChecking).toBe(true);
+ expect(state.error).toBeNull();
+ });
+
+ it('stores health result and metadata', () => {
+ const result = {
+ status: 'ok',
+ checkedAt: '2025-11-14T12:00:00Z',
+ source: 'api',
+ errorMessage: null,
+ };
+
+ const state = reducer(undefined, setResult(result));
+
+ expect(state.status).toBe('ok');
+ expect(state.lastChecked).toBe(result.checkedAt);
+ expect(state.source).toBe('api');
+ expect(state.error).toBeNull();
+ });
+
+ it('stores error state', () => {
+ const state = reducer(undefined, setError('fallo backend'));
+
+ expect(state.error).toBe('fallo backend');
+ expect(state.isChecking).toBe(false);
+ });
+
+ it('exposes selectors', () => {
+ const state = {
+ observability: {
+ status: 'degraded',
+ isChecking: false,
+ lastChecked: '2025-11-14T00:00:00Z',
+ source: 'mock',
+ error: null,
+ },
+ };
+
+ expect(selectHealthStatus(state)).toBe('degraded');
+ expect(selectHealthSource(state)).toBe('mock');
+ expect(selectLastChecked(state)).toBe('2025-11-14T00:00:00Z');
+ expect(selectHealthError(state)).toBeNull();
+ });
+});
diff --git a/ui/src/state/store.js b/ui/src/state/store.js
index 55c54620..10265359 100644
--- a/ui/src/state/store.js
+++ b/ui/src/state/store.js
@@ -1,11 +1,13 @@
import { configureStore } from '@reduxjs/toolkit';
import appConfigReducer from './slices/appConfigSlice';
import homeReducer from '@modules/home/state/homeSlice';
+import healthReducer from './slices/healthSlice';
export const store = configureStore({
reducer: {
appConfig: appConfigReducer,
home: homeReducer,
+ observability: healthReducer,
},
middleware: (getDefaultMiddleware) =>
getDefaultMiddleware({
diff --git a/ui/src/state/store.test.js b/ui/src/state/store.test.js
index a58d0d50..6382b693 100644
--- a/ui/src/state/store.test.js
+++ b/ui/src/state/store.test.js
@@ -7,6 +7,7 @@ describe('store', () => {
expect(state.appConfig).toBeDefined();
expect(state.home).toBeDefined();
+ expect(state.observability).toBeDefined();
});
it('dispatches actions without crashing', () => {