Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
6c41c89
feat(ai): update dataset & cleanliness
galagyy Oct 24, 2025
4cab101
feat(ai): clean endpoints
galagyy Oct 24, 2025
b8eb86f
feat(ai): update documentation
galagyy Oct 24, 2025
78d7bd4
fix(ai): update model version
galagyy Oct 24, 2025
934cc7a
fix(ai): remove hard coded path
galagyy Oct 24, 2025
b664bf8
fix(ai): keys & documentation
galagyy Oct 24, 2025
183a2f8
fix(ai): change endpoint from GET to POST
galagyy Oct 24, 2025
4d92b22
fix(ai): update career_routes for PEP8
galagyy Oct 24, 2025
9b7c5c7
fix(ai): update files to PEP8 standard
galagyy Oct 24, 2025
35da1a4
fix(ai): fix small convention issues
galagyy Oct 24, 2025
f6c11af
merge: add AI changes
galagyy Oct 24, 2025
4c90d33
feat(backend): add class descriptions & update DTOs
galagyy Oct 24, 2025
117ab22
feat(backend): remove unused methods
galagyy Oct 24, 2025
740b0ec
feat(backend): college recommendation service
galagyy Oct 24, 2025
47c1dca
misc: add database version scripts
galagyy Oct 24, 2025
c24dcaa
merge: updated backend
galagyy Oct 24, 2025
b83c138
feat(backend): updated python endpoints
galagyy Oct 24, 2025
6dcc5ec
fix(ai): fixed request header
galagyy Oct 24, 2025
5a961c1
fix(frontend): rewire questionnaire
galagyy Oct 24, 2025
68a1be2
fix(backend): switch questionnaire flag
galagyy Oct 24, 2025
fbbcb79
fix(frontend): token verification
galagyy Oct 24, 2025
96f517f
feat(frontend): add proper documentation & styling
galagyy Oct 24, 2025
5b47a69
fix(ai): retrained random forest model
Om-Kasar Oct 24, 2025
01fad13
feat(frontend): update code to use response
galagyy Oct 24, 2025
148b7fa
chore: nitpick backend & ai errors
galagyy Oct 24, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Empty file removed ai/careers/app/__init__.py
Empty file.
239 changes: 126 additions & 113 deletions ai/careers/app/config.py

Large diffs are not rendered by default.

168 changes: 104 additions & 64 deletions ai/careers/app/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
for intelligent career path recommendations based on user profiles.

Author: Aspira AI Team
Version: 2.0.0
Version: 4.0.0
License: GNU aGPL v3.0
"""

Expand All @@ -22,6 +22,9 @@
from fastapi.exceptions import RequestValidationError
from starlette.exceptions import HTTPException as StarletteHTTPException

from app.config import settings
from app.routes.career_routes import router as career_router

# Configure logging
LOG_DIR = Path("logs")
LOG_DIR.mkdir(exist_ok=True)
Expand All @@ -43,73 +46,101 @@

@asynccontextmanager
async def lifespan(app: FastAPI):
"""
Lifespan context manager for startup and shutdown events.
"""Lifespan context manager for startup and shutdown events.

Pre-loads the ML model to avoid cold starts and improve response times.
"""
logger.info("="*70)
logger.info("=" * 70)
logger.info("ASPIRA AI CAREER RECOMMENDATION SYSTEM")
logger.info("ML-Powered Random Forest Classifier v2.0.0")
logger.info("="*70)
logger.info("=" * 70)

'''
# Create necessary directories
LOG_DIR.mkdir(exist_ok=True)
Path("app/models").mkdir(exist_ok=True)
'''

# Pre-load ML model at startup
try:
from app.routes.career_routes import get_model
logger.info("Pre-loading Random Forest model...")

start_time = time.time()
model = get_model()
load_time = time.time() - start_time

logger.info(f"✓ Model loaded successfully in {load_time:.2f}s")
logger.info(f" - Model version: {model.config.get('model_version', '2.0.0')}")
logger.info(f" - Careers available: {len(model.label_encoder.classes_)}")
logger.info(
f" - Model version: "
f"{model.config.get('model_version', '2.0.0')}"
)
logger.info(
f" - Careers available: "
f"{len(model.label_encoder.classes_)}"
)
logger.info(f" - Features: {len(model.feature_columns)}")
logger.info(f" - Trees in forest: {model.model.n_estimators}")

# Handle both calibrated and non-calibrated models
try:
# If it's a CalibratedClassifierCV, access the base estimator
# CalibratedClassifierCV uses base_estimator (older sklearn)
# or estimator_ (newer sklearn)
if hasattr(model.model, 'base_estimator'):
n_estimators = model.model.base_estimator.n_estimators
elif hasattr(model.model, 'estimator_'):
n_estimators = model.model.estimator_.n_estimators
elif hasattr(model.model, 'n_estimators'):
n_estimators = model.model.n_estimators
else:
n_estimators = "N/A"
logger.info(f" - Trees in forest: {n_estimators}")
except AttributeError:
logger.info(f" - Model type: {type(model.model).__name__}")

logger.info("✓ API ready to accept requests")

# Store model reference in app state
app.state.model_loaded = True
app.state.model_version = model.config.get('model_version', '2.0.0')
app.state.model_version = model.config.get(
'model_version', '2.0.0'
)
app.state.careers_count = len(model.label_encoder.classes_)
app.state.features_count = len(model.feature_columns)

except FileNotFoundError as e:
logger.error(f"✗ Model file not found: {str(e)}")
logger.error(" Please ensure career_recommender.pkl exists in the models directory")
logger.error(
" Please ensure career_recommender.pkl exists in the "
"models directory"
)
app.state.model_loaded = False
app.state.error_message = "Model file not found"

except Exception as e:
logger.error(f"✗ Failed to pre-load model: {str(e)}", exc_info=True)
logger.warning(" API will attempt to load model on first request")
logger.warning(
" API will attempt to load model on first request"
)
app.state.model_loaded = False
app.state.error_message = str(e)
logger.info("="*70)

logger.info("=" * 70)
logger.info("Server startup complete")
logger.info("="*70 + "\n")
logger.info("=" * 70 + "\n")

yield

# Cleanup on shutdown
logger.info("\n" + "="*70)
logger.info("\n" + "=" * 70)
logger.info("Shutting down Aspira AI Career Recommendation System")
logger.info("="*70)
logger.info("=" * 70)


# ============================================================================
# FASTAPI APPLICATION INITIALIZATION
# ============================================================================

from app.config import settings

app = FastAPI(
title=settings.API_TITLE,
description=settings.API_DESCRIPTION,
Expand Down Expand Up @@ -161,20 +192,20 @@ async def add_process_time_header(request: Request, call_next):
async def log_requests(request: Request, call_next):
    """Log every request and its response status with elapsed wall time."""
    started = time.time()

    # Incoming request line.
    logger.info(f"→ {request.method} {request.url.path}")

    response = await call_next(request)

    # Outgoing response line, with the duration reported in milliseconds.
    duration = time.time() - started
    logger.info(
        f"← {response.status_code} {request.url.path} "
        f"({duration*1000:.2f}ms)"
    )

    return response


Expand All @@ -183,18 +214,22 @@ async def log_requests(request: Request, call_next):
# ============================================================================

@app.exception_handler(RequestValidationError)
async def validation_exception_handler(request: Request, exc: RequestValidationError):
async def validation_exception_handler(
request: Request, exc: RequestValidationError
):
"""Handle Pydantic validation errors with detailed messages."""
logger.warning(f"Validation error on {request.url.path}: {exc.errors()}")

logger.warning(
f"Validation error on {request.url.path}: {exc.errors()}"
)

errors = []
for error in exc.errors():
errors.append({
"field": " -> ".join(str(loc) for loc in error["loc"]),
"message": error["msg"],
"type": error["type"]
})

return JSONResponse(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
content={
Expand All @@ -207,10 +242,14 @@ async def validation_exception_handler(request: Request, exc: RequestValidationE


@app.exception_handler(StarletteHTTPException)
async def http_exception_handler(request: Request, exc: StarletteHTTPException):
async def http_exception_handler(
request: Request, exc: StarletteHTTPException
):
"""Handle HTTP exceptions with consistent format."""
logger.error(f"HTTP {exc.status_code} on {request.url.path}: {exc.detail}")

logger.error(
f"HTTP {exc.status_code} on {request.url.path}: {exc.detail}"
)

return JSONResponse(
status_code=exc.status_code,
content={
Expand All @@ -228,7 +267,7 @@ async def general_exception_handler(request: Request, exc: Exception):
f"Unexpected error on {request.url.path}: {str(exc)}",
exc_info=True
)

return JSONResponse(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
content={
Expand All @@ -243,8 +282,6 @@ async def general_exception_handler(request: Request, exc: Exception):
# ROUTE REGISTRATION
# ============================================================================

from app.routes.career_routes import router as career_router

app.include_router(career_router)


Expand All @@ -254,9 +291,8 @@ async def general_exception_handler(request: Request, exc: Exception):

@app.get("/", tags=["Root"])
async def root() -> Dict[str, Any]:
"""
Root endpoint providing service information and status.

"""Root endpoint providing service information and status.

Returns basic information about the API service, version,
and available endpoints.
"""
Expand Down Expand Up @@ -285,15 +321,19 @@ async def root() -> Dict[str, Any]:

@app.get("/health", tags=["Health"])
async def health_check() -> Dict[str, Any]:
"""
Health check endpoint for monitoring and load balancers.

"""Health check endpoint for monitoring and load balancers.

Returns the current health status of the service and model.
Used by Docker healthcheck and orchestration systems.
"""
model_status = "healthy" if getattr(app.state, 'model_loaded', False) else "unhealthy"
overall_status = "healthy" if model_status == "healthy" else "degraded"

model_status = (
"healthy" if getattr(app.state, 'model_loaded', False)
else "unhealthy"
)
overall_status = (
"healthy" if model_status == "healthy" else "degraded"
)

health_info = {
"status": overall_status,
"service": "aspira-career-recommendation",
Expand All @@ -305,15 +345,17 @@ async def health_check() -> Dict[str, Any]:
},
"timestamp": time.time()
}

if not getattr(app.state, 'model_loaded', False):
health_info["error"] = getattr(app.state, 'error_message', "Model not loaded")

health_info["error"] = getattr(
app.state, 'error_message', "Model not loaded"
)

status_code = (
status.HTTP_200_OK if overall_status == "healthy"
status.HTTP_200_OK if overall_status == "healthy"
else status.HTTP_503_SERVICE_UNAVAILABLE
)

return JSONResponse(
status_code=status_code,
content=health_info
Expand All @@ -322,9 +364,8 @@ async def health_check() -> Dict[str, Any]:

@app.get("/version", tags=["Root"])
async def version_info() -> Dict[str, Any]:
"""
Detailed version information endpoint.

"""Detailed version information endpoint.

Returns comprehensive version and configuration details
about the service and ML model.
"""
Expand Down Expand Up @@ -353,9 +394,8 @@ async def version_info() -> Dict[str, Any]:

@app.get("/status", tags=["Root"])
async def detailed_status() -> Dict[str, Any]:
"""
Detailed service status endpoint.

"""Detailed service status endpoint.

Provides comprehensive status information including
uptime, configuration, and operational metrics.
"""
Expand Down Expand Up @@ -395,17 +435,17 @@ async def detailed_status() -> Dict[str, Any]:

if __name__ == "__main__":
    import uvicorn

    # Announce the effective runtime configuration before handing
    # control to uvicorn.
    logger.info("Starting Aspira AI Career Recommendation System")
    logger.info(f"Environment: {settings.ENV}")
    logger.info(f"Debug mode: {settings.DEBUG}")
    logger.info(f"Log level: {settings.LOG_LEVEL}")

    # Development entry point; production deployments are expected to
    # run uvicorn externally against "app.main:app".
    server_options = {
        "host": "0.0.0.0",
        "port": 8000,
        "reload": settings.DEBUG,
        "log_level": settings.LOG_LEVEL.lower(),
        "access_log": True,
    }
    uvicorn.run("app.main:app", **server_options)
Loading