-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathMakefile
More file actions
108 lines (85 loc) · 4.44 KB
/
Makefile
File metadata and controls
108 lines (85 loc) · 4.44 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
# ── Core configuration ────────────────────────────────────────────────────────
.DEFAULT_GOAL := help
SHELL := /bin/bash
# Fail fast inside recipes: abort on error (-e), on undefined vars (-u),
# and on failures anywhere in a pipeline (pipefail).
.SHELLFLAGS := -eu -o pipefail -c
# Delete a half-written target if its recipe fails, so it never looks up to date.
.DELETE_ON_ERROR:
# Command-runner Makefile: no file products, so built-in suffix rules only slow us down.
MAKEFLAGS += --no-builtin-rules
.SUFFIXES:

# Python toolchain — all tools are taken from the project venv.
PYTHON ?= python3
VENV := .venv
PIP := $(VENV)/bin/pip
PYTEST := $(VENV)/bin/pytest
RUFF := $(VENV)/bin/ruff

# Cluster / deployment knobs (override on the command line, e.g. `make IMAGE_TAG=abc123 …`).
AWS_REGION ?= us-east-1
CLUSTER_NAME ?= ml-platform-prod
NAMESPACE ?= inference
IMAGE_TAG ?= latest
# Every target here is a command, not a file — declare them all phony so a
# stray file with the same name can never mask one.
# Fix: `test-integration` was defined below but missing from this list.
.PHONY: help install test test-integration lint format typecheck clean
.PHONY: docker-build-inference docker-build-training
.PHONY: helm-lint helm-diff deploy-staging deploy-prod
.PHONY: tf-plan-prod tf-apply-prod tf-plan-staging tf-apply-staging
.PHONY: kubeconfig port-forward-mlflow
# Self-documenting help: lists every target that carries a `## description`.
# Fix: the old grep|awk pipeline used `.*?` — a PCRE lazy quantifier that is
# undefined in the POSIX ERE dialect grep -E and awk actually use. One awk
# invocation with valid ERE does both the filtering and the formatting.
help: ## Show this help
	@awk 'BEGIN {FS = ":.*## "} /^[a-zA-Z_-]+:.*## / {printf "  \033[36m%-28s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
# ── Python ────────────────────────────────────────────────────────────────────
# One-shot dev bootstrap: venv, per-component runtime requirements, then the
# dev tools used by the test/lint/typecheck targets below. Re-running is safe;
# `python -m venv` on an existing venv leaves it in place and pip re-resolves.
# Dev tools are installed in a separate pip invocation on purpose — keep it
# that way so requirements-file pins resolve independently of tool versions.
install: ## Create venv and install all dev dependencies
	$(PYTHON) -m venv $(VENV)
	$(PIP) install --upgrade pip
	$(PIP) install -r src/inference/requirements.txt \
		-r src/training/requirements.txt \
		-r src/shared/requirements.txt
	$(PIP) install pytest pytest-cov ruff mypy
# Unit tests only — no AWS/Mongo needed (see `test-integration` for those).
# --tb=short keeps failure output readable in CI logs.
test: ## Run unit tests
	$(PYTEST) tests/unit/ -v --tb=short
# Runs only tests carrying the `integration` pytest marker (-m integration).
# Needs live backing services per the ## description — run with credentials configured.
test-integration: ## Run integration tests (requires real AWS/Mongo)
	$(PYTEST) tests/integration/ -v --tb=short -m integration
# Read-only lint pass; exits non-zero on violations. Use `make format` to auto-fix.
lint: ## Lint with ruff
	$(RUFF) check src/ tests/
# Two deliberate passes: `check --fix` applies lint autofixes (imports, etc.),
# then `format` normalises code layout. Keep this order.
format: ## Auto-fix with ruff
	$(RUFF) check --fix src/ tests/
	$(RUFF) format src/ tests/
# NOTE(review): invokes the venv mypy directly while the other tools go through
# variables (PYTEST, RUFF) — consider adding a MYPY variable for consistency.
typecheck: ## Type-check with mypy
	$(VENV)/bin/mypy src/
clean: ## Remove build artefacts and caches
rm -rf $(VENV) .pytest_cache .mypy_cache .ruff_cache \
__pycache__ src/**/__pycache__ tests/**/__pycache__ \
.coverage htmlcov/
# ── Docker ────────────────────────────────────────────────────────────────────
# Build the inference service image, tagged with IMAGE_TAG (default: latest).
# Build context is the repo root so the Dockerfile can COPY from src/.
docker-build-inference: ## Build inference Docker image
	docker build -t emox-inference:$(IMAGE_TAG) -f docker/inference/Dockerfile .
# Build the training image, tagged with IMAGE_TAG (default: latest).
# Build context is the repo root so the Dockerfile can COPY from src/.
docker-build-training: ## Build training Docker image
	docker build -t emox-training:$(IMAGE_TAG) -f docker/training/Dockerfile .
# ── Helm ─────────────────────────────────────────────────────────────────────
# Lint every chart under infra/helm/; `|| exit 1` makes the loop stop at the
# first failing chart, matching the behaviour of separate recipe lines.
helm-lint: ## Lint all Helm charts
	for chart in inference mlflow mongodb; do \
		helm lint infra/helm/$$chart || exit 1; \
	done
# Previews what `deploy-staging` would change in the live release, without
# applying anything. Needs the helm-diff plugin installed, per the ## note.
helm-diff: ## Show diff vs deployed release (requires helm-diff plugin)
	helm diff upgrade emox-inference infra/helm/inference \
		--namespace $(NAMESPACE) \
		--values infra/helm/inference/values.yaml
# Install-or-upgrade the inference release into $(NAMESPACE) and block (--wait)
# until the rollout is ready. `--set image.tag` takes precedence over the
# values file regardless of flag order.
deploy-staging: ## Deploy inference to staging
	helm upgrade --install emox-inference infra/helm/inference \
		--namespace $(NAMESPACE) \
		--values infra/helm/inference/values.yaml \
		--set image.tag=$(IMAGE_TAG) \
		--wait
# Production deploy: base values plus the values-prod.yaml overlay (order
# matters — later --values files override earlier ones), blocking until ready.
# Fix: IMAGE_TAG defaults to the mutable `latest` tag; refuse it for prod so a
# bare `make deploy-prod` cannot ship an unpinned image.
deploy-prod: ## Deploy inference to prod (requires prod kubeconfig)
	@if [ "$(IMAGE_TAG)" = "latest" ]; then \
		echo "ERROR: refusing to deploy mutable tag 'latest' to prod; set IMAGE_TAG=<immutable tag>" >&2; \
		exit 1; \
	fi
	helm upgrade --install emox-inference infra/helm/inference \
		--namespace $(NAMESPACE) \
		--set image.tag=$(IMAGE_TAG) \
		--values infra/helm/inference/values.yaml \
		--values infra/helm/inference/values-prod.yaml \
		--wait
# ── Terraform ─────────────────────────────────────────────────────────────────
# Read-only preview of prod infra changes. Runs in one shell so the `cd`
# applies to both terraform commands; init is safe to re-run.
tf-plan-prod: ## Plan prod infra
	cd infra/terraform/environments/prod \
		&& terraform init \
		&& terraform plan
# Applies prod infra changes (terraform prompts for confirmation interactively).
# Single shell so the `cd` covers both commands; init is safe to re-run.
tf-apply-prod: ## Apply prod infra
	cd infra/terraform/environments/prod \
		&& terraform init \
		&& terraform apply
# Read-only preview of staging infra changes; mirrors tf-plan-prod.
tf-plan-staging: ## Plan staging infra
	cd infra/terraform/environments/staging \
		&& terraform init \
		&& terraform plan
# Applies staging infra changes; mirrors tf-apply-prod.
tf-apply-staging: ## Apply staging infra
	cd infra/terraform/environments/staging \
		&& terraform init \
		&& terraform apply
# ── Cluster helpers ───────────────────────────────────────────────────────────
# Writes/refreshes local kubeconfig credentials for $(CLUSTER_NAME) in
# $(AWS_REGION). Override either variable to point at a different cluster.
kubeconfig: ## Refresh kubeconfig
	aws eks update-kubeconfig --region $(AWS_REGION) --name $(CLUSTER_NAME)
# Foreground port-forward; Ctrl-C to stop.
# NOTE(review): MLflow is addressed in the hard-coded `mlops` namespace, not
# $(NAMESPACE) — presumably intentional, but confirm it matches the deployment.
port-forward-mlflow: ## Port-forward MLflow UI to localhost:5000
	kubectl port-forward svc/mlflow 5000:5000 -n mlops