-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathMakefile
More file actions
1023 lines (877 loc) · 43 KB
/
Makefile
File metadata and controls
1023 lines (877 loc) · 43 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# ==================================================================================
# Makefile for OpenProcessor - Visual AI Processing Engine
# ==================================================================================
# This Makefile provides convenient shortcuts for common development tasks
# with unified service management.
# ==================================================================================
# Default shell
# bash (not sh) is required: recipes below use ${VAR:-default} expansions,
# `case` patterns, and `[ ... ]` tests written for bash.
SHELL := /bin/bash
# Variables
# Service names as declared in the docker compose file.
COMPOSE := docker compose
API_SERVICE := yolo-api
TRITON_SERVICE := triton-server
OPENSEARCH_SERVICE := opensearch
BENCHMARK_DIR := benchmarks
SCRIPTS_DIR := scripts
# Port configurations
# Host-side ports used by the curl/browser targets below.
# NOTE(review): presumably these mirror the ports published in the compose
# file — verify there if a target cannot connect.
API_PORT := 4603
TRITON_HTTP_PORT := 4600
TRITON_GRPC_PORT := 4601
TRITON_METRICS_PORT := 4602
PROMETHEUS_PORT := 4604
GRAFANA_PORT := 4605
LOKI_PORT := 4606
OPENSEARCH_PORT := 4607
OPENSEARCH_DASH_PORT := 4608
# Default target
# A bare `make` prints the help screen instead of running anything.
.DEFAULT_GOAL := help
# ==================================================================================
# Help
# ==================================================================================
.PHONY: help
# Self-documenting help: the grep/awk pipeline scans every parsed makefile
# ($(MAKEFILE_LIST)) for targets annotated with a trailing `## description`
# and prints them as a two-column, color-highlighted list.
help: ## Show this help message
	@echo "==================================================================================="
	@echo "OpenProcessor - Visual AI Processing Engine"
	@echo "==================================================================================="
	@echo ""
	@echo "Available targets:"
	@echo ""
	@grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "  \033[36m%-25s\033[0m %s\n", $$1, $$2}'
	@echo ""
	@echo "Quick Start:"
	@echo "  make up              # Start all services"
	@echo "  make status          # Check service status"
	@echo "  make bench-quick     # Run quick benchmark"
	@echo "  make logs            # View all logs"
	@echo ""
# ==================================================================================
# Service Management
# ==================================================================================
.PHONY: up
# Brings the full compose stack up detached; services continue starting in the
# background, hence the pointer to `make status`.
up: ## Start all services (Triton + API + Monitoring + OpenSearch)
	@echo "Starting all services..."
	$(COMPOSE) up -d
	@echo ""
	@echo "Services starting. Check status with: make status"
	@echo "API available at: http://localhost:$(API_PORT)"
	@echo "Grafana dashboard: http://localhost:$(GRAFANA_PORT) (admin/admin)"
.PHONY: down
down: ## Stop all services
	@echo "Stopping all services..."
	$(COMPOSE) down
.PHONY: restart
restart: ## Restart all services
	@echo "Restarting all services..."
	$(COMPOSE) restart
	@echo "Services restarted. Check status with: make status"
.PHONY: restart-triton
# Restart only the inference server, e.g. after exporting new models into the
# model repository.
restart-triton: ## Restart only Triton server (after model changes)
	@echo "Restarting Triton server..."
	$(COMPOSE) restart $(TRITON_SERVICE)
	# Give Triton a moment to begin loading models before polling status.
	@sleep 5
	@echo "Triton restarted. Checking model status..."
	@$(MAKE) status
.PHONY: restart-api
restart-api: ## Restart only API service
	@echo "Restarting API service..."
	$(COMPOSE) restart $(API_SERVICE)
.PHONY: build
build: ## Build all containers
	@echo "Building containers..."
	$(COMPOSE) build
.PHONY: rebuild
# Use when a cached layer is stale (e.g. base image or apt packages changed).
rebuild: ## Rebuild containers without cache
	@echo "Rebuilding containers (no cache)..."
	$(COMPOSE) build --no-cache
# ==================================================================================
# Logs and Monitoring
# ==================================================================================
.PHONY: logs
# All `logs*` targets follow (-f); interrupt with Ctrl-C.
logs: ## Follow logs from all services
	$(COMPOSE) logs -f
.PHONY: logs-triton
logs-triton: ## Follow Triton server logs
	$(COMPOSE) logs -f $(TRITON_SERVICE)
.PHONY: logs-api
logs-api: ## Follow API service logs
	$(COMPOSE) logs -f $(API_SERVICE)
.PHONY: logs-opensearch
logs-opensearch: ## Follow OpenSearch logs
	$(COMPOSE) logs -f $(OPENSEARCH_SERVICE)
.PHONY: status
# Delegates the actual health checks to the project script.
status: ## Check health of all services
	@bash $(SCRIPTS_DIR)/openprocessor.sh status
.PHONY: health
health: status ## Alias for status
.PHONY: ps
ps: ## Show running containers
	$(COMPOSE) ps
# ==================================================================================
# Testing
# ==================================================================================
.PHONY: download-test-images
# Fetches the two canonical Ultralytics sample images into test_images/,
# skipping files that already exist. curl uses -f so an HTTP error (404, 5xx)
# fails the target instead of silently saving an HTML error page as a .jpg;
# on failure the partial file is removed so a rerun retries the download.
download-test-images: ## Download standard test images (bus.jpg, zidane.jpg from Ultralytics)
	@mkdir -p test_images
	@if [ ! -f test_images/bus.jpg ]; then \
		echo "Downloading bus.jpg..."; \
		curl -sfL https://ultralytics.com/images/bus.jpg -o test_images/bus.jpg \
			|| { rm -f test_images/bus.jpg; echo "ERROR: failed to download bus.jpg"; exit 1; }; \
	else \
		echo "bus.jpg already exists"; \
	fi
	@if [ ! -f test_images/zidane.jpg ]; then \
		echo "Downloading zidane.jpg..."; \
		curl -sfL https://ultralytics.com/images/zidane.jpg -o test_images/zidane.jpg \
			|| { rm -f test_images/zidane.jpg; echo "ERROR: failed to download zidane.jpg"; exit 1; }; \
	else \
		echo "zidane.jpg already exists"; \
	fi
	@echo "Test images ready in test_images/"
.PHONY: test-detect
# Each test-* target below issues one curl against the running API and pipes
# the JSON response through jq; they require `make up` and the relevant test
# images (see download-test-images) to be in place first.
test-detect: ## Test object detection endpoint
	curl -X POST http://localhost:$(API_PORT)/detect -F "image=@test_images/bus.jpg" | jq
.PHONY: test-faces-quick
test-faces-quick: ## Quick face recognition test (single image)
	curl -X POST http://localhost:$(API_PORT)/v1/faces/recognize -F "image=@test_images/zidane.jpg" | jq
.PHONY: test-embed
test-embed: ## Test embedding endpoints
	curl -X POST http://localhost:$(API_PORT)/embed/image -F "image=@test_images/bus.jpg" | jq
.PHONY: test-ocr
# NOTE(review): relies on test_images/ocr-synthetic/ existing; this directory
# is not created by download-test-images — confirm how it is provisioned.
test-ocr: ## Test OCR endpoint
	curl -X POST http://localhost:$(API_PORT)/ocr/predict -F "image=@test_images/ocr-synthetic/caution_sign.jpg" | jq
.PHONY: test-analyze
test-analyze: ## Test combined analysis
	curl -X POST http://localhost:$(API_PORT)/analyze -F "image=@test_images/zidane.jpg" | jq
.PHONY: test-search
test-search: ## Test image search
	curl -X POST http://localhost:$(API_PORT)/search/image -F "image=@test_images/bus.jpg" | jq
.PHONY: test-ingest
test-ingest: ## Test image ingestion
	curl -X POST http://localhost:$(API_PORT)/ingest -F "image=@test_images/bus.jpg" | jq
.PHONY: test-verify
# 1:1 verification of an image against itself — expected to match.
test-verify: ## Test face verification (1:1 comparison)
	curl -X POST http://localhost:$(API_PORT)/faces/verify -F "image1=@test_images/zidane.jpg" -F "image2=@test_images/zidane.jpg" | jq
.PHONY: test-embed-text
test-embed-text: ## Test text embedding
	curl -X POST http://localhost:$(API_PORT)/embed/text -H "Content-Type: application/json" -d '{"text": "a photo of a bus"}' | jq
.PHONY: test-embed-boxes
# Boxes are normalized [x1,y1,x2,y2] coordinates; jq trims the response to a
# compact summary instead of dumping full embedding vectors.
test-embed-boxes: ## Test bounding box crop embeddings
	curl -X POST http://localhost:$(API_PORT)/embed/boxes -F "image=@test_images/zidane.jpg" -F 'boxes=[[0.09,0.27,0.87,0.98],[0.57,0.05,0.89,0.98]]' | jq '{num_boxes: .num_boxes, status: .status, embedding_dims: (.boxes[0].embedding | length), total_time_ms: .total_time_ms}'
.PHONY: test-analyze-full
# Exercises the full pipeline (detection + faces + OCR + all embeddings); jq
# reduces embeddings to their lengths to keep output readable.
test-analyze-full: ## Test combined analysis with all embeddings
	curl -X POST "http://localhost:$(API_PORT)/analyze?include_embedding=true&include_face_embeddings=true" -F "image=@test_images/zidane.jpg" | jq '{status, image, num_detections, num_faces, global_embedding_dims: (.global_embedding | length), embedding_norm, detection_embeddings: [.detections[] | {class_name, confidence, embedding_dims: (.embedding | length)}], face_embeddings: [.faces[] | {score, embedding_dims: (.embedding | length)}], ocr_texts: .ocr.num_texts, total_time_ms}'
.PHONY: test-all
# Runs the individual endpoint tests one after another via recursive $(MAKE)
# (kept as recipe lines rather than prerequisites so the order is preserved
# even under `make -j`). test-faces is defined in the Integration Tests
# section further down this file.
test-all: ## Run all endpoint tests
	@echo "Testing all endpoints..."
	$(MAKE) test-detect
	$(MAKE) test-faces
	$(MAKE) test-verify
	$(MAKE) test-embed
	$(MAKE) test-embed-text
	$(MAKE) test-ocr
	$(MAKE) test-analyze
	$(MAKE) test-ingest
	$(MAKE) test-search
.PHONY: test-api-health
# -s silences curl, -f makes HTTP errors return non-zero so the ||-branch runs.
test-api-health: ## Test API health
	@echo "Testing API health (port $(API_PORT))..."
	@curl -sf http://localhost:$(API_PORT)/health && echo " OK" || echo " FAILED"
.PHONY: test-inference
test-inference: ## Test inference on all tracks (shell script)
	@echo "Testing inference on all tracks..."
	@bash tests/test_inference.sh
.PHONY: test-integration
# The following targets run Python test scripts inside the API container,
# where the project dependencies and Triton connectivity are available.
test-integration: ## Run integration tests
	@echo "Running integration tests..."
	$(COMPOSE) exec $(API_SERVICE) python /app/scripts/test_integration.py
.PHONY: test-patch
test-patch: ## Verify End2End TRT NMS patch is applied
	@echo "Verifying End2End TensorRT NMS patch..."
	$(COMPOSE) exec $(API_SERVICE) python /app/tests/test_end2end_patch.py
.PHONY: test-onnx
test-onnx: ## Test ONNX End2End model locally (bypasses Triton)
	@echo "Testing ONNX End2End model locally..."
	$(COMPOSE) exec $(API_SERVICE) python /app/tests/test_onnx_end2end.py
.PHONY: test-shared-client
test-shared-client: ## Test shared vs per-request client performance
	@echo "Testing shared vs per-request client..."
	@bash tests/test_shared_vs_per_request.sh
# ==================================================================================
# Benchmarking
# ==================================================================================
# Benchmark configuration
# Overridable from the command line, e.g. `make bench-faces BENCH_REQUESTS=5000`.
BENCH_DURATION := 30s
BENCH_CLIENTS := 32
BENCH_REQUESTS := 1000
TEST_IMAGE := test_images/bus.jpg
.PHONY: bench-detect
# Load-tests /detect with wrk using the checked-in Lua request script
# ($(BENCHMARK_DIR)/scripts/detect.lua), which builds the multipart POST body.
# (A previous version also generated an unused /tmp/bench_detect.lua — that
# dead code has been removed; wrk only ever read the repo script.)
bench-detect: ## Benchmark detection endpoint with wrk
	@echo "Benchmarking /detect ($(BENCH_DURATION), $(BENCH_CLIENTS) clients)..."
	@which wrk > /dev/null 2>&1 || (echo "Install wrk: apt install wrk"; exit 1)
	wrk -t4 -c$(BENCH_CLIENTS) -d$(BENCH_DURATION) -s $(BENCHMARK_DIR)/scripts/detect.lua http://localhost:$(API_PORT)/detect
.PHONY: bench-faces
# The four ab-based benchmarks below all guard for ApacheBench being installed
# (previously only bench-faces did, so the others failed with a confusing
# "ab: command not found").
# NOTE(review): ab -p posts the raw JPEG bytes with Content-Type image/jpeg,
# not multipart/form-data — confirm these endpoints accept raw image bodies.
bench-faces: ## Benchmark face recognition with ab
	@echo "Benchmarking /faces/recognize ($(BENCH_REQUESTS) requests, $(BENCH_CLIENTS) concurrent)..."
	@which ab > /dev/null 2>&1 || (echo "Install ab: apt install apache2-utils"; exit 1)
	ab -n $(BENCH_REQUESTS) -c $(BENCH_CLIENTS) -p $(TEST_IMAGE) -T "image/jpeg" \
		http://localhost:$(API_PORT)/faces/recognize
.PHONY: bench-embed
bench-embed: ## Benchmark image embedding with ab
	@echo "Benchmarking /embed/image ($(BENCH_REQUESTS) requests, $(BENCH_CLIENTS) concurrent)..."
	@which ab > /dev/null 2>&1 || (echo "Install ab: apt install apache2-utils"; exit 1)
	ab -n $(BENCH_REQUESTS) -c $(BENCH_CLIENTS) -p $(TEST_IMAGE) -T "image/jpeg" \
		http://localhost:$(API_PORT)/embed/image
.PHONY: bench-ingest
bench-ingest: ## Benchmark image ingestion with ab
	@echo "Benchmarking /ingest ($(BENCH_REQUESTS) requests, $(BENCH_CLIENTS) concurrent)..."
	@which ab > /dev/null 2>&1 || (echo "Install ab: apt install apache2-utils"; exit 1)
	ab -n $(BENCH_REQUESTS) -c $(BENCH_CLIENTS) -p $(TEST_IMAGE) -T "image/jpeg" \
		http://localhost:$(API_PORT)/ingest
.PHONY: bench-search
bench-search: ## Benchmark image search with ab
	@echo "Benchmarking /search/image ($(BENCH_REQUESTS) requests, $(BENCH_CLIENTS) concurrent)..."
	@which ab > /dev/null 2>&1 || (echo "Install ab: apt install apache2-utils"; exit 1)
	ab -n $(BENCH_REQUESTS) -c $(BENCH_CLIENTS) -p $(TEST_IMAGE) -T "image/jpeg" \
		http://localhost:$(API_PORT)/search/image
.PHONY: bench-quick
# Smoke-level latency check: issues exactly ONE curl per endpoint and prints
# the total request time. The old help text claimed "1000 requests each",
# which described bench-* targets above, not this one — fixed.
bench-quick: ## Quick latency check of all endpoints (one timed request each)
	@echo "==================================================================================="
	@echo "Quick Benchmark - All Endpoints"
	@echo "==================================================================================="
	@echo ""
	@echo "--- /detect ---"
	@curl -s -w "Time: %{time_total}s\n" -o /dev/null -X POST http://localhost:$(API_PORT)/detect -F "image=@$(TEST_IMAGE)"
	@echo ""
	@echo "--- /faces/recognize ---"
	@curl -s -w "Time: %{time_total}s\n" -o /dev/null -X POST http://localhost:$(API_PORT)/faces/recognize -F "image=@$(TEST_IMAGE)"
	@echo ""
	@echo "--- /embed/image ---"
	@curl -s -w "Time: %{time_total}s\n" -o /dev/null -X POST http://localhost:$(API_PORT)/embed/image -F "image=@$(TEST_IMAGE)"
	@echo ""
	@echo "--- /ocr/predict ---"
	@curl -s -w "Time: %{time_total}s\n" -o /dev/null -X POST http://localhost:$(API_PORT)/ocr/predict -F "image=@$(TEST_IMAGE)"
	@echo ""
	@echo "Quick benchmark complete."
.PHONY: bench-results
# Lists the ten most recently modified result files; the || branch covers a
# missing/empty results directory.
bench-results: ## Show recent benchmark results
	@echo "Recent benchmark results:"
	@ls -lt $(BENCHMARK_DIR)/results/ 2>/dev/null | head -n 10 || echo "No results yet"
.PHONY: bench-python
bench-python: ## Run Python benchmark script with httpx
	@echo "Running Python benchmark..."
	$(COMPOSE) exec $(API_SERVICE) python /app/benchmarks/scripts/benchmark.py
# ==================================================================================
# Model Management
# ==================================================================================
.PHONY: models-list
models-list: ## List loaded Triton models
	curl -s http://localhost:$(API_PORT)/models | jq
.PHONY: models-status
models-status: ## Check model health
	curl -s http://localhost:$(API_PORT)/health | jq
.PHONY: models-reload
# Reload by restarting the Triton container. The previous recipe exec'd a
# second `tritonserver` process inside the already-running container, which
# cannot bind the serving ports and does not affect the live instance; it
# also hard-coded `docker compose`/`triton-server` instead of the Makefile
# variables.
models-reload: ## Reload all models (restarts Triton server)
	@echo "Reloading all models by restarting Triton..."
	$(COMPOSE) restart $(TRITON_SERVICE)
# ==================================================================================
# Development and Testing
# ==================================================================================
.PHONY: shell-api
# Interactive bash sessions inside the running containers (require `make up`).
shell-api: ## Open shell in API container
	$(COMPOSE) exec $(API_SERVICE) /bin/bash
.PHONY: shell-triton
shell-triton: ## Open shell in Triton container
	$(COMPOSE) exec $(TRITON_SERVICE) /bin/bash
.PHONY: shell-opensearch
shell-opensearch: ## Open shell in OpenSearch container
	$(COMPOSE) exec $(OPENSEARCH_SERVICE) /bin/bash
.PHONY: profile-api
# Profiles the uvicorn process inside the API container with py-spy and copies
# the resulting flamegraph (or speedscope file) back to the host.
# Tunables (env/CLI): DURATION (seconds, default 30) and OUTPUT (filename,
# default profile.svg; a *.speedscope suffix switches the py-spy format).
profile-api: ## Profile API with py-spy (DURATION=30, OUTPUT=profile.svg)
	@echo "======================================"
	@echo "FastAPI Performance Profiling"
	@echo "======================================"
	@DURATION=$${DURATION:-30}; OUTPUT=$${OUTPUT:-profile.svg}; \
	echo "Duration: $${DURATION} seconds"; \
	echo "Output: $${OUTPUT}"; \
	CONTAINER_PID=$$($(COMPOSE) exec $(API_SERVICE) pgrep -f "uvicorn src.main:app" | head -1 | tr -d '[:space:]'); \
	if [ -z "$$CONTAINER_PID" ]; then \
		echo "ERROR: Could not find uvicorn process"; \
		exit 1; \
	fi; \
	echo "Found process: PID $$CONTAINER_PID"; \
	FORMAT="flamegraph"; \
	case "$$OUTPUT" in *.speedscope) FORMAT="speedscope";; esac; \
	echo "Generating $$FORMAT visualization..."; \
	$(COMPOSE) exec $(API_SERVICE) py-spy record --pid $$CONTAINER_PID --duration $$DURATION --rate 100 --format $$FORMAT --output /tmp/$$OUTPUT --subprocesses; \
	docker cp $$(docker compose ps -q $(API_SERVICE)):/tmp/$$OUTPUT ./$$OUTPUT; \
	echo "Profile saved to: $$OUTPUT"
.PHONY: resize-images
# Runs on the HOST using the local .venv (not inside a container).
# Tunables: SOURCE_DIR (default test_images), OUTPUT_DIR (default
# test_images_resized), SIZE (default 640).
resize-images: ## Resize images for testing (SOURCE_DIR, OUTPUT_DIR, SIZE)
	@echo "Resizing images..."
	@. .venv/bin/activate && python scripts/resize_images.py \
		--source $${SOURCE_DIR:-test_images} \
		--output $${OUTPUT_DIR:-test_images_resized} \
		--size $${SIZE:-640}
.PHONY: test-create-images
# SOURCE is a Make variable (passed as `make test-create-images SOURCE=...`),
# validated before the script runs.
test-create-images: ## Generate test images in various sizes (SOURCE required)
	@echo "Creating test images..."
	@if [ -z "$(SOURCE)" ]; then \
		echo "Error: SOURCE parameter required"; \
		echo "Example: make test-create-images SOURCE=/path/to/image.jpg"; \
		exit 1; \
	fi
	python tests/create_test_images.py --source "$(SOURCE)"
# ==================================================================================
# Model Management API (Dynamic Upload & Export)
# ==================================================================================
.PHONY: api-upload-model
# Uploads a .pt model through the API. NAME is optional; when set it is sent
# as the `triton_name` form field. $$NAME_ARG is deliberately left unquoted in
# the curl call so it word-splits into the `-F triton_name=...` arguments (or
# nothing) — a NAME containing spaces is therefore not supported.
api-upload-model: ## Upload a model via API (usage: make api-upload-model MODEL=/path/to/model.pt [NAME=custom_name])
	@if [ -z "$(MODEL)" ]; then \
		echo "Error: MODEL parameter required"; \
		echo "Usage: make api-upload-model MODEL=/path/to/model.pt [NAME=custom_name]"; \
		echo ""; \
		echo "Examples:"; \
		echo "  make api-upload-model MODEL=./my_model.pt"; \
		echo "  make api-upload-model MODEL=./my_model.pt NAME=vehicle_detector"; \
		exit 1; \
	fi
	@NAME_ARG=""; \
	if [ -n "$(NAME)" ]; then NAME_ARG="-F triton_name=$(NAME)"; fi; \
	echo "Uploading model $(MODEL) via API..."; \
	curl -s -X POST http://localhost:$(API_PORT)/models/upload \
		-F "file=@$(MODEL)" \
		$$NAME_ARG | jq '.'
.PHONY: api-export-status
# Each target below validates its required Make variable (ID/NAME) up front
# and then issues a single curl against the model-management API.
api-export-status: ## Check export task status (usage: make api-export-status ID=task_id)
	@if [ -z "$(ID)" ]; then \
		echo "Error: ID parameter required"; \
		echo "Usage: make api-export-status ID=task_id"; \
		exit 1; \
	fi
	@curl -s http://localhost:$(API_PORT)/models/export/$(ID) | jq '.'
.PHONY: api-exports
api-exports: ## List all export tasks
	@echo "Export tasks:"
	@curl -s http://localhost:$(API_PORT)/models/exports | jq '.'
.PHONY: api-models
api-models: ## List all models in Triton repository
	@echo "Models in Triton repository:"
	@curl -s http://localhost:$(API_PORT)/models/ | jq '.'
.PHONY: api-load-model
api-load-model: ## Load a model into Triton (usage: make api-load-model NAME=model_name)
	@if [ -z "$(NAME)" ]; then \
		echo "Error: NAME parameter required"; \
		echo "Usage: make api-load-model NAME=model_name"; \
		exit 1; \
	fi
	@echo "Loading model $(NAME) into Triton..."
	@curl -s -X POST http://localhost:$(API_PORT)/models/$(NAME)/load | jq '.'
.PHONY: api-unload-model
api-unload-model: ## Unload a model from Triton (usage: make api-unload-model NAME=model_name)
	@if [ -z "$(NAME)" ]; then \
		echo "Error: NAME parameter required"; \
		echo "Usage: make api-unload-model NAME=model_name"; \
		exit 1; \
	fi
	@echo "Unloading model $(NAME) from Triton..."
	@curl -s -X POST http://localhost:$(API_PORT)/models/$(NAME)/unload | jq '.'
.PHONY: api-delete-model
# Destructive: removes the model from the repository via HTTP DELETE.
api-delete-model: ## Delete a model from repository (usage: make api-delete-model NAME=model_name)
	@if [ -z "$(NAME)" ]; then \
		echo "Error: NAME parameter required"; \
		echo "Usage: make api-delete-model NAME=model_name"; \
		exit 1; \
	fi
	@echo "Deleting model $(NAME)..."
	@curl -s -X DELETE http://localhost:$(API_PORT)/models/$(NAME) | jq '.'
.PHONY: api-health
# Exits non-zero when the API is unreachable, so this can gate CI steps.
api-health: ## Check if API is healthy and ready
	@echo "Checking API health..."
	@curl -sf http://localhost:$(API_PORT)/health > /dev/null && echo "API is healthy" || (echo "API not ready"; exit 1)
.PHONY: api-wait-ready
# Polls /health up to 12 times with a 5 s pause (≈60 s total). `exit 0`
# inside the loop terminates the recipe's shell on first success; falling out
# of the loop means every attempt failed, so the recipe exits 1.
api-wait-ready: ## Wait for API to be ready (up to 60 seconds)
	@echo "Waiting for API to be ready..."
	@for i in $$(seq 1 12); do \
		if curl -sf http://localhost:$(API_PORT)/health > /dev/null 2>&1; then \
			echo "API is ready!"; \
			exit 0; \
		fi; \
		echo "  Attempt $$i/12 - waiting 5 seconds..."; \
		sleep 5; \
	done; \
	echo "ERROR: API not ready after 60 seconds"; \
	exit 1
.PHONY: api-test-quick
# Read-only smoke test of the model-management API: lists models and export
# tasks and checks health without triggering any export work.
api-test-quick: ## Quick API test (no export, just endpoint verification)
	@echo "==================================================================================="
	@echo "Model Management API - Quick Endpoint Test"
	@echo "==================================================================================="
	@echo ""
	@echo "--- Testing GET /models/ ---"
	@curl -s http://localhost:$(API_PORT)/models/ | jq '.triton_status, .total'
	@echo ""
	@echo "--- Testing GET /models/exports ---"
	@curl -s http://localhost:$(API_PORT)/models/exports | jq 'length'
	@echo ""
	@echo "--- Testing API Health ---"
	@curl -sf http://localhost:$(API_PORT)/health | jq '.status'
	@echo ""
	@echo "All quick tests passed!"
# ==================================================================================
# Model Export (CLI-based)
# ==================================================================================
.PHONY: export-models
# NOTE(review): export-models and export-small currently run the identical
# command — consider making export-models an alias or widening its model set.
export-models: ## Export YOLO models (TRT + End2End with normalized boxes)
	@echo "Exporting YOLO models to TensorRT formats (normalized boxes)..."
	$(COMPOSE) exec $(API_SERVICE) python /app/export/export_models.py --models small --formats trt trt_end2end --normalize-boxes --save-labels --generate-config
.PHONY: export-all
export-all: ## Export all models (nano through xlarge) in all formats
	@echo "Exporting all YOLO models in all formats (normalized boxes)..."
	@echo "WARNING: This will take 60-120 minutes depending on GPU"
	$(COMPOSE) exec $(API_SERVICE) python /app/export/export_models.py --models nano small medium large xlarge --formats all --normalize-boxes --save-labels --generate-config
.PHONY: export-small
export-small: ## Quick export for small model (TRT + End2End with normalized boxes)
	@echo "Exporting small model (TRT + End2End, normalized boxes)..."
	$(COMPOSE) exec $(API_SERVICE) python /app/export/export_models.py --models small --formats trt trt_end2end --normalize-boxes --save-labels --generate-config
.PHONY: export-onnx
# ONNX-only export skips --generate-config (unlike the TRT targets above).
export-onnx: ## Export ONNX-only format (with normalized boxes)
	@echo "Exporting ONNX models only (normalized boxes)..."
	$(COMPOSE) exec $(API_SERVICE) python /app/export/export_models.py --models small --formats onnx onnx_end2end --normalize-boxes --save-labels
.PHONY: export-custom
# Builds a colon-delimited spec "MODEL[:NAME][:BATCH]" for --custom-model.
# When NAME is omitted but BATCH is given, an empty middle field is emitted
# ("MODEL::BATCH") so BATCH stays in the third position.
export-custom: ## Export custom model (usage: make export-custom MODEL=/path/to/model.pt [NAME=custom_name] [BATCH=32])
	@if [ -z "$(MODEL)" ]; then \
		echo "Error: MODEL parameter required"; \
		echo "Usage: make export-custom MODEL=/path/to/model.pt [NAME=custom_name] [BATCH=32]"; \
		echo ""; \
		echo "Examples:"; \
		echo "  make export-custom MODEL=/app/pytorch_models/my_model.pt"; \
		echo "  make export-custom MODEL=/app/pytorch_models/my_model.pt NAME=my_detector BATCH=64"; \
		exit 1; \
	fi
	@CUSTOM_ARG="$(MODEL)"; \
	if [ -n "$(NAME)" ]; then CUSTOM_ARG="$$CUSTOM_ARG:$(NAME)"; elif [ -n "$(BATCH)" ]; then CUSTOM_ARG="$$CUSTOM_ARG:"; fi; \
	if [ -n "$(BATCH)" ]; then CUSTOM_ARG="$$CUSTOM_ARG:$(BATCH)"; fi; \
	echo "Exporting custom model: $$CUSTOM_ARG"; \
	$(COMPOSE) exec $(API_SERVICE) python /app/export/export_models.py \
		--custom-model "$$CUSTOM_ARG" \
		--formats trt trt_end2end \
		--normalize-boxes \
		--save-labels \
		--generate-config
.PHONY: export-config
# Batch export driven by a YAML file (schema shown in the usage message);
# CONFIG must be a path visible inside the API container.
export-config: ## Export models from YAML config file (usage: make export-config CONFIG=/path/to/config.yaml)
	@if [ -z "$(CONFIG)" ]; then \
		echo "Error: CONFIG parameter required"; \
		echo "Usage: make export-config CONFIG=/path/to/config.yaml"; \
		echo ""; \
		echo "Example YAML format:"; \
		echo "  models:"; \
		echo "    my_model:"; \
		echo "      pt_file: /app/pytorch_models/my_model.pt"; \
		echo "      triton_name: my_custom_detector  # optional"; \
		echo "      max_batch: 32  # optional"; \
		exit 1; \
	fi
	@echo "Exporting models from config: $(CONFIG)"
	$(COMPOSE) exec $(API_SERVICE) python /app/export/export_models.py \
		--config-file "$(CONFIG)" \
		--formats trt trt_end2end \
		--normalize-boxes \
		--save-labels \
		--generate-config
.PHONY: export-list
export-list: ## List available built-in models
	@$(COMPOSE) exec $(API_SERVICE) python /app/export/export_models.py --list-models
.PHONY: export-mobileclip
# Exports both MobileCLIP encoders, then restarts Triton so it picks up the
# new model files.
export-mobileclip: ## Export MobileCLIP models
	@echo "Exporting MobileCLIP image encoder..."
	$(COMPOSE) exec $(API_SERVICE) python /app/export/export_mobileclip_image_encoder.py
	@echo "Exporting MobileCLIP text encoder..."
	$(COMPOSE) exec $(API_SERVICE) python /app/export/export_mobileclip_text_encoder.py
	@$(MAKE) restart-triton
.PHONY: export-status
# Summarizes on-disk export state: source .pt files plus, for each
# models/yolov11* repo directory, whether version 1 holds an ONNX or TRT
# artifact and whether config.pbtxt is present.
export-status: ## Show status of all exported models
	@echo "==================================================================================="
	@echo "Model Export Status"
	@echo "==================================================================================="
	@echo ""
	@echo "PyTorch Models (pytorch_models/):"
	@ls -lh pytorch_models/*.pt 2>/dev/null || echo "  No PyTorch models found"
	@echo ""
	@echo "Triton Models (models/):"
	@for dir in models/yolov11*; do \
		if [ -d "$$dir" ]; then \
			name=$$(basename $$dir); \
			model=""; config=""; \
			[ -f "$$dir/1/model.onnx" ] && model="ONNX"; \
			[ -f "$$dir/1/model.plan" ] && model="TRT"; \
			[ -f "$$dir/config.pbtxt" ] && config="OK" || config="MISSING"; \
			printf "  %-35s model: %-5s config: %s\n" "$$name" "$${model:-NONE}" "$$config"; \
		fi \
	done
	@echo ""
.PHONY: validate-exports
# Queries Triton's HTTP API directly (not via the yolo-api service) for the
# repository index and the small-model configs; degrades to a hint when
# Triton is down or a model is not loaded.
validate-exports: ## Validate that Triton can load exported models
	@echo "Validating exported models with Triton..."
	@echo "Checking Triton model repository status..."
	@curl -s http://localhost:$(TRITON_HTTP_PORT)/v2/models | jq -r '.models[]? | "\(.name): \(.state)"' 2>/dev/null || echo "Triton not running. Start with: make up"
	@echo ""
	@echo "Model details:"
	@curl -s http://localhost:$(TRITON_HTTP_PORT)/v2/models/yolov11_small_trt/config 2>/dev/null | jq '.name, .max_batch_size' || echo "  yolov11_small_trt: Not loaded"
	@curl -s http://localhost:$(TRITON_HTTP_PORT)/v2/models/yolov11_small_trt_end2end/config 2>/dev/null | jq '.name, .max_batch_size' || echo "  yolov11_small_trt_end2end: Not loaded"
# ==================================================================================
# Face Detection & Recognition
# ==================================================================================
.PHONY: download-face-models
download-face-models: ## Download ArcFace face recognition models
	@echo "Downloading face recognition models..."
	$(COMPOSE) exec $(API_SERVICE) python /app/export/download_face_models.py
.PHONY: export-face-recognition
export-face-recognition: ## Export ArcFace to TensorRT
	@echo "Exporting ArcFace face recognition to TensorRT..."
	$(COMPOSE) exec $(API_SERVICE) python /app/export/export_face_recognition.py
	@$(MAKE) load-face-models
.PHONY: load-face-models
# Triton model-repository load API; `|| true` tolerates an already-loaded
# model or an unreachable Triton (best-effort).
load-face-models: ## Load face models into Triton (SCRFD + ArcFace)
	@echo "Loading face models into Triton..."
	@curl -s -X POST "http://localhost:$(TRITON_HTTP_PORT)/v2/repository/models/scrfd_10g_bnkps/load" || true
	@curl -s -X POST "http://localhost:$(TRITON_HTTP_PORT)/v2/repository/models/arcface_w600k_r50/load" || true
	@echo "Face models loaded."
.PHONY: setup-face-pipeline
# NOTE(review): relies on prerequisites running left-to-right, which only
# holds for serial make — under `make -j` the three steps may interleave.
setup-face-pipeline: download-face-models export-face-recognition export-scrfd ## Complete face pipeline setup (SCRFD + ArcFace)
	@echo "Face pipeline setup complete!"
	@echo ""
	@echo "Loaded models:"
	@curl -s -X POST "http://localhost:$(TRITON_HTTP_PORT)/v2/repository/index" | grep -E "(scrfd|arcface)" || true
.PHONY: download-face-test-data
download-face-test-data: ## Download LFW and WIDER Face test datasets
	@echo "Downloading face test datasets..."
	@bash $(SCRIPTS_DIR)/setup_face_test_data.sh --all
	@echo ""
	@echo "Datasets downloaded to test_images/faces/"
# ==================================================================================
# SCRFD (Face Detection with 5-point Landmarks)
# ==================================================================================
.PHONY: export-scrfd
export-scrfd: ## Download + export SCRFD to TensorRT for Triton
	@echo "Exporting SCRFD face detection to TensorRT..."
	$(COMPOSE) exec $(API_SERVICE) python /app/export/export_scrfd.py
.PHONY: load-scrfd
# Best-effort repository load (|| true), mirroring load-face-models.
load-scrfd: ## Load SCRFD model into Triton
	@echo "Loading SCRFD model..."
	@curl -s -X POST "http://localhost:$(TRITON_HTTP_PORT)/v2/repository/models/scrfd_10g_bnkps/load" || true
	@echo "SCRFD loaded."
.PHONY: setup-scrfd
# NOTE(review): prerequisite order (export → restart → load) is only
# guaranteed under serial make, not `make -j`.
setup-scrfd: export-scrfd restart-triton load-scrfd ## Complete SCRFD setup (download, export, deploy)
	@echo "SCRFD setup complete!"
	@echo ""
	@echo "Test with:"
	@echo "  curl -X POST http://localhost:$(API_PORT)/faces/recognize -F 'file=@test.jpg' | jq '.faces[0].landmarks'"
# ==================================================================================
# Integration Tests (uses tests/test_endpoints.sh)
# ==================================================================================
.PHONY: test-faces
# Thin wrappers around the endpoint-test script; the argument selects the
# suite (faces / all / models).
test-faces: ## Test face detection + recognition (SCRFD + ArcFace)
	@./tests/test_endpoints.sh faces
.PHONY: test-endpoints
# Depends on download-test-images so the fixtures exist before the suite runs.
test-endpoints: download-test-images ## Run ALL endpoint integration tests
	@./tests/test_endpoints.sh all
.PHONY: test-models
test-models: ## Verify all Triton models are loaded
	@./tests/test_endpoints.sh models
# ==================================================================================
# OCR (PP-OCRv5)
# ==================================================================================
.PHONY: download-paddleocr
download-paddleocr: ## Download PP-OCRv5 ONNX models and dictionaries
	@echo "Downloading PP-OCRv5 models..."
	$(COMPOSE) exec $(API_SERVICE) python /app/export/download_paddleocr.py
.PHONY: export-paddleocr-det
export-paddleocr-det: ## Export PaddleOCR detection model to TensorRT
	@echo "Exporting PP-OCRv5 detection to TensorRT..."
	$(COMPOSE) exec $(API_SERVICE) python /app/export/export_paddleocr_det.py
.PHONY: export-paddleocr-rec
export-paddleocr-rec: ## Export PaddleOCR recognition model to TensorRT
	@echo "Exporting PP-OCRv5 recognition to TensorRT..."
	$(COMPOSE) exec $(API_SERVICE) python /app/export/export_paddleocr_rec.py
.PHONY: export-paddleocr
export-paddleocr: export-paddleocr-det export-paddleocr-rec ## Export both PaddleOCR models to TensorRT
	@echo "Both OCR models exported!"
.PHONY: setup-ocr
# NOTE(review): prerequisite order (download → export → restart) is only
# guaranteed under serial make, not `make -j`.
setup-ocr: download-paddleocr export-paddleocr restart-triton ## Complete OCR pipeline setup
	@echo "OCR pipeline setup complete!"
.PHONY: load-ocr-models
# Best-effort repository loads (|| true), same pattern as load-face-models.
load-ocr-models: ## Load OCR models into Triton
	@echo "Loading OCR models into Triton..."
	@curl -s -X POST "http://localhost:$(TRITON_HTTP_PORT)/v2/repository/models/paddleocr_det_trt/load" || true
	@curl -s -X POST "http://localhost:$(TRITON_HTTP_PORT)/v2/repository/models/paddleocr_rec_trt/load" || true
	@echo "OCR models loaded."
# ==================================================================================
# Monitoring and Metrics
# ==================================================================================
.PHONY: open-grafana
# Tries xdg-open (Linux) then open (macOS); falls back to printing the URL.
open-grafana: ## Open Grafana dashboard in browser
	@echo "Opening Grafana dashboard..."
	@echo "URL: http://localhost:$(GRAFANA_PORT)"
	@echo "Login: admin/admin"
	@xdg-open http://localhost:$(GRAFANA_PORT) 2>/dev/null || open http://localhost:$(GRAFANA_PORT) 2>/dev/null || echo "Please open http://localhost:$(GRAFANA_PORT) in your browser"
.PHONY: open-prometheus
open-prometheus: ## Open Prometheus UI in browser
	@echo "Opening Prometheus..."
	@xdg-open http://localhost:$(PROMETHEUS_PORT) 2>/dev/null || open http://localhost:$(PROMETHEUS_PORT) 2>/dev/null || echo "Please open http://localhost:$(PROMETHEUS_PORT) in your browser"
.PHONY: open-opensearch
open-opensearch: ## Open OpenSearch Dashboards in browser
	@echo "Opening OpenSearch Dashboards..."
	@xdg-open http://localhost:$(OPENSEARCH_DASH_PORT) 2>/dev/null || open http://localhost:$(OPENSEARCH_DASH_PORT) 2>/dev/null || echo "Please open http://localhost:$(OPENSEARCH_DASH_PORT) in your browser"
.PHONY: metrics
# Quick peek at Triton's Prometheus endpoint: inference + GPU gauges only.
metrics: ## Show Triton metrics
	@curl -s http://localhost:$(TRITON_METRICS_PORT)/metrics | grep -E "nv_inference_|nv_gpu_" | head -n 20
.PHONY: gpu
gpu: ## Show GPU status
	@nvidia-smi
.PHONY: gpu-watch
# Blocks the terminal; exit with Ctrl+C.
gpu-watch: ## Watch GPU status (updates every second)
	@watch -n 1 nvidia-smi
# ==================================================================================
# Triton Model Management
# ==================================================================================
.PHONY: triton-health
# curl -f makes a non-2xx response an error, so the target exits 1 when Triton
# is not ready (usable as a CI gate).
triton-health: ## Check Triton server health
	@echo "Checking Triton health..."
	@curl -sf http://localhost:$(TRITON_HTTP_PORT)/v2/health/ready > /dev/null && echo "Triton is ready" || (echo "Triton not ready"; exit 1)
.PHONY: triton-models-ready
# Queries the repository index and filters for state == READY with an inline
# python3 one-liner (no jq dependency here); any failure prints a fallback.
triton-models-ready: ## List all READY models in Triton
	@echo "=== Triton Models (READY) ==="
	@curl -s -X POST http://localhost:$(TRITON_HTTP_PORT)/v2/repository/index 2>/dev/null | \
	python3 -c "import sys,json; models=json.load(sys.stdin); ready=[m['name'] for m in models if m.get('state')=='READY']; print(f'Total: {len(ready)} models'); [print(f' - {n}') for n in sorted(ready)]" 2>/dev/null || echo "Error: Cannot connect to Triton"
.PHONY: triton-stats
# Shows only models with at least one successful inference.
triton-stats: ## Show Triton model statistics
	@echo "=== Triton Model Statistics ==="
	@curl -s http://localhost:$(TRITON_HTTP_PORT)/v2/models/stats 2>/dev/null | \
	python3 -c "import sys,json; stats=json.load(sys.stdin); ms=stats.get('model_stats',[]); [print(f\"{m['name']}: {m['inference_stats']['success']['count']} inferences\") for m in ms if m['inference_stats']['success']['count']>0]" 2>/dev/null || echo "Error: Cannot fetch stats"
.PHONY: triton-metrics
# Raw Prometheus text filtered to counts and compute latency; strips comments.
triton-metrics: ## Show key Triton metrics (inference counts, latencies)
	@echo "=== Triton Key Metrics ==="
	@curl -s http://localhost:$(TRITON_METRICS_PORT)/metrics 2>/dev/null | \
	grep -E "nv_inference_count|nv_inference_compute_infer_duration" | \
	grep -v "^#" | head -30
.PHONY: triton-unload-all
# Iterates every model name in the repository index and issues an unload for
# each. Requires jq. The sleep gives Triton time to finish unloading before
# the final READY listing. NOTE(review): in explicit model-control mode an
# unload request can be rejected — errors here are discarded via > /dev/null.
triton-unload-all: ## Unload all models from Triton server
	@echo "Unloading all models from Triton..."
	@for model in $$(curl -s -X POST http://localhost:$(TRITON_HTTP_PORT)/v2/repository/index | jq -r '.[].name'); do \
		echo " Unloading $$model..."; \
		curl -s -X POST "http://localhost:$(TRITON_HTTP_PORT)/v2/repository/models/$$model/unload" > /dev/null; \
	done
	@sleep 3
	@echo ""
	@echo "READY models remaining:"
	@curl -s -X POST http://localhost:$(TRITON_HTTP_PORT)/v2/repository/index | jq -r '.[] | select(.state == "READY") | .name'
.PHONY: triton-models
# One line per model: "name: state" (READY / UNAVAILABLE / ...). Requires jq.
triton-models: ## List all Triton models and their state
	@echo "Triton Model Repository:"
	@curl -s -X POST http://localhost:$(TRITON_HTTP_PORT)/v2/repository/index | jq -r '.[] | "\(.name): \(.state)"'
.PHONY: triton-load
# Guard clause: fail fast with usage text when MODEL= is not supplied.
triton-load: ## Load a model into Triton (usage: make triton-load MODEL=model_name)
	@if [ -z "$(MODEL)" ]; then \
		echo "Error: MODEL parameter required"; \
		echo "Usage: make triton-load MODEL=model_name"; \
		exit 1; \
	fi
	@echo "Loading $(MODEL) into Triton..."
	@curl -s -X POST "http://localhost:$(TRITON_HTTP_PORT)/v2/repository/models/$(MODEL)/load" | jq '.'
.PHONY: triton-unload
# Mirror of triton-load; same MODEL= guard.
triton-unload: ## Unload a model from Triton (usage: make triton-unload MODEL=model_name)
	@if [ -z "$(MODEL)" ]; then \
		echo "Error: MODEL parameter required"; \
		echo "Usage: make triton-unload MODEL=model_name"; \
		exit 1; \
	fi
	@echo "Unloading $(MODEL) from Triton..."
	@curl -s -X POST "http://localhost:$(TRITON_HTTP_PORT)/v2/repository/models/$(MODEL)/unload" | jq '.'
# ==================================================================================
# Health & Status Checks
# ==================================================================================
.PHONY: check-all
# Human-readable dashboard: each probe prints a pass/fail line but never fails
# the target (|| echo), so all sections always run. For a CI gate that exits
# non-zero, use triton-health instead.
check-all: ## Full system health check (API + Triton + OpenSearch)
	@echo "==================================================================================="
	@echo "System Health Check"
	@echo "==================================================================================="
	@echo ""
	@echo "--- API Service ---"
	@curl -sf http://localhost:$(API_PORT)/health > /dev/null && echo "API is healthy" || echo "API not responding"
	@echo ""
	@echo "--- Triton Server ---"
	@curl -sf http://localhost:$(TRITON_HTTP_PORT)/v2/health/ready > /dev/null && echo "Triton is ready" || echo "Triton not ready"
	@echo ""
	@echo "--- OpenSearch ---"
	@curl -sf http://localhost:$(OPENSEARCH_PORT)/_cluster/health > /dev/null && echo "OpenSearch is healthy" || echo "OpenSearch not responding"
	@echo ""
	@echo "--- GPU Status ---"
	@nvidia-smi --query-gpu=name,memory.used,memory.total,utilization.gpu --format=csv,noheader 2>/dev/null || echo "Cannot query GPU"
	@echo ""
	@echo "==================================================================================="
# ==================================================================================
# Cleanup
# ==================================================================================
.PHONY: clean
# Non-destructive: containers go away, named volumes (and data) are kept.
clean: ## Stop services and remove containers
	@echo "Stopping and removing containers..."
	$(COMPOSE) down
.PHONY: clean-all
# DESTRUCTIVE: `down -v` also removes named volumes, wiping all persisted data.
# The read pauses until the user presses Enter (Ctrl+C aborts).
# BUG FIX: bare `read` is not POSIX — under dash (/bin/sh on Debian/Ubuntu,
# make's default SHELL) it errors out and the target aborted before the prompt
# could be answered. `read -r _` is portable; `-r` keeps backslashes literal.
clean-all: ## Stop services, remove containers and volumes (WARNING: deletes all data)
	@echo "WARNING: This will remove all containers, volumes, and data!"
	@echo "Press Ctrl+C to cancel, or Enter to continue..."
	@read -r _
	$(COMPOSE) down -v
.PHONY: clean-logs
# NOTE(review): `docker system prune -f` removes stopped containers, dangling
# images and unused networks — broader than just "logs"; confirm this is the
# intended scope before relying on the target name.
clean-logs: ## Clear Docker logs
	@echo "Clearing Docker logs..."
	$(COMPOSE) down
	@docker system prune -f
.PHONY: clean-bench
# Removes result artifacts only; the benchmark tooling itself is untouched.
clean-bench: ## Remove benchmark results
	@echo "Removing benchmark results..."
	@rm -rf $(BENCHMARK_DIR)/results/*
	@echo "Benchmark results cleared."
.PHONY: clean-exports
# Clears exported ONNX/TensorRT artifacts so models can be re-exported,
# backing up every config.pbtxt into a timestamped directory first.
# BUG FIX: the timestamp was previously expanded twice — once in the `mkdir`
# recipe line and again inside the copy loop (each recipe line runs in its own
# shell). If the clock crossed a second boundary between the two expansions,
# the cp destination directory did not exist and backups were silently lost
# (masked by `|| true`). The path is now computed once and reused.
clean-exports: ## Clean old model exports (keeps configs, prepares for re-export)
	@echo "==================================================================================="
	@echo "Cleaning old model exports..."
	@echo "==================================================================================="
	@echo ""
	@echo "Backing up config.pbtxt files..."
	@backup_dir="models/backup_configs_$$(date +%Y%m%d_%H%M%S)"; \
	mkdir -p "$$backup_dir"; \
	for config in models/*/config.pbtxt; do \
		if [ -f "$$config" ]; then \
			model_name=$$(basename "$$(dirname "$$config")"); \
			cp "$$config" "$$backup_dir/$${model_name}_config.pbtxt" 2>/dev/null || true; \
		fi; \
	done
	@echo ""
	@echo "Removing old ONNX and TRT files..."
	@for dir in models/yolov11_*/1; do \
		if [ -d "$$dir" ]; then \
			rm -f "$$dir/model.onnx" "$$dir/model.onnx.old" "$$dir/model.plan" "$$dir/model.plan.old" 2>/dev/null || true; \
		fi; \
	done
	@echo ""
	@echo "Clearing TRT cache..."
	@rm -rf trt_cache/* 2>/dev/null || true
	@echo ""
	@echo "Done! Run 'make export-status' to see current state."
	@echo "Then run 'make export-models' or 'make export-all' to re-export."
# ==================================================================================
# OpenSearch / Data Management
# ==================================================================================
.PHONY: opensearch-reset
# DESTRUCTIVE: deletes every index via the _all meta-target, after an
# Enter-to-confirm prompt.
# BUG FIX: bare `read` is not POSIX — under dash (/bin/sh on Debian/Ubuntu,
# make's default SHELL) it errors out and the target aborted before the prompt
# could be answered. `read -r _` is portable (same fix as clean-all).
opensearch-reset: ## Reset OpenSearch indices (WARNING: deletes all visual search data)
	@echo "WARNING: This will delete all OpenSearch indices and data!"
	@echo "Press Ctrl+C to cancel, or Enter to continue..."
	@read -r _
	@curl -X DELETE "http://localhost:$(OPENSEARCH_PORT)/_all" || echo "Failed to delete indices"
	@echo "OpenSearch indices cleared."
.PHONY: opensearch-status
opensearch-status: ## Show OpenSearch cluster status
	@echo "OpenSearch Cluster Status:"
	@curl -s http://localhost:$(OPENSEARCH_PORT)/_cluster/health?pretty
.PHONY: opensearch-indices
opensearch-indices: ## List OpenSearch indices
	@echo "OpenSearch Indices:"
	@curl -s http://localhost:$(OPENSEARCH_PORT)/_cat/indices?v
.PHONY: opensearch-reset-indexes
# Goes through the API service (port $(API_PORT)), not OpenSearch directly, so
# the application recreates its own index mappings.
# NOTE(review): destructive, yet unlike opensearch-reset it asks for no
# confirmation — consider adding the same Enter-to-confirm prompt.
opensearch-reset-indexes: ## Reset all OpenSearch indexes (delete and recreate)
	@echo "Resetting OpenSearch indexes..."
	@curl -s -X DELETE "http://localhost:$(API_PORT)/index" | python3 -c "import sys,json; print(json.load(sys.stdin).get('message','deleted'))" 2>/dev/null || true
	@sleep 1
	@curl -s -X POST "http://localhost:$(API_PORT)/index/create" | python3 -c "import sys,json; print('Indexes created:', json.load(sys.stdin).get('status','unknown'))" 2>/dev/null
	@echo "Done."
# ==================================================================================
# Documentation
# ==================================================================================
.PHONY: info
# Prints a static cheat-sheet of service URLs and API endpoints; makes no
# network calls (use check-all / status for live health).
info: ## Show service URLs and ports
	@echo "==================================================================================="
	@echo "OpenProcessor - Visual AI Processing Engine"
	@echo "==================================================================================="
	@echo ""
	@echo "Quick Start:"
	@echo " make up Start all services"
	@echo " make status Check service health"
	@echo " make test-all Test all endpoints"
	@echo ""
	@echo "Services:"
	@echo " API: http://localhost:$(API_PORT)"
	@echo " Triton HTTP: http://localhost:$(TRITON_HTTP_PORT)"
	@echo " Triton gRPC: http://localhost:$(TRITON_GRPC_PORT)"
	@echo " OpenSearch: http://localhost:$(OPENSEARCH_PORT)"
	@echo ""
	@echo "Monitoring:"
	@echo " Grafana: http://localhost:$(GRAFANA_PORT) (admin/admin)"
	@echo " Prometheus: http://localhost:$(PROMETHEUS_PORT)"
	@echo ""
	@echo "API Endpoints (port $(API_PORT)):"
	@echo " Detection: POST /detect"
	@echo " Face Recognition: POST /faces/recognize"
	@echo " Image Embedding: POST /embed/image"
	@echo " Text Embedding: POST /embed/text"
	@echo " OCR: POST /ocr/predict"
	@echo " Image Search: POST /search/image"
	@echo " Text Search: POST /search/text"
	@echo " Ingest: POST /ingest"
	@echo " Analyze: POST /analyze"
	@echo ""
.PHONY: docs
# Pure alias — prerequisite only, no recipe of its own.
docs: info ## Alias for info
# ==================================================================================
# Reference Repositories (for attribution and development)
# ==================================================================================
.PHONY: clone-refs-essential
# All clone-refs* targets delegate to one script; only the flag differs.
clone-refs-essential: ## Clone essential reference repos (ultralytics-end2end, ml-mobileclip)
	@echo "Cloning essential reference repositories..."
	@bash $(SCRIPTS_DIR)/clone_reference_repos.sh --essential
.PHONY: clone-refs-recommended
clone-refs-recommended: ## Clone essential + recommended reference repos
	@echo "Cloning recommended reference repositories..."
	@bash $(SCRIPTS_DIR)/clone_reference_repos.sh --recommended
.PHONY: clone-refs-all
clone-refs-all: ## Clone all reference repositories
	@echo "Cloning all reference repositories..."
	@bash $(SCRIPTS_DIR)/clone_reference_repos.sh --all
.PHONY: clone-refs-list
clone-refs-list: ## List available reference repos and their status
	@bash $(SCRIPTS_DIR)/clone_reference_repos.sh --list
.PHONY: clone-ref
# Guard clause: fail fast with usage text (plus the available-repo listing)
# when REPO= is not supplied; same pattern as triton-load's MODEL= guard.
clone-ref: ## Clone a specific reference repo (usage: make clone-ref REPO=ultralytics-end2end)
	@if [ -z "$(REPO)" ]; then \
		echo "Error: REPO parameter required"; \
		echo "Usage: make clone-ref REPO=repo_name"; \
		echo ""; \
		echo "Available repos:"; \
		bash $(SCRIPTS_DIR)/clone_reference_repos.sh --list; \
		exit 1; \
	fi
	@bash $(SCRIPTS_DIR)/clone_reference_repos.sh --repo $(REPO)
# ==================================================================================
# Phony targets (targets that don't create files)
# ==================================================================================