-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrun_engine.py
More file actions
109 lines (95 loc) · 3.31 KB
/
run_engine.py
File metadata and controls
109 lines (95 loc) · 3.31 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Coding Engine — CLI Entry Point
Launches the Master Orchestrator which:
1. Reads requirements.json from the project path
2. Registers AI agents in Minibook (collaboration platform)
3. Each agent thinks via Ollama (qwen2.5-coder, local LLM)
4. Agents communicate by posting/commenting in Minibook
5. Orchestrator drives phases: Architecture → Code → DB → Test → Fix → Review → Infra
6. Output: Complete project written to output/ directory
Prerequisites:
- Ollama running: ollama serve
- Model pulled: ollama pull qwen2.5-coder:7b
- Minibook running: cd ../minibook && python run.py
Usage:
python run_engine.py --project Data/all_services/whatsapp
python run_engine.py --project Data/all_services/whatsapp --model qwen2.5-coder:14b
python run_engine.py --project Data/all_services/whatsapp --output ./my_output
"""
import argparse
import logging
import sys
import os
# Ensure project root is in path
# (prepended so the local src/ package wins over any installed package of the same name)
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
# Project-local import; requires the sys.path tweak above when run as a script.
from src.engine.master_orchestrator import MasterOrchestrator
def main() -> int:
    """Parse CLI arguments, configure logging, and run the Master Orchestrator.

    Returns:
        int: 0 if the orchestrator run succeeded, 1 otherwise
        (suitable for passing to ``sys.exit``).
    """
    parser = argparse.ArgumentParser(
        description="Coding Engine — AI-powered code generation via Minibook + Ollama",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python run_engine.py --project Data/all_services/whatsapp
  python run_engine.py --project Data/all_services/whatsapp --model qwen2.5-coder:14b
  python run_engine.py --project Data/all_services/whatsapp --minibook-url http://192.168.1.10:3456
""",
    )
    parser.add_argument(
        "--project", "-p",
        required=True,
        help="Path to project directory containing requirements.json",
    )
    parser.add_argument(
        "--output", "-o",
        default=None,
        help="Output directory for generated files (default: output/<project-name>)",
    )
    parser.add_argument(
        "--model", "-m",
        default="qwen2.5-coder:7b",
        help="Ollama model to use (default: qwen2.5-coder:7b)",
    )
    parser.add_argument(
        "--ollama-url",
        default="http://localhost:11434",
        help="Ollama server URL (default: http://localhost:11434)",
    )
    parser.add_argument(
        "--minibook-url",
        # BUGFIX: default previously said http://localhost:8080, contradicting the
        # help text below and the module docstring, which both document port 3456.
        default="http://localhost:3456",
        help="Minibook server URL (default: http://localhost:3456)",
    )
    parser.add_argument(
        "--max-fix-rounds",
        type=int,
        default=3,
        help="Max bug-fix iterations (default: 3)",
    )
    parser.add_argument(
        "--verbose", "-v",
        action="store_true",
        help="Enable verbose logging",
    )
    args = parser.parse_args()

    # Setup logging: DEBUG when --verbose, INFO otherwise.
    log_level = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s [%(name)s] %(levelname)s: %(message)s",
        datefmt="%H:%M:%S",
    )

    # Run orchestrator; it drives all phases and writes output itself.
    orchestrator = MasterOrchestrator(
        project_path=args.project,
        minibook_url=args.minibook_url,
        ollama_model=args.model,
        ollama_url=args.ollama_url,
        output_dir=args.output,
        max_fix_rounds=args.max_fix_rounds,
    )
    success = orchestrator.run()
    return 0 if success else 1
# Script entry point: propagate main()'s status (0 = success, 1 = failure) as the exit code.
if __name__ == "__main__":
    sys.exit(main())