print("--- Python Script bootstrap.py Started ---")
# MIT License
#
# Copyright (c) 2024 Oren Collaco
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import abc
import argparse
import atexit
import copy
import difflib
import json
import os
import queue
import random
import re
import shlex
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import time
from datetime import datetime, timedelta
from functools import wraps
from io import StringIO
from typing import Any, Dict, List, Optional

import anthropic
import psutil
import requests
from pyjson5 import load as json5_load
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
# Global variables for model and source settings
MODEL = 'claude' # Default to 'claude'
SOURCE = 'anthropic' # Default to 'anthropic'
API_KEY = None
PROJECT_ID = None
REGION = None
OPENAI_BASE_URL = 'https://api.openai.com/v1'
NEWLINE = "\n"
DEBUG_PROMPT = False
PUBLISHER = 'anthropic' # NEW: Default publisher for Vertex
# Define the devlm folder path
DEVLM_FOLDER = ".devlm"
GLOBAL_MAX_PROMPT_LENGTH = 200000
Global_error = ""
GLOBAL_ERROR_PROMPT_LENGTH = "Prompt length is too long. Truncated to 200000 characters. However, this is a FATAL problem that will prevent the LLM from getting other relevant information making it useless. Figure out what is causing prompt length to be too long and fix it."
ALLOWED_COMMANDS = [
'python3',
'go run',
'go test',
'docker build',
'docker run',
'pip3 install',
'go mod tidy',
'curl',
'wget',
'cd',
'g++',
'gcc',
'make',
'ls',
'mkdir',
'cp',
'mv',
'chmod',
'chown',
'lsof',
'netstat',
'ss',
'pgrep',
'erlc',
'echo',
'erl',
'west build',
'git clone',
# Add more commands as needed
]
APPROVAL_REQUIRED_COMMANDS = [
'sudo apt install',
'./',
# Add a raw command that requires approval
'RAW: <raw_command>'
]
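# Command execution is gated in two tiers: commands matching ALLOWED_COMMANDS are
# expected to run directly, while APPROVAL_REQUIRED_COMMANDS (and raw "RAW:" commands)
# are expected to prompt for user confirmation in the execution logic later in this
# file (subject to the NO_APPROVAL setting defined below).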
try:
import anthropic
from anthropic import AnthropicVertex
except ImportError:
print("Error: anthropic package is not installed. Please run: pip install anthropic[vertex]")
sys.exit(1)
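# LLM provider abstraction: each backend (Anthropic API, OpenAI-compatible servers,
# Anthropic-on-Vertex, Google Vertex) implements LLMInterface.generate_response(prompt,
# max_tokens) and shares the debug prompt/response dump helpers defined on the base class.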
class LLMError(Exception):
def __init__(self, error_type, message):
self.error_type = error_type
self.message = message
super().__init__(f"{error_type}: {message}")
class LLMInterface(abc.ABC):
@abc.abstractmethod
def generate_response(self, prompt: str, max_tokens: int) -> str:
pass
def _write_debug_prompt(self, prompt: str):
global DEBUG_PROMPT
        if DEBUG_PROMPT:
            os.makedirs(DEBUG_PROMPT_FOLDER, exist_ok=True)
            with open(os.path.join(DEBUG_PROMPT_FOLDER, f"prompt_{datetime.now().strftime('%Y%m%d_%H%M%S')}.txt"), "w") as f:
                f.write(prompt)
def _write_debug_response(self, response: str):
global DEBUG_PROMPT
if DEBUG_PROMPT:
os.makedirs(DEBUG_RESPONSE_FOLDER, exist_ok=True)
with open(os.path.join(DEBUG_RESPONSE_FOLDER, f"response_{datetime.now().strftime('%Y%m%d_%H%M%S')}.txt"), "w") as f:
f.write(response)
class AnthropicLLM(LLMInterface):
    def __init__(self, client):
        self.client = client
        # Retry bookkeeping used by _handle_error() and _calculate_wait_time().
        self.max_retries = 5
        self.base_delay = 1
        self.retries = 0
def generate_response(self, prompt: str, max_tokens: int) -> str:
        global Global_error
        self._write_debug_prompt(prompt)
        Global_error = ""
        # Make sure the prompt length stays within GLOBAL_MAX_PROMPT_LENGTH, else truncate it.
        if len(prompt) > GLOBAL_MAX_PROMPT_LENGTH:
            prompt = prompt[:GLOBAL_MAX_PROMPT_LENGTH]
            Global_error = GLOBAL_ERROR_PROMPT_LENGTH
            print(Global_error)
while True:
try:
response = self.client.messages.create(
model="claude-3-5-sonnet-20241022",
max_tokens=max_tokens,
messages=[
{"role": "user", "content": prompt}
]
)
self._write_debug_response(response.content[0].text if response.content else "")
return response.content[0].text if response.content else ""
except anthropic.APIError as e:
if hasattr(e, 'status_code'):
error_data = e.response.json() if hasattr(e, 'response') else {}
error = error_data.get('error', {})
error_type = error.get('type', 'unknown_error')
error_message = error.get('message', str(e))
if self._handle_error(error_type, error_message):
continue # Retry after handling the error
else:
raise LLMError(error_type, error_message)
else:
print(f"API error: {str(e)}")
raise
except Exception as e:
# Handle any unexpected exceptions
print(f"Unexpected error: {str(e)}")
raise
def _handle_error(self, error_type, error_message):
if error_type == 'rate_limit_error':
if 'daily rate limit' in error_message.lower():
self._wait_until_midnight()
else:
self._handle_rate_limit(error_message)
return True
elif error_type == 'overloaded_error':
self._handle_overloaded()
return True
elif error_type == 'invalid_request_error' and 'credit balance is too low' in error_message.lower():
self._handle_credit_issue()
return True
elif error_type == 'internal_server_error':
if self.retries < self.max_retries:
wait_time = self._calculate_wait_time(self.retries)
print(f"Internal server error. Retrying in {wait_time} seconds...")
time.sleep(wait_time)
self.retries += 1
return True
else:
return False
return False
def _calculate_wait_time(self, retries):
return self.base_delay * (2 ** retries) + random.uniform(0, 1)
def _wait_until_midnight(self):
        now = datetime.now()
        tomorrow = now.replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(days=1)
wait_time = (tomorrow - now).total_seconds()
print(f"Daily rate limit reached. Waiting until midnight ({tomorrow.strftime('%Y-%m-%d %H:%M:%S')})...")
time.sleep(wait_time)
def _handle_rate_limit(self, error_message):
wait_time = 60 # Default to 60 seconds
# Try to extract wait time from error message if available
try:
wait_time = int(error_message.split("try again in ")[1].split(" ")[0])
except:
pass
print(f"Rate limit exceeded. Retrying in {wait_time} seconds...")
time.sleep(wait_time)
def _handle_overloaded(self):
wait_time = 60
print(f"API temporarily overloaded. Retrying in {wait_time} seconds...")
time.sleep(wait_time)
def _handle_credit_issue(self):
print("Your account has insufficient credit. Please add credit to your account.")
input("Press Enter once you've added credit to continue, or Ctrl+C to exit...")
class OpenAILLM(LLMInterface):
def __init__(self, api_key: str, model: str = "gpt-4", base_url: Optional[str] = None):
# Import required modules only when OpenAI LLM is initialized
try:
import openai
from openai import OpenAI
import time
import random
import os
import re
from typing import Optional
except ImportError as e:
missing_package = str(e).split("'")[1]
if missing_package == "openai":
print("Error: openai package is not installed. Please run: pip install openai")
else:
print(f"Error: Required package {missing_package} is not installed.")
sys.exit(1)
# Store necessary imports as class attributes to use in other methods
self._openai = openai
self._OpenAI = OpenAI
self._time = time
self._random = random
self._re = re
# Initialize the client with optional base_url
client_kwargs = {"api_key": api_key}
if base_url:
client_kwargs["base_url"] = base_url
print(f"Using custom OpenAI API server: {base_url}")
try:
self.client = self._OpenAI(**client_kwargs)
except Exception as e:
print(f"Error initializing OpenAI client: {str(e)}")
raise
self.model = model
self.max_retries = 5
self.base_delay = 1
self.retries = 0
self.base_url = base_url
def generate_response(self, prompt: str, max_tokens: int) -> str:
        global Global_error
        self._write_debug_prompt(prompt)
        # Make sure the prompt length stays within GLOBAL_MAX_PROMPT_LENGTH, else truncate it.
        if len(prompt) > GLOBAL_MAX_PROMPT_LENGTH:
            prompt = prompt[:GLOBAL_MAX_PROMPT_LENGTH]
            Global_error = GLOBAL_ERROR_PROMPT_LENGTH
            print(Global_error)
while True:
try:
#print the message length
print(f"Message length: {len(prompt)}")
if "o1" in self.model or "o3" in self.model:
response = self.client.chat.completions.create(
model=self.model,
messages=[
{"role": "user", "content": prompt}
],
max_completion_tokens=max_tokens
)
else:
response = self.client.chat.completions.create(
model=self.model,
messages=[
{"role": "user", "content": prompt}
],
max_tokens=max_tokens
)
self._write_debug_response(response.choices[0].message.content if response.choices else "")
return response.choices[0].message.content if response.choices else ""
            except self._openai.RateLimitError as e:
                # In openai>=1.0, quota exhaustion is reported as a RateLimitError whose
                # message contains "insufficient_quota".
                if "insufficient_quota" in str(e):
                    if self._handle_error("insufficient_quota", str(e)):
                        continue
                    raise LLMError("insufficient_quota", str(e))
                if self._handle_error("rate_limit_error", str(e)):
                    continue
                raise LLMError("rate_limit_error", str(e))
            except self._openai.APIConnectionError as e:
                if self._handle_error("connection_error", str(e)):
                    continue
                raise LLMError("connection_error", str(e))
            except self._openai.BadRequestError as e:
                raise LLMError("invalid_request", str(e))
            except self._openai.APIError as e:
                # Catch-all for other API errors; must come after the more specific subclasses.
                if self._handle_error("api_error", str(e)):
                    continue
                raise LLMError("api_error", str(e))
except Exception as e:
print(f"Unexpected error: {str(e)}")
raise
def _handle_error(self, error_type: str, error_message: str) -> bool:
print(f"Error type: {error_type}")
print(f"Error message: {error_message}")
if error_type == "rate_limit_error":
wait_time = self._extract_wait_time(error_message)
print(f"Rate limit exceeded. Retrying in {wait_time} seconds...")
self._time.sleep(wait_time)
return True
elif error_type == "api_error":
if self.retries < self.max_retries:
wait_time = self._calculate_wait_time(self.retries)
print(f"API error. Retrying in {wait_time} seconds...")
self._time.sleep(wait_time)
self.retries += 1
return True
return False
elif error_type == "connection_error":
if self.retries < self.max_retries:
wait_time = self._calculate_wait_time(self.retries)
print(f"Connection error. Retrying in {wait_time} seconds...")
self._time.sleep(wait_time)
self.retries += 1
return True
return False
elif error_type == "insufficient_quota":
print("Insufficient quota. Please check your OpenAI account balance.")
input("Press Enter once you've added credit to continue, or Ctrl+C to exit...")
return True
return False
def _calculate_wait_time(self, retries: int) -> float:
"""Calculate wait time using exponential backoff with jitter."""
return self.base_delay * (2 ** retries) + self._random.uniform(0, 1)
def _extract_wait_time(self, error_message: str) -> int:
"""Extract wait time from rate limit error message."""
try:
match = self._re.search(r'(\d+)\s*seconds?', error_message.lower())
if match:
return int(match.group(1))
except:
pass
return 60 # Default wait time if we can't extract it from the message
def switch_model(self, new_model: str):
"""Switch to a different OpenAI model."""
self.model = new_model
print(f"Switched to model: {self.model}")
def get_server_info(self) -> dict:
"""Get information about the current server configuration."""
return {
"base_url": self.base_url or "https://api.openai.com/v1",
"model": self.model,
"max_retries": self.max_retries
}
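# Note: OpenAILLM also works against OpenAI-compatible servers via base_url. A sketch
# (the URL and model name below are illustrative, not taken from this file):
#   OpenAILLM(api_key="sk-...", model="gpt-4o", base_url="http://localhost:8000/v1")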
class VertexAILLM(LLMInterface):
def __init__(self, project_id: str, region: str, model: Optional[str] = None):
self.project_id = project_id
self.region = region
self.client = AnthropicVertex(region=region, project_id=project_id)
self.max_retries = 5
self.retry_delay = 32 # Start with 32 seconds delay
self.model = model or "claude-3-5-sonnet-v2@20241022" # Default model
def generate_response(self, prompt: str, max_tokens: int) -> str:
        global Global_error
        self._write_debug_prompt(prompt)
        # Make sure the prompt length stays within GLOBAL_MAX_PROMPT_LENGTH, else truncate it.
        if len(prompt) > GLOBAL_MAX_PROMPT_LENGTH:
            prompt = prompt[:GLOBAL_MAX_PROMPT_LENGTH]
            Global_error = GLOBAL_ERROR_PROMPT_LENGTH
            print(Global_error)
messages = [{"role": "user", "content": prompt}]
full_response = ""
iteration = 0
max_iterations = 4 # Limit the number of iterations to prevent infinite loops
while iteration < max_iterations:
for attempt in range(self.max_retries):
try:
response = self.client.messages.create(
model=self.model,
max_tokens=max_tokens,
messages=messages
)
# For debugging, print the response usage
if response.usage:
print(f"Response usage: {response.usage}")
current_output = response.content[0].text if response.content else ""
full_response += current_output
                    # Check if the output consumed essentially the entire token budget (within 0.1%)
                    if response.usage and response.usage.output_tokens > 0.999 * max_tokens:
print("Output close to token limit. Continuing response...")
continuation_prompt = (
f"{prompt}\n\n"
f"Previous output (possibly incomplete):\n<<<START>>>{full_response}<<<END>>>\n\n"
"The previous response was very close to the output token limit and might not have completed. "
"Your previous output starts after the third greater than sign in <<<START>>> and ends at the character before the first less than sign in <<<END>>>. Please continue the output (adding new line and tabs if needed at the beginning), picking up where you left off without repeating information, your output will be appended without modification before first less than sign in <<<END>>>. Do not include anything other than the continuation of the output."
)
messages = [{"role": "user", "content": continuation_prompt}]
iteration += 1
break
else:
self._write_debug_response(full_response)
return full_response
except Exception as e:
if attempt < self.max_retries - 1:
print(f"Error occurred: {str(e)}. Retrying in {self.retry_delay} seconds...")
time.sleep(self.retry_delay)
if self.retry_delay < 64:
self.retry_delay *= 2 # Exponential backoff
else:
print(f"Max retries reached. Error: {str(e)}")
user_input = input("Do you want to try again? (yes/no): ").lower()
if user_input == 'yes':
self.retry_delay = 32 # Reset delay
continue
else:
raise
if iteration == max_iterations:
print("Reached maximum number of continuation attempts.")
break
self._write_debug_response(full_response)
return full_response
def switch_model(self, new_model: str):
self.model = new_model
print(f"Switched to model: {self.model}")
def _handle_error(self, error_type, error_message):
print(f"Vertex AI error: {error_type} - {error_message}")
return False
# --- NEW: Google Vertex LLM Class (using google-cloud-aiplatform) ---
class GoogleVertexLLM(LLMInterface):
def __init__(self, project_id: str, region: str, model: Optional[str] = None):
print(f"[GoogleVertexLLM] Initializing...")
self.project_id = project_id
self.region = region
self.model_name = model or "gemini-1.5-flash-001" # Default Google model
try:
from google.cloud import aiplatform
from vertexai.generative_models import GenerativeModel
aiplatform.init(project=self.project_id, location=self.region)
self.model = GenerativeModel(self.model_name)
print(f"Initialized Google Vertex AI client for model: {self.model_name}")
except ImportError:
print("Error: google-cloud-aiplatform package not installed.")
print("Please run: pip install google-cloud-aiplatform")
sys.exit(1)
except Exception as e:
print(f"Error initializing Google Vertex AI client: {e}")
sys.exit(1)
def generate_response(self, prompt: str, max_tokens: int) -> str:
        global Global_error
        self._write_debug_prompt(prompt)
        print(f"[GoogleVertexLLM] Generating response for model {self.model_name}...")
        if len(prompt) > GLOBAL_MAX_PROMPT_LENGTH:
            prompt = prompt[:GLOBAL_MAX_PROMPT_LENGTH]
            Global_error = GLOBAL_ERROR_PROMPT_LENGTH
            print(Global_error)
try:
# Example for Gemini
response = self.model.generate_content(
prompt,
generation_config={"max_output_tokens": max_tokens}
)
# TODO: Check Gemini response structure, might not be .text directly
# Example: Accessing parts if it's a multi-part response
if hasattr(response, 'parts') and response.parts:
response_text = "".join(part.text for part in response.parts if hasattr(part, 'text'))
elif hasattr(response, 'text'):
response_text = response.text
else: # Fallback if structure is unknown
response_text = str(response)
print(f"[GoogleVertexLLM] Warning: Unknown response structure: {response}")
self._write_debug_response(response_text)
return response_text
except Exception as e:
print(f"Error generating response from Google Vertex AI model: {e}")
# TODO: Add specific Google API error handling
raise LLMError("google_vertex_error", str(e))
    def switch_model(self, new_model: str):
        self.model_name = new_model
        # Re-initialize the underlying GenerativeModel so subsequent calls use the new model.
        try:
            from vertexai.generative_models import GenerativeModel
            self.model = GenerativeModel(self.model_name)
        except Exception as e:
            print(f"Warning: could not re-initialize Google Vertex model '{new_model}': {e}")
        print(f"Switched GoogleVertexLLM model to: {self.model_name}")
def _handle_error(self, error_type, error_message):
print(f"GoogleVertexLLM error: {error_type} - {error_message}")
return False
# --- END NEW ---
def get_llm_client(provider: str, model: Optional[str] = None, publisher: Optional[str] = None) -> LLMInterface:
global API_KEY, PROJECT_ID, REGION, SERVER, PUBLISHER
print(f"[get_llm_client] Provider: {provider}, Model: {model}, Publisher: {publisher}")
if provider == "anthropic":
if not API_KEY: raise ValueError("Anthropic API key not loaded.")
return AnthropicLLM(anthropic.Anthropic(api_key=API_KEY))
elif provider == "vertex_ai" or provider == "gcloud":
effective_publisher = publisher or PUBLISHER # Use passed publisher or global default
print(f"[get_llm_client] Effective publisher for Vertex: {effective_publisher}")
if not PROJECT_ID or not REGION:
raise ValueError("PROJECT_ID and REGION must be loaded for Vertex AI.")
# --- Instantiate correct class based on publisher ---
if effective_publisher == 'google':
print("[get_llm_client] Initializing GoogleVertexLLM...")
return GoogleVertexLLM(project_id=PROJECT_ID, region=REGION, model=model)
elif effective_publisher == 'anthropic':
print("[get_llm_client] Initializing VertexAILLM (for Anthropic on Vertex)...")
# Pass model name here, VertexAILLM handles default if None
return VertexAILLM(project_id=PROJECT_ID, region=REGION, model=model)
else:
raise ValueError(f"Unsupported publisher for Vertex AI: {effective_publisher}. Use 'google' or 'anthropic'.")
elif provider == "openai":
if not API_KEY: raise ValueError("OpenAI API key not loaded.")
# Pass model name here, OpenAILLM handles default if None
return OpenAILLM(api_key=API_KEY, model=model, base_url=SERVER)
else:
raise ValueError(f"Unsupported LLM provider: {provider}")
# Update the global llm_client variable
llm_client = None
# llm_client = get_llm_client()
# Initialize the Anthropic client
# client = anthropic.Anthropic(api_key=API_KEY)
TECHNICAL_BRIEF_FILE = os.path.join(DEVLM_FOLDER, "project_technical_brief.json")
TEST_PROGRESS_FILE = os.path.join(DEVLM_FOLDER, "test_progress.json")
CHAT_FILE = os.path.join(DEVLM_FOLDER, "chat.txt")
PROJECT_STRUCTURE_FILE = os.path.join(DEVLM_FOLDER, "project_structure.json")
DEBUG_PROMPT_FOLDER = os.path.join(DEVLM_FOLDER, "debug", "prompts")
DEBUG_RESPONSE_FOLDER = os.path.join(DEVLM_FOLDER, "debug", "responses")
TASK = None
WRITE_MODE = 'diff'
MAX_FILE_LENGTH = 20000
NO_APPROVAL = False
# Update the COMMAND_HISTORY_FILE and HISTORY_BRIEF_FILE
COMMAND_HISTORY_FILE = os.path.join(DEVLM_FOLDER, "actions", f"action_history_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json")
HISTORY_BRIEF_FILE = os.path.join(DEVLM_FOLDER, "briefs", f"history_brief_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json")
def wait_until_midnight():
now = datetime.now()
tomorrow = now.replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(days=1)
wait_time = (tomorrow - now).total_seconds()
print(f"Rate limit exceeded. Waiting until midnight ({tomorrow.strftime('%Y-%m-%d %H:%M:%S')})...")
time.sleep(wait_time)
def handle_low_credit_balance():
print("Error: Your credit balance is too low to access the Claude API.")
print("Please go to Plans & Billing to upgrade or purchase credits.")
input("Press Enter when you have added credits to continue, or Ctrl+C to exit...")
def get_last_processed_file():
if os.path.exists(TECHNICAL_BRIEF_FILE):
with open(TECHNICAL_BRIEF_FILE, 'r') as f:
brief = json.load(f)
last_processed = None
last_iteration = 0
for dir_entry in brief["directories"]:
for file_entry in dir_entry["files"]:
if file_entry.get("last_updated_iteration", 0) > last_iteration:
last_processed = os.path.join(dir_entry["path"].lstrip('/'), file_entry["name"])
last_iteration = file_entry["last_updated_iteration"]
return last_processed
return None
def retry_on_overload(max_retries=3, initial_delay=1, backoff_factor=2):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
delay = initial_delay
for attempt in range(max_retries):
try:
return func(*args, **kwargs)
except Exception as e:
if attempt == max_retries - 1:
raise
if isinstance(e, anthropic.RateLimitError) or "rate limit" in str(e).lower():
print(f"Rate limit exceeded. Retrying in {delay} seconds...")
time.sleep(delay)
delay *= backoff_factor
else:
raise
return None # This line should never be reached due to the raise in the loop
return wrapper
return decorator
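# Illustrative usage (a sketch, not taken from this file): decorate any helper that
# calls the LLM so transient rate-limit errors are retried with exponential backoff.
#   @retry_on_overload(max_retries=3, initial_delay=1, backoff_factor=2)
#   def summarize(prompt):
#       return llm_client.generate_response(prompt, 4000)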
def get_project_structure():
structure_file = os.path.join(DEVLM_FOLDER, "project_structure.json")
if os.path.exists(structure_file):
with open(structure_file, "r") as f:
return json.load(f)
else:
return {
"": []
}
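# project_structure.json maps directory names to lists of file names, with the
# empty-string key holding root-level files, e.g. {"": ["main.py"], "api": ["routes.py"]}
# (file names here are illustrative).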
def update_directory_summary(brief, directory_path):
path_parts = directory_path.split(os.sep)
current_dir = brief["directories"]
for part in path_parts:
if part:
current_dir = current_dir["directories"][part]
# Check if all files and subdirectories are processed
all_processed = all(f["status"] in ["done", "in_progress"] for f in current_dir["files"]) and \
all(subdir in brief["directory_summaries"] for subdir in current_dir["directories"])
if all_processed:
directory_summary_prompt = f"""Please provide a concise summary of the following directory based on its files, functions, and subdirectories:
{json.dumps(current_dir, indent=2)}
The summary should be a brief overview of the directory's purpose and main components. It should be useful for an AI when updating or creating new files in this directory or its subdirectories. Include key information about:
1. The overall purpose of this directory
2. Main functionalities implemented in its files
3. Important relationships or dependencies between files or subdirectories
4. Any design patterns or architectural decisions evident from the structure
5. Key interfaces or APIs exposed by this directory's components
If this directory only contains subdirectories, focus on the overall structure and purpose of these subdirectories.
Limit your response to 200 words.
"""
directory_summary = llm_client.generate_response(directory_summary_prompt, 2000)
brief["directory_summaries"][directory_path] = directory_summary.strip()
# Recursively update parent directory summaries
parent_dir = os.path.dirname(directory_path)
if parent_dir:
update_directory_summary(brief, parent_dir)
def review_project_structure(project_summary):
current_structure = get_project_structure()
prompt = f"""As an experienced software developer, please review and suggest improvements to the following project structure for our LLM-based Software Developer Project. Consider best practices, scalability, and maintainability. Suggest a new structure if needed, explaining your reasoning.
Current Project Structure:
{json.dumps(current_structure, indent=2)}
Project Summary:
{project_summary}
Please provide your suggested project structure as a JSON object, following this format:
{{
"directory_name": ["file1.ext", "file2.ext"],
"another_directory": ["file3.ext"]
}}
Important: Make sure to include appropriate file extensions for all files in your suggested structure.
Also, include sample configuration files (e.g., GitHub URL, API endpoints) in your suggested structure. The actual content for these will be provided by the user later.
Also, provide a brief explanation of your suggested changes.
"""
try:
response_text = llm_client.generate_response(prompt, 4000)
json_match = re.search(r'\{[\s\S]*\}', response_text)
if json_match:
suggested_structure = json.loads(json_match.group(0))
explanation = response_text.split(json_match.group(0))[-1].strip()
return suggested_structure, explanation
else:
raise ValueError("No JSON object found in the response")
except Exception as e:
print(f"Error reviewing project structure: {str(e)}")
return None, None
def create_project_structure(structure):
def create_files(path, items):
if isinstance(items, list):
for item in items:
file_path = os.path.join(path, item)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
if not os.path.exists(file_path):
open(file_path, 'w').close()
elif isinstance(items, dict):
for subdir, subitems in items.items():
subpath = os.path.join(path, subdir)
create_files(subpath, subitems)
for directory, items in structure.items():
if directory == "":
create_files(".", items)
else:
create_files(directory, items)
def remove_old_structure(preserve_files):
for root, dirs, files in os.walk(".", topdown=False):
for name in files:
if name not in preserve_files:
os.remove(os.path.join(root, name))
for name in dirs:
try:
os.rmdir(os.path.join(root, name))
except OSError:
pass # Directory not empty, will be removed in next iterations
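# Technical brief layout (as built by initialize_technical_brief below): a JSON document
# with "project", "directory_summaries" (path -> summary string) and a recursive
# "directories" node of the form
# {"files": [{"name", "functions", "status", ...}], "directories": {...}}.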
def initialize_technical_brief(structure):
if os.path.exists(TECHNICAL_BRIEF_FILE):
print("Technical brief already exists. Checking progress...")
return check_progress(structure)
brief = {
"project": "LLM-based Software Developer Project",
"directory_summaries": {},
"directories": {
"files": [],
"directories": {}
}
}
def process_directory(items):
dir_entry = {
"files": [],
"directories": {}
}
if isinstance(items, list):
for file in items:
if file not in ["bootstrap.py", "project_structure.json", "project_summary.md"]:
dir_entry["files"].append({"name": file, "functions": [], "status": "not_started"})
elif isinstance(items, dict):
for name, content in items.items():
if isinstance(content, list): # It's a list of files
sub_dir = {
"files": [],
"directories": {}
}
for file in content:
if file not in ["bootstrap.py", "project_structure.json", "project_summary.md"]:
sub_dir["files"].append({"name": file, "functions": [], "status": "not_started"})
dir_entry["directories"][name] = sub_dir
elif isinstance(content, dict): # It's a subdirectory
dir_entry["directories"][name] = process_directory(content)
return dir_entry
for name, content in structure.items():
if name == "": # Root-level files
for file in content:
if file not in ["bootstrap.py", "project_structure.json", "project_summary.md"]:
brief["directories"]["files"].append({"name": file, "functions": [], "status": "not_started"})
else:
brief["directories"]["directories"][name] = process_directory(content)
with open(TECHNICAL_BRIEF_FILE, 'w') as f:
json.dump(brief, f, indent=4)
save_technical_brief(brief)
return brief
def check_progress(structure):
with open(TECHNICAL_BRIEF_FILE, 'r') as f:
brief = json.load(f)
def update_directory_progress(brief_dir, structure_dir, current_path=""):
for file_name, file_info in structure_dir.items():
if isinstance(file_info, str): # It's a file
file_path = os.path.join(current_path, file_name)
file_entry = next((f for f in brief_dir.get("files", []) if f["name"] == file_name), None)
if file_entry is None:
file_entry = {"name": file_name, "functions": [], "status": "not_started"}
brief_dir.setdefault("files", []).append(file_entry)
if os.path.exists(file_path):
with open(file_path, 'r') as f:
content = f.read()
if content.strip():
file_entry["status"] = "in_progress"
else:
file_entry["status"] = "not_started"
else:
file_entry["status"] = "not_started"
elif isinstance(file_info, dict): # It's a directory
new_path = os.path.join(current_path, file_name)
brief_subdir = brief_dir.get("directories", {}).setdefault(file_name, {"files": [], "directories": {}})
update_directory_progress(brief_subdir, file_info, new_path)
update_directory_progress(brief["directories"], structure)
with open(TECHNICAL_BRIEF_FILE, 'w') as f:
json.dump(brief, f, indent=4)
return brief
def update_technical_brief(file_path, content, iteration, mode="generate", test_info=None):
with open(TECHNICAL_BRIEF_FILE, 'r') as f:
brief = json.load(f)
file_entry = find_file_entry(brief["directories"], file_path)
if file_entry is None:
file_entry = {"name": os.path.basename(file_path), "functions": [], "status": "not_started"}
update_file_entry(brief["directories"], file_path, file_entry)
if mode == "generate":
prompt = f"""Based on the following file content, please generate a complete and valid JSON object for the technical brief of the file {os.path.basename(file_path)}. The brief should include a summary of the file's purpose and a list of functions with their inputs, outputs, and a brief summary. Also, include a "todo" field for each function if there's anything that needs to be completed or improved.
File content:
{content}
Output format:
{{
"name": "{os.path.basename(file_path)}",
"summary": "File summary",
"status": "in_progress",
"functions": [
{{
"name": "function_name",
"inputs": ["param1", "param2"],
"input_types": ["type1", "type2"],
"outputs": ["result"],
"output_types": ["result_type"],
"summary": "Brief description of the function",
"todo": "Optional: any additional information or tasks to be completed"
}}
]
}}
Important: Ensure that the JSON is complete, properly formatted, and enclosed in triple backticks. Do not include any text outside the JSON object.
"""
try:
response_text = llm_client.generate_response(prompt, 4000)
json_match = re.search(r'```(?:json)?\n([\s\S]*?)\n```', response_text)
if json_match:
json_str = json_match.group(1)
else:
json_str = response_text
try:
brief_content = json.loads(json_str)
except json.JSONDecodeError:
brief_content = json5_load(StringIO(json_str))
file_entry.update(brief_content)
file_entry["last_updated_iteration"] = iteration
def is_todo_empty(todo):
if not todo:
return True
todo_lower = str(todo).lower().strip()
return todo_lower in ['', 'none', 'n/a', 'na', 'null']
if any(not is_todo_empty(func.get("todo")) for func in file_entry["functions"]):
file_entry["status"] = "in_progress"
else:
file_entry["status"] = "done"
except Exception as e:
print(f"Error updating technical brief for {file_path}: {str(e)}")
file_entry.update({
"name": os.path.basename(file_path),
"summary": f"Error generating technical brief for {os.path.basename(file_path)}",
"functions": [],
"status": "error",
"last_updated_iteration": iteration
})
elif mode == "test":
if test_info:
if "test_status" not in file_entry:
file_entry["test_status"] = []
file_entry["test_status"].append({
"timestamp": datetime.now().isoformat(),
"info": test_info
})
file_entry["last_updated_iteration"] = iteration
file_entry["status"] = "tested"
if len(file_path.split(os.sep)) == 1:
update_root_directory_summary(brief)
else:
update_directory_summary(brief, os.path.dirname(file_path))
save_technical_brief(brief)
return brief
def save_technical_brief(brief):
temp_file = TECHNICAL_BRIEF_FILE + ".temp"
with open(temp_file, 'w') as f:
json.dump(brief, f, indent=4)
os.replace(temp_file, TECHNICAL_BRIEF_FILE)
print(f"Technical brief saved to {TECHNICAL_BRIEF_FILE}")
# Verify that the file was actually updated
with open(TECHNICAL_BRIEF_FILE, 'r') as f:
saved_brief = json.load(f)
if saved_brief != brief:
print("Warning: The saved technical brief does not match the in-memory version.")
print("In-memory version:", brief)
print("Saved version:", saved_brief)
def update_root_directory_summary(brief):
root_files = brief["directories"].get("files", [])
prompt = f"""Please provide a concise summary of the root directory based on the following files:
{json.dumps(root_files, indent=2)}
The summary should focus on the purpose and content of these root-level files, their relationships, and their role in the project structure. Do not include information about subdirectories, as they have their own summaries.
Limit your response to 200 words.
"""
try:
root_summary = llm_client.generate_response(prompt, 2000)
brief["directory_summaries"]["."] = root_summary.strip()
except Exception as e:
print(f"Error generating root directory summary: {str(e)}")
brief["directory_summaries"]["."] = "Error generating root directory summary"
def get_context_for_file(file_path, brief):
path_parts = file_path.split(os.sep)
current_dir = brief["directories"]
context = {
"directory_summaries": {},
"current_directory": {}
}
# Special handling for root directory files
if len(path_parts) == 1:
context["directory_summaries"]["."] = brief.get("directory_summaries", {}).get(".", "No summary available for root directory")
context["current_directory"] = {