diff --git a/.github/NOTES.md b/.github/NOTES.md index a82587a..77d1052 100644 --- a/.github/NOTES.md +++ b/.github/NOTES.md @@ -3,6 +3,7 @@ ## Agents - https://github.com/Doriandarko/make-it-heavy - https://github.com/frdel/agent-zero + - https://github.com/kris-hansen/comanda - [Flowise - Build AI Agents, Visually](https://github.com/FlowiseAI/Flowise) - [AWS Multiagent AI Framework](https://github.com/awslabs/agent-squad) - [Google Agent Development Kit](https://github.com/google/adk-python) diff --git a/.github/README.md b/.github/README.md index 485228b..e2dc9d1 100644 --- a/.github/README.md +++ b/.github/README.md @@ -73,6 +73,11 @@ source-agent \ --prompt "Review the error handling in this codebase" ``` +### Interactive Mode +```bash +source-agent --interactive +``` + --- ## Supported Providers diff --git a/config.yaml b/config.yaml new file mode 100644 index 0000000..4077028 --- /dev/null +++ b/config.yaml @@ -0,0 +1,10 @@ +orchestrator: + parallel_agents: 2 + task_timeout: 300 + aggregation_strategy: consensus + question_generation_prompt: | + Generate {num_agents} JSON-array questions to explore: "{user_input}" + synthesis_prompt: | + You are to synthesize {num_responses} inputs: + {agent_responses} + Produce a final coherent summary. \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index ba84371..192038d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,11 +5,11 @@ build-backend = "hatchling.build" [project.scripts] source-agent = "source_agent.entrypoint:main" - +heavy-agent = "source_agent.heavy:main" [project] requires-python = ">=3.10" -version = "0.0.10" +version = "0.0.11" name = "source-agent" description = "Simple coding agent." 
readme = ".github/README.md" @@ -21,9 +21,7 @@ dependencies = [ "openai", "pathspec", "requests", - - # "python-dotenv", - # "pyyaml", + "pyyaml", ] [project.optional-dependencies] @@ -32,10 +30,8 @@ developer = [ "black", "hatch", "mypy", - # "pre-commit", "pytest", "ruff", - "uv", ] diff --git a/src/source_agent/agents/code.py b/src/source_agent/agents/code.py index 60fe151..9f71d18 100644 --- a/src/source_agent/agents/code.py +++ b/src/source_agent/agents/code.py @@ -13,7 +13,6 @@ def __init__( base_url=None, model=None, temperature=0.3, - prompt=None, ): self.api_key = api_key self.base_url = base_url @@ -25,17 +24,9 @@ def __init__( # self.presence_penalty = 0.0005 self.messages = [] - self.prompt = prompt self.system_prompt = Path("AGENTS.md").read_text(encoding="utf-8") - self.user_prompt = ( - "You are a helpful code assistant. Think step-by-step and use tools when needed.\n" - "Stop when you have completed your analysis.\n" - f"The user's prompt is:\n\n{self.prompt}" - ) - # Initialize system and user messages self.messages.append({"role": "system", "content": self.system_prompt}) - self.messages.append({"role": "user", "content": self.user_prompt}) # Load tools from the registry self.tools = source_agent.tools.tool_registry.registry.get_tools() @@ -47,19 +38,23 @@ def __init__( api_key=self.api_key, ) - def run(self, max_steps=50): + def run(self, user_prompt: str = None, max_steps: int = 50): + """ + If user_prompt is provided, seed it; + otherwise assume messages already has the last user turn. + Then run the full react loop to completion. 
+ """ + if user_prompt is not None: + self.messages.append({"role": "user", "content": user_prompt}) + for step in range(max_steps): print(f"πŸ”„ Agent iteration {step}/{max_steps}") - response = self.call_llm(self.messages) - choice = response.choices[0] message = choice.message self.messages.append(message) - print("πŸ€– Agent:", message.content) - # If the agent is using a tool, run it and loop again if message.tool_calls: for tool_call in message.tool_calls: print(f"πŸ”§ Calling: {tool_call.function.name}") @@ -68,22 +63,17 @@ def run(self, max_steps=50): result = self.handle_tool_call(tool_call) self.messages.append(result) - print(f"βœ… Result: {result}") + print("βœ… Result:", result) - # Check if this was the task completion tool if tool_call.function.name == "task_mark_complete": print("πŸ’― Task marked complete!") return result else: - print("πŸ’­ Agent responded without tool calls - continuing loop") + print("πŸ’­ No tools; continuing") - print(f"\n{'-'*40}\n") - - print( - "🚨 Max steps reached without task completion" - " - consider refining the prompt or tools." - ) + print("\n" + "-" * 40 + "\n") + print("🚨 Max steps reached without task completion.") return {"error": "Max steps reached without task completion."} def handle_tool_call(self, tool_call): diff --git a/src/source_agent/entrypoint.py b/src/source_agent/entrypoint.py index e161803..fb4e102 100644 --- a/src/source_agent/entrypoint.py +++ b/src/source_agent/entrypoint.py @@ -24,7 +24,7 @@ def get_provider(provider_name: str = "openrouter") -> tuple[str, str]: Raises: ValueError: If the provider is unknown or the API key is missing. 
""" - PROVIDER_KEYS = { + provider_keys = { "xai": "XAI_API_KEY", "google": "GEMINI_API_KEY", "google_vertex": "GOOGLE_VERTEX_API_KEY", @@ -38,7 +38,7 @@ def get_provider(provider_name: str = "openrouter") -> tuple[str, str]: "openrouter": "OPENROUTER_API_KEY", } - PROVIDER_BASE_URLS = { + provider_base_urls = { "xai": "https://api.x.ai/v1", "google": "https://generativelanguage.googleapis.com/v1beta", "google_vertex": "https://generativelanguage.googleapis.com/v1beta", @@ -52,7 +52,7 @@ def get_provider(provider_name: str = "openrouter") -> tuple[str, str]: "openrouter": "https://openrouter.ai/api/v1", } - provider_key = PROVIDER_KEYS.get(provider_name.lower()) + provider_key = provider_keys.get(provider_name.lower()) if not provider_key: raise ValueError(f"Unknown provider: {provider_name}") @@ -60,74 +60,53 @@ def get_provider(provider_name: str = "openrouter") -> tuple[str, str]: if not api_key: raise ValueError(f"Missing API key for provider: {provider_name}") - base_url = PROVIDER_BASE_URLS.get(provider_name.lower()) + base_url = provider_base_urls.get(provider_name.lower()) if not base_url: raise ValueError(f"Missing base URL for provider: {provider_name}") return api_key, base_url -def dispatch_agent( - prompt: str, - provider: str = "openrouter", - model: str = "moonshotai/kimi-k2", - temperature: float = 0.3, -) -> str: +def dispatch_agent(agent, prompt) -> str: """ Dispatch the agent with the given prompt. Args: - prompt: The prompt to send to the agent. - provider: The AI provider to use. - model: The model to use. - temperature: The temperature for the model. + agent: The agent instance to run. + prompt: The prompt to provide to the agent. Returns: The response from the agent. - - Raises: - Exception: If agent execution fails. 
""" print("Starting Source Agent") - print(f"Using provider: {provider}, model: {model}, temperature: {temperature}") - api_key, provider_url = get_provider(provider) - - agent = source_agent.agents.code.CodeAgent( - api_key=api_key, - base_url=provider_url, - model=model, - prompt=prompt, - temperature=temperature, + user_prompt = ( + "You are a helpful code assistant. Think step-by-step and use tools when needed.\n" + "Stop when you have completed your analysis.\n" + f"The user's prompt is:\n\n{prompt}" ) - result = agent.run() + result = agent.run(user_prompt=user_prompt) print("Agent execution completed successfully") - return result - -def validate_prompt(prompt: str, max_length: int = 10000) -> str: - """ - Validate and sanitize the prompt. - - Args: - prompt: The prompt to validate. + return result - Returns: - The validated prompt. - Raises: - ValueError: If prompt is invalid. - """ - prompt = prompt.strip() - if not prompt: - raise ValueError("Prompt cannot be empty or whitespace only") +def interactive_session(agent): + print("Entering interactive mode. 
Type your prompt and ↡; type 'q' to quit.") + while True: + user_input = input("\n> ").strip() + if user_input.lower() == "q": + print("Exiting interactive session.") + return - # Reasonable upper limit - if len(prompt) > max_length: - raise ValueError(f"Prompt is too long (max {max_length} characters)") + # reset the conversation to just the system prompt + the new user prompt + agent.messages = [{"role": "system", "content": agent.system_prompt}] + agent.messages.append({"role": "user", "content": user_input}) - return prompt + # run full react loop + agent.run() + print("\nπŸ”š Run completed.\n") def main() -> int: @@ -182,25 +161,42 @@ def main() -> int: default=False, help="Enable verbose logging", ) + parser.add_argument( + "-i", + "--interactive", + action="store_true", + default=False, + help="Run in interactive step-through mode", + ) + parser.add_argument( + "-H", + "--heavy", + action="store_true", + default=False, + help="Enable heavy mode", + ) args = parser.parse_args() # if args.verbose: # logging.getLogger().setLevel(logging.DEBUG) - # Validate prompt - prompt = validate_prompt(args.prompt) - - # Run agent - result = dispatch_agent( - prompt=prompt, - provider=args.provider, + api_key, base_url = get_provider(args.provider) + agent = source_agent.agents.code.CodeAgent( + api_key=api_key, + base_url=base_url, model=args.model, temperature=args.temperature, ) - print(result) - return 0 + if args.interactive: + # Run in interactive mode + interactive_session(agent) + + else: + # Let the agent run autonomously + dispatch_agent(agent=agent, prompt=args.prompt) + return 0 if __name__ == "__main__": diff --git a/src/source_agent/heavy.py b/src/source_agent/heavy.py new file mode 100644 index 0000000..92fd75c --- /dev/null +++ b/src/source_agent/heavy.py @@ -0,0 +1,11 @@ +import sys + + +def main(): + print("Heavy agent running...") + # Add heavy agent logic here + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git 
a/src/source_agent/orchestrator.py b/src/source_agent/orchestrator.py new file mode 100644 index 0000000..e69de29