Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/NOTES.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
## Agents
- https://github.com/Doriandarko/make-it-heavy
- https://github.com/frdel/agent-zero
- https://github.com/kris-hansen/comanda
- [Flowise - Build AI Agents, Visually](https://github.com/FlowiseAI/Flowise)
- [AWS Multiagent AI Framework](https://github.com/awslabs/agent-squad)
- [Google Agent Development Kit](https://github.com/google/adk-python)
Expand Down
5 changes: 5 additions & 0 deletions .github/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,11 @@ source-agent \
--prompt "Review the error handling in this codebase"
```

### Interactive Mode
```bash
source-agent --interactive
```

---

## Supported Providers
Expand Down
10 changes: 10 additions & 0 deletions config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
orchestrator:
parallel_agents: 2
task_timeout: 300
aggregation_strategy: consensus
question_generation_prompt: |
Generate {num_agents} JSON-array questions to explore: "{user_input}"
synthesis_prompt: |
You are to synthesize {num_responses} inputs:
{agent_responses}
Produce a final coherent summary.
10 changes: 3 additions & 7 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,11 @@ build-backend = "hatchling.build"

[project.scripts]
source-agent = "source_agent.entrypoint:main"

heavy-agent = "source_agent.heavy:main"

[project]
requires-python = ">=3.10"
version = "0.0.10"
version = "0.0.11"
name = "source-agent"
description = "Simple coding agent."
readme = ".github/README.md"
Expand All @@ -21,9 +21,7 @@ dependencies = [
"openai",
"pathspec",
"requests",

# "python-dotenv",
# "pyyaml",
"pyyaml",
]

[project.optional-dependencies]
Expand All @@ -32,10 +30,8 @@ developer = [
"black",
"hatch",
"mypy",
# "pre-commit",
"pytest",
"ruff",
"uv",
]


Expand Down
36 changes: 13 additions & 23 deletions src/source_agent/agents/code.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@ def __init__(
base_url=None,
model=None,
temperature=0.3,
prompt=None,
):
self.api_key = api_key
self.base_url = base_url
Expand All @@ -25,17 +24,9 @@ def __init__(
# self.presence_penalty = 0.0005

self.messages = []
self.prompt = prompt
self.system_prompt = Path("AGENTS.md").read_text(encoding="utf-8")
self.user_prompt = (
"You are a helpful code assistant. Think step-by-step and use tools when needed.\n"
"Stop when you have completed your analysis.\n"
f"The user's prompt is:\n\n{self.prompt}"
)

# Initialize system and user messages
self.messages.append({"role": "system", "content": self.system_prompt})
self.messages.append({"role": "user", "content": self.user_prompt})

# Load tools from the registry
self.tools = source_agent.tools.tool_registry.registry.get_tools()
Expand All @@ -47,19 +38,23 @@ def __init__(
api_key=self.api_key,
)

def run(self, max_steps=50):
def run(self, user_prompt: str = None, max_steps: int = 50):
"""
If user_prompt is provided, seed it;
otherwise assume messages already has the last user turn.
Then run the full react loop to completion.
"""
if user_prompt is not None:
self.messages.append({"role": "user", "content": user_prompt})

for step in range(max_steps):
print(f"🔄 Agent iteration {step}/{max_steps}")

response = self.call_llm(self.messages)

choice = response.choices[0]
message = choice.message
self.messages.append(message)

print("🤖 Agent:", message.content)

# If the agent is using a tool, run it and loop again
if message.tool_calls:
for tool_call in message.tool_calls:
print(f"🔧 Calling: {tool_call.function.name}")
Expand All @@ -68,22 +63,17 @@ def run(self, max_steps=50):
result = self.handle_tool_call(tool_call)
self.messages.append(result)

print(f"✅ Result: {result}")
print("✅ Result:", result)

# Check if this was the task completion tool
if tool_call.function.name == "task_mark_complete":
print("💯 Task marked complete!")
return result
else:
print("💭 Agent responded without tool calls - continuing loop")
print("💭 No tools; continuing")

print(f"\n{'-'*40}\n")

print(
"🚨 Max steps reached without task completion"
" - consider refining the prompt or tools."
)
print("\n" + "-" * 40 + "\n")

print("🚨 Max steps reached without task completion.")
return {"error": "Max steps reached without task completion."}

def handle_tool_call(self, tool_call):
Expand Down
107 changes: 51 additions & 56 deletions src/source_agent/entrypoint.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ def get_provider(provider_name: str = "openrouter") -> tuple[str, str]:
Raises:
ValueError: If the provider is unknown or the API key is missing.
"""
PROVIDER_KEYS = {
provider_keys = {
"xai": "XAI_API_KEY",
"google": "GEMINI_API_KEY",
"google_vertex": "GOOGLE_VERTEX_API_KEY",
Expand All @@ -38,7 +38,7 @@ def get_provider(provider_name: str = "openrouter") -> tuple[str, str]:
"openrouter": "OPENROUTER_API_KEY",
}

PROVIDER_BASE_URLS = {
provider_base_urls = {
"xai": "https://api.x.ai/v1",
"google": "https://generativelanguage.googleapis.com/v1beta",
"google_vertex": "https://generativelanguage.googleapis.com/v1beta",
Expand All @@ -52,82 +52,61 @@ def get_provider(provider_name: str = "openrouter") -> tuple[str, str]:
"openrouter": "https://openrouter.ai/api/v1",
}

provider_key = PROVIDER_KEYS.get(provider_name.lower())
provider_key = provider_keys.get(provider_name.lower())
if not provider_key:
raise ValueError(f"Unknown provider: {provider_name}")

api_key = os.getenv(provider_key)
if not api_key:
raise ValueError(f"Missing API key for provider: {provider_name}")

base_url = PROVIDER_BASE_URLS.get(provider_name.lower())
base_url = provider_base_urls.get(provider_name.lower())
if not base_url:
raise ValueError(f"Missing base URL for provider: {provider_name}")

return api_key, base_url


def dispatch_agent(
prompt: str,
provider: str = "openrouter",
model: str = "moonshotai/kimi-k2",
temperature: float = 0.3,
) -> str:
def dispatch_agent(agent, prompt) -> str:
"""
Dispatch the agent with the given prompt.

Args:
prompt: The prompt to send to the agent.
provider: The AI provider to use.
model: The model to use.
temperature: The temperature for the model.
agent: The agent instance to run.
prompt: The prompt to provide to the agent.

Returns:
The response from the agent.

Raises:
Exception: If agent execution fails.
"""
print("Starting Source Agent")
print(f"Using provider: {provider}, model: {model}, temperature: {temperature}")

api_key, provider_url = get_provider(provider)

agent = source_agent.agents.code.CodeAgent(
api_key=api_key,
base_url=provider_url,
model=model,
prompt=prompt,
temperature=temperature,
user_prompt = (
"You are a helpful code assistant. Think step-by-step and use tools when needed.\n"
"Stop when you have completed your analysis.\n"
f"The user's prompt is:\n\n{prompt}"
)

result = agent.run()
result = agent.run(user_prompt=user_prompt)
print("Agent execution completed successfully")
return result


def validate_prompt(prompt: str, max_length: int = 10000) -> str:
"""
Validate and sanitize the prompt.

Args:
prompt: The prompt to validate.
return result

Returns:
The validated prompt.

Raises:
ValueError: If prompt is invalid.
"""
prompt = prompt.strip()
if not prompt:
raise ValueError("Prompt cannot be empty or whitespace only")
def interactive_session(agent):
print("Entering interactive mode. Type your prompt and ↵; type 'q' to quit.")
while True:
user_input = input("\n> ").strip()
if user_input.lower() == "q":
print("Exiting interactive session.")
return

# Reasonable upper limit
if len(prompt) > max_length:
raise ValueError(f"Prompt is too long (max {max_length} characters)")
# reset the conversation to just the system prompt + the new user prompt
agent.messages = [{"role": "system", "content": agent.system_prompt}]
agent.messages.append({"role": "user", "content": user_input})

return prompt
# run full react loop
agent.run()
print("\n🔚 Run completed.\n")


def main() -> int:
Expand Down Expand Up @@ -182,25 +161,41 @@ def main() -> int:
default=False,
help="Enable verbose logging",
)
parser.add_argument(
"-i",
"--interactive",
action="store_true",
default=False,
help="Run in interactive step‑through mode",
)
parser.add_argument(
"-h",
"--heavy",
action="store_true",
default=False,
help="Enable heavy mode",
)

args = parser.parse_args()

# if args.verbose:
# logging.getLogger().setLevel(logging.DEBUG)

# Validate prompt
prompt = validate_prompt(args.prompt)

# Run agent
result = dispatch_agent(
prompt=prompt,
provider=args.provider,
api_key, base_url = get_provider(args.provider)
agent = source_agent.agents.code.CodeAgent(
api_key=api_key,
base_url=base_url,
model=args.model,
temperature=args.temperature,
)

print(result)
return 0
if args.interactive:
# Run in interactive mode
return interactive_session(agent)

else:
# Let the agent run autonomously
return dispatch_agent(agent=agent, prompt=args.prompt)


if __name__ == "__main__":
Expand Down
11 changes: 11 additions & 0 deletions src/source_agent/heavy.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
import sys


def main():
    """Entry point for the heavy agent CLI.

    Placeholder implementation: announces itself and exits successfully.

    Returns:
        int: process exit status (0 = success), suitable for sys.exit().
    """
    # TODO: implement the actual heavy-agent orchestration logic
    banner = "Heavy agent running..."
    print(banner)
    return 0


# Script entry point: propagate main()'s integer result as the process exit code.
if __name__ == "__main__":
    sys.exit(main())
Empty file.