Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 2 additions & 28 deletions .github/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -19,14 +19,7 @@ A simple coding agent.
## How it Works
**Source Agent** operates as a stateless entity, guided by clear directives and external context. Its behavior is primarily defined by **`AGENTS.md`**, which serves as the core system prompt.

![](docs/example.gif)

---

## Prerequisites
- Python 3.10 or higher
- An API key from one of the supported AI providers (see [Supported Providers](#supported-providers))
- Git (for .gitignore support)
![](docs/example4.gif)

---

Expand All @@ -42,7 +35,7 @@ python -m venv .venv
source .venv/bin/activate # On Windows: .venv\Scripts\activate

# Install in development mode
pip install -e ".[developer]"
pip install --editable ".[developer]"

# Verify the installation
source-agent --help
Expand Down Expand Up @@ -116,22 +109,3 @@ Source Agent provides these built-in tools for code analysis:
- **msg_complete_tool** - REQUIRED tool to signal task completion and exit the agent loop

These tools are automatically available to the AI agent during analysis.

---

## Core Architecture
- **Entry Point**: `src/source_agent/entrypoint.py` - CLI interface with argument parsing
- **Agent Engine**: `src/source_agent/agents/code.py` - OpenAI-compatible client with tool integration
- **System Prompt**: `AGENTS.md` - Defines agent behavior, roles, and constraints

### Project Structure

```
source-agent/
├── src/source_agent/
│ ├── entrypoint.py # CLI interface
│ ├── agents/
│ │ └── code.py # Main agent logic
│ └── tools/ # File system tools
└── AGENTS.md # System prompt & behavior rules
```
Binary file added .github/docs/example4.gif
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Empty file removed CHANGELOG.md
Empty file.
10 changes: 0 additions & 10 deletions config.yaml

This file was deleted.

3 changes: 1 addition & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,10 @@ build-backend = "hatchling.build"

[project.scripts]
source-agent = "source_agent.entrypoint:main"
heavy-agent = "source_agent.heavy:main"

[project]
requires-python = ">=3.10"
version = "0.0.13"
version = "0.0.14"
name = "source-agent"
description = "Simple coding agent."
readme = ".github/README.md"
Expand Down
161 changes: 124 additions & 37 deletions src/source_agent/agents/code.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,38 @@
import re
import sys
import json
import time
import openai
import random
import source_agent
from enum import Enum
from typing import Any, Dict, Iterator
from pathlib import Path
from dataclasses import field, dataclass


class AgentEventType(Enum):
    """Kinds of events the agent emits while running its loop."""

    ITERATION_START = "iteration_start"      # a new loop step begins
    AGENT_MESSAGE = "agent_message"          # assistant produced text content
    TOOL_CALL = "tool_call"                  # assistant requested a tool call
    TOOL_RESULT = "tool_result"              # a tool call returned its result
    TASK_COMPLETE = "task_complete"          # task explicitly marked complete
    MAX_STEPS_REACHED = "max_steps_reached"  # step budget exhausted
    ERROR = "error"                          # unrecoverable failure occurred


@dataclass(frozen=True)
class AgentEvent:
    """An immutable record of one event emitted during an agent run.

    Attributes:
        type: The kind of event (see AgentEventType).
        data: Event-specific payload; defaults to an empty dict.
    """

    # The event kind; tells consumers how to interpret ``data``.
    type: AgentEventType
    # Payload for the event; a fresh empty dict per instance by default.
    data: Dict[str, Any] = field(default_factory=dict)


class CodeAgent:
Expand All @@ -17,10 +45,10 @@ class CodeAgent:

def __init__(
self,
api_key=None,
base_url=None,
model=None,
temperature=0.3,
api_key: str = None,
base_url: str = None,
model: str = None,
temperature: float = 0.3,
system_prompt: str = None,
):
self.api_key = api_key
Expand All @@ -46,45 +74,88 @@ def reset_conversation(self):
"""Clear conversation and initialize with system prompt."""
self.messages = [{"role": "system", "content": self.system_prompt}]

def run(self, user_prompt: str = None, max_steps: int = None):
def run(
self, user_prompt: str = None, max_steps: int = None
) -> Iterator[AgentEvent]:
"""
Run a full ReAct-style loop with tool usage.
Run a full ReAct-style loop with tool usage, yielding events at each step.

Args:
user_prompt: Optional user input to start the conversation.
max_steps: Maximum steps before stopping.

Yields:
AgentEvent: An event describing the current state or action of the agent.
"""
if user_prompt:
self.messages.append({"role": "user", "content": user_prompt})

steps = max_steps or self.MAX_STEPS

for step in range(1, steps + 1):
print(f"🔄 Iteration {step}/{steps}")
response = self.call_llm(self.messages)
yield AgentEvent(
type=AgentEventType.ITERATION_START,
data={"step": step, "max_steps": steps},
)

try:
response = self.call_llm(self.messages)
except Exception as e:
yield AgentEvent(
type=AgentEventType.ERROR,
data={
"message": f"LLM call failed: {str(e)}",
"exception_type": type(e).__name__,
},
)
return

message = response.choices[0].message
self.messages.append(message)

parsed_content = self.parse_response_message(message.content)
if parsed_content:
print("🤖 Agent:", parsed_content)
yield AgentEvent(
type=AgentEventType.AGENT_MESSAGE, data={"content": parsed_content}
)

if message.tool_calls:
for tool_call in message.tool_calls:
tool_name = tool_call.function.name
print(f"🔧 Calling: {tool_name}")
yield AgentEvent(
type=AgentEventType.TOOL_CALL,
data={
"name": tool_name,
"arguments": tool_call.function.arguments,
},
)

if tool_name == "msg_complete_tool":
print("💯 Task marked complete!\n")
yield AgentEvent(
type=AgentEventType.TASK_COMPLETE,
data={"message": "Task marked complete!"},
)
return

result = self.handle_tool_call(tool_call)
self.messages.append(result)

print("-" * 40 + "\n")

return {"error": "Max steps reached without task completion."}
result_message = self.handle_tool_call(tool_call)
self.messages.append(result_message)
# Attempt to parse tool result content as JSON if it's a string, otherwise use as-is
tool_result_content = result_message["content"]
try:
parsed_tool_result = json.loads(tool_result_content)
except (json.JSONDecodeError, TypeError):
# Fallback to string representation for complex types
parsed_tool_result = tool_result_content

yield AgentEvent(
type=AgentEventType.TOOL_RESULT,
data={"name": tool_name, "result": parsed_tool_result},
)

yield AgentEvent(
type=AgentEventType.MAX_STEPS_REACHED,
data={"message": f"Max steps ({steps}) reached without task completion."},
)

def parse_response_message(self, message: str) -> str:
"""
Expand Down Expand Up @@ -119,6 +190,11 @@ def handle_tool_call(self, tool_call):
return self._tool_error(tool_call, f"Unknown tool: {tool_name}")

result = func(**tool_args)
# Ensure result is always JSON serializable for the 'content' field of the tool message
# This is important for the LLM to process it correctly
if not isinstance(result, (str, dict, list, int, float, bool, type(None))):
result = str(result)

return {
"role": "tool",
"tool_call_id": tool_call.id,
Expand Down Expand Up @@ -160,19 +236,22 @@ def call_llm(
The response from the chat API.

Raises:
openai.Timeout: If the API call times out.
openai.APIError: If the API call fails due to an API error.
openai.OpenAIError: If the API call fails after retries.
openai.APIStatusError: If the API call fails due to an API status error.
openai.RateLimitError: If the API call exceeds the rate limit.
openai.APITimeoutError: If the API call times out.
openai.APIConnectionError: If the API call fails due to a connection error.
openai.OpenAIError: If the API call fails after retries due to an OpenAI-specific error.
Exception: For any other unexpected errors.
"""
retries = max_retries or self.MAX_RETRIES
base = backoff_base or self.BACKOFF_BASE
factor = backoff_factor or self.BACKOFF_FACTOR
cap = max_backoff or self.MAX_BACKOFF

# Define specific OpenAI errors that are generally retryable.
RETRYABLE_OPENAI_ERRORS = (
openai.RateLimitError, # 429 status code
openai.APITimeoutError, # Timeout during the API call
openai.APIConnectionError, # Network connection issues
openai.APIStatusError, # Covers 5xx errors which are often retryable, and also other 4xx errors
)

for attempt in range(1, retries + 1):
try:
return self.session.chat.completions.create(
Expand All @@ -182,26 +261,34 @@ def call_llm(
tool_choice="auto",
temperature=self.temperature,
)
except (
openai.Timeout,
openai.APIError,
openai.OpenAIError,
openai.APIStatusError,
openai.RateLimitError,
openai.APITimeoutError,
openai.APIConnectionError,
) as e:
except RETRYABLE_OPENAI_ERRORS as e:
# This block handles known retryable OpenAI API errors.
if attempt == retries:
print(f"❌ LLM call failed after {attempt} attempts: {e}")
raise
print(
f"❌ LLM call failed after {attempt} attempts: {e}",
file=sys.stderr,
)
raise # Re-raise if all retries exhausted

delay = min(base * (factor ** (attempt - 1)) + random.random(), cap)
print(
f"⚠️ Attempt {attempt} failed: {type(e).__name__}: {e}. "
f"Retrying in {delay:.1f}s..."
f"Retrying in {delay:.1f}s...",
file=sys.stderr,
)
time.sleep(delay)

except openai.OpenAIError as e:
# This block handles non-retryable OpenAI API errors (e.g., AuthenticationError,
# PermissionDeniedError, InvalidRequestError, etc.).
# These typically indicate a problem that retrying won't solve.
print(
f"❌ Non-retryable OpenAI error during LLM call: {e}",
file=sys.stderr,
)
raise # Re-raise immediately

except Exception as e:
print(f"❌ Unexpected error during LLM call: {e}")
# This block catches any other unexpected Python exceptions.
print(f"❌ Unexpected error during LLM call: {e}", file=sys.stderr)
raise
Loading