Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .github/workflows/checks.yml
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,9 @@ jobs:
- name: Run pyright
run: uv run scripts/lint.py

- name: Run ty
run: uv run scripts/typecheck.py

test:
runs-on: ubuntu-latest
steps:
Expand Down
7 changes: 7 additions & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,3 +8,10 @@ repos:
args: [--fix]
# Run the formatter.
- id: ruff-format
- repo: local
hooks:
- id: ty
name: ty check
entry: uv run scripts/typecheck.py
language: system
pass_filenames: false
4 changes: 3 additions & 1 deletion examples/tool-use-agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,9 @@ def __init__(
@fast.custom(CustomToolAgent)
async def main() -> None:
async with fast.run() as agent:
await agent.default.generate("What is the topic of the video call no.1234?")
await agent.default.generate(
"What is the topic of the video call no.1234?",
)


if __name__ == "__main__":
Expand Down
47 changes: 27 additions & 20 deletions publish/hf-inference-acp/src/hf_inference_acp/agents.py
Original file line number Diff line number Diff line change
Expand Up @@ -131,8 +131,9 @@ async def attach_llm(self, llm_factory, model=None, request_params=None, **kwarg
llm = await super().attach_llm(llm_factory, model, request_params, **kwargs)

# Set up wizard callback if LLM supports it
if hasattr(llm, "set_completion_callback"):
llm.set_completion_callback(self._on_wizard_complete)
callback_setter = getattr(llm, "set_completion_callback", None)
if callback_setter is not None:
callback_setter(self._on_wizard_complete)

return llm

Expand Down Expand Up @@ -220,6 +221,7 @@ def acp_session_commands_allowlist(self) -> set[str]:

async def _handle_set_model(self, arguments: str) -> str:
"""Handler for /set-model command."""
from fast_agent.llm.hf_inference_lookup import validate_hf_model
from fast_agent.llm.model_factory import ModelFactory

model = arguments.strip()
Expand All @@ -229,20 +231,24 @@ async def _handle_set_model(self, arguments: str) -> str:
# Normalize the model string (auto-add hf. prefix if needed)
model = _normalize_hf_model(model)

# Validate the model string before saving to config
# Validate the model string format
try:
ModelFactory.parse_model_string(model)
except Exception as e:
return f"Error: Invalid model `{model}` - {e}"

# Look up inference providers for this model
provider_info = await _lookup_and_format_providers(model)
# Validate model exists on HuggingFace and has providers
validation = await validate_hf_model(model, aliases=ModelFactory.MODEL_ALIASES)
if not validation.valid:
return validation.error or "Error: Model validation failed"

try:
update_model_in_config(model)
applied = await self._apply_model_to_running_hf_agent(model)
applied_note = "\n\nApplied to the running Hugging Face agent." if applied else ""
provider_prefix = f"{provider_info}\n\n" if provider_info else ""
provider_prefix = (
f"{validation.display_message}\n\n" if validation.display_message else ""
)
return (
f"{provider_prefix}"
f"Default model set to: `{model}`\n\nConfig file updated: `{CONFIG_FILE}`"
Expand Down Expand Up @@ -498,29 +504,25 @@ async def _send_connect_update(
await _send_connect_update(title="Connected", status="in_progress")

# Rebuild system prompt to include fresh server instructions
await _send_connect_update(
title="Rebuilding system prompt…", status="in_progress"
)
await self.rebuild_instruction_templates()
await _send_connect_update(title="Rebuilding system prompt…", status="in_progress")
await self._apply_instruction_templates()

# Get available tools
await _send_connect_update(title="Fetching available tools…", status="in_progress")
tools_result = await self._aggregator.list_tools()
tool_names = [t.name for t in tools_result.tools] if tools_result.tools else []

# Send final progress update (but don't mark as completed yet -
# the return value serves as the completion signal)
if tool_names:
preview = ", ".join(tool_names[:10])
suffix = f" (+{len(tool_names) - 10} more)" if len(tool_names) > 10 else ""
await _send_connect_update(
title="Connected (tools available)",
title=f"Connected ({len(tool_names)} tools)",
status="completed",
message=f"Available tools: {preview}{suffix}",
)
else:
await _send_connect_update(
title="Connected (no tools found)",
title="Connected (no tools)",
status="completed",
message="No tools available from the server.",
)

if tool_names:
Expand All @@ -543,6 +545,7 @@ async def _send_connect_update(

async def _handle_set_model(self, arguments: str) -> str:
"""Handler for /set-model in Hugging Face mode."""
from fast_agent.llm.hf_inference_lookup import validate_hf_model
from fast_agent.llm.model_factory import ModelFactory

model = arguments.strip()
Expand All @@ -552,20 +555,24 @@ async def _handle_set_model(self, arguments: str) -> str:
# Normalize the model string (auto-add hf. prefix if needed)
model = _normalize_hf_model(model)

# Validate the model string before applying
# Validate the model string format
try:
ModelFactory.parse_model_string(model)
except Exception as e:
return f"Error: Invalid model `{model}` - {e}"

# Look up inference providers for this model
provider_info = await _lookup_and_format_providers(model)
# Validate model exists on HuggingFace and has providers
validation = await validate_hf_model(model, aliases=ModelFactory.MODEL_ALIASES)
if not validation.valid:
return validation.error or "Error: Model validation failed"

try:
# Apply model first - if this fails, don't update config
await self.apply_model(model)
update_model_in_config(model)
provider_prefix = f"{provider_info}\n\n" if provider_info else ""
provider_prefix = (
f"{validation.display_message}\n\n" if validation.display_message else ""
)
return f"{provider_prefix}Active model set to: `{model}`\n\nConfig file updated: `{CONFIG_FILE}`"
except Exception as e:
return f"Error setting model: {e}"
Expand Down
3 changes: 2 additions & 1 deletion publish/hf-inference-acp/src/hf_inference_acp/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
import shlex
import sys
from pathlib import Path # noqa: TC003 - typer needs runtime access
from typing import Any, cast

import typer

Expand Down Expand Up @@ -220,7 +221,7 @@ async def run_agents(
if skills_directory is not None:
fast_kwargs["skills_directory"] = skills_directory

fast = FastAgent(**fast_kwargs)
fast = FastAgent(**cast("Any", fast_kwargs))

if shell_runtime:
await fast.app.initialize()
Expand Down
2 changes: 1 addition & 1 deletion publish/hf-inference-acp/src/hf_inference_acp/hf_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ def discover_hf_token(*, ignore_env: bool = False) -> tuple[str | None, str | No
from huggingface_hub import get_token

token = get_token()
return token, "huggingface_hub" if token else (None, None)
return (token, "huggingface_hub") if token else (None, None)
except ImportError:
pass

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -414,6 +414,8 @@ async def _handle_confirm(self, user_input: str) -> str:
elif cmd in ("y", "yes", "confirm", "ok", "save"):
# Save configuration
try:
if self._state.selected_model is None:
return "No model selected. Please select a model first."
update_model_in_config(self._state.selected_model)
update_mcp_server_load_on_start("huggingface", self._state.mcp_load_on_start)
self._state.stage = WizardStage.COMPLETE
Expand Down
12 changes: 2 additions & 10 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -64,16 +64,6 @@ all-providers = [
"boto3>=1.35.0",
"tensorzero>=2025.7.5"
]
dev = [
"pre-commit>=4.0.1",
"pydantic>=2.10.4",
"pyyaml>=6.0.2",
"ruff>=0.8.4",
"pytest>=7.4.0",
"pytest-asyncio>=0.21.1",
"pytest-cov",
"ruamel.yaml>=0.18.0",
]

[build-system]
requires = ["hatchling"]
Expand Down Expand Up @@ -117,11 +107,13 @@ testpaths = ["tests"]

[dependency-groups]
dev = [
"boto3>=1.35.0",
"pre-commit>=4.0.1",
"pydantic>=2.10.4",
"ruamel.yaml>=0.18.0",
"pyyaml>=6.0.2",
"ruff>=0.8.4",
"ty>=0.0.5",
"pytest>=7.4.0",
"pytest-asyncio>=0.21.1",
"pytest-cov>=6.1.1",
Expand Down
27 changes: 27 additions & 0 deletions scripts/typecheck.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
import subprocess
import sys

import typer
from rich import print


def main(path: str = "src") -> None:
    """Run the `ty` type checker over *path* and exit with its return code.

    Args:
        path: Directory or file to type-check. Defaults to ``"src"``.

    Exits:
        With ``ty``'s own exit status, or 1 if the ``ty`` executable is not
        installed in the environment.
    """
    try:
        # No stdout/stderr redirection: the child inherits the parent's
        # streams by default, so ty's diagnostics go straight to the
        # terminal. check=False because we propagate the exit code
        # ourselves instead of routing failure through an exception.
        process = subprocess.run(["ty", "check", path], check=False)
    except FileNotFoundError:
        print("Error: `ty` command not found. Make sure it's installed in the environment.")
        sys.exit(1)
    sys.exit(process.returncode)


# Script entry point: let Typer build the CLI from main()'s signature
# when invoked directly (e.g. `uv run scripts/typecheck.py`).
if __name__ == "__main__":
    typer.run(main)
36 changes: 36 additions & 0 deletions src/fast_agent/acp/filesystem_runtime.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@

from typing import TYPE_CHECKING, Any

from acp.helpers import tool_diff_content
from acp.schema import ToolCallProgress
from mcp.types import CallToolResult, Tool

from fast_agent.core.logging.logger import get_logger
Expand Down Expand Up @@ -312,6 +314,18 @@ async def write_text_file(
content_length=len(content),
)

# Read existing file content for diff display (if file exists)
old_text: str | None = None
try:
response = await self.connection.read_text_file(
path=path,
session_id=self.session_id,
)
old_text = response.content
except Exception:
# File doesn't exist or can't be read - that's fine, old_text stays None
pass

# Check permission before execution
if self._permission_handler:
try:
Expand All @@ -320,6 +334,9 @@ async def write_text_file(
server_name="acp_filesystem",
arguments=arguments,
tool_use_id=tool_use_id,
diff_old_text=old_text,
diff_new_text=content,
diff_path=path,
)
if not permission_result.allowed:
error_msg = permission_result.error_message or (
Expand Down Expand Up @@ -382,6 +399,25 @@ async def write_text_file(
except Exception as e:
self.logger.error(f"Error in tool complete handler: {e}", exc_info=True)

# Send diff content update for UI display
if tool_call_id:
try:
diff_content = tool_diff_content(
path=path,
new_text=content,
old_text=old_text,
)
await self.connection.session_update(
session_id=self.session_id,
update=ToolCallProgress(
session_update="tool_call_update",
tool_call_id=tool_call_id,
content=[diff_content],
),
)
except Exception as e:
self.logger.error(f"Error sending diff content update: {e}", exc_info=True)

return result

except Exception as e:
Expand Down
56 changes: 56 additions & 0 deletions src/fast_agent/acp/protocols.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
"""
ACP capability Protocols for type-safe isinstance checks.

These Protocols define optional capabilities that agents may implement.
Use isinstance() checks instead of hasattr() to verify capability support.
"""

from __future__ import annotations

from typing import TYPE_CHECKING, Protocol, runtime_checkable

if TYPE_CHECKING:
from fast_agent.acp.filesystem_runtime import ACPFilesystemRuntime
from fast_agent.acp.terminal_runtime import ACPTerminalRuntime
from fast_agent.tools.shell_runtime import ShellRuntime
from fast_agent.workflow_telemetry import PlanTelemetryProvider, WorkflowTelemetryProvider


@runtime_checkable
class ShellRuntimeCapable(Protocol):
    """Agent that supports external shell runtime injection.

    NOTE: ``isinstance()`` against a ``runtime_checkable`` Protocol only
    verifies member *presence* (effectively ``hasattr``), never signatures
    or types. ``issubclass()`` raises ``TypeError`` for protocols with
    non-method members such as ``_shell_runtime``.
    """

    # Backing shell runtime instance (data member: only its presence is
    # verified by isinstance checks, not its type).
    _shell_runtime: "ShellRuntime"

    @property
    def _shell_runtime_enabled(self) -> bool: ...

    def set_external_runtime(self, runtime: "ACPTerminalRuntime") -> None: ...


@runtime_checkable
class FilesystemRuntimeCapable(Protocol):
    """Agent that supports external filesystem runtime injection.

    ``isinstance()`` checks only confirm a ``set_filesystem_runtime``
    attribute exists; the signature is not validated at runtime.
    """

    def set_filesystem_runtime(self, runtime: "ACPFilesystemRuntime") -> None: ...


@runtime_checkable
class InstructionContextCapable(Protocol):
    """Agent that supports dynamic instruction context updates.

    NOTE(review): the semantics of *context* (presumably template-variable
    name -> value for instruction rendering) are not visible here — confirm
    against implementers before relying on a specific key set.
    """

    def set_instruction_context(self, context: dict[str, str]) -> None: ...


@runtime_checkable
class WorkflowTelemetryCapable(Protocol):
    """Agent that supports workflow telemetry.

    Non-method member: ``isinstance()`` only checks the attribute exists;
    ``issubclass()`` raises ``TypeError`` for data-member protocols.
    """

    # Provider instance, or None when telemetry is disabled.
    workflow_telemetry: "WorkflowTelemetryProvider | None"


@runtime_checkable
class PlanTelemetryCapable(Protocol):
    """Agent that supports plan telemetry.

    Non-method member: ``isinstance()`` only checks the attribute exists;
    ``issubclass()`` raises ``TypeError`` for data-member protocols.
    """

    # Provider instance, or None when plan telemetry is disabled.
    plan_telemetry: "PlanTelemetryProvider | None"
Loading
Loading