Merged
Binary file removed .coverage
Binary file not shown.
78 changes: 39 additions & 39 deletions README.md

Large diffs are not rendered by default.

6 changes: 3 additions & 3 deletions examples/basic_llm_call.py
@@ -21,7 +21,7 @@
 
 # imports
 from chuk_llm.llm.client import get_client
-from mcp_cli.llm.system_prompt_generator import SystemPromptGenerator
+from mcp_cli.chat.system_prompt import generate_system_prompt
 
 # load environment variables
 load_dotenv()
@@ -35,8 +35,8 @@ async def run_llm_diagnostic(provider: str, model: str, prompt: str) -> None:
     # get the client
     client = get_client(provider=provider, model=model)
 
-    # get the system prompt
-    system_prompt = SystemPromptGenerator().generate_prompt({})
+    # get the system prompt (tools are passed via API, not embedded in prompt)
+    system_prompt = generate_system_prompt()
     messages: List[Dict[str, Any]] = [
         {"role": "system", "content": system_prompt},
         {"role": "user", "content": prompt},
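For readers following the migration: a minimal before/after sketch of the change these example diffs apply, based only on what the hunks above show (generate_system_prompt takes no argument when no tools are involved); the surrounding message content is illustrative.

# before: the prompt was built by a class, with tool schemas embedded in the prompt text
# from mcp_cli.llm.system_prompt_generator import SystemPromptGenerator
# system_prompt = SystemPromptGenerator().generate_prompt({})

# after: a plain function builds the prompt; tool schemas are no longer baked into it
from mcp_cli.chat.system_prompt import generate_system_prompt

system_prompt = generate_system_prompt()
messages = [
    {"role": "system", "content": system_prompt},
    {"role": "user", "content": "Hello"},  # illustrative user prompt
]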
7 changes: 4 additions & 3 deletions examples/mcp_round_trip.py
@@ -26,9 +26,9 @@
 from chuk_tool_processor.core.processor import ToolProcessor
 from chuk_tool_processor.models.tool_result import ToolResult
 
-# MCP CLI imports - only using llm_client and system_prompt_generator
+# MCP CLI imports
 from chuk_llm.llm.client import get_client
-from mcp_cli.llm.system_prompt_generator import SystemPromptGenerator
+from mcp_cli.chat.system_prompt import generate_system_prompt
 
 # Initialize colorama for colored output
 colorama_init(autoreset=True)
@@ -231,7 +231,8 @@ async def main() -> None:
 
     # 5) Send prompt to LLM
     client = get_client(provider=args.provider, model=args.model)
-    sys_prompt = SystemPromptGenerator().generate_prompt({"tools": openai_tools})
+    # Tools are passed via API, not embedded in system prompt
+    sys_prompt = generate_system_prompt(openai_tools)
     messages = [
         {"role": "system", "content": sys_prompt},
         {"role": "user", "content": args.prompt},
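The tool-calling examples follow the same pattern, except the converted tool schemas are handed both to generate_system_prompt and to the LLM request itself. A minimal sketch under stated assumptions: the tool schema, provider, and model below are illustrative, and the client method create_completion with a tools= parameter is an assumption about the chuk_llm client that this diff does not show.

import asyncio

from chuk_llm.llm.client import get_client
from mcp_cli.chat.system_prompt import generate_system_prompt

# one illustrative tool schema in OpenAI function-calling format; the real
# examples build this list from the MCP server's registered tools
openai_tools = [
    {
        "type": "function",
        "function": {
            "name": "get_time",
            "description": "Return the current UTC time",
            "parameters": {"type": "object", "properties": {}},
        },
    }
]

async def main() -> None:
    client = get_client(provider="openai", model="gpt-4o-mini")  # illustrative provider/model
    sys_prompt = generate_system_prompt(openai_tools)
    messages = [
        {"role": "system", "content": sys_prompt},
        {"role": "user", "content": "What time is it in UTC?"},
    ]
    # Tool schemas reach the model through the API call itself; the method name
    # and tools= parameter here are assumptions, not confirmed by this diff.
    completion = await client.create_completion(messages=messages, tools=openai_tools)
    print(completion)

asyncio.run(main())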
5 changes: 3 additions & 2 deletions examples/mcp_round_trip_with_toolmanager.py
@@ -23,7 +23,7 @@
 # ── MCP & LLM helpers ───────────────────────────────────────────────────
 from chuk_llm.llm.client import get_client
 from mcp_cli.tools.manager import ToolManager
-from mcp_cli.llm.system_prompt_generator import SystemPromptGenerator
+from mcp_cli.chat.system_prompt import generate_system_prompt
 
 colorama_init(autoreset=True)
 
@@ -95,7 +95,8 @@ async def main() -> None:
 
     # 3️⃣ Initial LLM call (allow tool usage)
    client = get_client(provider=args.provider, model=args.model)
-    sys_prompt = SystemPromptGenerator().generate_prompt({"tools": llm_tools})
+    # Tools are passed via API, not embedded in system prompt
+    sys_prompt = generate_system_prompt(llm_tools)
     messages: List[Dict[str, str | None]] = [
         {"role": "system", "content": sys_prompt},
         {"role": "user", "content": args.prompt},
4 changes: 2 additions & 2 deletions examples/ollama_llm_call.py
@@ -20,7 +20,7 @@
 
 # mcp cli imports
 from chuk_llm.llm.client import get_client
-from mcp_cli.llm.system_prompt_generator import SystemPromptGenerator
+from mcp_cli.chat.system_prompt import generate_system_prompt
 
 
 async def run_ollama_diagnostic(model: str, prompt: str) -> None:
@@ -30,7 +30,7 @@ async def run_ollama_diagnostic(model: str, prompt: str) -> None:
     except Exception as exc:
         sys.exit(f"[ERROR] Could not create Ollama client: {exc}")
 
-    system_prompt = SystemPromptGenerator().generate_prompt({})
+    system_prompt = generate_system_prompt()
     messages: List[Dict[str, Any]] = [
         {"role": "system", "content": system_prompt},
         {"role": "user", "content": prompt},