Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions dynamiq/nodes/agents/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -1576,6 +1576,14 @@ def _init_prompt_blocks(self):
has_sub_agent_tools=any(isinstance(t, SubAgentTool) for t in self.tools),
)

# Append user-provided instructions to operational_instructions block
if self.instructions:
existing = self.system_prompt_manager._prompt_blocks.get("operational_instructions", "")
self.system_prompt_manager.set_block(
"operational_instructions",
f"{existing}\n\n{self.instructions}" if existing else self.instructions,
)
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

User-provided instructions lack Jinja raw wrapping, unlike the role field

Medium Severity

The new self.instructions content is injected directly into the operational_instructions Jinja template block without {% raw %}...{% endraw %} wrapping. The role field has explicit protection against unintended Jinja rendering (lines 1591–1595), but instructions does not. If a user provides instructions containing {{ }} or {% %} syntax (e.g., "Use format {{ name }}: {{ value }}"), the Jinja renderer will attempt variable substitution, likely causing an UndefinedError or silently producing incorrect prompt content.

Additional Locations (1)
Fix in Cursor Fix in Web

Reviewed by Cursor Bugbot for commit fd81f9a. Configure here.


# Only auto-wrap the entire role in a raw block if the user did not
# provide explicit raw/endraw markers. This allows roles to mix
# literal sections (via raw) with Jinja variables like {{ input }}
Expand Down
4 changes: 4 additions & 0 deletions dynamiq/nodes/agents/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -236,6 +236,10 @@ class Agent(Node):
Can be used to provide additional context or instructions to the agent.
Accepts Jinja templates to provide additional parameters.""",
)
instructions: str | None = Field(
default=None,
description="Additional operational instructions appended to the operational instructions block.",
)
description: str | None = Field(default=None, description="Short human-readable description of the agent.")
_mcp_servers: list[MCPServer] = PrivateAttr(default_factory=list)
_excluded_tool_ids: set[str] = PrivateAttr(default_factory=set)
Expand Down
19 changes: 9 additions & 10 deletions dynamiq/nodes/agents/prompts/manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
REACT_BLOCK_INSTRUCTIONS_STRUCTURED_OUTPUT,
REACT_BLOCK_OUTPUT_FORMAT,
REACT_BLOCK_TOOLS,
REACT_BLOCK_TOOLS_NO_FORMATS,
REACT_BLOCK_TOOLS_BRIEF,
REACT_BLOCK_XML_INSTRUCTIONS_NO_TOOLS,
REACT_BLOCK_XML_INSTRUCTIONS_SINGLE,
REACT_MAX_LOOPS_PROMPT,
Expand Down Expand Up @@ -321,7 +321,7 @@ def get_model_specific_prompts(
)
if has_tools:
prompt_blocks["tools"] = get_prompt_constant(
model_name, "REACT_BLOCK_TOOLS_NO_FORMATS", REACT_BLOCK_TOOLS_NO_FORMATS
model_name, "REACT_BLOCK_TOOLS_BRIEF", REACT_BLOCK_TOOLS_BRIEF
)

case InferenceMode.STRUCTURED_OUTPUT:
Expand All @@ -341,7 +341,7 @@ def get_model_specific_prompts(
logger.debug(f"Using model-specific REACT_BLOCK_XML_INSTRUCTIONS_SINGLE for '{model_name}'")
prompt_blocks["instructions"] = xml_instructions_no_tools if not has_tools else instructions_xml

# Build secondary_instructions from enabled features
# Build operational_instructions from enabled features
secondary_parts = []
if parallel_tool_calls_enabled:
secondary_parts.append(REACT_BLOCK_MULTI_TOOL_PLANNING)
Expand All @@ -354,16 +354,15 @@ def get_model_specific_prompts(
secondary_parts.append(CONTEXT_MANAGER_INSTRUCTIONS)
if todo_management_enabled:
secondary_parts.append(TODO_TOOLS_INSTRUCTIONS)
if sandbox_base_path:
secondary_parts.append(
SANDBOX_INSTRUCTIONS_TEMPLATE.format(
base_path=sandbox_base_path,
)
)
if has_sub_agent_tools:
secondary_parts.append(SUB_AGENT_INSTRUCTIONS)

if secondary_parts:
prompt_blocks["secondary_instructions"] = "\n\n".join(secondary_parts)
prompt_blocks["operational_instructions"] = "\n\n".join(secondary_parts)

if sandbox_base_path:
prompt_blocks["environment"] = SANDBOX_INSTRUCTIONS_TEMPLATE.format(
base_path=sandbox_base_path,
)

return prompt_blocks, agent_template
23 changes: 14 additions & 9 deletions dynamiq/nodes/agents/prompts/react/instructions.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,12 +33,12 @@
- Explicitly link key statements to specific findings from the referenced materials to strengthen credibility and transparency.
- Make sure to adhere to AGENT PERSONA & STYLE & ADDITIONAL BEHAVIORAL GUIDELINES.

SINGLE ACTION PER TURN:
## Single Action Per Turn
- Execute exactly ONE action per response, then wait for its Observation before continuing
- Do NOT chain multiple Action/Action Input pairs in the same response
- After receiving an Observation, decide the next single action based on the result

FILE HANDLING:
## File Handling
- Tools may generate or process files (images, CSVs, PDFs, etc.)
- If you want to return files, include an "Output Files:" line before "Answer:" listing file paths (comma-separated). This line is optional — omit it if there are no files to return.
""" # noqa: E501
Expand Down Expand Up @@ -82,7 +82,7 @@
<output_files>[Optional: comma-separated absolute file paths to return]</output_files>
</output>

CRITICAL XML FORMAT RULES:
## Critical XML Format Rules
- ALWAYS include <thought> tags with detailed reasoning
- Start the text immediately after each opening tag; do not add leading newlines or indentation inside the tags
- Write thoughts in the first person (e.g., "I will...", "I should...")
Expand All @@ -105,19 +105,19 @@
- Explicitly link key statements to specific findings from the referenced materials to strengthen credibility and transparency.
- Make sure to adhere to AGENT PERSONA & STYLE & ADDITIONAL BEHAVIORAL GUIDELINES.

SINGLE ACTION PER TURN:
## Single Action Per Turn
- Execute exactly ONE <action>/<action_input> pair per response, then wait for its Observation before continuing
- Do NOT include multiple action blocks or answer blocks in the same response
- After receiving an Observation, decide the next single action based on the result

JSON FORMATTING REQUIREMENTS:
## JSON Formatting Requirements
- Put JSON on single line within tags
- Use double quotes for all strings
- Escape newlines as \\n, quotes as \\"
- NO multi-line JSON formatting
- For tools that accept code (e.g. python), the code must be one JSON string with \\n for line breaks, not literal newlines

FILE HANDLING:
## File Handling
- Tools may generate or process files (images, CSVs, PDFs, reports, etc.)
- If you want to return files, include an <output_files> tag after </answer> (but still inside <output>) listing absolute file paths (comma-separated). This tag is optional — omit it if there are no files to return.
""" # noqa: E501
Expand Down Expand Up @@ -145,6 +145,11 @@
{{ tool_description }}
"""

REACT_BLOCK_TOOLS_BRIEF = """
Available tools: [{{ tools_name }}]
Refer to each tool's function schema for detailed usage.
"""


REACT_BLOCK_INSTRUCTIONS_STRUCTURED_OUTPUT = """Always structure your responses in this JSON format:

Expand Down Expand Up @@ -179,7 +184,7 @@
- Ensure proper JSON syntax with quoted keys and values
- To return an agent tool's response as the final output, include "delegate_final": true inside that tool's action_input. Use this only for a single agent tool call and do not call finish yourself afterward; the system will return the agent's result directly.

FILE HANDLING:
## File Handling
- Tools may generate or process files (images, CSVs, PDFs, etc.)
- When using action "finish", include an "output_files" field with comma-separated file paths to return. Use an empty string if there are no files to return.
- Never return empty response.
Expand All @@ -197,15 +202,15 @@
Only after utilizing the necessary tools and gathering the required information should
you call `provide_final_answer` to deliver the final response.

FUNCTION CALLING GUIDELINES:
## Function Calling Guidelines
- ALWAYS populate the "thought" field FIRST before "action_input" in your function calls
- Analyze the request carefully to determine if tools are needed
- Call functions with properly formatted arguments
- Handle tool responses appropriately before providing final answer
- Chain multiple tool calls when necessary for complex tasks
- If you want an agent tool's response returned verbatim as the final output, include "delegate_final": true inside that tool's action_input. Use this only for a single agent tool call and do not call provide_final_answer yourself; the system will return the agent's result directly.

FILE HANDLING:
## File Handling
- Tools may generate or process files (images, CSVs, PDFs, etc.)
- When calling `provide_final_answer`, include an `output_files` argument with comma-separated file paths to return. Pass an empty string if there are no files to return.
""" # noqa: E501
Expand Down
151 changes: 27 additions & 124 deletions dynamiq/nodes/agents/prompts/secondary_instructions.py
Original file line number Diff line number Diff line change
@@ -1,23 +1,26 @@
"""Secondary instructions for agent prompts."""

DELEGATION_INSTRUCTIONS = (
"- Optional: If you want an agent tool's response returned verbatim as the final output, "
'set "delegate_final": true in that tool\'s input. Use this only for a single agent tool call '
"and do not provide your own final answer; the system will return the agent's result directly. "
"Do not set delegate_final: true inside metadata of the input, it has to be a separate field."
"## Delegation\n"
"- If you want an agent tool's response returned verbatim as the final output, "
'set "delegate_final": true in that tool\'s input.\n'
"- Use this only for a single agent tool call "
"and do not provide your own final answer; the system will return the agent's result directly.\n"
"- Do not set delegate_final: true inside metadata of the input, it has to be a separate field."
)

DELEGATION_INSTRUCTIONS_XML = (
"## Delegation\n"
'- To return an agent tool\'s response as the final output, include "delegate_final": true inside that '
"tool's <input> or <action_input>. Use this only for a single agent tool call and do not provide an "
"<answer> yourself; the system will return the agent's result directly."
)

CONTEXT_MANAGER_INSTRUCTIONS = """CONTEXT MANAGEMENT:
CONTEXT_MANAGER_INSTRUCTIONS = """## Context Management
- Use the context-manager tool proactively when conversation is getting long
- Save critical info (IDs, filenames) in "notes" field BEFORE calling - previous messages will be summarized"""

TODO_TOOLS_INSTRUCTIONS = """TODO MANAGEMENT:
TODO_TOOLS_INSTRUCTIONS = """## Todo Management
- Use the todo-write tool for complex 3+ step tasks; skip for simple requests
- Current todos shown in [State: ...] at the end of user last messages under "Todos:"
- When creating initial list: first task "in_progress", rest "pending"
Expand All @@ -26,7 +29,7 @@
- Only mark completed when FULLY done; if blocked, keep in_progress"""


SANDBOX_INSTRUCTIONS_TEMPLATE = """SANDBOX EXECUTION ENVIRONMENT:
SANDBOX_INSTRUCTIONS_TEMPLATE = """## Sandbox Environment
You operate inside a persistent sandbox filesystem.
The sandbox directory is your working memory.

Expand All @@ -35,7 +38,7 @@
- Uploaded files are placed in {base_path}/input/.
- Other tools can ONLY access files under {base_path}/.

CRITICAL PRINCIPLE — PERSIST EVERYTHING IMPORTANT:
## Persistence
The sandbox filesystem is your long-term memory. Do not rely on conversation context alone.

Whenever you:
Expand All @@ -53,7 +56,7 @@
- parsed_results.md
- intermediate_analysis_step_1.md

EXECUTION RULES:
## Execution Rules
1. Use 'python3' instead of 'python'.
2. For Python tasks:
- Always write a .py script file first; then execute it.
Expand All @@ -74,123 +77,23 @@
"""


REACT_BLOCK_MULTI_TOOL_PLANNING = """
MULTI-TOOL PLANNING AND STRATEGY:

Core Principle: Scale tool usage to match task complexity
Start with the minimum number of tools needed and scale up based on the task's requirements.

Decision Framework:
1. Zero Tools - Answer directly when:
- Question is within your knowledge base
- No real-time data needed
- Simple explanations or general concepts

2. Single Tool - Use one tool when:
- Single fact verification needed
- One specific data point required
- Simple lookup or calculation

3. Multiple Tools (2-4) - Use parallel tools when:
- Comparing information from different sources
- Gathering complementary data points
- Cross-referencing or validation needed

4. Comprehensive Research (5+) - Use extensive tooling when:
- Deep analysis requested ("comprehensive", "detailed", "thorough")
- Multiple aspects of complex topic
- Creating reports or extensive documentation

MANDATORY MULTI-TOOL PATTERNS:

1. Research & Data Tasks:

- Start: Analyze query complexity.
- Simple facts / single source: 1–2 tool calls.
- Moderate complexity / multiple sources: 3–4 tool calls with complementary queries.
- Comprehensive research / broad topics: 5+ tool calls covering:
- General overview
- Specific benefits, use cases, or features
- Limitations, challenges, or drawbacks
- Comparisons or alternatives
- Recent developments or updates
- Example progression:
1. General topic overview
2. Specific aspects (benefits, use cases)
3. Challenges or limitations
4+. Deep dives on critical aspects

2. Coding & Technical Tasks:
- Documentation lookup: 1–2 tools (official docs + examples)
- Debugging: 2–3 tools (error search + solution patterns)
- Architecture decisions: 3–5 tools (best practices + comparisons + examples)
- Full implementation: 5+ tools (docs + patterns + edge cases + optimization)

3. Comparative Analysis / Verification:
- Single source: 1 tool
- Multiple sources: Use parallel calls for efficiency
- Comparative or critical analysis: Minimum 3 sources
- Market research or controversial topics: 3+ diverse sources, cross-referenced
- Scientific/technical validation: Cross-check experiments, methods, or datasets

EFFICIENCY GUIDELINES:

1. Parallel vs Sequential Tool Calls:
- Use PARALLEL calls when queries are independent
- Use SEQUENTIAL only when later queries depend on earlier results

2. Optimized Query Strategy:
- Start broad, then narrow based on results.
- Use varied search parameters to cover different angles.
- Each tool call should add unique value; avoid redundant queries.
- Scale detail dynamically: begin with essentials, add deeper queries if needed,
and stop once sufficient information is gathered.

MULTIPLE ENTITIES PATTERN:
When researching multiple distinct entities (companies, products, people, locations):
- BAD: "Report about Company A, Company B, Company C"
- GOOD: Separate queries for each entity
- Query 1: "Company A financial performance"
- Query 2: "Company B market position"
- Query 3: "Company C recent developments"
- REASON: Each entity gets full search attention, avoiding diluted results

QUERY FORMULATION BEST PRACTICES:
1. Entity Separation: Search distinct subjects individually
2. Perspective Variation: Use different angles for same topic
- "benefits of X" → "advantages of X" → "X success stories"
3. Temporal Layering: Mix timeframes
- "latest developments in X"
- "X trends 2025-2026"
- "future of X"
4. Source Diversification: Target different source types
- Official documentation
- Independent reviews/analysis
- Case studies/examples

REFLECTION AND VALIDATION:

- Before and after each tool use, briefly reflect on whether it's the most appropriate choice, how it contributes to solving the problem,
and whether the results are relevant, complete, and high quality – adjusting your strategy if needed.
- Before delivering the final answer, confirm that all parts of the original question are addressed
and that your conclusions are well-supported and internally consistent.

IMPORTANT RULES:
- Quality over quantity - each tool call must serve a purpose
- Explain your multi-tool strategy in your thought process
- Adapt the number of tools based on result quality
- If initial results are comprehensive, don't add unnecessary calls
- For coding: balance between documentation, examples, and best practices
- Always consider user's implicit needs beyond explicit request
- Employ strategic thinking and reflection at each step
""" # noqa: E501

SUB_AGENT_INSTRUCTIONS = """SUB-AGENT TOOLS:
REACT_BLOCK_MULTI_TOOL_PLANNING = """## Multi-Tool Planning

- Scale tool usage to task complexity: answer directly when possible, use 1-2 tools for simple lookups,
3-5 for multi-source research, more for comprehensive analysis.
- Use PARALLEL calls for independent queries; SEQUENTIAL only when results feed into the next call.
- When researching multiple entities, search each one separately - don't combine into one query.
- Each tool call must add unique value. Avoid redundant or semantically similar queries.
- Start broad, then narrow. Stop once sufficient information is gathered.
- Verify that all parts of the original question are addressed before delivering the final answer.
"""

SUB_AGENT_INSTRUCTIONS = """## Sub-Agent Tools
Sub-agent tools are specialized agents you can delegate tasks to. Each sub-agent is an independent agent \
with its own tools and expertise use them to break complex work into focused subtasks.
with its own tools and expertise - use them to break complex work into focused subtasks.

Execution modes (shown in each tool's description):
- "[Independent agent: ...]" spawns a fresh instance per call. Safe to call in parallel with other tools.
- "[Shared agent: ...]" reuses a single instance. Calls run sequentially; do not call in parallel.
- "[Independent agent: ...]" - spawns a fresh instance per call. Safe to call in parallel with other tools.
- "[Shared agent: ...]" - reuses a single instance. Calls run sequentially; do not call in parallel.

Provide each sub-agent with a clear, self-contained task description in the "input" field."""
Loading