Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions docs/control/reference/rest_api/messages.md
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ Where `{endpoint_type}` is `chat`, `code`, `agent`, or `lang-graph`. See [URL Pa
| `tools` | `array[ToolParam]` | No | Tool definitions the model may use. See [Tools](#tools). |
| `tool_choice` | `ToolChoice` | No | How the model should use tools. See [Tool Choice](#tool-choice). |
| `thinking` | `ThinkingConfig` | No | Extended thinking configuration. See [Thinking](#thinking). |
| `reasoning_effort` | `string` | No | Reasoning effort for reasoning models: `"none"`, `"minimal"`, `"low"`, `"medium"`, `"high"`, `"xhigh"`. |
| `stop_sequences` | `array[string]` | No | Custom stop sequences. |
| `stream` | `boolean` | No | If `true`, stream the response via server-sent events. |
| `output_config` | `OutputConfig` | No | Output format configuration (e.g. JSON schema). |
Expand Down Expand Up @@ -132,6 +133,7 @@ Extended thinking configuration. Discriminated by `type`:
|------|--------|-------------|
| `enabled` | `budget_tokens` (int, min 1024) | Enable thinking with a token budget. |
| `disabled` | — | Disable thinking. |
| `adaptive` | — | Claude automatically decides whether to use extended thinking based on request complexity. |

### Output Config

Expand Down
14 changes: 14 additions & 0 deletions docs/release_notes.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,20 @@ hide:

# Release Notes

## v0.4.2

`time: 2026-03-11`

| Product | Version |
| ---------|---------|
| Control API | `60c3b428492056c3a31711d314a6978040f43774` |

??? info "v0.4.2 Release Notes"

**Updates**
- Updated reasoning efforts to match Anthropic's adaptive thinking
    - Added Dual-LLM prompt flavor 'code'

## v0.4.1

`time: 2026-02-25`
Expand Down
10 changes: 10 additions & 0 deletions src/sequrity/control/resources/messages.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
MessageParam,
MetadataParam,
OutputConfigParam,
ReasoningEffort,
TextBlockParam,
ThinkingConfigParam,
ToolChoiceParam,
Expand Down Expand Up @@ -43,6 +44,7 @@ def create(
tools: list[ToolParam | dict] | None = None,
tool_choice: ToolChoiceParam | dict | None = None,
thinking: ThinkingConfigParam | dict | None = None,
reasoning_effort: ReasoningEffort | None = None,
stop_sequences: list[str] | None = None,
stream: Literal[True],
output_config: OutputConfigParam | dict | None = None,
Expand Down Expand Up @@ -76,6 +78,7 @@ def create(
tools: list[ToolParam | dict] | None = None,
tool_choice: ToolChoiceParam | dict | None = None,
thinking: ThinkingConfigParam | dict | None = None,
reasoning_effort: ReasoningEffort | None = None,
stop_sequences: list[str] | None = None,
stream: Literal[False] | None = None,
output_config: OutputConfigParam | dict | None = None,
Expand Down Expand Up @@ -109,6 +112,7 @@ def create(
tools: list[ToolParam | dict] | None = None,
tool_choice: ToolChoiceParam | dict | None = None,
thinking: ThinkingConfigParam | dict | None = None,
reasoning_effort: ReasoningEffort | None = None,
stop_sequences: list[str] | None = None,
stream: bool | None = None,
output_config: OutputConfigParam | dict | None = None,
Expand Down Expand Up @@ -141,6 +145,7 @@ def create(
tools: List of tool definitions available to the model.
tool_choice: How the model should use the provided tools.
thinking: Configuration for extended thinking.
reasoning_effort: Reasoning effort level for supported models.
stop_sequences: Custom text sequences that cause the model to stop.
stream: Whether to stream the response. When ``True``, returns a
:class:`SyncStream` of :class:`AnthropicStreamEvent` objects.
Expand Down Expand Up @@ -179,6 +184,7 @@ def create(
"tools": tools,
"tool_choice": tool_choice,
"thinking": thinking,
"reasoning_effort": reasoning_effort,
"top_k": top_k,
"top_p": top_p,
"output_config": output_config,
Expand Down Expand Up @@ -248,6 +254,7 @@ async def create(
tools: list[ToolParam | dict] | None = None,
tool_choice: ToolChoiceParam | dict | None = None,
thinking: ThinkingConfigParam | dict | None = None,
reasoning_effort: ReasoningEffort | None = None,
stop_sequences: list[str] | None = None,
stream: Literal[True],
output_config: OutputConfigParam | dict | None = None,
Expand Down Expand Up @@ -281,6 +288,7 @@ async def create(
tools: list[ToolParam | dict] | None = None,
tool_choice: ToolChoiceParam | dict | None = None,
thinking: ThinkingConfigParam | dict | None = None,
reasoning_effort: ReasoningEffort | None = None,
stop_sequences: list[str] | None = None,
stream: Literal[False] | None = None,
output_config: OutputConfigParam | dict | None = None,
Expand Down Expand Up @@ -313,6 +321,7 @@ async def create(
tools: list[ToolParam | dict] | None = None,
tool_choice: ToolChoiceParam | dict | None = None,
thinking: ThinkingConfigParam | dict | None = None,
reasoning_effort: ReasoningEffort | None = None,
stop_sequences: list[str] | None = None,
stream: bool | None = None,
output_config: OutputConfigParam | dict | None = None,
Expand Down Expand Up @@ -344,6 +353,7 @@ async def create(
"tools": tools,
"tool_choice": tool_choice,
"thinking": thinking,
"reasoning_effort": reasoning_effort,
"top_k": top_k,
"top_p": top_p,
"output_config": output_config,
Expand Down
2 changes: 1 addition & 1 deletion src/sequrity/control/types/headers.py
Original file line number Diff line number Diff line change
Expand Up @@ -478,7 +478,7 @@ class PllmPromptOverrides(BaseModel):
model_config = ConfigDict(extra="forbid")

flavor: PromptFlavor | str | None = Field(
default=None, description="Prompt template variant to use (e.g., 'universal')."
default=None, description="Prompt template variant to use (e.g., 'universal', 'code')."
)
version: PromptVersion | str | None = Field(
default=None,
Expand Down
4 changes: 2 additions & 2 deletions src/sequrity/types/chat_completion/request.py
Original file line number Diff line number Diff line change
Expand Up @@ -325,7 +325,7 @@ class ResponseFormatJsonSchema(BaseModel):

ResponseFormat = ResponseFormatText | ResponseFormatJsonObject | ResponseFormatJsonSchema

ReasoningEffort = Literal["minimal", "low", "medium", "high"]
ReasoningEffort = Literal["none", "minimal", "low", "medium", "high", "xhigh"]

# =============================================================================
# Main Request Class
Expand All @@ -347,7 +347,7 @@ class ChatCompletionRequest(BaseModel):
# Optional fields
reasoning_effort: ReasoningEffort | None = Field(
default=None,
description="Constrains effort on reasoning for reasoning models. Supported values are 'minimal', 'low', 'medium', and 'high'.",
description="Constrains effort on reasoning for reasoning models. Supported values are 'none', 'minimal', 'low', 'medium', 'high', and 'xhigh'.",
)
response_format: ResponseFormat | None = Field(
default=None,
Expand Down
6 changes: 6 additions & 0 deletions src/sequrity/types/messages/request.py
Original file line number Diff line number Diff line change
Expand Up @@ -343,6 +343,8 @@ class ThinkingConfigAdaptiveParam(BaseModel):
Field(discriminator="type"),
]

ReasoningEffort = Literal["none", "minimal", "low", "medium", "high", "xhigh"]


# =============================================================================
# Main Request Class
Expand Down Expand Up @@ -372,6 +374,10 @@ class AnthropicMessageRequest(BaseModel):

# Optional fields
metadata: MetadataParam | None = Field(default=None, description="An object describing metadata about the request.")
reasoning_effort: ReasoningEffort | None = Field(
default=None,
description="Constrains effort on reasoning for reasoning models. Supported values are 'none', 'minimal', 'low', 'medium', 'high', and 'xhigh'.",
)
output_config: OutputConfigParam | None = Field(
default=None, description="Configuration options for the model's output, such as the output format."
)
Expand Down
Loading