diff --git a/README.md b/README.md index a306faf..036b186 100644 --- a/README.md +++ b/README.md @@ -97,12 +97,34 @@ export PINCHBENCH_OFFICIAL_KEY=your_official_key | `--suite SUITE` | `all`, `automated-only`, or comma-separated task IDs | | `--runs N` | Number of runs per task for averaging | | `--timeout-multiplier N` | Scale timeouts for slower models | +| `--thinking LEVELS` | Comma-separated thinking levels (e.g., `low,medium,high`) | | `--output-dir DIR` | Where to save results (default: `results/`) | | `--no-upload` | Skip uploading to leaderboard | | `--register` | Request an API token for submissions | | `--upload FILE` | Upload a previous results JSON | | `--official-key KEY` | Mark submission as official (or use `PINCHBENCH_OFFICIAL_KEY` env var) | +## Thinking Levels + +Many models support configurable thinking/reasoning levels. Test how different reasoning depths affect task performance: + +```bash +# Test multiple thinking levels +./scripts/run.sh --model anthropic/claude-sonnet-4 --thinking low,medium,high + +# Compare a single level against default +./scripts/run.sh --model anthropic/claude-sonnet-4 --thinking high +``` + +Valid levels: `off`, `minimal`, `low`, `medium`, `high`, `xhigh`, `adaptive` + +**Model-specific notes:** +- `xhigh` is only available for GPT-5.x models (gpt-5.4, gpt-5.2, codex variants) +- `adaptive` is provider-managed reasoning budget (Anthropic Claude 4.6 family) +- Invalid levels for your model trigger a warning and are skipped + +Results include a `thinking_aggregates` section with per-level statistics, and each task result includes the `thinking_level` used. + ## Contributing Tasks We welcome new tasks! Check out [`tasks/TASK_TEMPLATE.md`](tasks/TASK_TEMPLATE.md) for the format. 
Good tasks are: diff --git a/SKILL.md b/SKILL.md index d6f0284..a1fcba9 100644 --- a/SKILL.md +++ b/SKILL.md @@ -73,6 +73,7 @@ uv run benchmark.py --model anthropic/claude-sonnet-4 --no-upload | `--output-dir` | Results directory (default: `results/`) | | `--timeout-multiplier` | Scale task timeouts for slower models | | `--runs` | Number of runs per task for averaging | +| `--thinking` | Comma-separated thinking levels (e.g., `low,medium,high`) | | `--no-upload` | Skip uploading to leaderboard | | `--register` | Request new API token for submissions | | `--upload FILE` | Upload previous results JSON | @@ -89,6 +90,29 @@ uv run benchmark.py --register uv run benchmark.py --model anthropic/claude-sonnet-4 ``` +## Thinking Levels + +Many models support different thinking/reasoning levels (e.g., Claude's extended thinking). PinchBench can run tasks across multiple thinking levels to measure how reasoning depth affects performance: + +```bash +# Run with multiple thinking levels +uv run benchmark.py --model anthropic/claude-sonnet-4 --thinking low,medium,high + +# Run with a single thinking level +uv run benchmark.py --model anthropic/claude-sonnet-4 --thinking high +``` + +Valid thinking levels: `off`, `minimal`, `low`, `medium`, `high`, `xhigh`, `adaptive` + +**Model-specific notes:** +- `xhigh` is only supported by GPT-5.x models (gpt-5.4, gpt-5.2, codex variants) +- `adaptive` is provider-managed reasoning (Anthropic Claude 4.6 family) +- Invalid levels for your model trigger a warning and are skipped + +Results include per-level aggregates: +- `thinking_aggregates`: Summary statistics for each thinking level +- Per-task results include `thinking_level` field + ## Results Results are saved as JSON in the output directory: diff --git a/scripts/benchmark.py b/scripts/benchmark.py index 566bcb3..5662848 100644 --- a/scripts/benchmark.py +++ b/scripts/benchmark.py @@ -30,6 +30,8 @@ ModelValidationError, slugify_model, validate_openrouter_model, + THINKING_LEVELS, + 
validate_thinking_level, ) from lib_grading import GradeResult, grade_task from lib_tasks import Task, TaskLoader @@ -236,6 +238,15 @@ def _parse_args() -> argparse.Namespace: action="store_true", help="Continue running all tasks even if sanity check scores 0%%", ) + parser.add_argument( + "--thinking", + type=str, + default=None, + help="Comma-separated thinking levels to test (e.g., 'low,medium,high'). " + f"Valid levels: {', '.join(THINKING_LEVELS)}. " + "Note: 'xhigh' requires GPT-5.x models; 'adaptive' is for Anthropic Claude 4.6. " + "If not specified, runs without explicit thinking level.", + ) return parser.parse_args() @@ -247,6 +258,43 @@ def _select_task_ids(tasks: List[Task], suite: str) -> Optional[List[str]]: return [task_id.strip() for task_id in suite.split(",") if task_id.strip()] +def _parse_thinking_levels( + thinking_arg: Optional[str], + model_id: Optional[str] = None, +) -> List[Optional[str]]: + """ + Parse thinking levels from the argument. + + Args: + thinking_arg: Comma-separated thinking levels or None + model_id: Optional model ID to check level compatibility + + Returns: + List of validated thinking levels (or [None] if no explicit level). + + Raises: + ValueError: If --thinking was provided but no levels are valid for the model. + """ + if thinking_arg is None: + return [None] # Run once without explicit thinking level + + levels: List[str] = [] + seen = set() + for level in thinking_arg.split(","): + validated = validate_thinking_level(level.strip(), model_id) + if validated and validated not in seen: + levels.append(validated) + seen.add(validated) + + if not levels: + raise ValueError( + "No valid thinking levels remain after validation. " + "Check your --thinking values for this model." 
+ ) + + return levels + + def _next_run_id(run_root: Path) -> str: run_root.mkdir(parents=True, exist_ok=True) existing = [] @@ -572,8 +620,14 @@ def main(): cleanup_agent_sessions(agent_id) task_ids = _select_task_ids(runner.tasks, args.suite) + try: + thinking_levels = _parse_thinking_levels(args.thinking, args.model) + except ValueError as exc: + logger.error(str(exc)) + sys.exit(2) results = [] - grades_by_task_id = {} + grades_by_task_id: Dict[str, Dict[str, Any]] = {} + grades_by_task_and_thinking: Dict[str, Dict[str, Any]] = {} sanity_task_id = "task_00_sanity" tasks_to_run = runner.tasks @@ -582,116 +636,115 @@ def main(): tasks_by_id = {task.task_id: task for task in tasks_to_run} runs_per_task = max(1, args.runs) - for i, task in enumerate(tasks_to_run, 1): - task_grades = [] - task_results = [] - for run_index in range(runs_per_task): - logger.info("\n%s", "=" * 80) - logger.info( - "📋 Task %s/%s (Run %s/%s)", - i, - len(tasks_to_run), - run_index + 1, - runs_per_task, - ) - logger.info("%s", "=" * 80) - execution_error = None - try: - result = execute_openclaw_task( - task=task, - agent_id=agent_id, - model_id=args.model, - run_id=f"{run_id}-{run_index + 1}", - timeout_multiplier=args.timeout_multiplier, - skill_dir=skill_dir, - verbose=args.verbose, - ) - except Exception as exc: - execution_error = str(exc) - logger.warning("Task execution failed for %s, continuing: %s", task.task_id, exc) - result = { - "agent_id": agent_id, - "task_id": task.task_id, - "status": "error", - "transcript": [], - "usage": {}, - "workspace": "", - "exit_code": -1, - "timed_out": False, - "execution_time": 0.0, - "stdout": "", - "stderr": execution_error, - } - try: - grade_kwargs = dict( - task=task, execution_result=result, skill_dir=skill_dir, verbose=args.verbose - ) - if args.judge: - grade_kwargs["judge_model"] = args.judge - grade = grade_task(**grade_kwargs) - except Exception as exc: - if execution_error: - note = f"Execution failed: {execution_error}; Grading 
failed: {exc}" - else: - note = f"Grading failed: {exc}" - logger.warning("Task grading failed for %s, continuing: %s", task.task_id, exc) - grade = GradeResult( - task_id=task.task_id, - score=0.0, - max_score=1.0, - grading_type=task.grading_type, - breakdown={}, - notes=note, - ) - task_grades.append(grade) - task_results.append(result) - results.append(result) + total_runs = len(tasks_to_run) * runs_per_task * len(thinking_levels) + run_counter = 0 - # Log score immediately after grading - score_pct = grade.score / grade.max_score * 100 if grade.max_score > 0 else 0 - status_emoji = ( - "✅" if grade.score >= grade.max_score else "⚠️" if grade.score > 0 else "❌" - ) - logger.info( - "%s Task %s: %.1f/%.1f (%.0f%%) - %s", - status_emoji, - task.task_id, - grade.score, - grade.max_score, - score_pct, - grade.grading_type, - ) - if grade.notes: - logger.info(" Notes: %s", grade.notes[:200]) - - task_scores = [grade.score for grade in task_grades] - grades_by_task_id[task.task_id] = { - "runs": [grade.to_dict() for grade in task_grades], - "mean": statistics.mean(task_scores), - "std": statistics.stdev(task_scores) if len(task_scores) > 1 else 0.0, - "min": min(task_scores), - "max": max(task_scores), - } + for thinking_level in thinking_levels: + thinking_label = thinking_level or "default" + logger.info("\n%s", "=" * 80) + logger.info("🧠 Thinking Level: %s", thinking_label) + logger.info("%s", "=" * 80) - all_runs_missing_transcript = all( - not run_result.get("transcript") for run_result in task_results - ) - if ( - task.task_id == sanity_task_id - and grades_by_task_id[task.task_id]["mean"] == 0.0 - and not args.no_fail_fast - and not all_runs_missing_transcript - ): - logger.error( - "🚨 FAIL FAST: Sanity check (%s) scored 0%%. 
Aborting benchmark run to avoid wasting resources.", - sanity_task_id, - ) - sys.exit(3) - if task.task_id == sanity_task_id and grades_by_task_id[task.task_id]["mean"] == 0.0: - if all_runs_missing_transcript: - logger.warning( - "⚠️ Sanity check scored 0%% but transcripts were missing for all runs; skipping fail-fast as likely infrastructure/logging issue." + for i, task in enumerate(tasks_to_run, 1): + task_key = f"{task.task_id}:{thinking_label}" if thinking_level else task.task_id + task_grades = [] + task_results = [] + + for run_index in range(runs_per_task): + run_counter += 1 + logger.info("\n%s", "-" * 80) + logger.info( + "📋 Task %s/%s (Run %s/%s) [%s] — Overall progress: %s/%s", + i, + len(tasks_to_run), + run_index + 1, + runs_per_task, + thinking_label, + run_counter, + total_runs, ) + logger.info("%s", "-" * 80) + execution_error = None + try: + result = execute_openclaw_task( + task=task, + agent_id=agent_id, + model_id=args.model, + run_id=f"{run_id}-{run_index + 1}", + timeout_multiplier=args.timeout_multiplier, + skill_dir=skill_dir, + thinking_level=thinking_level, + ) + except Exception as exc: + execution_error = str(exc) + logger.warning( + "Task execution failed for %s, continuing: %s", task.task_id, exc + ) + result = { + "agent_id": agent_id, + "task_id": task.task_id, + "thinking_level": thinking_level, + "status": "error", + "transcript": [], + "usage": {}, + "workspace": "", + "exit_code": -1, + "timed_out": False, + "execution_time": 0.0, + "stdout": "", + "stderr": execution_error, + } + try: + grade = grade_task(task=task, execution_result=result, skill_dir=skill_dir) + except Exception as exc: + if execution_error: + note = f"Execution failed: {execution_error}; Grading failed: {exc}" + else: + note = f"Grading failed: {exc}" + logger.warning("Task grading failed for %s, continuing: %s", task.task_id, exc) + grade = GradeResult( + task_id=task.task_id, + score=0.0, + max_score=1.0, + grading_type=task.grading_type, + breakdown={}, + 
notes=note, + ) + task_grades.append(grade) + result["thinking_level"] = thinking_level + results.append(result) + + task_scores = [grade.score for grade in task_grades] + grades_by_task_and_thinking[task_key] = { + "task_id": task.task_id, + "thinking_level": thinking_level, + "runs": [grade.to_dict() for grade in task_grades], + "mean": statistics.mean(task_scores), + "std": statistics.stdev(task_scores) if len(task_scores) > 1 else 0.0, + "min": min(task_scores), + "max": max(task_scores), + } + + # Compute per-thinking-level aggregates + thinking_aggregates: Dict[str, Dict[str, Any]] = {} + for thinking_level in thinking_levels: + thinking_label = thinking_level or "default" + level_keys = [ + k + for k, v in grades_by_task_and_thinking.items() + if v.get("thinking_level") == thinking_level + ] + if not level_keys: + continue + scores = [grades_by_task_and_thinking[k]["mean"] for k in level_keys] + thinking_aggregates[thinking_label] = { + "thinking_level": thinking_label, + "task_count": len(scores), + "mean_score": statistics.mean(scores) if scores else 0.0, + "std_score": statistics.stdev(scores) if len(scores) > 1 else 0.0, + "min_score": min(scores) if scores else 0.0, + "max_score": max(scores) if scores else 0.0, + } output_dir = Path(args.output_dir) output_dir.mkdir(parents=True, exist_ok=True) @@ -720,7 +773,26 @@ def main(): "timestamp": time.time(), "suite": args.suite, "runs_per_task": runs_per_task, - "tasks": task_entries, + "thinking_levels": [tl or "default" for tl in thinking_levels], + "thinking_aggregates": thinking_aggregates, + "tasks": [ + { + "task_id": result["task_id"], + "thinking_level": result.get("thinking_level"), + "status": result["status"], + "timed_out": result["timed_out"], + "execution_time": result["execution_time"], + "transcript_length": len(result["transcript"]), + "usage": result.get("usage", {}), + "workspace": result["workspace"], + "grading": grades_by_task_and_thinking.get( + 
f"{result['task_id']}:{result.get('thinking_level') or 'default'}", + grades_by_task_and_thinking.get(result["task_id"], {}), + ), + "frontmatter": tasks_by_id[result["task_id"]].frontmatter, + } + for result in results + ], "efficiency": efficiency, } diff --git a/scripts/lib_agent.py b/scripts/lib_agent.py index bac28d2..288a7b4 100644 --- a/scripts/lib_agent.py +++ b/scripts/lib_agent.py @@ -36,6 +36,41 @@ def _coerce_subprocess_output(value: Any) -> str: return str(value) +# Thinking levels supported by OpenClaw +# See: https://docs.openclaw.ai/tools/thinking +THINKING_LEVELS = ("off", "minimal", "low", "medium", "high", "xhigh", "adaptive") + +# Models that support xhigh thinking level (high reasoning budget) +# Sourced from OpenClaw src/auto-reply/thinking.ts XHIGH_MODEL_REFS +XHIGH_MODELS = { + # OpenAI + "openai/gpt-5.4", + "openai/gpt-5.4-pro", + "openai/gpt-5.2", + # OpenAI Codex + "openai-codex/gpt-5.4", + "openai-codex/gpt-5.3-codex", + "openai-codex/gpt-5.3-codex-spark", + "openai-codex/gpt-5.2-codex", + "openai-codex/gpt-5.1-codex", + # GitHub Copilot + "github-copilot/gpt-5.2-codex", + "github-copilot/gpt-5.2", +} + +XHIGH_MODELS_LOWER = {model.lower() for model in XHIGH_MODELS} +XHIGH_MODEL_IDS_LOWER = {model.split("/")[-1].lower() for model in XHIGH_MODELS} + +# Adaptive thinking is currently provider-managed for Anthropic Claude 4.6 models. 
+ADAPTIVE_PROVIDER = "anthropic" +ADAPTIVE_MODEL_PREFIXES = ( + "claude-opus-4-6", + "claude-opus-4.6", + "claude-sonnet-4-6", + "claude-sonnet-4.6", +) + + def slugify_model(model_id: str) -> str: return model_id.replace("/", "-").replace(".", "-").lower() @@ -155,6 +190,78 @@ def validate_openrouter_model(model_id: str, timeout_seconds: float = 10.0) -> b raise ModelValidationError(error_msg) +def supports_xhigh_thinking(model_id: str) -> bool: + """Check if a model supports xhigh thinking level.""" + normalized = slugify_model(model_id) + model_lower = model_id.lower() + xhigh_models_lower = {m.lower() for m in XHIGH_MODELS} + if normalized in xhigh_models_lower: + return True + parts = normalized.split("/") + if len(parts) == 3 and parts[0] == "openrouter": + provider_model = f"{parts[1]}/{parts[2]}" + if provider_model in xhigh_models_lower: + return True + if "/" not in model_id: + return model_lower in xhigh_models_lower + return False + + +def supports_adaptive_thinking(model_id: str) -> bool: + """Check if a model natively supports adaptive thinking.""" + normalized = slugify_model(model_id) + parts = normalized.split("/") + adaptive_provider = "anthropic" + adaptive_prefixes = ("claude-4.6",) + if len(parts) == 3 and parts[0] == "openrouter": + provider = parts[1] + model = parts[2] + elif len(parts) >= 2: + provider = parts[-2] + model = parts[-1] + else: + return False + if provider != adaptive_provider: + return False + return any(model.startswith(prefix) for prefix in adaptive_prefixes) + + +def validate_thinking_level(level: str, model_id: Optional[str] = None) -> Optional[str]: + """ + Validate a thinking level and check model compatibility. + + Args: + level: The thinking level to validate + model_id: Optional model ID to check xhigh compatibility + + Returns: + The validated level, or None if invalid + """ + level_lower = level.lower().strip() + if level_lower not in THINKING_LEVELS: + logger.warning( + "Invalid thinking level '%s'. 
Valid levels: %s", + level, + ", ".join(THINKING_LEVELS), + ) + return None + if level_lower == "xhigh" and model_id and not supports_xhigh_thinking(model_id): + logger.warning( + "Thinking level 'xhigh' not supported by model '%s'. " + "xhigh is only available for GPT-5.x model families.", + model_id, + ) + return None + if level_lower == "adaptive" and model_id and not supports_adaptive_thinking(model_id): + logger.warning( + "Thinking level 'adaptive' is not natively supported by model '%s'. " + "adaptive is currently intended for Anthropic Claude 4.6 models.", + model_id, + ) + return None + return level_lower + + def _get_agent_workspace(agent_id: str) -> Path | None: """Get the workspace path for an agent from OpenClaw config.""" try: @@ -624,6 +731,7 @@ def execute_openclaw_task( timeout_multiplier: float, skill_dir: Path, verbose: bool = False, + thinking_level: str | None = None, ) -> Dict[str, Any]: logger.info("🤖 Agent [%s] starting task: %s", agent_id, task.task_id) logger.info(" Task: %s", task.name) @@ -632,6 +740,8 @@ def execute_openclaw_task( logger.info( " Prompt: %s", task.prompt[:500] + "..." if len(task.prompt) > 500 else task.prompt ) + if thinking_level: + logger.info(" Thinking: %s", thinking_level) # Clean up previous session transcripts so we can reliably find this task's # transcript (OpenClaw uses its own UUID-based naming, not our session ID). 
@@ -668,17 +778,20 @@ def execute_openclaw_task( timed_out = True break try: + cmd = [ + "openclaw", + "agent", + "--agent", + agent_id, + "--session-id", + session_id, + "--message", + session_prompt, + ] + if thinking_level: + cmd.extend(["--thinking", thinking_level]) result = subprocess.run( - [ - "openclaw", - "agent", - "--agent", - agent_id, - "--session-id", - session_id, - "--message", - session_prompt, - ], + cmd, capture_output=True, text=True, cwd=str(workspace), @@ -701,17 +814,20 @@ def execute_openclaw_task( else: # Single-session task: send task.prompt once try: + cmd = [ + "openclaw", + "agent", + "--agent", + agent_id, + "--session-id", + session_id, + "--message", + task.prompt, + ] + if thinking_level: + cmd.extend(["--thinking", thinking_level]) result = subprocess.run( - [ - "openclaw", - "agent", - "--agent", - agent_id, - "--session-id", - session_id, - "--message", - task.prompt, - ], + cmd, capture_output=True, text=True, cwd=str(workspace), @@ -781,6 +897,7 @@ def execute_openclaw_task( return { "agent_id": agent_id, "task_id": task.task_id, + "thinking_level": thinking_level, "status": status, "transcript": transcript, "usage": usage, @@ -799,6 +916,7 @@ def run_openclaw_prompt( prompt: str, workspace: Path, timeout_seconds: float, + thinking_level: Optional[str] = None, ) -> Dict[str, Any]: """Run a single OpenClaw prompt for helper agents like the judge.""" # Clean up previous session transcripts so we can reliably find this @@ -843,17 +961,20 @@ def run_openclaw_prompt( timed_out = True break try: + cmd = [ + "openclaw", + "agent", + "--agent", + agent_id, + "--session-id", + session_id, + "--message", + chunk, + ] + if thinking_level: + cmd.extend(["--thinking", thinking_level]) result = subprocess.run( - [ - "openclaw", - "agent", - "--agent", - agent_id, - "--session-id", - session_id, - "--message", - chunk, - ], + cmd, capture_output=True, text=True, cwd=str(workspace),