From 83253916c514062375790158cd07a8ef6ccbe19e Mon Sep 17 00:00:00 2001
From: Kevin Meisel
Date: Tue, 3 Feb 2026 21:39:52 +0100
Subject: [PATCH] Fix eval prompt collision with model flag

---
 src/upskill/cli.py | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/src/upskill/cli.py b/src/upskill/cli.py
index 6e8c680..82df596 100644
--- a/src/upskill/cli.py
+++ b/src/upskill/cli.py
@@ -45,12 +45,31 @@
 console = Console()
 
 
+KNOWN_PROVIDERS = ("anthropic", "openai", "generic")
+
+
+def _model_has_provider(model: str) -> bool:
+    return any(model.startswith(f"{provider}.") for provider in KNOWN_PROVIDERS)
+
+
+def _apply_provider_to_models(models: list[str], provider: str | None) -> list[str]:
+    if not provider:
+        return models
+    normalized: list[str] = []
+    for model in models:
+        if _model_has_provider(model):
+            normalized.append(model)
+        else:
+            normalized.append(f"{provider}.{model}")
+    return normalized
+
 
 @asynccontextmanager
 async def _fast_agent_context() -> AsyncIterator[object]:
     fast = FastAgent(
         "upskill",
         ignore_unknown_args=True,
+        parse_cli_args=False,  # Prevent FastAgent from consuming upskill CLI flags (e.g. -m).
     )
 
     @fast.agent()
@@ -719,6 +738,8 @@ async def _eval_async(
     if not models:
         models = [config.effective_eval_model]
 
+    models = _apply_provider_to_models(models, provider)
+
     is_benchmark_mode = len(models) > 1 or num_runs > 1
 
     async with _fast_agent_context() as agent: