Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 14 additions & 12 deletions skills/pm/SKILL.md
Original file line number Diff line number Diff line change
Expand Up @@ -58,15 +58,8 @@ After every MCP response, do these three things:

Print the MCP content text to the user first.

Then check: does `meta.ask_user_question` exist?

- **YES** → Pass it directly to `AskUserQuestion`:
```
AskUserQuestion(questions=[meta.ask_user_question])
```
Do NOT modify it. Do NOT add options. Do NOT rephrase the question.

- **NO** → This is an interview question. Use `AskUserQuestion` with `meta.question` and generate 2-3 suggested answers.
Then use `AskUserQuestion` with `meta.question` and generate 2-3 suggested answers.
Do not wait for `meta.ask_user_question` — the PM backend does not emit that field.

**C. Relay answer back:**

Expand Down Expand Up @@ -94,10 +87,18 @@ Arguments:

### Step 5: Copy to Clipboard

After generation, read the pm.md file from `meta.pm_path` and copy its contents to the clipboard:
After generation, read the pm.md file from `meta.pm_path` and, if a local clipboard tool is available, copy its contents to the clipboard:

```bash
cat <meta.pm_path> | pbcopy
if command -v pbcopy >/dev/null 2>&1; then
cat <meta.pm_path> | pbcopy
elif command -v wl-copy >/dev/null 2>&1; then
cat <meta.pm_path> | wl-copy
elif command -v xclip >/dev/null 2>&1; then
cat <meta.pm_path> | xclip -selection clipboard
else
echo "No clipboard tool found; skipping clipboard copy."
fi
```

### Step 6: Show Result & Next Step
Expand All @@ -106,7 +107,8 @@ Show the following to the user:

```
PM document saved: <meta.pm_path>
(Clipboard에 복사되었습니다)
(Copied to clipboard)   <!-- include this line only if Step 5 actually copied to the clipboard -->

Next step:
ooo interview <meta.pm_path>
Expand Down
173 changes: 95 additions & 78 deletions src/ouroboros/bigbang/pm_interview.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,7 @@

# Model for extraction (uses same as interview for consistency)
_FALLBACK_MODEL = "claude-opus-4-6"
_MAX_AUTO_ADVANCE_QUESTIONS = 8


@dataclass
Expand Down Expand Up @@ -458,93 +459,109 @@ async def ask_next_question(
Returns:
Result containing the (possibly reframed) question or error.
"""
# Generate question via inner engine
question_result = await self.inner.ask_next_question(state)
auto_advanced_questions = 0

if question_result.is_err:
return question_result
while True:
# Generate question via inner engine
question_result = await self.inner.ask_next_question(state)

question = question_result.value
if question_result.is_err:
return question_result

# Classify the question
context = self._build_interview_context(state)
classify_result = await self.classifier.classify(
question=question,
interview_context=context,
)
question = question_result.value

if classify_result.is_err:
# Classification failed — return original question (safe fallback)
log.warning("pm.classification_failed", question=question[:100])
return question_result

classification = classify_result.value
self.classifications.append(classification)
# Classify the question
context = self._build_interview_context(state)
classify_result = await self.classifier.classify(
question=question,
interview_context=context,
)

output_type = classification.output_type
if classify_result.is_err:
# Classification failed — return original question (safe fallback)
log.warning("pm.classification_failed", question=question[:100])
return question_result

if output_type == ClassifierOutputType.DEFERRED:
# Track as deferred item and generate a new question
self.deferred_items.append(classification.original_question)
log.info(
"pm.question_deferred",
question=classification.original_question[:100],
reasoning=classification.reasoning,
output_type=output_type,
)
# Feed an automatic response back to the inner InterviewEngine
# so the round is properly recorded and the engine advances.
# This prevents the inner engine from re-generating similar
# technical questions it doesn't know were already handled.
await self.record_response(
state,
user_response="[Deferred to development phase] "
"This technical decision will be addressed during the "
"development interview.",
question=classification.original_question,
)
# Recursively ask for the next real question
return await self.ask_next_question(state)
classification = classify_result.value
self.classifications.append(classification)

if output_type == ClassifierOutputType.DECIDE_LATER:
# Auto-answer with placeholder — no PM interaction needed
placeholder = classification.placeholder_response
self.decide_later_items.append(classification.original_question)
log.info(
"pm.question_decide_later",
question=classification.original_question[:100],
placeholder=placeholder[:100],
reasoning=classification.reasoning,
)
# Record the placeholder as the response so the interview
# engine advances its round count
await self.record_response(
state,
user_response=f"[Decide later] {placeholder}",
question=classification.original_question,
)
# Recursively ask for the next real question
return await self.ask_next_question(state)
output_type = classification.output_type

if output_type == ClassifierOutputType.REFRAMED:
# Use the reframed version and track the mapping
reframed = classification.question_for_pm
self._reframe_map[reframed] = classification.original_question
log.info(
"pm.question_reframed",
original=classification.original_question[:100],
reframed=reframed[:100],
output_type=output_type,
)
return Result.ok(reframed)
if output_type == ClassifierOutputType.DEFERRED:
self.deferred_items.append(classification.original_question)
log.info(
"pm.question_deferred",
question=classification.original_question[:100],
reasoning=classification.reasoning,
output_type=output_type,
)
record_result = await self.record_response(
state,
user_response="[Deferred to development phase] "
"This technical decision will be addressed during the "
"development interview.",
question=classification.original_question,
)
if record_result.is_err:
return Result.err(record_result.error)
state = record_result.value
elif output_type == ClassifierOutputType.DECIDE_LATER:
placeholder = classification.placeholder_response
self.decide_later_items.append(classification.original_question)
log.info(
"pm.question_decide_later",
question=classification.original_question[:100],
placeholder=placeholder[:100],
reasoning=classification.reasoning,
)
record_result = await self.record_response(
state,
user_response=f"[Decide later] {placeholder}",
question=classification.original_question,
)
if record_result.is_err:
return Result.err(record_result.error)
state = record_result.value
elif output_type == ClassifierOutputType.REFRAMED:
# Use the reframed version and track the mapping
reframed = classification.question_for_pm
self._reframe_map[reframed] = classification.original_question
log.info(
"pm.question_reframed",
original=classification.original_question[:100],
reframed=reframed[:100],
output_type=output_type,
)
return Result.ok(reframed)
else:
# PASSTHROUGH — planning question forwarded unchanged to the PM
log.debug(
"pm.question_passthrough",
question=classification.original_question[:100],
output_type=output_type,
)
return Result.ok(classification.question_for_pm)

# PASSTHROUGH — planning question forwarded unchanged to the PM
log.debug(
"pm.question_passthrough",
question=classification.original_question[:100],
output_type=output_type,
)
return Result.ok(classification.question_for_pm)
auto_advanced_questions += 1
if auto_advanced_questions >= _MAX_AUTO_ADVANCE_QUESTIONS:
log.warning(
"pm.question_auto_advance_limit_exceeded",
interview_id=state.interview_id,
auto_advanced_questions=auto_advanced_questions,
deferred_count=len(self.deferred_items),
decide_later_count=len(self.decide_later_items),
)
return Result.err(
ProviderError(
"PM interview skipped too many non-PM questions without "
"finding a PM-answerable follow-up. Please retry.",
details={
"auto_advanced_questions": auto_advanced_questions,
"deferred_count": len(self.deferred_items),
"decide_later_count": len(self.decide_later_items),
},
)
)

async def record_response(
self,
Expand Down
11 changes: 10 additions & 1 deletion src/ouroboros/cli/commands/pm.py
Original file line number Diff line number Diff line change
Expand Up @@ -316,7 +316,16 @@ async def _run_pm_interview(
output_dir: Optional output directory for the generated PM document.
"""
from ouroboros.bigbang.pm_interview import PMInterviewEngine
from ouroboros.providers.litellm_adapter import LiteLLMAdapter

try:
from ouroboros.providers.litellm_adapter import LiteLLMAdapter
except ImportError:
print_error(
"litellm is required for the PM command but is not installed.\n"
" Install with: uv tool install --with litellm ouroboros-ai\n"
" Or: pip install 'ouroboros-ai[litellm]'"
)
raise typer.Exit(code=1)

adapter = LiteLLMAdapter()
engine = PMInterviewEngine.create(llm_adapter=adapter, model=model)
Expand Down
35 changes: 26 additions & 9 deletions src/ouroboros/mcp/tools/pm_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,8 @@


_DATA_DIR = Path.home() / ".ouroboros" / "data"
_INTERVIEW_LLM_MAX_TURNS = 5
_INTERVIEW_LLM_TIMEOUT_SECONDS = 90.0


def _meta_path(session_id: str, data_dir: Path | None = None) -> Path:
Expand Down Expand Up @@ -305,16 +307,24 @@ def definition(self) -> MCPToolDefinition:
),
)

def _get_engine(self) -> PMInterviewEngine:
def _get_engine(self, *, cwd: str | None = None) -> PMInterviewEngine:
"""Return the injected engine or create a new one using the server's configured backend."""
if self.pm_engine is not None:
return self.pm_engine
adapter = self.llm_adapter or create_llm_adapter(
backend=self.llm_backend,
max_turns=1,
use_case="interview",
allowed_tools=[],
)
adapter_kwargs: dict[str, Any] = {
"backend": self.llm_backend,
"max_turns": _INTERVIEW_LLM_MAX_TURNS,
"use_case": "interview",
"allowed_tools": [],
"timeout": _INTERVIEW_LLM_TIMEOUT_SECONDS,
}
if cwd is not None:
adapter_kwargs["cwd"] = cwd
if self.llm_backend is not None:
adapter_kwargs["backend"] = self.llm_backend
adapter = create_llm_adapter(**adapter_kwargs)
else:
adapter = self.llm_adapter or create_llm_adapter(**adapter_kwargs)
model = get_clarification_model(self.llm_backend)
return PMInterviewEngine.create(
llm_adapter=adapter,
Expand Down Expand Up @@ -344,7 +354,15 @@ async def handle(
# Auto-detect action from parameter presence (AC 13)
action = _detect_action(arguments)

engine = self._get_engine()
if isinstance(initial_context, str) and not initial_context.strip() and action == "start":
return Result.err(
MCPToolError(
"initial_context must be a non-empty string to start a PM interview",
tool_name="ouroboros_pm_interview",
)
)

engine = self._get_engine(cwd=cwd)

try:
# ── Generate PM seed ──────────────────────────────────
Expand Down Expand Up @@ -771,7 +789,6 @@ async def _handle_answer(
)
state = load_result.value

# Restore PM meta into engine
meta = _load_pm_meta(session_id, self.data_dir)
if meta:
engine.restore_meta(meta)
Expand Down
Loading