Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

18 changes: 0 additions & 18 deletions src/cli/orchestrator.ts
Original file line number Diff line number Diff line change
Expand Up @@ -619,7 +619,6 @@ function routePromptResult(
const { decisions, surfacedViolations } = getViolationFilterResults(
result.violations
);
const violationCount = surfacedViolations.length;

// Score calculated from surfaced violations only — matches what user sees
const scored = calculateCheckScore(
Expand All @@ -631,7 +630,6 @@ function routePromptResult(
}
);
const severity = scored.severity;

// Group violations by criterionName
const violationsByCriterion = new Map<
string | undefined,
Expand Down Expand Up @@ -681,22 +679,6 @@ function routePromptResult(
}
}

// If no violations but we have a message (JSON output), report it
if (violationCount === 0 && (outputFormat === OutputFormat.Json || outputFormat === OutputFormat.ValeJson) && scored.message) {
const ruleName = buildRuleName(promptFile.pack, promptId, undefined);
reportIssue({
file: relFile,
line: 1,
column: 1,
severity,
summary: scored.message,
ruleName,
outputFormat,
jsonFormatter,
match: "",
});
}

// Create scoreEntry for Quality Scores display
const scoreEntry: EvaluationSummary = {
id: buildRuleName(promptFile.pack, promptId, undefined),
Expand Down
22 changes: 18 additions & 4 deletions src/prompts/schema.ts
Original file line number Diff line number Diff line change
Expand Up @@ -52,8 +52,15 @@ export function buildJudgeLLMSchema() {
context_before: { type: "string" },
context_after: { type: "string" },
description: { type: "string" },
analysis: { type: "string" },
suggestion: { type: "string" },
analysis: {
type: "string",
description:
"A concise 1-2 sentence explanation of the specific issue.",
},
suggestion: {
type: "string",
description: "Suggest a fix in 15 words or less.",
},
fix: { type: "string" },
rule_quote: { type: "string" },
checks: {
Expand Down Expand Up @@ -156,8 +163,15 @@ export function buildCheckLLMSchema() {
context_before: { type: "string" },
context_after: { type: "string" },
description: { type: "string" },
analysis: { type: "string" },
suggestion: { type: "string" },
analysis: {
type: "string",
description:
"A concise 1-2 sentence explanation of the specific issue.",
},
suggestion: {
type: "string",
description: "Suggest a fix in 15 words or less.",
},
fix: { type: "string" },
rule_quote: { type: "string" },
checks: {
Expand Down
72 changes: 72 additions & 0 deletions tests/orchestrator-filtering.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,9 @@ import path from "path";
import { evaluateFiles } from "../src/cli/orchestrator";
import { OutputFormat, type EvaluationOptions } from "../src/cli/types";
import { EvaluationType, Severity } from "../src/evaluators/types";
import type { Result } from "../src/output/json-formatter";
import type { PromptFile } from "../src/prompts/prompt-loader";
import type { ValeOutput } from "../src/schemas/vale-responses";
import type { JudgeResult, RawCheckResult } from "../src/prompts/schema";

const { EVALUATE_MOCK } = vi.hoisted(() => ({
Expand Down Expand Up @@ -358,4 +360,74 @@ describe("CLI violation filtering", () => {
);
expect(zeroThresholdRun.totalWarnings).toBe(2);
});

it("does not emit dummy issues in JSON output when no violations are surfaced", async () => {
  const prompt = createPrompt({
    id: "CheckJsonPrompt",
    name: "Check JSON Prompt",
    type: "check",
    severity: Severity.WARNING,
  });
  const targetFile = createTempFile("Alpha text\n");

  // A single low-confidence violation gets filtered out, so nothing surfaces.
  EVALUATE_MOCK.mockResolvedValue(
    makeCheckResult({
      violations: [makeCheckViolation({ confidence: 0.2 })],
    })
  );

  const run = await evaluateFiles([targetFile], {
    ...createBaseOptions([prompt]),
    outputFormat: OutputFormat.Json,
  });

  expect(run.totalWarnings).toBe(0);

  // Inspect the final JSON payload written to stdout.
  const lastLogged = vi.mocked(console.log).mock.calls.at(-1)?.[0];
  const parsed = JSON.parse(String(lastLogged)) as Result;
  const issues = Object.values(parsed.files).flatMap((file) => file.issues);

  expect(issues).toHaveLength(0);
  expect(JSON.stringify(parsed)).not.toContain("No issues found");
});

it("does not emit dummy issues in Vale JSON output when no violations are surfaced", async () => {
  const prompt = createPrompt({
    id: "CheckValeJsonPrompt",
    name: "Check Vale JSON Prompt",
    type: "check",
    severity: Severity.WARNING,
  });
  const targetFile = createTempFile("Alpha text\n");

  // A single low-confidence violation gets filtered out, so nothing surfaces.
  EVALUATE_MOCK.mockResolvedValue(
    makeCheckResult({
      violations: [makeCheckViolation({ confidence: 0.2 })],
    })
  );

  const run = await evaluateFiles([targetFile], {
    ...createBaseOptions([prompt]),
    outputFormat: OutputFormat.ValeJson,
  });

  expect(run.totalWarnings).toBe(0);

  // Inspect the final Vale-style JSON payload written to stdout.
  const lastLogged = vi.mocked(console.log).mock.calls.at(-1)?.[0];
  const parsed = JSON.parse(String(lastLogged)) as ValeOutput;
  const issues = Object.values(parsed).flat();

  expect(issues).toHaveLength(0);
  expect(JSON.stringify(parsed)).not.toContain("No issues found");
});
});
35 changes: 35 additions & 0 deletions tests/prompt-schema.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
import { describe, expect, it } from "vitest";
import { buildCheckLLMSchema, buildJudgeLLMSchema } from "../src/prompts/schema";

describe("prompt schema verbosity constraints", () => {
  // Both schemas must pin the same verbosity guidance on the LLM output fields.
  const expectedAnalysis = {
    type: "string",
    description: "A concise 1-2 sentence explanation of the specific issue.",
  };
  const expectedSuggestion = {
    type: "string",
    description: "Suggest a fix in 15 words or less.",
  };

  it("includes concise analysis and suggestion descriptions for check schema", () => {
    const { analysis, suggestion } =
      buildCheckLLMSchema().schema.properties.violations.items.properties;

    expect(analysis).toEqual(expectedAnalysis);
    expect(suggestion).toEqual(expectedSuggestion);
  });

  it("includes concise analysis and suggestion descriptions for judge schema", () => {
    const { analysis, suggestion } =
      buildJudgeLLMSchema().schema.properties.criteria.items.properties
        .violations.items.properties;

    expect(analysis).toEqual(expectedAnalysis);
    expect(suggestion).toEqual(expectedSuggestion);
  });
});