diff --git a/package.json b/package.json
index b496f659..fa34abb2 100644
--- a/package.json
+++ b/package.json
@@ -19,6 +19,11 @@
"import": "./dist/inline-flow.js",
"default": "./dist/inline-flow.js"
},
+ "./measurement-validator": {
+ "types": "./dist/measurement-validator.d.ts",
+ "import": "./dist/measurement-validator.js",
+ "default": "./dist/measurement-validator.js"
+ },
"./demos/*": "./pages/demos/*",
"./assets/*": "./pages/assets/*",
"./package.json": "./package.json"
@@ -28,6 +33,7 @@
"dist",
"src",
"!src/layout.test.ts",
+ "!src/measurement-validator.test.ts",
"!src/test-data.ts",
"pages/demos",
"pages/assets"
diff --git a/src/measurement-validator.test.ts b/src/measurement-validator.test.ts
new file mode 100644
index 00000000..0b4feb28
--- /dev/null
+++ b/src/measurement-validator.test.ts
@@ -0,0 +1,388 @@
+import { beforeAll, beforeEach, describe, expect, test } from 'bun:test'
+
+// Tests for the MeasurementValidator module.
+// These run in a Node/Bun environment (no live DOM), so the comparator is
+// always configured with pretextOnly: true to avoid DOM reads.
+
+// Minimal OffscreenCanvas stub (matches the one in layout.test.ts).
+class TestOffscreenCanvas {
+ width: number
+ height: number
+
+ constructor(width: number, height: number) {
+ this.width = width
+ this.height = height
+ }
+
+ getContext(_type: string): CanvasRenderingContext2D {
+ return {
+ measureText(text: string) {
+ const fontSize = 16
+ let width = 0
+ for (const ch of text) {
+ if (ch === ' ') {
+ width += fontSize * 0.33
+ } else if (ch === '\t') {
+ width += fontSize * 1.32
+ } else if (/\p{Emoji_Presentation}/u.test(ch) || ch === '\uFE0F') {
+ width += fontSize
+ } else if (isWideChar(ch)) {
+ width += fontSize
+ } else if (/[.,!?;:%)\]}'""'»›…—-]/u.test(ch)) {
+ width += fontSize * 0.4
+ } else {
+ width += fontSize * 0.6
+ }
+ }
+ return { width }
+ },
+ } as unknown as CanvasRenderingContext2D
+ }
+}
+
+function isWideChar(ch: string): boolean {
+ const code = ch.codePointAt(0)!
+ return (
+ (code >= 0x4e00 && code <= 0x9fff) ||
+ (code >= 0x3400 && code <= 0x4dbf) ||
+ (code >= 0x3040 && code <= 0x309f) ||
+ (code >= 0x30a0 && code <= 0x30ff) ||
+ (code >= 0xac00 && code <= 0xd7af) ||
+ (code >= 0xff00 && code <= 0xffef)
+ )
+}
+
+type ValidatorModule = typeof import('./measurement-validator.ts')
+
+let MeasurementComparator: ValidatorModule['MeasurementComparator']
+let DivergenceClassifier: ValidatorModule['DivergenceClassifier']
+let ReportGenerator: ValidatorModule['ReportGenerator']
+let TestSuiteRunner: ValidatorModule['TestSuiteRunner']
+let classifyDelta: ValidatorModule['classifyDelta']
+let deriveResultSeverity: ValidatorModule['deriveResultSeverity']
+let clearCache: ValidatorModule['clearCache']
+let setLocale: ValidatorModule['setLocale']
+
+const FONT = '16px Test Sans'
+const LINE_HEIGHT = 19
+
+beforeAll(async () => {
+ Reflect.set(globalThis, 'OffscreenCanvas', TestOffscreenCanvas)
+ const mod = await import('./measurement-validator.ts')
+ ;({ MeasurementComparator, DivergenceClassifier, ReportGenerator, TestSuiteRunner, classifyDelta, deriveResultSeverity, clearCache, setLocale } = mod)
+})
+
+beforeEach(() => {
+ setLocale(undefined)
+ clearCache()
+})
+
+// ---------------------------------------------------------------------------
+// classifyDelta
+// ---------------------------------------------------------------------------
+
+describe('classifyDelta', () => {
+ test('zero delta is exact', () => {
+ expect(classifyDelta(0)).toBe('exact')
+ })
+
+ test('small delta under 0.5px is minor', () => {
+ expect(classifyDelta(0.3)).toBe('minor')
+ expect(classifyDelta(-0.4)).toBe('minor')
+ })
+
+ test('delta between 0.5px and 2px is major', () => {
+ expect(classifyDelta(1)).toBe('major')
+ expect(classifyDelta(-1.5)).toBe('major')
+ })
+
+ test('delta at or above 2px is critical', () => {
+ expect(classifyDelta(2)).toBe('critical')
+ expect(classifyDelta(15)).toBe('critical')
+ expect(classifyDelta(-3)).toBe('critical')
+ })
+})
+
+// ---------------------------------------------------------------------------
+// deriveResultSeverity
+// ---------------------------------------------------------------------------
+
+describe('deriveResultSeverity', () => {
+ test('no lines → pass', () => {
+ expect(deriveResultSeverity([], false)).toBe('pass')
+ })
+
+ test('line mismatch overrides everything → critical', () => {
+ expect(deriveResultSeverity([], true)).toBe('critical')
+ })
+
+ test('exact lines → pass', () => {
+ const lines = [{ index: 0, pretextWidth: 100, domWidth: 100, delta: 0, text: 'x', severity: 'exact' as const }]
+ expect(deriveResultSeverity(lines, false)).toBe('pass')
+ })
+
+ test('minor delta → pass', () => {
+ const lines = [{ index: 0, pretextWidth: 100, domWidth: 100.3, delta: 0.3, text: 'x', severity: 'minor' as const }]
+ expect(deriveResultSeverity(lines, false)).toBe('pass')
+ })
+
+ test('major delta → warning', () => {
+ const lines = [{ index: 0, pretextWidth: 100, domWidth: 101, delta: 1, text: 'x', severity: 'major' as const }]
+ expect(deriveResultSeverity(lines, false)).toBe('warning')
+ })
+
+ test('critical delta → critical', () => {
+ const lines = [{ index: 0, pretextWidth: 100, domWidth: 103, delta: 3, text: 'x', severity: 'critical' as const }]
+ expect(deriveResultSeverity(lines, false)).toBe('critical')
+ })
+})
+
+// ---------------------------------------------------------------------------
+// DivergenceClassifier
+// ---------------------------------------------------------------------------
+
+describe('DivergenceClassifier', () => {
+ test('detects bidi from RTL characters', () => {
+ const classifier = new DivergenceClassifier()
+ const causes = classifier.classify({ text: 'مرحبا', font: FONT }, [])
+ expect(causes).toContain('bidi')
+ })
+
+ test('detects emoji', () => {
+ const classifier = new DivergenceClassifier()
+ const causes = classifier.classify({ text: 'Hello 😊', font: FONT }, [])
+ expect(causes).toContain('emoji')
+ })
+
+ test('detects pre-wrap mode', () => {
+ const classifier = new DivergenceClassifier()
+ const causes = classifier.classify({ text: 'hello', font: FONT, whiteSpace: 'pre-wrap' }, [])
+ expect(causes).toContain('pre-wrap')
+ })
+
+ test('returns empty causes for plain LTR text with no divergence', () => {
+ const classifier = new DivergenceClassifier()
+ const causes = classifier.classify({ text: 'hello world', font: FONT }, [])
+ expect(causes).toHaveLength(0)
+ })
+
+ test('marks font-fallback when major divergence but no specific cause', () => {
+ const classifier = new DivergenceClassifier()
+ const lines = [
+ { index: 0, pretextWidth: 100, domWidth: 103, delta: 3, text: 'hello', severity: 'critical' as const },
+ ]
+ const causes = classifier.classify({ text: 'hello', font: FONT }, lines)
+ expect(causes).toContain('font-fallback')
+ })
+})
+
+// ---------------------------------------------------------------------------
+// MeasurementComparator (pretextOnly mode — no DOM)
+// ---------------------------------------------------------------------------
+
+describe('MeasurementComparator (pretextOnly)', () => {
+ test('single-line short text produces zero height delta', () => {
+ const comparator = new MeasurementComparator({ pretextOnly: true })
+ const result = comparator.compare({ text: 'Hello world', font: FONT, maxWidth: 400, lineHeight: LINE_HEIGHT })
+ expect(result.heightDelta).toBe(0)
+ expect(result.lineMismatch).toBe(false)
+ expect(result.severity).toBe('pass')
+ })
+
+ test('line counts match in pretextOnly mode', () => {
+ const comparator = new MeasurementComparator({ pretextOnly: true })
+ const result = comparator.compare({ text: 'Hello world', font: FONT, maxWidth: 400, lineHeight: LINE_HEIGHT })
+ expect(result.pretextLineCount).toBe(result.domLineCount)
+ })
+
+ test('result carries input sample back', () => {
+ const comparator = new MeasurementComparator({ pretextOnly: true })
+ const sample = { text: 'test', font: FONT, maxWidth: 200, lineHeight: LINE_HEIGHT }
+ const result = comparator.compare(sample)
+ expect(result.sample).toBe(sample)
+ })
+
+ test('empty text produces zero lines and height', () => {
+ const comparator = new MeasurementComparator({ pretextOnly: true })
+ const result = comparator.compare({ text: '', font: FONT, maxWidth: 300, lineHeight: LINE_HEIGHT })
+ expect(result.pretextLineCount).toBe(0)
+ expect(result.pretextHeight).toBe(0)
+ })
+
+ test('compareAll returns one result per sample', () => {
+ const comparator = new MeasurementComparator({ pretextOnly: true })
+ const samples = [
+ { text: 'foo', font: FONT, maxWidth: 200, lineHeight: LINE_HEIGHT },
+ { text: 'bar baz', font: FONT, maxWidth: 50, lineHeight: LINE_HEIGHT },
+ ]
+ const results = comparator.compareAll(samples)
+ expect(results).toHaveLength(2)
+ })
+
+ test('result has valid ISO timestamp', () => {
+ const comparator = new MeasurementComparator({ pretextOnly: true })
+ const result = comparator.compare({ text: 'Hello', font: FONT })
+ expect(() => new Date(result.timestamp)).not.toThrow()
+ expect(Number.isFinite(new Date(result.timestamp).getTime())).toBe(true)
+ })
+
+ test('narrow width forces multi-line layout', () => {
+ const comparator = new MeasurementComparator({ pretextOnly: true })
+ const result = comparator.compare({ text: 'Hello world foo bar', font: FONT, maxWidth: 60, lineHeight: LINE_HEIGHT })
+ expect(result.pretextLineCount).toBeGreaterThan(1)
+ })
+
+ test('per-line entries match line count', () => {
+ const comparator = new MeasurementComparator({ pretextOnly: true })
+ const result = comparator.compare({ text: 'Hello world', font: FONT, maxWidth: 400, lineHeight: LINE_HEIGHT })
+ expect(result.lines).toHaveLength(result.pretextLineCount)
+ })
+
+ test('all per-line deltas are zero in pretextOnly mode', () => {
+ const comparator = new MeasurementComparator({ pretextOnly: true })
+ const result = comparator.compare({ text: 'Hello world', font: FONT, maxWidth: 300, lineHeight: LINE_HEIGHT })
+ for (const line of result.lines) {
+ expect(line.delta).toBe(0)
+ expect(line.severity).toBe('exact')
+ }
+ })
+
+ test('arabic text gets bidi cause even with zero divergence', () => {
+ const comparator = new MeasurementComparator({ pretextOnly: true })
+ const result = comparator.compare({ text: 'مرحبا بالعالم', font: FONT, maxWidth: 300, lineHeight: LINE_HEIGHT })
+ expect(result.causes).toContain('bidi')
+ })
+
+ test('pre-wrap mode produces a result', () => {
+ const comparator = new MeasurementComparator({ pretextOnly: true })
+ const result = comparator.compare({
+ text: 'line one\nline two',
+ font: FONT,
+ maxWidth: 300,
+ lineHeight: LINE_HEIGHT,
+ whiteSpace: 'pre-wrap',
+ })
+ expect(result.pretextLineCount).toBeGreaterThanOrEqual(2)
+ expect(result.causes).toContain('pre-wrap')
+ })
+})
+
+// ---------------------------------------------------------------------------
+// TestSuiteRunner
+// ---------------------------------------------------------------------------
+
+describe('TestSuiteRunner', () => {
+ test('empty corpus produces a zero-sample report', () => {
+ const runner = new TestSuiteRunner({
+ comparator: new MeasurementComparator({ pretextOnly: true }),
+ })
+ const report = runner.run([])
+ expect(report.total).toBe(0)
+ expect(report.passed).toBe(0)
+ expect(report.passRate).toBe(1)
+ })
+
+ test('all-passing corpus produces 100% pass rate', () => {
+ const runner = new TestSuiteRunner({
+ comparator: new MeasurementComparator({ pretextOnly: true }),
+ })
+ const report = runner.run([
+ { text: 'Hello', font: FONT, maxWidth: 300, lineHeight: LINE_HEIGHT },
+ { text: 'World', font: FONT, maxWidth: 300, lineHeight: LINE_HEIGHT },
+ ])
+ expect(report.passed).toBe(2)
+ expect(report.passRate).toBe(1)
+ expect(report.errors).toBe(0)
+ expect(report.criticals).toBe(0)
+ })
+
+ test('runOrThrow does not throw on all-passing corpus', () => {
+ const runner = new TestSuiteRunner({
+ comparator: new MeasurementComparator({ pretextOnly: true }),
+ failOn: 'error',
+ })
+ expect(() =>
+ runner.runOrThrow([{ text: 'test', font: FONT, maxWidth: 300, lineHeight: LINE_HEIGHT }]),
+ ).not.toThrow()
+ })
+
+ test('report includes one result per sample', () => {
+ const runner = new TestSuiteRunner({
+ comparator: new MeasurementComparator({ pretextOnly: true }),
+ })
+ const samples = [
+ { text: 'foo', font: FONT, maxWidth: 200, lineHeight: LINE_HEIGHT },
+ { text: 'bar', font: FONT, maxWidth: 200, lineHeight: LINE_HEIGHT },
+ { text: 'baz', font: FONT, maxWidth: 200, lineHeight: LINE_HEIGHT },
+ ]
+ const report = runner.run(samples)
+ expect(report.results).toHaveLength(3)
+ })
+})
+
+// ---------------------------------------------------------------------------
+// ReportGenerator
+// ---------------------------------------------------------------------------
+
+describe('ReportGenerator', () => {
+ function makeReport() {
+ const comparator = new MeasurementComparator({ pretextOnly: true })
+ const runner = new TestSuiteRunner({ comparator })
+ return runner.run([
+ { text: 'Hello world', font: FONT, maxWidth: 300, lineHeight: LINE_HEIGHT },
+ { text: 'مرحبا', font: FONT, maxWidth: 300, lineHeight: LINE_HEIGHT },
+ ])
+ }
+
+ test('text format contains pass count', () => {
+ const gen = new ReportGenerator()
+ const report = makeReport()
+ const text = gen.render(report, 'text')
+ expect(text).toContain('passed')
+ expect(typeof text).toBe('string')
+ })
+
+ test('json format produces valid JSON', () => {
+ const gen = new ReportGenerator()
+ const report = makeReport()
+ const json = gen.render(report, 'json')
+ expect(() => JSON.parse(json)).not.toThrow()
+ const parsed = JSON.parse(json) as { total: number }
+ expect(parsed.total).toBe(2)
+ })
+
+  test('html format contains expected HTML structure', () => {
+    const gen = new ReportGenerator()
+    const report = makeReport()
+    const html = gen.render(report, 'html')
+    expect(html).toContain('<!DOCTYPE html>')
+    expect(html).toContain('<table>')
+    expect(html).toContain('Measurement Validator')
+  })
+
+ test('renderResult produces valid text for a single result', () => {
+ const comparator = new MeasurementComparator({ pretextOnly: true })
+ const result = comparator.compare({ text: 'Hello', font: FONT, maxWidth: 200, lineHeight: LINE_HEIGHT })
+ const gen = new ReportGenerator({ includePassing: true })
+ const text = gen.renderResult(result, 'text')
+ expect(typeof text).toBe('string')
+ expect(text.length).toBeGreaterThan(0)
+ })
+
+ test('includePassing includes pass rows in html', () => {
+ const gen = new ReportGenerator({ includePassing: true })
+ const report = makeReport()
+ const html = gen.render(report, 'html')
+ expect(html).toContain('severity-pass')
+ })
+
+ test('default (no includePassing) excludes pass rows from text', () => {
+ const gen = new ReportGenerator()
+ const comparator = new MeasurementComparator({ pretextOnly: true })
+ const runner = new TestSuiteRunner({ comparator })
+ const report = runner.run([{ text: 'Hello', font: FONT, maxWidth: 300, lineHeight: LINE_HEIGHT }])
+ const text = gen.render(report, 'text')
+ // Only the header line should be present since the sample passes
+ expect(text).not.toContain('[PASS]')
+ })
+})
diff --git a/src/measurement-validator.ts b/src/measurement-validator.ts
new file mode 100644
index 00000000..9e6a8a98
--- /dev/null
+++ b/src/measurement-validator.ts
@@ -0,0 +1,666 @@
+// Measurement Validator: detect and quantify divergence between Pretext's
+// canvas-based measurements and browser DOM rendering.
+//
+// Usage:
+// import { MeasurementComparator } from '@chenglou/pretext/measurement-validator'
+//
+// const comparator = new MeasurementComparator()
+// const result = await comparator.compare({
+// text: 'Hello world',
+// font: '16px Inter',
+// maxWidth: 300,
+// lineHeight: 20,
+// })
+//
+// Components:
+// MeasurementComparator — Core engine: Pretext vs DOM comparison
+// DivergenceClassifier — Root cause detection (font fallback, bidi, emoji, etc.)
+// ReportGenerator — Human-readable output (text, JSON, HTML)
+// TestSuiteRunner — Batch validation across a corpus of samples
+
+import { layoutWithLines, prepareWithSegments } from './layout.js'
+
+// ---------------------------------------------------------------------------
+// Types
+// ---------------------------------------------------------------------------
+
+/** Platform/browser context for a measurement sample. */
+export type MeasurementPlatform = 'chrome' | 'firefox' | 'safari' | 'node' | 'unknown'
+
+/** Input descriptor for a single measurement comparison. */
+export interface MeasurementSample {
+ text: string
+ font: string
+ maxWidth?: number
+ lineHeight?: number
+ /** @default 'normal' */
+ whiteSpace?: 'normal' | 'pre-wrap'
+ locale?: string
+ /** Hint for reporting — does not influence layout. */
+ platform?: MeasurementPlatform
+}
+
+/** Per-line breakdown between Pretext and DOM measurements. */
+export interface LineDivergence {
+ index: number
+ pretextWidth: number
+ domWidth: number
+ delta: number
+ text: string
+ severity: DivergenceSeverity
+}
+
+/** Classification of a single measurement divergence. */
+export type DivergenceSeverity = 'exact' | 'minor' | 'major' | 'critical'
+
+/** Overall pass/fail level of a comparison result. */
+export type ResultSeverity = 'pass' | 'warning' | 'error' | 'critical'
+
+/** Detected root cause for a divergence. */
+export type DivergenceCause =
+ | 'font-fallback'
+ | 'bidi'
+ | 'emoji'
+ | 'kerning'
+ | 'variable-font'
+ | 'pre-wrap'
+ | 'unknown'
+
+/** Full comparison result for a single MeasurementSample. */
+export interface MeasurementResult {
+ sample: MeasurementSample
+
+ // Line counts
+ pretextLineCount: number
+ domLineCount: number
+ lineMismatch: boolean
+
+ // Total block height
+ pretextHeight: number
+ domHeight: number
+ heightDelta: number
+ heightPercentError: number
+
+ // Per-line breakdown
+ lines: LineDivergence[]
+
+ // Classification
+ severity: ResultSeverity
+ causes: DivergenceCause[]
+
+ // Metadata
+ timestamp: string
+ platform: MeasurementPlatform
+ viewport: { width: number; height: number }
+}
+
+/** Aggregated summary over a batch of samples. */
+export interface SuiteReport {
+ total: number
+ passed: number
+ warnings: number
+ errors: number
+ criticals: number
+ passRate: number
+ results: MeasurementResult[]
+ timestamp: string
+}
+
+// ---------------------------------------------------------------------------
+// Severity helpers
+// ---------------------------------------------------------------------------
+
+const MINOR_THRESHOLD = 0.5
+const MAJOR_THRESHOLD = 2.0
+
+/** Classify a pixel delta into a severity bucket. */
+export function classifyDelta(delta: number): DivergenceSeverity {
+ const abs = Math.abs(delta)
+ if (abs === 0) return 'exact'
+ if (abs < MINOR_THRESHOLD) return 'minor'
+ if (abs < MAJOR_THRESHOLD) return 'major'
+ return 'critical'
+}
+
+/** Map the worst line severity to a result-level severity. */
+export function lineSeverityToResult(lineSeverity: DivergenceSeverity): ResultSeverity {
+ switch (lineSeverity) {
+ case 'exact':
+ case 'minor':
+ return 'pass'
+ case 'major':
+ return 'warning'
+ case 'critical':
+ return 'error'
+ }
+}
+
+/** Derive overall result severity from per-line divergences and line-count mismatch. */
+export function deriveResultSeverity(lines: LineDivergence[], lineMismatch: boolean): ResultSeverity {
+ if (lineMismatch) return 'critical'
+ let worst: DivergenceSeverity = 'exact'
+ for (const line of lines) {
+ if (line.severity === 'critical') return 'critical'
+ if (line.severity === 'major') worst = 'major'
+ else if (line.severity === 'minor' && worst === 'exact') worst = 'minor'
+ }
+ return lineSeverityToResult(worst)
+}
+
+// ---------------------------------------------------------------------------
+// DivergenceClassifier
+// ---------------------------------------------------------------------------
+
+/** Detect root causes for a measurement divergence. */
+export class DivergenceClassifier {
+ private static rtlRe = /[\u0600-\u06FF\u0750-\u077F\u0590-\u05FF\u07C0-\u07FF\uFB50-\uFDFF\uFE70-\uFEFF]/
+ private static emojiRe = /\p{Emoji_Presentation}/u
+ private static variationSelectorRe = /[\uFE0F\uFE0E]/
+
+ /**
+ * Classify the likely root causes from the sample and its result.
+ * Can be called before DOM comparison is available by passing `lines = []`.
+ */
+ classify(sample: MeasurementSample, lines: LineDivergence[]): DivergenceCause[] {
+ const causes: DivergenceCause[] = []
+
+ // Bidi: RTL characters in text
+ if (DivergenceClassifier.rtlRe.test(sample.text)) {
+ causes.push('bidi')
+ }
+
+ // Emoji: emoji codepoints
+ if (DivergenceClassifier.emojiRe.test(sample.text) || DivergenceClassifier.variationSelectorRe.test(sample.text)) {
+ causes.push('emoji')
+ }
+
+ // Pre-wrap mode
+ if (sample.whiteSpace === 'pre-wrap') {
+ causes.push('pre-wrap')
+ }
+
+ // Variable font: font-variation-settings hint
+ if (sample.font.includes('wght') || sample.font.includes('ital') || sample.font.includes('wdth')) {
+ causes.push('variable-font')
+ }
+
+ // Font fallback: if we detect divergence but no more specific cause
+ if (lines.some(l => l.severity === 'critical' || l.severity === 'major') && causes.length === 0) {
+ causes.push('font-fallback')
+ }
+
+ // If still nothing but there are divergences, mark unknown
+ if (causes.length === 0 && lines.some(l => l.severity !== 'exact')) {
+ causes.push('unknown')
+ }
+
+ return causes
+ }
+}
+
+// ---------------------------------------------------------------------------
+// MeasurementComparator
+// ---------------------------------------------------------------------------
+
+/** Options for MeasurementComparator. */
+export interface ComparatorOptions {
+ /** Override the default DivergenceClassifier. */
+ classifier?: DivergenceClassifier
+ /** If true, skip DOM measurement and use Pretext-only data for testing. */
+ pretextOnly?: boolean
+}
+
+/**
+ * Core comparison engine. Compares Pretext's canvas-based measurements against
+ * browser DOM measurements.
+ *
+ * In browser environments, call `compare()` to get DOM-vs-Pretext divergence.
+ * In Node/test environments, set `pretextOnly: true` to get Pretext-only data.
+ */
+export class MeasurementComparator {
+ private readonly classifier: DivergenceClassifier
+ private readonly pretextOnly: boolean
+
+ constructor(options: ComparatorOptions = {}) {
+ this.classifier = options.classifier ?? new DivergenceClassifier()
+ this.pretextOnly = options.pretextOnly ?? false
+ }
+
+ /**
+ * Run a single comparison. In browser contexts this also reads DOM geometry;
+ * in `pretextOnly` mode the DOM fields mirror Pretext values (delta = 0).
+ */
+ compare(sample: MeasurementSample): MeasurementResult {
+ const {
+ text,
+ font,
+ maxWidth = 300,
+ lineHeight = 20,
+ whiteSpace = 'normal',
+ platform = detectPlatform(),
+ } = sample
+
+ // --- Pretext measurement ---
+ const options = whiteSpace === 'pre-wrap' ? { whiteSpace: 'pre-wrap' as const } : undefined
+ const prepared = prepareWithSegments(text, font, options)
+ const layoutResult = layoutWithLines(prepared, maxWidth, lineHeight)
+ const pretextLines = layoutResult.lines
+ const pretextLineCount = layoutResult.lineCount
+ const pretextHeight = layoutResult.height
+
+ // --- DOM measurement (or stub in pretextOnly mode) ---
+ let domLines: { text: string; width: number }[]
+ let domHeight: number
+
+ if (!this.pretextOnly && typeof document !== 'undefined') {
+ const domResult = measureDOM(text, font, maxWidth, lineHeight, whiteSpace)
+ domLines = domResult.lines
+ domHeight = domResult.height
+ } else {
+ // Stub: DOM = Pretext (delta = 0). Useful in Node / unit-test environments.
+ domLines = pretextLines.map(l => ({ text: l.text, width: l.width }))
+ domHeight = pretextHeight
+ }
+
+ const domLineCount = domLines.length
+
+ // --- Per-line divergence ---
+ const lineCount = Math.max(pretextLineCount, domLineCount)
+ const lines: LineDivergence[] = []
+
+ for (let i = 0; i < lineCount; i++) {
+ const pt = pretextLines[i]
+ const dm = domLines[i]
+
+ const pretextWidth = pt?.width ?? 0
+ const domWidth = dm?.width ?? 0
+ const delta = domWidth - pretextWidth
+ const severity = classifyDelta(delta)
+
+ lines.push({
+ index: i,
+ pretextWidth,
+ domWidth,
+ delta,
+ text: pt?.text ?? dm?.text ?? '',
+ severity,
+ })
+ }
+
+ // --- Heights ---
+ const heightDelta = domHeight - pretextHeight
+ const heightPercentError = pretextHeight === 0 ? 0 : (Math.abs(heightDelta) / pretextHeight) * 100
+
+ // --- Classification ---
+ const lineMismatch = pretextLineCount !== domLineCount
+ const severity = deriveResultSeverity(lines, lineMismatch)
+ const causes = this.classifier.classify(sample, lines)
+
+ const viewport =
+ typeof window !== 'undefined'
+ ? { width: window.innerWidth, height: window.innerHeight }
+ : { width: 0, height: 0 }
+
+ return {
+ sample,
+ pretextLineCount,
+ domLineCount,
+ lineMismatch,
+ pretextHeight,
+ domHeight,
+ heightDelta,
+ heightPercentError,
+ lines,
+ severity,
+ causes,
+ timestamp: new Date().toISOString(),
+ platform,
+ viewport,
+ }
+ }
+
+ /** Convenience: compare multiple samples and return an array of results. */
+ compareAll(samples: MeasurementSample[]): MeasurementResult[] {
+ return samples.map(s => this.compare(s))
+ }
+}
+
+// ---------------------------------------------------------------------------
+// DOM measurement helper (browser-only)
+// ---------------------------------------------------------------------------
+
+interface DOMLines {
+ lines: { text: string; width: number }[]
+ height: number
+}
+
+/**
+ * Measure text using real DOM layout. Returns per-line widths and total height.
+ * Only works in browser environments with a live `document`.
+ */
+function measureDOM(
+ text: string,
+ font: string,
+ maxWidth: number,
+ lineHeight: number,
+ whiteSpace: 'normal' | 'pre-wrap',
+): DOMLines {
+ const container = document.createElement('div')
+ Object.assign(container.style, {
+ position: 'absolute',
+ top: '-9999px',
+ left: '-9999px',
+ width: `${maxWidth}px`,
+ font,
+ lineHeight: `${lineHeight}px`,
+ whiteSpace: whiteSpace === 'pre-wrap' ? 'pre-wrap' : 'normal',
+ wordBreak: 'normal',
+ overflowWrap: 'break-word',
+ visibility: 'hidden',
+ pointerEvents: 'none',
+ })
+ container.textContent = text
+ document.body.appendChild(container)
+
+ const domHeight = container.offsetHeight
+ const domLineCount = Math.round(domHeight / lineHeight) || 1
+
+ // Extract per-line widths using Range
+ const textNode = container.firstChild
+ const lines: { text: string; width: number }[] = []
+
+ if (textNode != null) {
+ const range = document.createRange()
+ range.selectNodeContents(textNode)
+ const clientRects = range.getClientRects()
+
+ for (let i = 0; i < clientRects.length; i++) {
+ const rect = clientRects[i]!
+ lines.push({ text: '', width: rect.width })
+ }
+ }
+
+ // Fallback: equal-width placeholder lines if Range gave no rects
+ if (lines.length === 0) {
+ const avgWidth = container.offsetWidth
+ for (let i = 0; i < domLineCount; i++) {
+ lines.push({ text: '', width: avgWidth })
+ }
+ }
+
+ document.body.removeChild(container)
+ return { lines, height: domHeight }
+}
+
+// ---------------------------------------------------------------------------
+// Platform detection
+// ---------------------------------------------------------------------------
+
+function detectPlatform(): MeasurementPlatform {
+ if (typeof navigator === 'undefined') return 'node'
+ const ua = navigator.userAgent
+ if (/Chrome/.test(ua) && !/Edg/.test(ua)) return 'chrome'
+ if (/Firefox/.test(ua)) return 'firefox'
+ if (/Safari/.test(ua) && !/Chrome/.test(ua)) return 'safari'
+ return 'unknown'
+}
+
+// ---------------------------------------------------------------------------
+// TestSuiteRunner
+// ---------------------------------------------------------------------------
+
+/** Options for a TestSuiteRunner. */
+export interface TestSuiteOptions {
+ /** Abort if any sample produces a result at or above this severity. */
+ failOn?: ResultSeverity
+  /** Comparator to use (defaults to a new MeasurementComparator). */
+ comparator?: MeasurementComparator
+}
+
+/**
+ * Batch validator: run a corpus of samples through the comparator and
+ * produce an aggregated SuiteReport.
+ */
+export class TestSuiteRunner {
+ private readonly comparator: MeasurementComparator
+ private readonly failOn: ResultSeverity | undefined
+
+ constructor(options: TestSuiteOptions = {}) {
+ this.comparator = options.comparator ?? new MeasurementComparator()
+ this.failOn = options.failOn
+ }
+
+ /** Run all samples. Returns a SuiteReport. */
+ run(samples: MeasurementSample[]): SuiteReport {
+ const results = this.comparator.compareAll(samples)
+
+ let passed = 0
+ let warnings = 0
+ let errors = 0
+ let criticals = 0
+
+ for (const r of results) {
+ switch (r.severity) {
+ case 'pass':
+ passed++
+ break
+ case 'warning':
+ warnings++
+ break
+ case 'error':
+ errors++
+ break
+ case 'critical':
+ criticals++
+ break
+ }
+ }
+
+ return {
+ total: results.length,
+ passed,
+ warnings,
+ errors,
+ criticals,
+ passRate: results.length === 0 ? 1 : passed / results.length,
+ results,
+ timestamp: new Date().toISOString(),
+ }
+ }
+
+ /**
+ * Run samples and throw if the configured `failOn` threshold is breached.
+ * Returns the report on success.
+ */
+ runOrThrow(samples: MeasurementSample[]): SuiteReport {
+ const report = this.run(samples)
+ if (this.failOn != null) {
+ const severityOrder: ResultSeverity[] = ['pass', 'warning', 'error', 'critical']
+ const threshold = severityOrder.indexOf(this.failOn)
+ for (const r of report.results) {
+ if (severityOrder.indexOf(r.severity) >= threshold) {
+ throw new Error(
+ `Measurement validation failed: sample "${r.sample.text.slice(0, 40)}" ` +
+ `produced ${r.severity} (threshold: ${this.failOn})`,
+ )
+ }
+ }
+ }
+ return report
+ }
+}
+
+// ---------------------------------------------------------------------------
+// ReportGenerator
+// ---------------------------------------------------------------------------
+
+/** Output format for a report. */
+export type ReportFormat = 'text' | 'json' | 'html'
+
+/** Options for ReportGenerator. */
+export interface ReportOptions {
+ /** Whether to include passing results in the report. @default false */
+ includePassing?: boolean
+}
+
+/** Generate human-readable reports from a SuiteReport or single MeasurementResult. */
+export class ReportGenerator {
+ private readonly includePassing: boolean
+
+ constructor(options: ReportOptions = {}) {
+ this.includePassing = options.includePassing ?? false
+ }
+
+ /** Render in the requested format. */
+ render(report: SuiteReport, format: ReportFormat = 'text'): string {
+ switch (format) {
+ case 'json':
+ return this.toJSON(report)
+ case 'html':
+ return this.toHTML(report)
+ default:
+ return this.toText(report)
+ }
+ }
+
+ /** Render a single result as text. */
+ renderResult(result: MeasurementResult, format: ReportFormat = 'text'): string {
+ const report: SuiteReport = {
+ total: 1,
+ passed: result.severity === 'pass' ? 1 : 0,
+ warnings: result.severity === 'warning' ? 1 : 0,
+ errors: result.severity === 'error' ? 1 : 0,
+ criticals: result.severity === 'critical' ? 1 : 0,
+ passRate: result.severity === 'pass' ? 1 : 0,
+ results: [result],
+ timestamp: result.timestamp,
+ }
+ return this.render(report, format)
+ }
+
+ private toText(report: SuiteReport): string {
+ const pct = (report.passRate * 100).toFixed(1)
+ const lines: string[] = [
+ `Measurement Validator Report — ${report.timestamp}`,
+ `${report.passed}/${report.total} passed (${pct}%) ` +
+ `${report.warnings} warnings ${report.errors} errors ${report.criticals} critical`,
+ '',
+ ]
+
+ for (const r of report.results) {
+ if (!this.includePassing && r.severity === 'pass') continue
+ const icon = severityIcon(r.severity)
+ const text = r.sample.text.slice(0, 60)
+ lines.push(
+ `${icon} [${r.severity.toUpperCase()}] "${text}"`,
+ ` font: ${r.sample.font} width: ${r.sample.maxWidth ?? 300}px lines: ${r.pretextLineCount}→${r.domLineCount}`,
+ ` height: Pretext ${r.pretextHeight}px DOM ${r.domHeight}px Δ${r.heightDelta >= 0 ? '+' : ''}${r.heightDelta.toFixed(1)}px (${r.heightPercentError.toFixed(1)}%)`,
+ )
+ if (r.causes.length > 0) {
+ lines.push(` causes: ${r.causes.join(', ')}`)
+ }
+ for (const line of r.lines) {
+ if (!this.includePassing && line.severity === 'exact') continue
+ lines.push(
+ ` line ${line.index}: Pretext ${line.pretextWidth.toFixed(1)}px DOM ${line.domWidth.toFixed(1)}px Δ${line.delta >= 0 ? '+' : ''}${line.delta.toFixed(1)}px [${line.severity}]`,
+ )
+ }
+ lines.push('')
+ }
+
+ return lines.join('\n')
+ }
+
+ private toJSON(report: SuiteReport): string {
+ return JSON.stringify(report, null, 2)
+ }
+
+  private toHTML(report: SuiteReport): string {
+    const pct = (report.passRate * 100).toFixed(1)
+    const rows = report.results
+      .filter(r => this.includePassing || r.severity !== 'pass')
+      .map(r => {
+        const icon = severityIcon(r.severity)
+        const text = escapeHTML(r.sample.text.slice(0, 80))
+        return `<tr class="severity-${r.severity}">
+  <td>${icon} ${text}</td>
+  <td>${escapeHTML(r.sample.font)}</td>
+  <td>${r.sample.maxWidth ?? 300}px</td>
+  <td>Pretext ${r.pretextLineCount} / DOM ${r.domLineCount}</td>
+  <td>Δ${r.heightDelta >= 0 ? '+' : ''}${r.heightDelta.toFixed(1)}px (${r.heightPercentError.toFixed(1)}%)</td>
+  <td>${r.causes.join(', ') || '—'}</td>
+</tr>`
+      })
+      .join('\n')
+
+    return `<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Pretext Measurement Validator Report</title>
+</head>
+<body>
+<h1>Pretext Measurement Validator</h1>
+<p>
+  ✅ ${report.passed}/${report.total} passed (${pct}%)
+  ⚠️ ${report.warnings} warnings
+  ❌ ${report.errors} errors
+  🔴 ${report.criticals} critical
+</p>
+<p>${report.timestamp}</p>
+<table>
+<thead>
+<tr>
+  <th>Text</th>
+  <th>Font</th>
+  <th>Width</th>
+  <th>Lines (Pretext/DOM)</th>
+  <th>Height Δ</th>
+  <th>Causes</th>
+</tr>
+</thead>
+<tbody>
+${rows}
+</tbody>
+</table>
+</body>
+</html>`
+  }
+}
+
+// ---------------------------------------------------------------------------
+// Internal helpers
+// ---------------------------------------------------------------------------
+
+function severityIcon(s: ResultSeverity): string {
+ switch (s) {
+ case 'pass':
+ return '✅'
+ case 'warning':
+ return '⚠️'
+ case 'error':
+ return '❌'
+ case 'critical':
+ return '🔴'
+ }
+}
+
+function escapeHTML(s: string): string {
+  return s.replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;').replace(/"/g, '&quot;')
+}
+
+// ---------------------------------------------------------------------------
+// Convenience re-exports of Pretext primitives used by validators
+// ---------------------------------------------------------------------------
+export { clearCache, layout, layoutWithLines, prepare, prepareWithSegments, setLocale } from './layout.js'