diff --git a/.gitignore b/.gitignore
index 7428ea11..c33e80e8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,6 @@
# dependencies (bun install)
node_modules
+package-lock.json
# output
out
diff --git a/package.json b/package.json
index 31f0bdb5..25e25d88 100644
--- a/package.json
+++ b/package.json
@@ -65,6 +65,7 @@
"probe-check": "bun run scripts/probe-check.ts",
"probe-check:safari": "PROBE_CHECK_BROWSER=safari bun run scripts/probe-check.ts",
"status-dashboard": "bun run scripts/status-dashboard.ts",
+ "validator": "bun run scripts/validator.ts",
"site:build": "rm -rf site && bun run scripts/build-demo-site.ts",
"start": "HOST=${HOST:-127.0.0.1}; PORT=3000; pids=$(lsof -tiTCP:$PORT -sTCP:LISTEN 2>/dev/null); if [ -n \"$pids\" ]; then echo \"Freeing port $PORT: terminating $pids\"; kill $pids 2>/dev/null || true; sleep 1; pids=$(lsof -tiTCP:$PORT -sTCP:LISTEN 2>/dev/null); if [ -n \"$pids\" ]; then echo \"Port $PORT still busy: killing $pids\"; kill -9 $pids 2>/dev/null || true; fi; fi; bun pages/*.html pages/demos/*.html pages/demos/*/index.html --host=$HOST:$PORT",
"start:lan": "HOST=0.0.0.0 bun run start",
diff --git a/scripts/validator.ts b/scripts/validator.ts
new file mode 100644
index 00000000..4592f37d
--- /dev/null
+++ b/scripts/validator.ts
@@ -0,0 +1,83 @@
+#!/usr/bin/env bun
+// Measurement validator CLI.
+//
+// Usage:
+// bun run validator # run all built-in fixtures
+// bun run validator --language en # run only English fixtures
+// bun run validator --report csv # print CSV to stdout
+// bun run validator --report markdown # print Markdown to stdout
+// bun run validator --report html # print HTML to stdout
+// bun run validator --report json # print JSON to stdout
+// bun run validator --filter exact # show only matching severity rows
+//
+// Exit codes: 0 = all passed, 1 = at least one non-exact result
+
+import { compare } from '../src/measurement-validator/comparator.js'
+import {
+ buildReport,
+ printConsoleReport,
+ toCSV,
+ toHTML,
+ toJSON,
+ toMarkdown,
+} from '../src/measurement-validator/report-generator.js'
+import { fixtures } from '../src/measurement-validator/test-suite.js'
+import type { DivergenceSeverity } from '../src/measurement-validator/types.js'
+
+// ─── Arg parsing ─────────────────────────────────────────────────────────────
+
+const args = process.argv.slice(2)
+
+/**
+ * Return the value following `name` in argv, or undefined when the flag is
+ * absent, is the last token, or is followed by another `--flag` (so a missing
+ * value cannot silently swallow the next option).
+ */
+function getFlag(name: string): string | undefined {
+  const idx = args.indexOf(name)
+  if (idx === -1) return undefined
+  const value = args[idx + 1]
+  // Treat end-of-args or a following flag as "no value provided".
+  return value !== undefined && !value.startsWith('--') ? value : undefined
+}
+
+const language = getFlag('--language')
+const reportFormat = getFlag('--report') // csv | markdown | html | json
+// NOTE(review): unvalidated cast — a typo like `--filter exat` is accepted
+// and filters every row out; confirm whether the value should be validated.
+const filterSeverity = getFlag('--filter') as DivergenceSeverity | undefined
+
+// ─── Collect samples ─────────────────────────────────────────────────────────
+
+// With --language run only that language's fixtures; otherwise run all of them.
+const allFixtures = language !== undefined
+  ? (fixtures[language] ?? [])
+  : Object.values(fixtures).flat()
+
+if (allFixtures.length === 0) {
+  const available = Object.keys(fixtures).join(', ')
+  console.error(`No fixtures found for language "${language ?? ''}". Available: ${available}`)
+  process.exit(1)
+}
+
+// ─── Run comparisons ──────────────────────────────────────────────────────────
+
+const results = allFixtures.map(compare)
+
+// Apply optional severity filter
+// NOTE(review): filtering happens before buildReport, so --filter also changes
+// passed/failed counts and therefore the process exit code — confirm intended.
+const filtered = filterSeverity !== undefined
+  ? results.filter((r) => r.severity === filterSeverity)
+  : results
+
+const report = buildReport(filtered)
+
+// ─── Output ───────────────────────────────────────────────────────────────────
+
+switch (reportFormat) {
+  case 'csv':
+    process.stdout.write(toCSV(report))
+    break
+  case 'markdown':
+    process.stdout.write(toMarkdown(report))
+    break
+  case 'html':
+    process.stdout.write(toHTML(report))
+    break
+  case 'json':
+    process.stdout.write(toJSON(report) + '\n')
+    break
+  default:
+    // Any other (or missing) --report value falls back to the console summary.
+    printConsoleReport(report)
+}
+
+// Exit 1 if any failures
+process.exit(report.failed > 0 ? 1 : 0)
diff --git a/src/measurement-validator/comparator.ts b/src/measurement-validator/comparator.ts
new file mode 100644
index 00000000..2644b9da
--- /dev/null
+++ b/src/measurement-validator/comparator.ts
@@ -0,0 +1,83 @@
+// Comparator: compare Pretext layout height against a reference DOM height.
+//
+// In a browser environment the DOM reference height comes from creating a
+// temporary element and measuring its offsetHeight. Outside a browser (Node /
+// Bun unit tests) the DOM is unavailable, so domHeight is left as NaN and the
+// severity is always 'exact' — callers can detect the browser-less path via
+// isNaN(result.domHeight).
+
+import { layout, prepare, setLocale, type PrepareOptions } from '../layout.js'
+import type { ComparisonResult, DivergenceSeverity, MeasurementSample } from './types.js'
+
+/** Map an absolute pixel difference to a DivergenceSeverity bucket. */
+function classifySeverity(diffPx: number): DivergenceSeverity {
+  if (diffPx <= 1) return 'exact' // <= 1px counts as a pass
+  if (diffPx <= 4) return 'minor'
+  if (diffPx <= 20) return 'major'
+  return 'critical' // > 20px
+}
+
+/**
+ * Measure the reference DOM height for a sample.
+ *
+ * Creates a temporary `<div>` with the same font, width and text as the
+ * sample, appends it off-screen, reads `offsetHeight`, then removes it.
+ * Returns NaN when the DOM is not available (non-browser environments).
+ */
+function measureDomHeight(sample: MeasurementSample): number {
+  if (typeof document === 'undefined') return Number.NaN
+
+  const el = document.createElement('div')
+  // Hidden, non-interactive element styled to match the sample's metrics so
+  // offsetHeight reflects the browser's own line wrapping for this text.
+  el.style.cssText = [
+    'position:absolute',
+    'visibility:hidden',
+    'pointer-events:none',
+    'white-space:normal',
+    'word-break:normal',
+    'overflow-wrap:break-word',
+    `font:${sample.font}`,
+    `width:${sample.maxWidth}px`,
+    `line-height:${sample.lineHeight}px`,
+  ].join(';')
+  el.textContent = sample.text
+  document.body.appendChild(el)
+  const h = el.offsetHeight
+  document.body.removeChild(el)
+  return h
+}
+
+/**
+ * Compare Pretext layout height against a DOM reference height for one sample.
+ */
+export function compare(sample: MeasurementSample): ComparisonResult {
+ const start = performance.now()
+
+ const options: PrepareOptions = {}
+
+ // Apply locale if specified, then restore the default after measuring.
+ if (sample.language !== undefined) {
+ setLocale(sample.language)
+ }
+
+ const prepared = prepare(sample.text, sample.font, options)
+
+ if (sample.language !== undefined) {
+ setLocale(undefined)
+ }
+
+ const { height: pretextHeight } = layout(prepared, sample.maxWidth, sample.lineHeight)
+
+ const domHeight = measureDomHeight(sample)
+ const diffPx = Number.isNaN(domHeight) ? 0 : Math.abs(pretextHeight - domHeight)
+ const severity = classifySeverity(diffPx)
+
+ const executionTimeMs = performance.now() - start
+
+ return {
+ sample,
+ pretextHeight,
+ domHeight,
+ diffPx,
+ severity,
+ executionTimeMs,
+ }
+}
diff --git a/src/measurement-validator/index.ts b/src/measurement-validator/index.ts
new file mode 100644
index 00000000..db3e6cdd
--- /dev/null
+++ b/src/measurement-validator/index.ts
@@ -0,0 +1,29 @@
+// Public API for the measurement-validator module.
+//
+// Usage (in a browser or via the CLI):
+//
+// import { compare, buildReport, toCSV } from '@chenglou/pretext/measurement-validator'
+//
+// const results = fixtures.en.map(compare)
+// const report = buildReport(results)
+// console.log(toCSV(report))
+
+export type {
+ ComparisonResult,
+ DivergenceSeverity,
+ MeasurementSample,
+ ValidatorReport,
+} from './types.js'
+
+export { compare } from './comparator.js'
+
+export { fixtures, englishFixtures } from './test-suite.js'
+
+export {
+ buildReport,
+ printConsoleReport,
+ toCSV,
+ toHTML,
+ toJSON,
+ toMarkdown,
+} from './report-generator.js'
diff --git a/src/measurement-validator/report-generator.ts b/src/measurement-validator/report-generator.ts
new file mode 100644
index 00000000..70f4f226
--- /dev/null
+++ b/src/measurement-validator/report-generator.ts
@@ -0,0 +1,226 @@
+// Report generator: produce JSON, console, CSV, Markdown, and HTML output
+// from a set of ComparisonResults.
+
+import type { ComparisonResult, DivergenceSeverity, ValidatorReport } from './types.js'
+
+// ─── Build report ─────────────────────────────────────────────────────────────
+
+/** Build a ValidatorReport from a list of ComparisonResults. */
+export function buildReport(results: ComparisonResult[]): ValidatorReport {
+  // Only 'exact' counts as a pass; every other severity is a failure.
+  const passed = results.filter((r) => r.severity === 'exact').length
+  return {
+    timestamp: new Date().toISOString(),
+    total: results.length,
+    passed,
+    failed: results.length - passed,
+    // An empty run is reported as a 100% pass rate (avoids 0/0 = NaN).
+    passRate: results.length === 0 ? 1 : passed / results.length,
+    results,
+  }
+}
+
+// ─── Console ──────────────────────────────────────────────────────────────────
+
+// Icon per severity; 'minor' carries a trailing space so the variation
+// selector in ⚠️ does not visually collide with the following text.
+const SEVERITY_ICON: Record<DivergenceSeverity, string> = {
+  exact: '✅',
+  minor: '⚠️ ',
+  major: '❌',
+  critical: '🔴',
+}
+
+/** Print a human-readable summary to the console. */
+export function printConsoleReport(report: ValidatorReport): void {
+  const pct = (report.passRate * 100).toFixed(1)
+  console.log(`\nMeasurement Validator Report — ${report.timestamp}`)
+  console.log(`${'─'.repeat(60)}`)
+  console.log(` ${report.passed}/${report.total} passed (${pct}%)`)
+  console.log(`${'─'.repeat(60)}`)
+
+  for (const r of report.results) {
+    const icon = SEVERITY_ICON[r.severity] ?? '?'
+    // NaN domHeight marks the browser-less path (see comparator); show 'n/a'
+    // and omit the diff rather than printing NaN.
+    const dom = Number.isNaN(r.domHeight) ? 'n/a (no DOM)' : `${r.domHeight}px`
+    const diff = Number.isNaN(r.domHeight) ? '' : ` diff=${r.diffPx.toFixed(1)}px`
+    console.log(` ${icon} [${r.sample.label}] pretext=${r.pretextHeight}px dom=${dom}${diff} (${r.executionTimeMs.toFixed(2)}ms)`)
+  }
+
+  console.log(`${'─'.repeat(60)}\n`)
+}
+
+// ─── JSON ─────────────────────────────────────────────────────────────────────
+
+/**
+ * Serialize the report as pretty-printed JSON.
+ *
+ * NOTE(review): JSON.stringify turns NaN into null, so domHeight for
+ * browser-less runs appears as null here — confirm consumers expect that.
+ */
+export function toJSON(report: ValidatorReport): string {
+  return JSON.stringify(report, null, 2)
+}
+
+// ─── CSV ──────────────────────────────────────────────────────────────────────
+
+// Quote a CSV field when it contains a comma, quote, or newline; embedded
+// quotes are doubled (RFC 4180 style). Other values pass through unchanged.
+function csvEscape(value: string | number): string {
+  const s = String(value)
+  if (s.includes(',') || s.includes('"') || s.includes('\n')) {
+    return `"${s.replace(/"/g, '""')}"`
+  }
+  return s
+}
+
+/**
+ * Serialize the report as UTF-8 CSV (with BOM for Excel compatibility).
+ *
+ * Columns: label, text, font, maxWidth, lineHeight, pretextHeight, domHeight,
+ * diffPx, severity, executionTimeMs
+ *
+ * domHeight and diffPx are emitted as empty cells when no DOM reference was
+ * available (browser-less runs, where domHeight is NaN).
+ */
+export function toCSV(report: ValidatorReport): string {
+  const HEADER = [
+    'label',
+    'text',
+    'font',
+    'maxWidth',
+    'lineHeight',
+    'pretextHeight',
+    'domHeight',
+    'diffPx',
+    'severity',
+    'executionTimeMs',
+  ]
+
+  const rows = report.results.map((r) =>
+    [
+      r.sample.label,
+      r.sample.text,
+      r.sample.font,
+      r.sample.maxWidth,
+      r.sample.lineHeight,
+      r.pretextHeight,
+      Number.isNaN(r.domHeight) ? '' : r.domHeight,
+      Number.isNaN(r.domHeight) ? '' : r.diffPx.toFixed(2),
+      r.severity,
+      r.executionTimeMs.toFixed(2),
+    ]
+      .map(csvEscape)
+      .join(','),
+  )
+
+  // UTF-8 BOM + header + rows
+  return '\uFEFF' + [HEADER.join(','), ...rows].join('\n') + '\n'
+}
+
+// ─── Markdown ─────────────────────────────────────────────────────────────────
+
+/**
+ * Serialize the report as GitHub-flavored Markdown with a summary and table.
+ * Cells without a DOM reference (browser-less runs) are rendered as "n/a".
+ */
+export function toMarkdown(report: ValidatorReport): string {
+  const pct = (report.passRate * 100).toFixed(1)
+  const statusLine =
+    report.failed === 0
+      ? `✅ **${report.passed}/${report.total} passed (${pct}%)**`
+      : `❌ **${report.passed}/${report.total} passed (${pct}%) — ${report.failed} failure(s)**`
+
+  const header = [
+    '## Measurement Validator Report',
+    '',
+    `Generated: ${report.timestamp}`,
+    '',
+    statusLine,
+    '',
+    '| Label | Pretext (px) | DOM (px) | Diff (px) | Severity | Time (ms) |',
+    '|-------|-------------|---------|----------|----------|-----------|',
+  ]
+
+  const rows = report.results.map((r) => {
+    const dom = Number.isNaN(r.domHeight) ? 'n/a' : String(r.domHeight)
+    const diff = Number.isNaN(r.domHeight) ? 'n/a' : r.diffPx.toFixed(2)
+    const icon = SEVERITY_ICON[r.severity] ?? '?'
+    return `| ${r.sample.label} | ${r.pretextHeight} | ${dom} | ${diff} | ${icon} ${r.severity} | ${r.executionTimeMs.toFixed(2)} |`
+  })
+
+  return [...header, ...rows, ''].join('\n')
+}
+
+// ─── HTML ─────────────────────────────────────────────────────────────────────
+
+// Escape &, <, > and " for safe interpolation into HTML text and attribute
+// values. '&' must be replaced first so entities are not double-escaped.
+function htmlEscape(s: string): string {
+  return s
+    .replace(/&/g, '&amp;')
+    .replace(/</g, '&lt;')
+    .replace(/>/g, '&gt;')
+    .replace(/"/g, '&quot;')
+}
+
+/**
+ * Serialize the report as a single-file HTML page with a summary and table.
+ * Intentionally minimal: no external dependencies, no JavaScript.
+ */
+export function toHTML(report: ValidatorReport): string {
+  const pct = (report.passRate * 100).toFixed(1)
+  const statusColor = report.failed === 0 ? '#2da44e' : '#cf222e'
+  const statusText =
+    report.failed === 0
+      ? `✅ ${report.passed}/${report.total} (${pct}%)`
+      : `❌ ${report.passed}/${report.total} (${pct}%) — ${report.failed} failure(s)`
+
+  const SEVERITY_COLOR: Record<DivergenceSeverity, string> = {
+    exact: '#2da44e',
+    minor: '#bf8700',
+    major: '#cf222e',
+    critical: '#8c0000',
+  }
+
+  const rows = report.results
+    .map((r) => {
+      const dom = Number.isNaN(r.domHeight) ? 'n/a' : String(r.domHeight)
+      const diff = Number.isNaN(r.domHeight) ? 'n/a' : r.diffPx.toFixed(2)
+      const color = SEVERITY_COLOR[r.severity] ?? '#000'
+      const icon = SEVERITY_ICON[r.severity] ?? '?'
+      // Only the label (arbitrary fixture text) needs escaping; the numeric
+      // columns are generated here and cannot contain markup.
+      return [
+        '<tr>',
+        `<td>${htmlEscape(r.sample.label)}</td>`,
+        `<td>${r.pretextHeight}</td>`,
+        `<td>${dom}</td>`,
+        `<td>${diff}</td>`,
+        `<td style="color:${color}">${icon} ${r.severity}</td>`,
+        `<td>${r.executionTimeMs.toFixed(2)}</td>`,
+        '</tr>',
+      ].join('')
+    })
+    .join('\n')
+
+  return `<!doctype html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Measurement Validator Report</title>
+<style>
+  body { font: 14px/1.5 system-ui, sans-serif; margin: 2rem; }
+  table { border-collapse: collapse; }
+  th, td { border: 1px solid #d0d7de; padding: 4px 8px; text-align: left; }
+</style>
+</head>
+<body>
+<h1>Measurement Validator Report</h1>
+<p style="color:${statusColor}">${statusText}</p>
+<p>Generated: ${htmlEscape(report.timestamp)}</p>
+<table>
+<thead>
+<tr>
+  <th>Label</th>
+  <th>Pretext (px)</th>
+  <th>DOM (px)</th>
+  <th>Diff (px)</th>
+  <th>Severity</th>
+  <th>Time (ms)</th>
+</tr>
+</thead>
+<tbody>
+${rows}
+</tbody>
+</table>
+</body>
+</html>
+`
+}
diff --git a/src/measurement-validator/test-suite.ts b/src/measurement-validator/test-suite.ts
new file mode 100644
index 00000000..8c52bc86
--- /dev/null
+++ b/src/measurement-validator/test-suite.ts
@@ -0,0 +1,122 @@
+// Test fixtures covering a variety of English language patterns.
+// These are used by the validator CLI and unit tests.
+
+import type { MeasurementSample } from './types.js'
+
+const FONT = '16px sans-serif'
+const LINE_HEIGHT = 20
+const MAX_WIDTH = 320
+
+/** 15 durable English test fixtures. */
+export const englishFixtures: MeasurementSample[] = [
+  {
+    label: 'en-short-word',
+    text: 'Hello world',
+    font: FONT,
+    maxWidth: MAX_WIDTH,
+    lineHeight: LINE_HEIGHT,
+  },
+  {
+    label: 'en-long-sentence',
+    text: 'The quick brown fox jumps over the lazy dog near the river bank.',
+    font: FONT,
+    maxWidth: MAX_WIDTH,
+    lineHeight: LINE_HEIGHT,
+  },
+  {
+    label: 'en-narrow-width',
+    text: 'Typography matters in every application.',
+    font: FONT,
+    maxWidth: 120,
+    lineHeight: LINE_HEIGHT,
+  },
+  {
+    label: 'en-wide-width',
+    text: 'Wide containers rarely wrap short text like this.',
+    font: FONT,
+    maxWidth: 800,
+    lineHeight: LINE_HEIGHT,
+  },
+  {
+    label: 'en-multiple-sentences',
+    text: 'First sentence here. Second sentence follows. Third sentence ends the paragraph.',
+    font: FONT,
+    maxWidth: MAX_WIDTH,
+    lineHeight: LINE_HEIGHT,
+  },
+  {
+    label: 'en-long-word',
+    text: 'Supercalifragilisticexpialidocious is a famously long word.',
+    font: FONT,
+    maxWidth: MAX_WIDTH,
+    lineHeight: LINE_HEIGHT,
+  },
+  {
+    label: 'en-punctuation',
+    text: 'Wait — really? Yes, absolutely. (Or maybe not.)',
+    font: FONT,
+    maxWidth: MAX_WIDTH,
+    lineHeight: LINE_HEIGHT,
+  },
+  {
+    label: 'en-numbers',
+    text: 'Version 3.14 was released on 2024-01-15 at 09:30 AM.',
+    font: FONT,
+    maxWidth: MAX_WIDTH,
+    lineHeight: LINE_HEIGHT,
+  },
+  {
+    label: 'en-url',
+    text: 'Visit https://example.com/path?query=value for more details.',
+    font: FONT,
+    maxWidth: MAX_WIDTH,
+    lineHeight: LINE_HEIGHT,
+  },
+  {
+    label: 'en-caps',
+    text: 'ALL CAPS TEXT CAN APPEAR IN HEADINGS AND LABELS.',
+    font: FONT,
+    maxWidth: MAX_WIDTH,
+    lineHeight: LINE_HEIGHT,
+  },
+  {
+    label: 'en-mixed-case',
+    text: 'JavaScript, TypeScript, WebAssembly, and CSS are web technologies.',
+    font: FONT,
+    maxWidth: MAX_WIDTH,
+    lineHeight: LINE_HEIGHT,
+  },
+  {
+    label: 'en-emoji',
+    text: 'Great work today 🎉 Keep it up! 🚀',
+    font: FONT,
+    maxWidth: MAX_WIDTH,
+    lineHeight: LINE_HEIGHT,
+  },
+  {
+    label: 'en-small-font',
+    text: 'Smaller font sizes are common in secondary text and captions.',
+    font: '12px sans-serif',
+    maxWidth: MAX_WIDTH,
+    lineHeight: 16,
+  },
+  {
+    label: 'en-large-font',
+    text: 'Large display headings are typically short.',
+    font: '32px sans-serif',
+    maxWidth: MAX_WIDTH,
+    lineHeight: 40,
+  },
+  {
+    label: 'en-whitespace-runs',
+    // Deliberate leading/trailing spaces and internal multi-space runs so this
+    // fixture actually exercises whitespace collapsing, as its label promises.
+    text: '  Leading and trailing   spaces   and   internal gaps.  ',
+    font: FONT,
+    maxWidth: MAX_WIDTH,
+    lineHeight: LINE_HEIGHT,
+  },
+]
+
+/** All built-in fixtures, indexed by language tag (e.g. 'en'). */
+export const fixtures: Record<string, MeasurementSample[]> = {
+  en: englishFixtures,
+}
diff --git a/src/measurement-validator/types.ts b/src/measurement-validator/types.ts
new file mode 100644
index 00000000..bf6b68ad
--- /dev/null
+++ b/src/measurement-validator/types.ts
@@ -0,0 +1,57 @@
+// Core types for the measurement validator.
+//
+// The validator compares Pretext's computed line heights against DOM reference
+// measurements. Each sample captures a single text + font + maxWidth combination;
+// results are collected into a report.
+
+/** A single text measurement input. */
+export type MeasurementSample = {
+  /** Human-readable label for diagnostics. */
+  label: string
+  /** The text string to measure. */
+  text: string
+  /** CSS font shorthand, e.g. "16px sans-serif". */
+  font: string
+  /** Container width in pixels. */
+  maxWidth: number
+  /** Line height in pixels used for Pretext layout. */
+  lineHeight: number
+  /**
+   * Optional language tag for Intl.Segmenter locale, e.g. "ar", "ja".
+   * When undefined the default locale is used (comparator skips setLocale).
+   */
+  language?: string | undefined
+}
+
+/** Severity of a measurement divergence. */
+export type DivergenceSeverity =
+  | 'exact' // diffPx <= 1px
+  | 'minor' // 1px < diffPx <= 4px
+  | 'major' // 4px < diffPx <= 20px
+  | 'critical' // diffPx > 20px
+
+/** Result of comparing Pretext height against a DOM reference height. */
+export type ComparisonResult = {
+  sample: MeasurementSample
+  /** Height predicted by Pretext (lineHeight × lineCount). */
+  pretextHeight: number
+  /** DOM reference height in pixels (NaN when DOM is unavailable). */
+  domHeight: number
+  /**
+   * Absolute pixel difference (|pretextHeight − domHeight|).
+   * Forced to 0 (severity 'exact') when domHeight is NaN — see comparator.
+   */
+  diffPx: number
+  severity: DivergenceSeverity
+  /** Execution time for this comparison in milliseconds. */
+  executionTimeMs: number
+}
+
+/** Aggregate report produced after running a set of samples. */
+export type ValidatorReport = {
+  /** ISO timestamp of when the report was generated. */
+  timestamp: string
+  /** Total number of samples tested. */
+  total: number
+  /** Number of samples with severity === 'exact'. */
+  passed: number
+  /** Number of samples with severity !== 'exact'. */
+  failed: number
+  /** Overall pass rate 0–1 (defined as 1 for an empty run). */
+  passRate: number
+  results: ComparisonResult[]
+}