+
+
+
+
+
+
DUAL FUSION
+
+
+
Enable your webcam for live video pose estimation.
+ Or switch to CSI Only mode for WiFi-based sensing.
+
+
+
+
+
+
+
+
+
+
◆ Fusion Confidence
+
+
+ Cross-modal: 0.000
+
+
+
+
+
+
◆ CSI Amplitude Heatmap
+
+
+
+
+
+
+
+
◆ Embedding Space (2D Projection)
+
+
+
+
+
+
+
+
+
+
+
◆ Controls
+
+
+
+
+
+
+
+ 0.30
+
+
+
+
◆ Live CSI Source
+
+
+
+
+
+
+
+
+
+
+
+
+ WiFi-DensePose · Dual-Modal Pose Estimation ·
+ Architecture: MobileNet-V3 × 2 → Attention Fusion → 17-Keypoint COCO
+
+
+
+
+
+
+
+
+
diff --git a/ui/pose-fusion/build.sh b/ui/pose-fusion/build.sh
new file mode 100644
index 00000000..4d76eba2
--- /dev/null
+++ b/ui/pose-fusion/build.sh
@@ -0,0 +1,30 @@
#!/bin/bash
# Build WASM packages for the dual-modal pose estimation demo.
# Requires: wasm-pack (cargo install wasm-pack)
#
# Usage: ./build.sh
#
# Output: pkg/ruvector_cnn_wasm/ — WASM CNN embedder for browser

set -e

# Fail fast with an actionable message: under `set -e` a missing wasm-pack
# would otherwise abort mid-build with a bare "command not found".
if ! command -v wasm-pack >/dev/null 2>&1; then
  echo "error: wasm-pack not found. Install it with: cargo install wasm-pack" >&2
  exit 1
fi

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
VENDOR_DIR="$SCRIPT_DIR/../../vendor/ruvector"
OUT_DIR="$SCRIPT_DIR/pkg/ruvector_cnn_wasm"

echo "Building ruvector-cnn-wasm..."
wasm-pack build "$VENDOR_DIR/crates/ruvector-cnn-wasm" \
  --target web \
  --out-dir "$OUT_DIR" \
  --no-typescript

# Remove .gitignore so we can commit the build output for GitHub Pages
rm -f "$OUT_DIR/.gitignore"

echo ""
echo "Build complete!"
echo "  WASM: $(du -sh "$OUT_DIR/ruvector_cnn_wasm_bg.wasm" | cut -f1)"
echo "  JS:   $(du -sh "$OUT_DIR/ruvector_cnn_wasm.js" | cut -f1)"
echo ""
echo "Serve the demo: cd $SCRIPT_DIR/.. && python3 -m http.server 8080"
echo "Open: http://localhost:8080/pose-fusion.html"
diff --git a/ui/pose-fusion/css/style.css b/ui/pose-fusion/css/style.css
new file mode 100644
index 00000000..1bf5dd89
--- /dev/null
+++ b/ui/pose-fusion/css/style.css
@@ -0,0 +1,405 @@
/* WiFi-DensePose — Dual-Modal Pose Fusion Demo
   Dark theme matching Observatory */

@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;600;700&family=JetBrains+Mono:wght@400;600&display=swap');

/* Design tokens. Color semantics used throughout the demo:
   green = video modality, amber = CSI (WiFi) modality, cyan = fused/signal. */
:root {
  --bg-deep: #080c14;
  --bg-panel: rgba(8, 16, 28, 0.92);
  --bg-panel-border: rgba(0, 210, 120, 0.25);
  --green-glow: #00d878;
  --green-bright:#3eff8a;
  --green-dim: #0a6b3a;
  --amber: #ffb020;
  --amber-dim: #a06800;
  --blue-signal: #2090ff;
  --blue-dim: #0a3060;
  --red-alert: #ff3040;
  --cyan: #00e5ff;
  --text-primary: #e8ece0;
  --text-secondary: rgba(232,236,224, 0.55);
  --text-label: rgba(232,236,224, 0.35);
  --radius: 8px;
}

* { margin: 0; padding: 0; box-sizing: border-box; }

body {
  background: var(--bg-deep);
  font-family: 'Inter', -apple-system, sans-serif;
  color: var(--text-primary);
  -webkit-font-smoothing: antialiased;
  overflow-x: hidden;
  min-height: 100vh;
}

/* === Header === */
.header {
  display: flex;
  align-items: center;
  justify-content: space-between;
  padding: 16px 24px;
  border-bottom: 1px solid var(--bg-panel-border);
  background: var(--bg-panel);
  backdrop-filter: blur(12px);
}

.header-left {
  display: flex;
  align-items: center;
  gap: 16px;
}

.logo {
  font-weight: 700;
  font-size: 24px;
  color: var(--green-glow);
}

.logo .pi { font-style: normal; }

.header-title {
  font-size: 14px;
  color: var(--text-secondary);
  font-weight: 300;
}

.header-right {
  display: flex;
  align-items: center;
  gap: 16px;
}

.mode-select {
  background: rgba(0,210,120,0.1);
  border: 1px solid var(--bg-panel-border);
  color: var(--text-primary);
  padding: 6px 12px;
  border-radius: var(--radius);
  font-family: inherit;
  font-size: 13px;
  cursor: pointer;
}

/* Native <option> popups ignore the translucent panel bg; use a solid color. */
.mode-select option { background: #0c1420; }

.status-badge {
  display: flex;
  align-items: center;
  gap: 6px;
  font-family: 'JetBrains Mono', monospace;
  font-size: 12px;
  padding: 4px 10px;
  border-radius: 12px;
  background: rgba(0,210,120,0.1);
  border: 1px solid var(--bg-panel-border);
}

.status-dot {
  width: 8px; height: 8px;
  border-radius: 50%;
  background: var(--green-glow);
  box-shadow: 0 0 8px var(--green-glow);
  animation: pulse-dot 2s ease infinite;
}

.status-dot.offline { background: #555; box-shadow: none; animation: none; }
.status-dot.warning { background: var(--amber); box-shadow: 0 0 8px var(--amber); }

@keyframes pulse-dot {
  0%, 100% { opacity: 1; }
  50% { opacity: 0.5; }
}

.fps-badge {
  font-family: 'JetBrains Mono', monospace;
  font-size: 12px;
  color: var(--green-glow);
}

.back-link {
  color: var(--text-secondary);
  text-decoration: none;
  font-size: 13px;
  transition: color 0.2s;
}
.back-link:hover { color: var(--green-glow); }

/* === Main Layout === */
/* Video fills the flexible column; side panels are fixed at 360px.
   NOTE(review): the 72px subtracted here presumably equals the header's
   rendered height — keep in sync if header padding/content changes. */
.main-grid {
  display: grid;
  grid-template-columns: 1fr 360px;
  grid-template-rows: 1fr auto;
  gap: 16px;
  padding: 16px 24px;
  height: calc(100vh - 72px);
  overflow: hidden;
}

/* === Video Panel === */
.video-panel {
  position: relative;
  background: #000;
  border-radius: var(--radius);
  border: 1px solid var(--bg-panel-border);
  overflow: hidden;
  min-height: 0;
}

/* Mirrored for selfie-style preview; the overlay canvas below mirrors
   identically so skeleton keypoints stay aligned with the video. */
.video-panel video {
  width: 100%;
  height: 100%;
  object-fit: cover;
  transform: scaleX(-1);
}

.video-panel canvas {
  position: absolute;
  top: 0; left: 0;
  width: 100%;
  height: 100%;
  transform: scaleX(-1);
}

/* NOTE(review): this scaleX(-1) flips the label element itself, but its
   parent (.video-panel) is NOT flipped — verify against the markup that the
   label text is not rendered mirrored. */
.video-overlay-label {
  position: absolute;
  top: 12px; left: 12px;
  font-family: 'JetBrains Mono', monospace;
  font-size: 11px;
  padding: 4px 8px;
  background: rgba(0,0,0,0.7);
  border-radius: 4px;
  color: var(--green-glow);
  z-index: 5;
  transform: scaleX(-1);
}

.camera-prompt {
  position: absolute;
  top: 50%; left: 50%;
  transform: translate(-50%, -50%);
  text-align: center;
  color: var(--text-secondary);
}

.camera-prompt button {
  margin-top: 12px;
  padding: 10px 24px;
  background: var(--green-glow);
  color: #000;
  border: none;
  border-radius: var(--radius);
  font-family: inherit;
  font-weight: 600;
  font-size: 14px;
  cursor: pointer;
  transition: background 0.2s;
}

.camera-prompt button:hover { background: var(--green-bright); }

/* === Side Panels === */
.side-panels {
  display: flex;
  flex-direction: column;
  gap: 12px;
  overflow-y: auto;
  min-height: 0;
}

.panel {
  background: var(--bg-panel);
  border: 1px solid var(--bg-panel-border);
  border-radius: var(--radius);
  padding: 14px;
}

.panel-title {
  font-size: 11px;
  text-transform: uppercase;
  letter-spacing: 1.2px;
  color: var(--text-label);
  margin-bottom: 10px;
  display: flex;
  align-items: center;
  gap: 6px;
}

/* === CSI Heatmap === */
.csi-canvas-wrapper {
  position: relative;
  border-radius: 4px;
  overflow: hidden;
  background: #000;
}

.csi-canvas-wrapper canvas {
  width: 100%;
  display: block;
}

/* === Fusion Bars === */
.fusion-bars {
  display: flex;
  flex-direction: column;
  gap: 8px;
}

.bar-row {
  display: flex;
  align-items: center;
  gap: 8px;
}

.bar-label {
  font-family: 'JetBrains Mono', monospace;
  font-size: 11px;
  color: var(--text-secondary);
  width: 55px;
  text-align: right;
}

.bar-track {
  flex: 1;
  height: 6px;
  background: rgba(255,255,255,0.06);
  border-radius: 3px;
  overflow: hidden;
}

.bar-fill {
  height: 100%;
  border-radius: 3px;
  transition: width 0.3s ease;
}

/* Modality colors match the design tokens above: cyan=video, amber=CSI,
   green=fused. */
.bar-fill.video { background: var(--cyan); }
.bar-fill.csi { background: var(--amber); }
.bar-fill.fused { background: var(--green-glow); box-shadow: 0 0 8px var(--green-glow); }

.bar-value {
  font-family: 'JetBrains Mono', monospace;
  font-size: 11px;
  color: var(--text-primary);
  width: 36px;
}

/* === Embedding Space === */
.embedding-canvas-wrapper {
  position: relative;
  background: #000;
  border-radius: 4px;
  overflow: hidden;
}
.embedding-canvas-wrapper canvas {
  width: 100%;
  display: block;
}

/* === Latency Panel === */
.latency-grid {
  display: grid;
  grid-template-columns: repeat(4, 1fr);
  gap: 6px;
}

.latency-item {
  text-align: center;
  padding: 6px 0;
}

.latency-value {
  font-family: 'JetBrains Mono', monospace;
  font-size: 16px;
  font-weight: 600;
  color: var(--green-glow);
}

.latency-label {
  font-size: 10px;
  color: var(--text-label);
  margin-top: 2px;
}

/* === Controls === */
.controls-row {
  display: flex;
  gap: 8px;
  flex-wrap: wrap;
}

.btn {
  padding: 6px 14px;
  border: 1px solid var(--bg-panel-border);
  background: rgba(0,210,120,0.08);
  color: var(--text-primary);
  border-radius: var(--radius);
  font-family: inherit;
  font-size: 12px;
  cursor: pointer;
  transition: all 0.2s;
}
.btn:hover { background: rgba(0,210,120,0.2); }
.btn.active { background: var(--green-glow); color: #000; font-weight: 600; }

.slider-row {
  display: flex;
  align-items: center;
  gap: 8px;
  margin-top: 8px;
}

.slider-row label {
  font-size: 11px;
  color: var(--text-secondary);
  white-space: nowrap;
}

.slider-row input[type=range] {
  flex: 1;
  accent-color: var(--green-glow);
}

.slider-row .slider-val {
  font-family: 'JetBrains Mono', monospace;
  font-size: 11px;
  width: 32px;
  color: var(--green-glow);
}

/* === Bottom Bar === */
.bottom-bar {
  grid-column: 1 / -1;
  display: flex;
  align-items: center;
  justify-content: space-between;
  padding: 10px 16px;
  background: var(--bg-panel);
  border: 1px solid var(--bg-panel-border);
  border-radius: var(--radius);
  font-family: 'JetBrains Mono', monospace;
  font-size: 11px;
  color: var(--text-secondary);
}

.bottom-bar a {
  color: var(--green-glow);
  text-decoration: none;
}

/* === Skeleton colors === */
.skeleton-joint { fill: var(--green-glow); }
.skeleton-limb { stroke: var(--green-bright); }
.skeleton-joint-csi { fill: var(--amber); }
.skeleton-limb-csi { stroke: var(--amber); }

/* === Responsive === */
/* Single-column stacking for narrow viewports. */
@media (max-width: 900px) {
  .main-grid {
    grid-template-columns: 1fr;
    height: auto;
    overflow: auto;
  }
  .video-panel { aspect-ratio: 16/9; max-height: 50vh; }
  .side-panels { max-height: none; overflow: visible; }
}
diff --git a/ui/pose-fusion/js/canvas-renderer.js b/ui/pose-fusion/js/canvas-renderer.js
new file mode 100644
index 00000000..8ac169d9
--- /dev/null
+++ b/ui/pose-fusion/js/canvas-renderer.js
@@ -0,0 +1,247 @@
+/**
+ * CanvasRenderer — Renders skeleton overlay on video, CSI heatmap,
+ * embedding space visualization, and fusion confidence bars.
+ */
+
+import { SKELETON_CONNECTIONS } from './pose-decoder.js';
+
export class CanvasRenderer {
  constructor() {
    // Palette: green = video-derived skeleton, amber = CSI-derived skeleton,
    // cyan/green = embedding-space point colors.
    this.colors = {
      joint: '#00d878',
      jointGlow: 'rgba(0, 216, 120, 0.4)',
      limb: '#3eff8a',
      limbGlow: 'rgba(62, 255, 138, 0.15)',
      csiJoint: '#ffb020',
      csiLimb: '#ffc850',
      fused: '#00e5ff',
      confidence: 'rgba(255,255,255,0.3)',
      videoEmb: '#00e5ff',
      csiEmb: '#ffb020',
      fusedEmb: '#00d878',
    };
  }

  /**
   * Draw skeleton overlay on the video canvas
   * @param {CanvasRenderingContext2D} ctx
   * @param {Array<{x,y,confidence}>} keypoints - Normalized [0,1] coordinates
   * @param {number} width - Canvas width
   * @param {number} height - Canvas height
   * @param {object} opts - { minConfidence, color: 'green'|'amber', label }
   */
  drawSkeleton(ctx, keypoints, width, height, opts = {}) {
    // `??` (not `||`) so an explicit minConfidence of 0 is respected.
    const minConf = opts.minConfidence ?? 0.3;
    const color = opts.color || 'green';
    const jointColor = color === 'amber' ? this.colors.csiJoint : this.colors.joint;
    const limbColor = color === 'amber' ? this.colors.csiLimb : this.colors.limb;
    const glowColor = color === 'amber' ? 'rgba(255,176,32,0.4)' : this.colors.jointGlow;
    // Fix: the limb glow previously always used the green glow, even for the
    // amber (CSI) skeleton; use a matching amber glow so modalities stay distinct.
    const limbGlowColor = color === 'amber' ? 'rgba(255,176,32,0.15)' : this.colors.limbGlow;

    ctx.clearRect(0, 0, width, height);

    if (!keypoints || keypoints.length === 0) return;

    // Draw limbs first (behind joints)
    ctx.lineCap = 'round';

    for (const [i, j] of SKELETON_CONNECTIONS) {
      const kpA = keypoints[i];
      const kpB = keypoints[j];
      if (!kpA || !kpB || kpA.confidence < minConf || kpB.confidence < minConf) continue;

      const ax = kpA.x * width, ay = kpA.y * height;
      const bx = kpB.x * width, by = kpB.y * height;
      const avgConf = (kpA.confidence + kpB.confidence) / 2;

      // Glow
      ctx.strokeStyle = limbGlowColor;
      ctx.lineWidth = 8;
      ctx.globalAlpha = avgConf * 0.4;
      ctx.beginPath();
      ctx.moveTo(ax, ay);
      ctx.lineTo(bx, by);
      ctx.stroke();

      // Main line
      ctx.strokeStyle = limbColor;
      ctx.lineWidth = 2.5;
      ctx.globalAlpha = avgConf;
      ctx.beginPath();
      ctx.moveTo(ax, ay);
      ctx.lineTo(bx, by);
      ctx.stroke();
    }

    // Draw joints
    ctx.globalAlpha = 1;
    for (const kp of keypoints) {
      if (!kp || kp.confidence < minConf) continue;

      const x = kp.x * width;
      const y = kp.y * height;
      // Radius scales with confidence so uncertain joints read as smaller.
      const r = 3 + kp.confidence * 3;

      // Glow
      ctx.beginPath();
      ctx.arc(x, y, r + 4, 0, Math.PI * 2);
      ctx.fillStyle = glowColor;
      ctx.globalAlpha = kp.confidence * 0.6;
      ctx.fill();

      // Joint dot
      ctx.beginPath();
      ctx.arc(x, y, r, 0, Math.PI * 2);
      ctx.fillStyle = jointColor;
      ctx.globalAlpha = kp.confidence;
      ctx.fill();

      // White center
      ctx.beginPath();
      ctx.arc(x, y, r * 0.4, 0, Math.PI * 2);
      ctx.fillStyle = '#fff';
      ctx.globalAlpha = kp.confidence * 0.8;
      ctx.fill();
    }

    ctx.globalAlpha = 1;

    // Confidence label
    if (opts.label) {
      ctx.font = '11px "JetBrains Mono", monospace';
      ctx.fillStyle = jointColor;
      ctx.globalAlpha = 0.8;
      ctx.fillText(opts.label, 8, height - 8);
      ctx.globalAlpha = 1;
    }
  }

  /**
   * Draw CSI amplitude heatmap
   * @param {CanvasRenderingContext2D} ctx
   * @param {{ data: Float32Array, width: number, height: number }} heatmap
   * @param {number} canvasW
   * @param {number} canvasH
   */
  drawCsiHeatmap(ctx, heatmap, canvasW, canvasH) {
    ctx.clearRect(0, 0, canvasW, canvasH);

    // Placeholder until at least two CSI time rows have arrived.
    if (!heatmap || !heatmap.data || heatmap.height < 2) {
      ctx.fillStyle = '#0a0e18';
      ctx.fillRect(0, 0, canvasW, canvasH);
      ctx.font = '11px "JetBrains Mono", monospace';
      ctx.fillStyle = 'rgba(255,255,255,0.3)';
      ctx.fillText('Waiting for CSI data...', 8, canvasH / 2);
      return;
    }

    const { data, width: dw, height: dh } = heatmap;
    const cellW = canvasW / dw;
    const cellH = canvasH / dh;

    for (let y = 0; y < dh; y++) {
      for (let x = 0; x < dw; x++) {
        const val = Math.min(1, Math.max(0, data[y * dw + x]));
        ctx.fillStyle = this._heatmapColor(val);
        // +0.5 overdraw hides sub-pixel seams between cells.
        ctx.fillRect(x * cellW, y * cellH, cellW + 0.5, cellH + 0.5);
      }
    }

    // Axis labels
    ctx.font = '9px "JetBrains Mono", monospace';
    ctx.fillStyle = 'rgba(255,255,255,0.4)';
    ctx.fillText('Subcarrier →', 4, canvasH - 4);
    ctx.save();
    ctx.translate(canvasW - 4, canvasH - 4);
    ctx.rotate(-Math.PI / 2);
    ctx.fillText('Time ↑', 0, 0);
    ctx.restore();
  }

  /**
   * Draw embedding space 2D projection
   * @param {CanvasRenderingContext2D} ctx
   * @param {{ video: Array, csi: Array, fused: Array }} points - Each entry is
   *   an array of [x, y] pairs roughly in [-1, 1], oldest first.
   * @param {number} w
   * @param {number} h
   */
  drawEmbeddingSpace(ctx, points, w, h) {
    ctx.fillStyle = '#050810';
    ctx.fillRect(0, 0, w, h);

    // Grid
    ctx.strokeStyle = 'rgba(255,255,255,0.05)';
    ctx.lineWidth = 0.5;
    for (let i = 0; i <= 4; i++) {
      const x = (i / 4) * w;
      ctx.beginPath(); ctx.moveTo(x, 0); ctx.lineTo(x, h); ctx.stroke();
      const y = (i / 4) * h;
      ctx.beginPath(); ctx.moveTo(0, y); ctx.lineTo(w, y); ctx.stroke();
    }

    // Axes
    ctx.strokeStyle = 'rgba(255,255,255,0.1)';
    ctx.lineWidth = 1;
    ctx.beginPath(); ctx.moveTo(w / 2, 0); ctx.lineTo(w / 2, h); ctx.stroke();
    ctx.beginPath(); ctx.moveTo(0, h / 2); ctx.lineTo(w, h / 2); ctx.stroke();

    const drawPoints = (pts, color, size) => {
      if (!pts || pts.length === 0) return;
      const len = pts.length;
      for (let i = 0; i < len; i++) {
        const p = pts[i];
        if (!p) continue;
        const age = 1 - (i / len) * 0.7; // Fade older points
        const px = w / 2 + p[0] * w * 0.35;
        const py = h / 2 + p[1] * h * 0.35;

        if (px < 0 || px > w || py < 0 || py > h) continue;

        ctx.beginPath();
        ctx.arc(px, py, size, 0, Math.PI * 2);
        ctx.fillStyle = color;
        ctx.globalAlpha = age * 0.7;
        ctx.fill();
      }
    };

    drawPoints(points.video, this.colors.videoEmb, 3);
    drawPoints(points.csi, this.colors.csiEmb, 3);
    drawPoints(points.fused, this.colors.fusedEmb, 4);
    ctx.globalAlpha = 1;

    // Legend
    ctx.font = '9px "JetBrains Mono", monospace';
    const legends = [
      { color: this.colors.videoEmb, label: 'Video' },
      { color: this.colors.csiEmb, label: 'CSI' },
      { color: this.colors.fusedEmb, label: 'Fused' },
    ];
    legends.forEach((l, i) => {
      const ly = 12 + i * 14;
      ctx.fillStyle = l.color;
      ctx.beginPath();
      ctx.arc(10, ly - 3, 3, 0, Math.PI * 2);
      ctx.fill();
      ctx.fillStyle = 'rgba(255,255,255,0.5)';
      ctx.fillText(l.label, 18, ly);
    });
  }

  /**
   * Map a [0,1] value onto a dark-blue → cyan → green → yellow → red ramp.
   * @param {number} val - clamped amplitude
   * @returns {string} CSS rgb() color
   */
  _heatmapColor(val) {
    if (val < 0.25) {
      const t = val / 0.25;
      return `rgb(${Math.floor(t * 20)}, ${Math.floor(20 + t * 60)}, ${Math.floor(60 + t * 100)})`;
    } else if (val < 0.5) {
      const t = (val - 0.25) / 0.25;
      return `rgb(${Math.floor(20 + t * 20)}, ${Math.floor(80 + t * 100)}, ${Math.floor(160 - t * 60)})`;
    } else if (val < 0.75) {
      const t = (val - 0.5) / 0.25;
      return `rgb(${Math.floor(40 + t * 180)}, ${Math.floor(180 + t * 75)}, ${Math.floor(100 - t * 80)})`;
    } else {
      const t = (val - 0.75) / 0.25;
      return `rgb(${Math.floor(220 + t * 35)}, ${Math.floor(255 - t * 120)}, ${Math.floor(20 - t * 20)})`;
    }
  }
}
diff --git a/ui/pose-fusion/js/cnn-embedder.js b/ui/pose-fusion/js/cnn-embedder.js
new file mode 100644
index 00000000..5000b9d3
--- /dev/null
+++ b/ui/pose-fusion/js/cnn-embedder.js
@@ -0,0 +1,226 @@
+/**
+ * CNN Embedder — Lightweight MobileNet-V3-style feature extractor.
+ *
+ * Architecture mirrors ruvector-cnn: Conv2D → BatchNorm → ReLU → Pool → Project → L2 Normalize
+ * Uses pre-seeded random weights (deterministic). When ruvector-cnn-wasm is available,
+ * transparently delegates to the WASM implementation.
+ *
+ * Two instances are created: one for video frames, one for CSI pseudo-images.
+ */
+
// Seeded PRNG for deterministic weight initialization
function mulberry32(seed) {
  return function() {
    let t = (seed += 0x6D2B79F5);
    t = Math.imul(t ^ (t >>> 15), t | 1);
    t ^= t + Math.imul(t ^ (t >>> 7), t | 61);
    return ((t ^ (t >>> 14)) >>> 0) / 4294967296;
  };
}

export class CnnEmbedder {
  /**
   * @param {object} opts
   * @param {number} opts.inputSize - Square input dimension (default 56 for speed)
   * @param {number} opts.embeddingDim - Output embedding dimension (default 128)
   * @param {boolean} opts.normalize - L2 normalize output
   * @param {number} opts.seed - PRNG seed for weight init
   */
  constructor(opts = {}) {
    this.inputSize = opts.inputSize || 56;
    this.embeddingDim = opts.embeddingDim || 128;
    this.normalize = opts.normalize !== false;
    this.wasmEmbedder = null;

    // Initialize weights with deterministic PRNG so two instances built with
    // the same seed produce identical embeddings.
    const rng = mulberry32(opts.seed || 42);
    const randRange = (lo, hi) => lo + rng() * (hi - lo);

    // Conv 3x3: 3 input channels → 16 output channels
    this.convWeights = new Float32Array(3 * 3 * 3 * 16);
    for (let i = 0; i < this.convWeights.length; i++) {
      this.convWeights[i] = randRange(-0.15, 0.15);
    }

    // BatchNorm params (16 channels) — identity by default (inference-only).
    this.bnGamma = new Float32Array(16).fill(1.0);
    this.bnBeta = new Float32Array(16).fill(0.0);
    this.bnMean = new Float32Array(16).fill(0.0);
    this.bnVar = new Float32Array(16).fill(1.0);

    // Projection: 16 → embeddingDim
    this.projWeights = new Float32Array(16 * this.embeddingDim);
    for (let i = 0; i < this.projWeights.length; i++) {
      this.projWeights[i] = randRange(-0.1, 0.1);
    }
  }

  /**
   * Try to load WASM embedder from ruvector-cnn-wasm package
   * @param {string} wasmPath - Path to the WASM package directory
   * @returns {Promise<boolean>} true if the WASM embedder is ready
   */
  async tryLoadWasm(wasmPath) {
    try {
      const mod = await import(`${wasmPath}/ruvector_cnn_wasm.js`);
      await mod.default();
      const config = new mod.EmbedderConfig();
      config.input_size = this.inputSize;
      config.embedding_dim = this.embeddingDim;
      config.normalize = this.normalize;
      this.wasmEmbedder = new mod.WasmCnnEmbedder(config);
      console.log('[CNN] WASM embedder loaded successfully');
      return true;
    } catch (e) {
      console.log('[CNN] WASM not available, using JS fallback:', e.message);
      return false;
    }
  }

  /**
   * Extract embedding from RGB image data
   * @param {Uint8Array} rgbData - RGB pixel data (H*W*3, interleaved)
   * @param {number} width
   * @param {number} height
   * @returns {Float32Array} embedding vector
   */
  extract(rgbData, width, height) {
    if (this.wasmEmbedder) {
      try {
        const result = this.wasmEmbedder.extract(rgbData, width, height);
        return new Float32Array(result);
      } catch (err) {
        // Fix: previously errors were swallowed silently and the broken WASM
        // path was retried on every frame. Surface the failure once and
        // permanently fall back to the JS implementation.
        console.warn('[CNN] WASM extract failed, falling back to JS:', err?.message ?? err);
        this.wasmEmbedder = null;
      }
    }
    return this._extractJS(rgbData, width, height);
  }

  // Pure-JS pipeline: resize → normalize → conv → BN → ReLU → pool → project → L2.
  _extractJS(rgbData, width, height) {
    // 1. Resize to inputSize × inputSize if needed
    const sz = this.inputSize;
    let input;
    if (width === sz && height === sz) {
      input = new Float32Array(rgbData.length);
      for (let i = 0; i < rgbData.length; i++) input[i] = rgbData[i] / 255.0;
    } else {
      input = this._resize(rgbData, width, height, sz, sz);
    }

    // 2. ImageNet normalization
    const mean = [0.485, 0.456, 0.406];
    const std = [0.229, 0.224, 0.225];
    const pixels = sz * sz;
    for (let i = 0; i < pixels; i++) {
      input[i * 3] = (input[i * 3] - mean[0]) / std[0];
      input[i * 3 + 1] = (input[i * 3 + 1] - mean[1]) / std[1];
      input[i * 3 + 2] = (input[i * 3 + 2] - mean[2]) / std[2];
    }

    // 3. Conv2D 3x3 (3 → 16 channels)
    const convOut = this._conv2d3x3(input, sz, sz, 3, 16);

    // 4. BatchNorm
    this._batchNorm(convOut, 16);

    // 5. ReLU
    for (let i = 0; i < convOut.length; i++) {
      if (convOut[i] < 0) convOut[i] = 0;
    }

    // 6. Global average pooling → 16-dim
    // Valid (no-padding) conv shrinks each spatial dim by 2.
    const outH = sz - 2, outW = sz - 2;
    const pooled = new Float32Array(16);
    const spatial = outH * outW;
    for (let i = 0; i < spatial; i++) {
      for (let c = 0; c < 16; c++) {
        pooled[c] += convOut[i * 16 + c];
      }
    }
    for (let c = 0; c < 16; c++) pooled[c] /= spatial;

    // 7. Linear projection → embeddingDim
    const emb = new Float32Array(this.embeddingDim);
    for (let o = 0; o < this.embeddingDim; o++) {
      let sum = 0;
      for (let i = 0; i < 16; i++) {
        sum += pooled[i] * this.projWeights[i * this.embeddingDim + o];
      }
      emb[o] = sum;
    }

    // 8. L2 normalize (skipped for near-zero vectors to avoid division blowup)
    if (this.normalize) {
      let norm = 0;
      for (let i = 0; i < emb.length; i++) norm += emb[i] * emb[i];
      norm = Math.sqrt(norm);
      if (norm > 1e-8) {
        for (let i = 0; i < emb.length; i++) emb[i] /= norm;
      }
    }

    return emb;
  }

  // Valid 3x3 convolution over HWC-interleaved input; output is (H-2)x(W-2)xCout.
  _conv2d3x3(input, H, W, Cin, Cout) {
    const outH = H - 2, outW = W - 2;
    const output = new Float32Array(outH * outW * Cout);
    for (let y = 0; y < outH; y++) {
      for (let x = 0; x < outW; x++) {
        for (let co = 0; co < Cout; co++) {
          let sum = 0;
          for (let ky = 0; ky < 3; ky++) {
            for (let kx = 0; kx < 3; kx++) {
              for (let ci = 0; ci < Cin; ci++) {
                const px = ((y + ky) * W + (x + kx)) * Cin + ci;
                const wt = (((ky * 3 + kx) * Cin) + ci) * Cout + co;
                sum += input[px] * this.convWeights[wt];
              }
            }
          }
          output[(y * outW + x) * Cout + co] = sum;
        }
      }
    }
    return output;
  }

  // In-place inference-mode batch normalization over channel-interleaved data.
  _batchNorm(data, channels) {
    const spatial = data.length / channels;
    for (let i = 0; i < spatial; i++) {
      for (let c = 0; c < channels; c++) {
        const idx = i * channels + c;
        data[idx] = this.bnGamma[c] * (data[idx] - this.bnMean[c]) / Math.sqrt(this.bnVar[c] + 1e-5) + this.bnBeta[c];
      }
    }
  }

  // Nearest-neighbor resize; returns float RGB in [0,1].
  _resize(rgbData, srcW, srcH, dstW, dstH) {
    const output = new Float32Array(dstW * dstH * 3);
    const xRatio = srcW / dstW;
    const yRatio = srcH / dstH;
    for (let y = 0; y < dstH; y++) {
      for (let x = 0; x < dstW; x++) {
        const sx = Math.min(Math.floor(x * xRatio), srcW - 1);
        const sy = Math.min(Math.floor(y * yRatio), srcH - 1);
        const srcIdx = (sy * srcW + sx) * 3;
        const dstIdx = (y * dstW + x) * 3;
        output[dstIdx] = rgbData[srcIdx] / 255.0;
        output[dstIdx + 1] = rgbData[srcIdx + 1] / 255.0;
        output[dstIdx + 2] = rgbData[srcIdx + 2] / 255.0;
      }
    }
    return output;
  }

  /** Cosine similarity between two embeddings; 0 for near-zero vectors. */
  static cosineSimilarity(a, b) {
    let dot = 0, normA = 0, normB = 0;
    for (let i = 0; i < a.length; i++) {
      dot += a[i] * b[i];
      normA += a[i] * a[i];
      normB += b[i] * b[i];
    }
    normA = Math.sqrt(normA);
    normB = Math.sqrt(normB);
    if (normA < 1e-8 || normB < 1e-8) return 0;
    return dot / (normA * normB);
  }
}
diff --git a/ui/pose-fusion/js/csi-simulator.js b/ui/pose-fusion/js/csi-simulator.js
new file mode 100644
index 00000000..62540995
--- /dev/null
+++ b/ui/pose-fusion/js/csi-simulator.js
@@ -0,0 +1,267 @@
+/**
+ * CSI Simulator — Generates realistic WiFi Channel State Information data.
+ *
+ * In live mode, connects to the sensing server via WebSocket.
+ * In demo mode, generates synthetic CSI that correlates with detected motion.
+ *
+ * Outputs: 3-channel pseudo-image (amplitude, phase, temporal diff)
+ * matching the ADR-018 frame format expectations.
+ */
+
export class CsiSimulator {
  constructor(opts = {}) {
    this.subcarriers = opts.subcarriers || 52;   // 802.11n HT20
    this.timeWindow = opts.timeWindow || 56;     // frames in sliding window
    this.mode = 'demo';                          // 'demo' | 'live'
    this.ws = null;

    // Circular buffer for CSI frames
    this.amplitudeBuffer = [];
    this.phaseBuffer = [];
    this.frameCount = 0;

    // Noise parameters
    this._rng = this._mulberry32(opts.seed || 7);
    this._noiseState = new Float32Array(this.subcarriers);
    this._baseAmplitude = new Float32Array(this.subcarriers);
    this._basePhase = new Float32Array(this.subcarriers);

    // Initialize base CSI profile (empty room)
    for (let i = 0; i < this.subcarriers; i++) {
      this._baseAmplitude[i] = 0.5 + 0.3 * Math.sin(i * 0.12);
      this._basePhase[i] = (i / this.subcarriers) * Math.PI * 2;
    }

    // Person influence (updated from video motion)
    this.personPresence = 0;
    this.personX = 0.5;
    this.personY = 0.5;
    this.personMotion = 0;
  }

  /**
   * Connect to live sensing server WebSocket.
   * Resolves true once connected, false on error or after a 3s timeout.
   * @param {string} url - WebSocket URL (e.g. ws://localhost:3030/ws/csi)
   */
  async connectLive(url) {
    return new Promise((resolve) => {
      // Fix: the promise could previously resolve twice (onerror + timeout),
      // and a timed-out socket was left connecting — a late onopen would then
      // silently flip mode to 'live' after the caller had moved on. Guard with
      // a settle-once flag and tear the socket down on timeout.
      let settled = false;
      const settle = (ok) => {
        if (settled) return;
        settled = true;
        resolve(ok);
      };
      try {
        this.ws = new WebSocket(url);
        this.ws.binaryType = 'arraybuffer';
        this.ws.onmessage = (evt) => this._handleLiveFrame(evt.data);
        this.ws.onopen = () => { this.mode = 'live'; settle(true); };
        this.ws.onerror = () => settle(false);
        this.ws.onclose = () => { this.mode = 'demo'; };
        // Timeout after 3s
        setTimeout(() => {
          if (!settled && this.mode !== 'live') {
            this.disconnect();
            settle(false);
          }
        }, 3000);
      } catch {
        settle(false);
      }
    });
  }

  disconnect() {
    if (this.ws) { this.ws.close(); this.ws = null; }
    this.mode = 'demo';
  }

  get isLive() { return this.mode === 'live'; }

  /**
   * Update person state from video detection (for correlated demo data).
   * When person exits frame, CSI maintains presence with slow decay
   * (simulating through-wall sensing capability).
   */
  updatePersonState(presence, x, y, motion) {
    if (presence > 0.1) {
      // Person detected in video — update CSI state directly
      this.personPresence = presence;
      this.personX = x;
      this.personY = y;
      this.personMotion = motion;
      this._lastSeenTime = performance.now();
      this._lastSeenX = x;
      this._lastSeenY = y;
    } else if (this._lastSeenTime) {
      // Person NOT in video — CSI "through-wall" persistence
      const elapsed = (performance.now() - this._lastSeenTime) / 1000;
      // CSI can sense through walls for ~10 seconds with decaying confidence
      const decayRate = 0.15; // Lose ~15% per second
      this.personPresence = Math.max(0, 1.0 - elapsed * decayRate);
      // Position slowly drifts (person walking behind wall)
      this.personX = this._lastSeenX;
      this.personY = this._lastSeenY;
      this.personMotion = Math.max(0, motion * 0.5 + this.personPresence * 0.2);

      if (this.personPresence < 0.05) {
        this._lastSeenTime = null;
      }
    } else {
      this.personPresence = 0;
      this.personMotion = 0;
    }
  }

  /**
   * Generate next CSI frame (demo mode) or return latest live frame
   * @param {number} elapsed - Time in seconds
   * @returns {{ amplitude: Float32Array, phase: Float32Array, snr: number }}
   */
  nextFrame(elapsed) {
    const amp = new Float32Array(this.subcarriers);
    const phase = new Float32Array(this.subcarriers);

    if (this.mode === 'live' && this._liveAmplitude) {
      amp.set(this._liveAmplitude);
      phase.set(this._livePhase);
    } else {
      this._generateDemoFrame(amp, phase, elapsed);
    }

    // Push copies into the sliding window (callers may mutate the returned arrays)
    this.amplitudeBuffer.push(new Float32Array(amp));
    this.phaseBuffer.push(new Float32Array(phase));
    if (this.amplitudeBuffer.length > this.timeWindow) {
      this.amplitudeBuffer.shift();
      this.phaseBuffer.shift();
    }

    // SNR estimate from the ratio of signal to accumulated noise power
    let signalPower = 0, noisePower = 0;
    for (let i = 0; i < this.subcarriers; i++) {
      signalPower += amp[i] * amp[i];
      noisePower += this._noiseState[i] * this._noiseState[i];
    }
    const snr = noisePower > 0 ? 10 * Math.log10(signalPower / noisePower) : 30;

    this.frameCount++;
    return { amplitude: amp, phase, snr: Math.max(0, Math.min(40, snr)) };
  }

  /**
   * Build 3-channel pseudo-image for CNN input
   * (R = amplitude, G = wrapped phase, B = temporal difference)
   * @param {number} targetSize - Output image dimension (square)
   * @returns {Uint8Array} RGB data (targetSize * targetSize * 3)
   */
  buildPseudoImage(targetSize = 56) {
    const buf = this.amplitudeBuffer;
    const pBuf = this.phaseBuffer;
    const frames = buf.length;
    // Need at least two frames for the temporal-diff channel; return zeros until then.
    if (frames < 2) {
      return new Uint8Array(targetSize * targetSize * 3);
    }

    const rgb = new Uint8Array(targetSize * targetSize * 3);

    for (let y = 0; y < targetSize; y++) {
      const fi = Math.min(Math.floor(y / targetSize * frames), frames - 1);
      for (let x = 0; x < targetSize; x++) {
        const si = Math.min(Math.floor(x / targetSize * this.subcarriers), this.subcarriers - 1);
        const idx = (y * targetSize + x) * 3;

        // R: Amplitude (normalized to 0-255)
        const ampVal = buf[fi][si];
        rgb[idx] = Math.min(255, Math.max(0, Math.floor(ampVal * 255)));

        // G: Phase (wrapped to 0-255)
        const phaseVal = (pBuf[fi][si] % (2 * Math.PI) + 2 * Math.PI) % (2 * Math.PI);
        rgb[idx + 1] = Math.floor(phaseVal / (2 * Math.PI) * 255);

        // B: Temporal difference (first row stays 0 — no previous frame)
        if (fi > 0) {
          const diff = Math.abs(buf[fi][si] - buf[fi - 1][si]);
          rgb[idx + 2] = Math.min(255, Math.floor(diff * 500));
        }
      }
    }

    return rgb;
  }

  /**
   * Get heatmap data for visualization (most recent frames, oldest row first)
   * @returns {{ data: Float32Array, width: number, height: number }}
   */
  getHeatmapData() {
    const frames = this.amplitudeBuffer.length;
    const w = this.subcarriers;
    const h = Math.min(frames, this.timeWindow);
    const data = new Float32Array(w * h);
    for (let y = 0; y < h; y++) {
      const fi = frames - h + y;
      if (fi >= 0 && fi < frames) {
        for (let x = 0; x < w; x++) {
          data[y * w + x] = this.amplitudeBuffer[fi][x];
        }
      }
    }
    return { data, width: w, height: h };
  }

  // === Private ===

  // Synthesize one CSI frame: base channel profile + drifting noise + person effects.
  _generateDemoFrame(amp, phase, elapsed) {
    const rng = this._rng;
    const presence = this.personPresence;
    const motion = this.personMotion;
    const px = this.personX;

    for (let i = 0; i < this.subcarriers; i++) {
      // Base CSI profile (frequency-selective channel)
      let a = this._baseAmplitude[i];
      let p = this._basePhase[i] + elapsed * 0.05;

      // Environmental noise (correlated across subcarriers)
      this._noiseState[i] = 0.95 * this._noiseState[i] + 0.05 * (rng() * 2 - 1) * 0.03;
      a += this._noiseState[i];

      // Person-induced CSI perturbation
      if (presence > 0.1) {
        // Subcarrier-dependent body reflection (Fresnel zone model)
        const freqOffset = (i - this.subcarriers * px) / (this.subcarriers * 0.3);
        const bodyReflection = presence * 0.25 * Math.exp(-freqOffset * freqOffset);

        // Motion causes amplitude fluctuation
        const motionEffect = motion * 0.15 * Math.sin(elapsed * 3.5 + i * 0.3);

        // Breathing modulation (0.2-0.3 Hz)
        const breathing = presence * 0.02 * Math.sin(elapsed * 1.5 + i * 0.05);

        a += bodyReflection + motionEffect + breathing;
        p += presence * 0.4 * Math.sin(elapsed * 2.1 + i * 0.15);
      }

      amp[i] = Math.max(0, Math.min(1, a));
      phase[i] = p;
    }
  }

  // Decode one binary frame from the live WebSocket (ADR-018 layout assumed:
  // u32 magic, ..., u16 subcarrier count at offset 8, i16 I/Q pairs after a
  // 20-byte header — verify against the sensing server if the format changes).
  _handleLiveFrame(data) {
    const view = new DataView(data);
    // Check ADR-018 magic: 0xC5110001
    if (data.byteLength < 20) return;
    const magic = view.getUint32(0, true);
    if (magic !== 0xC5110001) return;

    const numSub = Math.min(view.getUint16(8, true), this.subcarriers);
    this._liveAmplitude = new Float32Array(this.subcarriers);
    this._livePhase = new Float32Array(this.subcarriers);

    const headerSize = 20;
    for (let i = 0; i < numSub && (headerSize + i * 4 + 3) < data.byteLength; i++) {
      const real = view.getInt16(headerSize + i * 4, true);
      const imag = view.getInt16(headerSize + i * 4 + 2, true);
      this._liveAmplitude[i] = Math.sqrt(real * real + imag * imag) / 2048;
      this._livePhase[i] = Math.atan2(imag, real);
    }
  }

  // Seeded deterministic PRNG (mulberry32), private copy for this module.
  _mulberry32(seed) {
    return function() {
      let t = (seed += 0x6D2B79F5);
      t = Math.imul(t ^ (t >>> 15), t | 1);
      t ^= t + Math.imul(t ^ (t >>> 7), t | 61);
      return ((t ^ (t >>> 14)) >>> 0) / 4294967296;
    };
  }
}
diff --git a/ui/pose-fusion/js/fusion-engine.js b/ui/pose-fusion/js/fusion-engine.js
new file mode 100644
index 00000000..8ded2e8a
--- /dev/null
+++ b/ui/pose-fusion/js/fusion-engine.js
@@ -0,0 +1,166 @@
+/**
+ * FusionEngine — Attention-weighted dual-modal embedding fusion.
+ *
+ * Combines visual (camera) and CSI (WiFi) embeddings with dynamic
+ * confidence gating based on signal quality.
+ */
+
export class FusionEngine {
  /**
   * Attention-weighted fusion of visual and CSI embeddings, with
   * per-modality confidence gating derived from signal quality.
   *
   * @param {number} embeddingDim - Dimensionality of all embeddings.
   */
  constructor(embeddingDim = 128) {
    this.embeddingDim = embeddingDim;

    // Per-dimension attention weights; 0.5 = balanced between modalities.
    // A trained model would supply these from JSON.
    this.attentionWeights = new Float32Array(embeddingDim).fill(0.5);

    // Smoothed per-modality confidence scores in [0, 1].
    this.videoConfidence = 1.0;
    this.csiConfidence = 0.0;
    this.fusedConfidence = 0.5;

    // EMA coefficient for confidence smoothing (closer to 1 = slower).
    this._smoothAlpha = 0.85;

    // Rolling embedding history consumed by the 2D projection view.
    this.recentVideoEmbeddings = [];
    this.recentCsiEmbeddings = [];
    this.recentFusedEmbeddings = [];
    this.maxHistory = 50;
  }

  /**
   * Update smoothed modality confidences from current signal quality.
   * @param {number} videoBrightness - [0,1] video brightness quality
   * @param {number} videoMotion - [0,1] detected motion level
   * @param {number} csiSnr - CSI signal-to-noise ratio in dB
   * @param {boolean} csiActive - whether a CSI source is connected
   */
  updateConfidence(videoBrightness, videoMotion, csiSnr, csiActive) {
    // Raw video confidence: requires minimum brightness; motion adds a bonus.
    const rawVideo = videoBrightness > 0.05
      ? Math.min(1, videoBrightness * 1.5) * 0.7 + Math.min(1, videoMotion * 3) * 0.3
      : 0;

    // Raw CSI confidence: linear in SNR, saturating at 25 dB.
    const rawCsi = csiActive ? Math.min(1, csiSnr / 25) : 0;

    // Exponential smoothing to avoid abrupt modality switches.
    const a = this._smoothAlpha;
    this.videoConfidence = a * this.videoConfidence + (1 - a) * rawVideo;
    this.csiConfidence = a * this.csiConfidence + (1 - a) * rawCsi;

    // Fused confidence: Euclidean norm of the pair, capped at 1 — either
    // modality alone can carry it, and both together exceed each one.
    const v = this.videoConfidence;
    const c = this.csiConfidence;
    this.fusedConfidence = Math.min(1, Math.sqrt(v * v + c * c));
  }

  /**
   * Fuse the two embeddings according to the active mode.
   * @param {Float32Array|null} videoEmb - visual embedding (null when video off)
   * @param {Float32Array|null} csiEmb - CSI embedding (null when CSI off)
   * @param {string} mode - 'dual' | 'video' | 'csi'
   * @returns {Float32Array} fused embedding of length embeddingDim
   */
  fuse(videoEmb, csiEmb, mode = 'dual') {
    const dim = this.embeddingDim;
    const out = new Float32Array(dim);

    // Single-modality fast paths (also taken when the other input is absent).
    if (mode === 'video' || !csiEmb) {
      if (videoEmb) out.set(videoEmb);
      this._recordEmbedding(videoEmb, null, out);
      return out;
    }
    if (mode === 'csi' || !videoEmb) {
      if (csiEmb) out.set(csiEmb);
      this._recordEmbedding(null, csiEmb, out);
      return out;
    }

    // Dual mode: per-dimension convex blend gated by relative confidence.
    const total = this.videoConfidence + this.csiConfidence;
    const wVideo = total > 0 ? this.videoConfidence / total : 0.5;
    for (let i = 0; i < dim; i++) {
      const w = this.attentionWeights[i];
      const alpha = w * wVideo + (1 - w) * (1 - wVideo);
      out[i] = alpha * videoEmb[i] + (1 - alpha) * csiEmb[i];
    }

    // L2-normalize the fused vector (skip degenerate near-zero vectors).
    let sumSq = 0;
    for (let i = 0; i < dim; i++) sumSq += out[i] * out[i];
    const norm = Math.sqrt(sumSq);
    if (norm > 1e-8) {
      for (let i = 0; i < dim; i++) out[i] /= norm;
    }

    this._recordEmbedding(videoEmb, csiEmb, out);
    return out;
  }

  /**
   * Project the embedding history to 2D for visualization.
   * Uses a crude fixed linear projection (stand-in for PCA): even dims
   * accumulate into x, odd dims into y, with alternating signs per 4-block.
   * @returns {{ video: Array, csi: Array, fused: Array }} arrays of [x, y]
   */
  getEmbeddingPoints() {
    const project = (emb) => {
      if (!emb || emb.length < 4) return null;
      let x = 0;
      let y = 0;
      for (let i = 0; i < emb.length; i += 2) {
        const sign = i % 4 < 2 ? 1 : -1;
        x += emb[i] * sign;
        if (i + 1 < emb.length) {
          y += emb[i + 1] * sign;
        }
      }
      return [x * 2, y * 2]; // scaled up for visibility
    };

    return {
      video: this.recentVideoEmbeddings.map(project).filter(Boolean),
      csi: this.recentCsiEmbeddings.map(project).filter(Boolean),
      fused: this.recentFusedEmbeddings.map(project).filter(Boolean)
    };
  }

  /**
   * Cosine similarity between the most recent video and CSI embeddings.
   * @returns {number} similarity in [-1, 1]; 0 when either stream is empty
   */
  getCrossModalSimilarity() {
    const v = this.recentVideoEmbeddings[this.recentVideoEmbeddings.length - 1];
    const c = this.recentCsiEmbeddings[this.recentCsiEmbeddings.length - 1];
    if (!v || !c) return 0;

    let dot = 0;
    let nv = 0;
    let nc = 0;
    for (let i = 0; i < v.length; i++) {
      dot += v[i] * c[i];
      nv += v[i] * v[i];
      nc += c[i] * c[i];
    }
    const na = Math.sqrt(nv);
    const nb = Math.sqrt(nc);
    return (na > 1e-8 && nb > 1e-8) ? dot / (na * nb) : 0;
  }

  // Append bounded copies of each embedding stream for the projection view.
  _recordEmbedding(video, csi, fused) {
    const push = (list, emb) => {
      list.push(new Float32Array(emb));
      if (list.length > this.maxHistory) list.shift();
    };
    if (video) push(this.recentVideoEmbeddings, video);
    if (csi) push(this.recentCsiEmbeddings, csi);
    push(this.recentFusedEmbeddings, fused);
  }
}
diff --git a/ui/pose-fusion/js/main.js b/ui/pose-fusion/js/main.js
new file mode 100644
index 00000000..db045922
--- /dev/null
+++ b/ui/pose-fusion/js/main.js
@@ -0,0 +1,315 @@
+/**
+ * WiFi-DensePose — Dual-Modal Pose Estimation Demo
+ *
+ * Main orchestration: video capture → CNN embedding → CSI processing → fusion → rendering
+ */
+
+import { VideoCapture } from './video-capture.js';
+import { CsiSimulator } from './csi-simulator.js';
+import { CnnEmbedder } from './cnn-embedder.js';
+import { FusionEngine } from './fusion-engine.js';
+import { PoseDecoder } from './pose-decoder.js';
+import { CanvasRenderer } from './canvas-renderer.js';
+
// === State ===
// Module-level mutable state shared by init() and mainLoop().
let mode = 'dual'; // 'dual' | 'video' | 'csi'
let isRunning = false;   // gate for the rAF loop; set once in init()
let isPaused = false;    // toggled by the pause button
let startTime = 0;       // wall-clock origin (seconds) for `elapsed`
let frameCount = 0;      // frames since the last FPS sample
let fps = 0;
let lastFpsTime = 0;     // rAF timestamp (ms) of the last FPS sample
let confidenceThreshold = 0.3; // min keypoint confidence to draw

// Latency tracking
// Per-stage millisecond timings, refreshed every frame by mainLoop().
const latency = { video: 0, csi: 0, fusion: 0, total: 0 };

// === Components ===
// NOTE(review): these run at module evaluation and query the DOM —
// assumes the module script executes after the elements exist (module
// scripts are deferred); confirm the <script type="module"> placement.
const videoCapture = new VideoCapture(document.getElementById('webcam'));
const csiSimulator = new CsiSimulator({ subcarriers: 52, timeWindow: 56 });
// Distinct seeds so the visual and CSI embedders use different random weights.
const visualCnn = new CnnEmbedder({ inputSize: 56, embeddingDim: 128, seed: 42 });
const csiCnn = new CnnEmbedder({ inputSize: 56, embeddingDim: 128, seed: 137 });
const fusionEngine = new FusionEngine(128);
const poseDecoder = new PoseDecoder(128);
const renderer = new CanvasRenderer();

// === Canvas Elements ===
const skeletonCanvas = document.getElementById('skeleton-canvas');
const skeletonCtx = skeletonCanvas.getContext('2d');
const csiCanvas = document.getElementById('csi-canvas');
const csiCtx = csiCanvas.getContext('2d');
const embeddingCanvas = document.getElementById('embedding-canvas');
const embeddingCtx = embeddingCanvas.getContext('2d');

// === UI Elements ===
const modeSelect = document.getElementById('mode-select');
const statusDot = document.getElementById('status-dot');
const statusLabel = document.getElementById('status-label');
const fpsDisplay = document.getElementById('fps-display');
const cameraPrompt = document.getElementById('camera-prompt');
const startCameraBtn = document.getElementById('start-camera-btn');
const pauseBtn = document.getElementById('pause-btn');
const confSlider = document.getElementById('confidence-slider');
const confValue = document.getElementById('confidence-value');
const wsUrlInput = document.getElementById('ws-url');
const connectWsBtn = document.getElementById('connect-ws-btn');

// Fusion bar elements
const videoBar = document.getElementById('video-bar');
const csiBar = document.getElementById('csi-bar');
const fusedBar = document.getElementById('fused-bar');
const videoBarVal = document.getElementById('video-bar-val');
const csiBarVal = document.getElementById('csi-bar-val');
const fusedBarVal = document.getElementById('fused-bar-val');

// Latency elements
const latVideoEl = document.getElementById('lat-video');
const latCsiEl = document.getElementById('lat-csi');
const latFusionEl = document.getElementById('lat-fusion');
const latTotalEl = document.getElementById('lat-total');

// Cross-modal similarity
const crossModalEl = document.getElementById('cross-modal-sim');
+
+// === Initialize ===
/**
 * Wire up UI event handlers, kick off optional WASM/WebSocket loading,
 * and start the render loop. Called once after the DOM is ready.
 */
function init() {
  resizeCanvases();
  window.addEventListener('resize', resizeCanvases);

  // Mode change
  modeSelect.addEventListener('change', (e) => {
    mode = e.target.value;
    updateModeUI();
  });

  // Camera start
  startCameraBtn.addEventListener('click', startCamera);

  // Pause toggle
  pauseBtn.addEventListener('click', () => {
    isPaused = !isPaused;
    pauseBtn.textContent = isPaused ? '▶ Resume' : '⏸ Pause';
    pauseBtn.classList.toggle('active', isPaused);
  });

  // Confidence slider
  confSlider.addEventListener('input', (e) => {
    confidenceThreshold = parseFloat(e.target.value);
    confValue.textContent = confidenceThreshold.toFixed(2);
  });

  // Manual WebSocket connect. try/catch so a rejected connection attempt
  // cannot leave the button stuck on "Connecting..." or surface as an
  // unhandled rejection.
  connectWsBtn.addEventListener('click', async () => {
    const url = wsUrlInput.value.trim();
    if (!url) return;
    connectWsBtn.textContent = 'Connecting...';
    let ok = false;
    try {
      ok = await csiSimulator.connectLive(url);
    } catch (err) {
      console.error('[CSI] WebSocket connect failed:', err);
    }
    connectWsBtn.textContent = ok ? '✓ Connected' : 'Connect';
    connectWsBtn.classList.toggle('active', ok);
  });

  // Try to load WASM embedders (non-blocking; the JS fallback keeps working)
  // Resolve relative to this JS module file (in pose-fusion/js/) → ../pkg/
  const wasmBase = new URL('../pkg/ruvector_cnn_wasm', import.meta.url).href;
  visualCnn.tryLoadWasm(wasmBase);
  csiCnn.tryLoadWasm(wasmBase);

  // Auto-connect to local sensing server WebSocket if available.
  // The promise previously floated with no rejection handler; a failed
  // connection must not raise an unhandled rejection.
  const defaultWsUrl = 'ws://localhost:8765/ws/sensing';
  if (wsUrlInput) wsUrlInput.value = defaultWsUrl;
  csiSimulator.connectLive(defaultWsUrl).then((ok) => {
    if (ok && connectWsBtn) {
      connectWsBtn.textContent = '✓ Live ESP32';
      connectWsBtn.classList.add('active');
      statusLabel.textContent = 'LIVE CSI';
      statusDot.classList.remove('offline');
    }
  }).catch(() => {
    // No local sensing server — simulated CSI keeps the demo running.
  });

  // Auto-start camera for video/dual modes
  updateModeUI();
  startTime = performance.now() / 1000;
  isRunning = true;
  requestAnimationFrame(mainLoop);
}
+
/**
 * Ask for webcam permission and flip the UI to LIVE on success;
 * restore the prompt with an explanatory message on denial.
 */
async function startCamera() {
  cameraPrompt.style.display = 'none';
  const started = await videoCapture.start();
  if (!started) {
    cameraPrompt.style.display = 'flex';
    cameraPrompt.querySelector('p').textContent = 'Camera access denied. Try CSI-only mode.';
    return;
  }
  statusDot.classList.remove('offline');
  statusLabel.textContent = 'LIVE';
  resizeCanvases();
}
+
/**
 * Sync mode-dependent UI: the camera prompt is shown only when the
 * current mode needs video and the webcam has not been started yet.
 * (Removed `needsCsi`, which was computed but never used.)
 */
function updateModeUI() {
  const needsVideo = mode !== 'csi';
  cameraPrompt.style.display = needsVideo && !videoCapture.isActive ? 'flex' : 'none';
}
+
/**
 * Size the canvases: the skeleton overlay tracks the video panel's
 * rendered size; the CSI and embedding canvases take their container's
 * width with fixed heights.
 */
function resizeCanvases() {
  const videoPanel = document.querySelector('.video-panel');
  if (videoPanel) {
    const { width, height } = videoPanel.getBoundingClientRect();
    skeletonCanvas.width = width;
    skeletonCanvas.height = height;
  }

  csiCanvas.width = csiCanvas.parentElement.clientWidth;
  csiCanvas.height = 120;

  embeddingCanvas.width = embeddingCanvas.parentElement.clientWidth;
  embeddingCanvas.height = 140;
}
+
+// === Main Loop ===
/**
 * Per-frame pipeline: video capture → CNN embedding → CSI processing →
 * attention fusion → pose decode → rendering → UI stats.
 * Driven by requestAnimationFrame; `timestamp` is the rAF clock in ms.
 */
function mainLoop(timestamp) {
  if (!isRunning) return;
  requestAnimationFrame(mainLoop);

  if (isPaused) return;

  const elapsed = performance.now() / 1000 - startTime;
  const totalStart = performance.now();

  // --- Video Pipeline ---
  let videoEmb = null;
  let motionRegion = null;
  if (mode !== 'csi' && videoCapture.isActive) {
    const t0 = performance.now();
    const frame = videoCapture.captureFrame(56, 56);
    if (frame) {
      videoEmb = visualCnn.extract(frame.rgb, frame.width, frame.height);
      motionRegion = videoCapture.detectMotionRegion(56, 56);

      // Feed motion to CSI simulator for correlated demo data
      // When detected=false, CSI simulator handles through-wall persistence
      csiSimulator.updatePersonState(
        motionRegion.detected ? 1.0 : 0,
        motionRegion.detected ? motionRegion.x + motionRegion.w / 2 : 0.5,
        motionRegion.detected ? motionRegion.y + motionRegion.h / 2 : 0.5,
        frame.motion
      );

      // Update confidence here only in video-only mode. In dual mode the
      // CSI pipeline below performs the single per-frame update with the
      // real SNR; previously this also ran in dual mode with a bogus
      // 0 dB SNR, double-smoothing the EMA and dragging CSI confidence
      // down every frame.
      if (mode === 'video') {
        fusionEngine.updateConfidence(frame.brightness, frame.motion, 0, false);
      }
    }
    latency.video = performance.now() - t0;
  }

  // --- CSI Pipeline ---
  let csiEmb = null;
  if (mode !== 'video') {
    const t0 = performance.now();
    const csiFrame = csiSimulator.nextFrame(elapsed);
    const pseudoImage = csiSimulator.buildPseudoImage(56);
    csiEmb = csiCnn.extract(pseudoImage, 56, 56);

    // Single authoritative confidence update for dual/csi modes.
    fusionEngine.updateConfidence(
      videoCapture.brightnessScore,
      videoCapture.motionScore,
      csiFrame.snr,
      true
    );

    // Draw CSI heatmap
    const heatmap = csiSimulator.getHeatmapData();
    renderer.drawCsiHeatmap(csiCtx, heatmap, csiCanvas.width, csiCanvas.height);

    latency.csi = performance.now() - t0;
  }

  // --- Fusion ---
  const t0f = performance.now();
  const fusedEmb = fusionEngine.fuse(videoEmb, csiEmb, mode);
  latency.fusion = performance.now() - t0f;

  // --- Pose Decode ---
  // For CSI-only mode, generate a synthetic centered motion region from CSI energy
  if (mode === 'csi' && (!motionRegion || !motionRegion.detected)) {
    const csiPresence = csiSimulator.personPresence;
    if (csiPresence > 0.1) {
      motionRegion = {
        detected: true,
        x: 0.25, y: 0.15, w: 0.5, h: 0.7,
        coverage: csiPresence,
        motionGrid: null,
        gridCols: 10,
        gridRows: 8
      };
    }
  }

  // CSI state for through-wall tracking
  const csiState = {
    csiPresence: csiSimulator.personPresence,
    isLive: csiSimulator.isLive
  };

  const keypoints = poseDecoder.decode(fusedEmb, motionRegion, elapsed, csiState);

  // --- Render Skeleton ---
  const labelMap = { dual: 'DUAL FUSION', video: 'VIDEO ONLY', csi: 'CSI ONLY' };
  renderer.drawSkeleton(skeletonCtx, keypoints, skeletonCanvas.width, skeletonCanvas.height, {
    minConfidence: confidenceThreshold,
    color: mode === 'csi' ? 'amber' : 'green',
    label: labelMap[mode]
  });

  // --- Render Embedding Space ---
  const embPoints = fusionEngine.getEmbeddingPoints();
  renderer.drawEmbeddingSpace(embeddingCtx, embPoints, embeddingCanvas.width, embeddingCanvas.height);

  // --- Update UI ---
  latency.total = performance.now() - totalStart;

  // FPS: refresh the counter every 500 ms
  frameCount++;
  if (timestamp - lastFpsTime > 500) {
    fps = Math.round(frameCount * 1000 / (timestamp - lastFpsTime));
    lastFpsTime = timestamp;
    frameCount = 0;
    fpsDisplay.textContent = `${fps} FPS`;
  }

  // Fusion confidence bars
  const vc = fusionEngine.videoConfidence;
  const cc = fusionEngine.csiConfidence;
  const fc = fusionEngine.fusedConfidence;
  videoBar.style.width = `${vc * 100}%`;
  csiBar.style.width = `${cc * 100}%`;
  fusedBar.style.width = `${fc * 100}%`;
  videoBarVal.textContent = `${Math.round(vc * 100)}%`;
  csiBarVal.textContent = `${Math.round(cc * 100)}%`;
  fusedBarVal.textContent = `${Math.round(fc * 100)}%`;

  // Latency readouts
  latVideoEl.textContent = `${latency.video.toFixed(1)}ms`;
  latCsiEl.textContent = `${latency.csi.toFixed(1)}ms`;
  latFusionEl.textContent = `${latency.fusion.toFixed(1)}ms`;
  latTotalEl.textContent = `${latency.total.toFixed(1)}ms`;

  // Cross-modal similarity
  const sim = fusionEngine.getCrossModalSimilarity();
  crossModalEl.textContent = sim.toFixed(3);
}
+
// Boot. Module scripts are deferred, but guard against this module being
// evaluated after DOMContentLoaded already fired (e.g. dynamic import),
// in which case the listener would never run.
if (document.readyState === 'loading') {
  document.addEventListener('DOMContentLoaded', init);
} else {
  init();
}
diff --git a/ui/pose-fusion/js/pose-decoder.js b/ui/pose-fusion/js/pose-decoder.js
new file mode 100644
index 00000000..d5b0203d
--- /dev/null
+++ b/ui/pose-fusion/js/pose-decoder.js
@@ -0,0 +1,373 @@
+/**
+ * PoseDecoder — Maps motion detection grid → 17 COCO keypoints.
+ *
+ * Uses per-cell motion intensity to track actual body part positions:
+ * - Head: top-center motion cluster
+ * - Shoulders/Elbows/Wrists: lateral motion in upper body zone
+ * - Hips/Knees/Ankles: lower body motion distribution
+ *
+ * When person exits frame, CSI data continues tracking (through-wall mode).
+ */
+
+// COCO keypoint definitions
+export const KEYPOINT_NAMES = [
+ 'nose', 'left_eye', 'right_eye', 'left_ear', 'right_ear',
+ 'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow',
+ 'left_wrist', 'right_wrist', 'left_hip', 'right_hip',
+ 'left_knee', 'right_knee', 'left_ankle', 'right_ankle'
+];
+
+// Skeleton connections (pairs of keypoint indices)
+export const SKELETON_CONNECTIONS = [
+ [0, 1], [0, 2], [1, 3], [2, 4], // Head
+ [5, 6], // Shoulders
+ [5, 7], [7, 9], // Left arm
+ [6, 8], [8, 10], // Right arm
+ [5, 11], [6, 12], // Torso
+ [11, 12], // Hips
+ [11, 13], [13, 15], // Left leg
+ [12, 14], [14, 16], // Right leg
+];
+
+// Standard body proportions (relative to body height)
+const PROPORTIONS = {
+ headToShoulder: 0.15,
+ shoulderWidth: 0.25,
+ shoulderToElbow: 0.18,
+ elbowToWrist: 0.16,
+ shoulderToHip: 0.30,
+ hipWidth: 0.18,
+ hipToKnee: 0.24,
+ kneeToAnkle: 0.24,
+ eyeSpacing: 0.04,
+ earSpacing: 0.07,
+};
+
+export class PoseDecoder {
+ constructor(embeddingDim = 128) {
+ this.embeddingDim = embeddingDim;
+ this.smoothedKeypoints = null;
+ this.smoothingFactor = 0.45; // Lower = more responsive to movement
+ this._time = 0;
+
+ // Through-wall tracking state
+ this._lastBodyState = null;
+ this._ghostState = null;
+ this._ghostConfidence = 0;
+ this._ghostVelocity = { x: 0, y: 0 };
+
+ // Arm tracking history (smoothed positions)
+ this._leftArmY = 0.5;
+ this._rightArmY = 0.5;
+ this._leftArmX = 0;
+ this._rightArmX = 0;
+ this._headOffsetX = 0;
+ }
+
+ /**
+ * Decode motion data into 17 keypoints
+ * @param {Float32Array} embedding - Fused embedding vector
+ * @param {{ detected, x, y, w, h, motionGrid, gridCols, gridRows, motionCx, motionCy, exitDirection }} motionRegion
+ * @param {number} elapsed - Time in seconds
+ * @param {{ csiPresence: number }} csiState - CSI sensing state for through-wall
+ * @returns {Array<{x: number, y: number, confidence: number, name: string}>}
+ */
+ decode(embedding, motionRegion, elapsed, csiState = {}) {
+ this._time = elapsed;
+
+ const hasMotion = motionRegion && motionRegion.detected;
+ const hasCsi = csiState && csiState.csiPresence > 0.1;
+
+ if (hasMotion) {
+ // Active tracking from video motion grid
+ this._ghostConfidence = 0;
+ const rawKeypoints = this._trackFromMotionGrid(motionRegion, embedding, elapsed);
+ this._lastBodyState = { keypoints: rawKeypoints.map(kp => ({...kp})), time: elapsed };
+
+ // Track exit velocity
+ if (motionRegion.exitDirection) {
+ const speed = 0.008;
+ this._ghostVelocity = {
+ x: motionRegion.exitDirection === 'left' ? -speed : motionRegion.exitDirection === 'right' ? speed : 0,
+ y: motionRegion.exitDirection === 'up' ? -speed : motionRegion.exitDirection === 'down' ? speed : 0
+ };
+ }
+
+ // Apply temporal smoothing
+ if (this.smoothedKeypoints && this.smoothedKeypoints.length === rawKeypoints.length) {
+ const alpha = this.smoothingFactor;
+ for (let i = 0; i < rawKeypoints.length; i++) {
+ rawKeypoints[i].x = alpha * this.smoothedKeypoints[i].x + (1 - alpha) * rawKeypoints[i].x;
+ rawKeypoints[i].y = alpha * this.smoothedKeypoints[i].y + (1 - alpha) * rawKeypoints[i].y;
+ }
+ }
+
+ this.smoothedKeypoints = rawKeypoints;
+ return rawKeypoints;
+
+ } else if (this._lastBodyState && (hasCsi || this._ghostConfidence > 0.05)) {
+ // Through-wall mode: person left frame but CSI still senses them
+ return this._trackThroughWall(elapsed, csiState);
+
+ } else if (this.smoothedKeypoints) {
+ // Fade out
+ const faded = this.smoothedKeypoints.map(kp => ({
+ ...kp,
+ confidence: kp.confidence * 0.88
+ })).filter(kp => kp.confidence > 0.05);
+ if (faded.length === 0) this.smoothedKeypoints = null;
+ else this.smoothedKeypoints = faded;
+ return faded;
+ }
+
+ return [];
+ }
+
+ /**
+ * Track body parts from the motion grid.
+ * The grid tells us WHERE motion is happening → we map that to joint positions.
+ */
+ _trackFromMotionGrid(region, embedding, elapsed) {
+ const grid = region.motionGrid;
+ const cols = region.gridCols || 10;
+ const rows = region.gridRows || 8;
+
+ // Body bounding box
+ const cx = region.x + region.w / 2;
+ const cy = region.y + region.h / 2;
+ const bodyH = Math.max(region.h, 0.3);
+ const bodyW = Math.max(region.w, 0.15);
+
+ // Analyze the motion grid to find arm positions
+ // Divide body into zones: head (top 20%), arms (top 60% sides), torso (center), legs (bottom 40%)
+ if (grid) {
+ const armAnalysis = this._analyzeArmMotion(grid, cols, rows, region);
+ // Smooth arm tracking
+ this._leftArmY = 0.6 * this._leftArmY + 0.4 * armAnalysis.leftArmHeight;
+ this._rightArmY = 0.6 * this._rightArmY + 0.4 * armAnalysis.rightArmHeight;
+ this._leftArmX = 0.6 * this._leftArmX + 0.4 * armAnalysis.leftArmSpread;
+ this._rightArmX = 0.6 * this._rightArmX + 0.4 * armAnalysis.rightArmSpread;
+ this._headOffsetX = 0.7 * this._headOffsetX + 0.3 * armAnalysis.headOffsetX;
+ }
+
+ const P = PROPORTIONS;
+ const halfW = P.shoulderWidth * bodyH / 2;
+ const hipHalfW = P.hipWidth * bodyH / 2;
+
+ // Breathing (subtle)
+ const breathe = Math.sin(elapsed * 1.5) * 0.002;
+
+ // Core body positions from detection center
+ const hipY = cy + bodyH * 0.15;
+ const shoulderY = hipY - P.shoulderToHip * bodyH + breathe;
+ const headY = shoulderY - P.headToShoulder * bodyH;
+ const kneeY = hipY + P.hipToKnee * bodyH;
+ const ankleY = kneeY + P.kneeToAnkle * bodyH;
+
+ // HEAD follows motion centroid
+ const headX = cx + this._headOffsetX * bodyW * 0.3;
+
+ // ARM POSITIONS driven by motion grid analysis
+ // leftArmY: 0 = arm down at side, 1 = arm fully raised
+ // leftArmSpread: how far out the arm extends
+ const leftArmRaise = this._leftArmY; // 0-1
+ const rightArmRaise = this._rightArmY;
+ const leftSpread = 0.02 + this._leftArmX * 0.12;
+ const rightSpread = 0.02 + this._rightArmX * 0.12;
+
+ // Elbow: interpolate between "at side" and "raised"
+ const lElbowY = shoulderY + P.shoulderToElbow * bodyH * (1 - leftArmRaise * 0.9);
+ const rElbowY = shoulderY + P.shoulderToElbow * bodyH * (1 - rightArmRaise * 0.9);
+ const lElbowX = cx - halfW - leftSpread;
+ const rElbowX = cx + halfW + rightSpread;
+
+ // Wrist: extends further when raised
+ const lWristY = lElbowY + P.elbowToWrist * bodyH * (1 - leftArmRaise * 1.1);
+ const rWristY = rElbowY + P.elbowToWrist * bodyH * (1 - rightArmRaise * 1.1);
+ const lWristX = lElbowX - leftSpread * 0.6;
+ const rWristX = rElbowX + rightSpread * 0.6;
+
+ // Leg motion from lower grid cells
+ const legMotion = grid ? this._analyzeLegMotion(grid, cols, rows) : { left: 0, right: 0 };
+ const legSwing = 0.015;
+
+ const keypoints = [
+ // 0: nose
+ { x: headX, y: headY + 0.01, confidence: 0.92 },
+ // 1: left_eye
+ { x: headX - P.eyeSpacing * bodyH, y: headY - 0.005, confidence: 0.88 },
+ // 2: right_eye
+ { x: headX + P.eyeSpacing * bodyH, y: headY - 0.005, confidence: 0.88 },
+ // 3: left_ear
+ { x: headX - P.earSpacing * bodyH, y: headY + 0.005, confidence: 0.72 },
+ // 4: right_ear
+ { x: headX + P.earSpacing * bodyH, y: headY + 0.005, confidence: 0.72 },
+ // 5: left_shoulder
+ { x: cx - halfW, y: shoulderY, confidence: 0.94 },
+ // 6: right_shoulder
+ { x: cx + halfW, y: shoulderY, confidence: 0.94 },
+ // 7: left_elbow
+ { x: lElbowX, y: lElbowY, confidence: 0.87 },
+ // 8: right_elbow
+ { x: rElbowX, y: rElbowY, confidence: 0.87 },
+ // 9: left_wrist
+ { x: lWristX, y: lWristY, confidence: 0.82 },
+ // 10: right_wrist
+ { x: rWristX, y: rWristY, confidence: 0.82 },
+ // 11: left_hip
+ { x: cx - hipHalfW, y: hipY, confidence: 0.91 },
+ // 12: right_hip
+ { x: cx + hipHalfW, y: hipY, confidence: 0.91 },
+ // 13: left_knee
+ { x: cx - hipHalfW + legMotion.left * legSwing, y: kneeY, confidence: 0.88 },
+ // 14: right_knee
+ { x: cx + hipHalfW + legMotion.right * legSwing, y: kneeY, confidence: 0.88 },
+ // 15: left_ankle
+ { x: cx - hipHalfW + legMotion.left * legSwing * 1.3, y: ankleY, confidence: 0.83 },
+ // 16: right_ankle
+ { x: cx + hipHalfW + legMotion.right * legSwing * 1.3, y: ankleY, confidence: 0.83 },
+ ];
+
+ for (let i = 0; i < keypoints.length; i++) {
+ keypoints[i].name = KEYPOINT_NAMES[i];
+ }
+
+ return keypoints;
+ }
+
+ /**
+ * Analyze the motion grid to determine arm positions.
+ * Left side of grid = left side of body, etc.
+ */
+ _analyzeArmMotion(grid, cols, rows, region) {
+ // Body center column
+ const centerCol = Math.floor(cols / 2);
+
+ // Upper body rows (top 60% of detected region)
+ const upperEnd = Math.floor(rows * 0.6);
+
+ // Compute motion intensity for left vs right, at different heights
+ let leftUpperMotion = 0, leftMidMotion = 0;
+ let rightUpperMotion = 0, rightMidMotion = 0;
+ let leftCount = 0, rightCount = 0;
+ let headMotionX = 0, headMotionWeight = 0;
+
+ for (let r = 0; r < upperEnd; r++) {
+ const heightWeight = 1.0 - (r / upperEnd) * 0.3; // Upper rows weighted more
+
+ // Head zone: top 25%, center 40% of width
+ if (r < Math.floor(rows * 0.25)) {
+ const headLeft = Math.floor(cols * 0.3);
+ const headRight = Math.floor(cols * 0.7);
+ for (let c = headLeft; c <= headRight; c++) {
+ const val = grid[r][c];
+ headMotionX += (c / cols - 0.5) * val;
+ headMotionWeight += val;
+ }
+ }
+
+ // Left arm zone: left 40% of grid
+ for (let c = 0; c < Math.floor(cols * 0.4); c++) {
+ const val = grid[r][c];
+ if (r < rows * 0.3) leftUpperMotion += val * heightWeight;
+ else leftMidMotion += val * heightWeight;
+ leftCount++;
+ }
+
+ // Right arm zone: right 40% of grid
+ for (let c = Math.floor(cols * 0.6); c < cols; c++) {
+ const val = grid[r][c];
+ if (r < rows * 0.3) rightUpperMotion += val * heightWeight;
+ else rightMidMotion += val * heightWeight;
+ rightCount++;
+ }
+ }
+
+ // Normalize
+ const leftTotal = leftUpperMotion + leftMidMotion;
+ const rightTotal = rightUpperMotion + rightMidMotion;
+ const maxMotion = 0.15; // Calibration threshold
+
+ // Arm height: 0 = at side, 1 = raised
+ // High motion in upper-left → left arm is raised
+ const leftArmHeight = Math.min(1, (leftUpperMotion / maxMotion) * 2);
+ const rightArmHeight = Math.min(1, (rightUpperMotion / maxMotion) * 2);
+
+ // Arm spread: how far out from body
+ const leftArmSpread = Math.min(1, leftTotal / maxMotion);
+ const rightArmSpread = Math.min(1, rightTotal / maxMotion);
+
+ // Head offset
+ const headOffsetX = headMotionWeight > 0.01 ? headMotionX / headMotionWeight : 0;
+
+ return { leftArmHeight, rightArmHeight, leftArmSpread, rightArmSpread, headOffsetX };
+ }
+
+ /**
+ * Analyze lower grid for leg motion.
+ */
+ _analyzeLegMotion(grid, cols, rows) {
+ const lowerStart = Math.floor(rows * 0.6);
+ let leftMotion = 0, rightMotion = 0;
+
+ for (let r = lowerStart; r < rows; r++) {
+ for (let c = 0; c < Math.floor(cols / 2); c++) {
+ leftMotion += grid[r][c];
+ }
+ for (let c = Math.floor(cols / 2); c < cols; c++) {
+ rightMotion += grid[r][c];
+ }
+ }
+
+ // Return as -1 to 1 range (asymmetry indicates which leg is moving)
+ const total = leftMotion + rightMotion + 0.001;
+ return {
+ left: (leftMotion - rightMotion) / total,
+ right: (rightMotion - leftMotion) / total
+ };
+ }
+
+ /**
+ * Through-wall tracking: continue showing pose via CSI when person left video frame.
+ * The skeleton drifts in the exit direction with decreasing confidence.
+ */
+ _trackThroughWall(elapsed, csiState) {
+ if (!this._lastBodyState) return [];
+
+ const dt = elapsed - this._lastBodyState.time;
+ const csiPresence = csiState.csiPresence || 0;
+
+ // Initialize ghost on first call
+ if (this._ghostConfidence <= 0.05) {
+ this._ghostConfidence = 0.8;
+ this._ghostState = this._lastBodyState.keypoints.map(kp => ({...kp}));
+ }
+
+ // Ghost confidence decays, but CSI presence sustains it
+ const csiBoost = Math.min(0.7, csiPresence * 0.8);
+ this._ghostConfidence = Math.max(0.05, this._ghostConfidence * 0.995 - 0.001 + csiBoost * 0.002);
+
+ // Drift the ghost in exit direction
+ const vx = this._ghostVelocity.x;
+ const vy = this._ghostVelocity.y;
+
+ // Breathing continues via CSI
+ const breathe = Math.sin(elapsed * 1.5) * 0.003 * csiPresence;
+
+ const keypoints = this._ghostState.map((kp, i) => {
+ return {
+ x: kp.x + vx * dt * 0.3,
+ y: kp.y + vy * dt * 0.3 + (i >= 5 && i <= 6 ? breathe : 0),
+ confidence: kp.confidence * this._ghostConfidence * (0.5 + csiPresence * 0.5),
+ name: kp.name
+ };
+ });
+
+ // Slow down drift over time
+ this._ghostVelocity.x *= 0.998;
+ this._ghostVelocity.y *= 0.998;
+
+ this.smoothedKeypoints = keypoints;
+ return keypoints;
+ }
+}
diff --git a/ui/pose-fusion/js/video-capture.js b/ui/pose-fusion/js/video-capture.js
new file mode 100644
index 00000000..fe3ed333
--- /dev/null
+++ b/ui/pose-fusion/js/video-capture.js
@@ -0,0 +1,235 @@
+/**
+ * VideoCapture — getUserMedia webcam capture with frame extraction.
+ * Provides quality metrics (brightness, motion) for fusion confidence gating.
+ */
+
+export class VideoCapture {
+ constructor(videoElement) {
+ this.video = videoElement;
+ this.stream = null;
+ this.offscreen = document.createElement('canvas');
+ this.offCtx = this.offscreen.getContext('2d', { willReadFrequently: true });
+ this.prevFrame = null;
+ this.motionScore = 0;
+ this.brightnessScore = 0;
+ }
+
+ async start(constraints = {}) {
+ const defaultConstraints = {
+ video: {
+ width: { ideal: 640 },
+ height: { ideal: 480 },
+ facingMode: 'user',
+ frameRate: { ideal: 30 }
+ },
+ audio: false
+ };
+
+ try {
+ this.stream = await navigator.mediaDevices.getUserMedia(
+ Object.keys(constraints).length ? constraints : defaultConstraints
+ );
+ this.video.srcObject = this.stream;
+ await this.video.play();
+
+ this.offscreen.width = this.video.videoWidth;
+ this.offscreen.height = this.video.videoHeight;
+
+ return true;
+ } catch (err) {
+ console.error('[Video] Camera access failed:', err.message);
+ return false;
+ }
+ }
+
+ stop() {
+ if (this.stream) {
+ this.stream.getTracks().forEach(t => t.stop());
+ this.stream = null;
+ }
+ this.video.srcObject = null;
+ }
+
+ get isActive() {
+ return this.stream !== null && this.video.readyState >= 2;
+ }
+
+ get width() { return this.video.videoWidth || 640; }
+ get height() { return this.video.videoHeight || 480; }
+
+ /**
+ * Capture current frame as RGB Uint8Array + compute quality metrics.
+ * @param {number} targetW - Target width for CNN input
+ * @param {number} targetH - Target height for CNN input
+ * @returns {{ rgb: Uint8Array, width: number, height: number, motion: number, brightness: number }}
+ */
+ captureFrame(targetW = 56, targetH = 56) {
+ if (!this.isActive) return null;
+
+ // Draw to offscreen at target resolution
+ this.offscreen.width = targetW;
+ this.offscreen.height = targetH;
+ this.offCtx.drawImage(this.video, 0, 0, targetW, targetH);
+ const imageData = this.offCtx.getImageData(0, 0, targetW, targetH);
+ const rgba = imageData.data;
+
+ // Convert RGBA → RGB
+ const pixels = targetW * targetH;
+ const rgb = new Uint8Array(pixels * 3);
+ let brightnessSum = 0;
+ let motionSum = 0;
+
+ for (let i = 0; i < pixels; i++) {
+ const r = rgba[i * 4];
+ const g = rgba[i * 4 + 1];
+ const b = rgba[i * 4 + 2];
+ rgb[i * 3] = r;
+ rgb[i * 3 + 1] = g;
+ rgb[i * 3 + 2] = b;
+
+ // Luminance for brightness
+ const lum = 0.299 * r + 0.587 * g + 0.114 * b;
+ brightnessSum += lum;
+
+ // Motion: diff from previous frame
+ if (this.prevFrame) {
+ const pr = this.prevFrame[i * 3];
+ const pg = this.prevFrame[i * 3 + 1];
+ const pb = this.prevFrame[i * 3 + 2];
+ motionSum += Math.abs(r - pr) + Math.abs(g - pg) + Math.abs(b - pb);
+ }
+ }
+
+ this.brightnessScore = brightnessSum / (pixels * 255);
+ this.motionScore = this.prevFrame ? Math.min(1, motionSum / (pixels * 100)) : 0;
+ this.prevFrame = new Uint8Array(rgb);
+
+ return {
+ rgb,
+ width: targetW,
+ height: targetH,
+ motion: this.motionScore,
+ brightness: this.brightnessScore
+ };
+ }
+
+ /**
+ * Capture full-resolution RGBA for overlay rendering
+ * @returns {ImageData|null}
+ */
+ captureFullFrame() {
+ if (!this.isActive) return null;
+ this.offscreen.width = this.width;
+ this.offscreen.height = this.height;
+ this.offCtx.drawImage(this.video, 0, 0);
+ return this.offCtx.getImageData(0, 0, this.width, this.height);
+ }
+
+ /**
+ * Detect motion region + detailed motion grid for body-part tracking.
+ * Returns bounding box + a grid showing WHERE motion is concentrated.
+ * @returns {{ x, y, w, h, detected: boolean, motionGrid: number[][], gridCols: number, gridRows: number, exitDirection: string|null }}
+ */
+ detectMotionRegion(targetW = 56, targetH = 56) {
+ if (!this.isActive || !this.prevFrame) return { detected: false, motionGrid: null };
+
+ this.offscreen.width = targetW;
+ this.offscreen.height = targetH;
+ this.offCtx.drawImage(this.video, 0, 0, targetW, targetH);
+ const rgba = this.offCtx.getImageData(0, 0, targetW, targetH).data;
+
+ let minX = targetW, minY = targetH, maxX = 0, maxY = 0;
+ let motionPixels = 0;
+ const threshold = 25;
+
+ // Motion grid: divide frame into cells and track motion intensity per cell
+ const gridCols = 10;
+ const gridRows = 8;
+ const cellW = targetW / gridCols;
+ const cellH = targetH / gridRows;
+ const motionGrid = Array.from({ length: gridRows }, () => new Float32Array(gridCols));
+ const cellPixels = cellW * cellH;
+
+ // Also track motion centroid weighted by intensity
+ let motionCxSum = 0, motionCySum = 0, motionWeightSum = 0;
+
+ for (let y = 0; y < targetH; y++) {
+ for (let x = 0; x < targetW; x++) {
+ const i = y * targetW + x;
+ const r = rgba[i * 4], g = rgba[i * 4 + 1], b = rgba[i * 4 + 2];
+ const pr = this.prevFrame[i * 3], pg = this.prevFrame[i * 3 + 1], pb = this.prevFrame[i * 3 + 2];
+ const diff = Math.abs(r - pr) + Math.abs(g - pg) + Math.abs(b - pb);
+
+ if (diff > threshold * 3) {
+ motionPixels++;
+ if (x < minX) minX = x;
+ if (y < minY) minY = y;
+ if (x > maxX) maxX = x;
+ if (y > maxY) maxY = y;
+ }
+
+ // Accumulate per-cell motion intensity
+ const gc = Math.min(Math.floor(x / cellW), gridCols - 1);
+ const gr = Math.min(Math.floor(y / cellH), gridRows - 1);
+ const intensity = diff / (3 * 255); // Normalize 0-1
+ motionGrid[gr][gc] += intensity / cellPixels;
+
+ // Weighted centroid
+ if (diff > threshold) {
+ motionCxSum += x * diff;
+ motionCySum += y * diff;
+ motionWeightSum += diff;
+ }
+ }
+ }
+
+ const detected = motionPixels > (targetW * targetH * 0.02);
+
+ // Motion centroid (normalized 0-1)
+ const motionCx = motionWeightSum > 0 ? motionCxSum / (motionWeightSum * targetW) : 0.5;
+ const motionCy = motionWeightSum > 0 ? motionCySum / (motionWeightSum * targetH) : 0.5;
+
+ // Detect exit direction: if centroid is near edges
+ let exitDirection = null;
+ if (detected && motionCx < 0.1) exitDirection = 'left';
+ else if (detected && motionCx > 0.9) exitDirection = 'right';
+ else if (detected && motionCy < 0.1) exitDirection = 'up';
+ else if (detected && motionCy > 0.9) exitDirection = 'down';
+
+ // Track last known position for through-wall persistence
+ if (detected) {
+ this._lastDetected = {
+ x: minX / targetW,
+ y: minY / targetH,
+ w: (maxX - minX) / targetW,
+ h: (maxY - minY) / targetH,
+ cx: motionCx,
+ cy: motionCy,
+ exitDirection,
+ time: performance.now()
+ };
+ }
+
+ return {
+ detected,
+ x: minX / targetW,
+ y: minY / targetH,
+ w: (maxX - minX) / targetW,
+ h: (maxY - minY) / targetH,
+ coverage: motionPixels / (targetW * targetH),
+ motionGrid,
+ gridCols,
+ gridRows,
+ motionCx,
+ motionCy,
+ exitDirection
+ };
+ }
+
+ /**
+ * Get the last known detection info (for through-wall persistence)
+ */
+ get lastDetection() {
+ return this._lastDetected || null;
+ }
+}
diff --git a/ui/pose-fusion/pkg/ruvector_cnn_wasm/package.json b/ui/pose-fusion/pkg/ruvector_cnn_wasm/package.json
new file mode 100644
index 00000000..f1e17faf
--- /dev/null
+++ b/ui/pose-fusion/pkg/ruvector_cnn_wasm/package.json
@@ -0,0 +1,26 @@
+{
+ "name": "ruvector-cnn-wasm",
+ "type": "module",
+ "description": "WASM bindings for ruvector-cnn - CNN feature extraction for image embeddings",
+ "version": "0.1.0",
+ "license": "MIT OR Apache-2.0",
+ "repository": {
+ "type": "git",
+ "url": "https://github.com/ruvnet/ruvector"
+ },
+ "files": [
+ "ruvector_cnn_wasm_bg.wasm",
+ "ruvector_cnn_wasm.js"
+ ],
+ "main": "ruvector_cnn_wasm.js",
+ "sideEffects": [
+ "./snippets/*"
+ ],
+ "keywords": [
+ "cnn",
+ "embeddings",
+ "wasm",
+ "simd",
+ "machine-learning"
+ ]
+}
\ No newline at end of file
diff --git a/ui/pose-fusion/pkg/ruvector_cnn_wasm/ruvector_cnn_wasm.js b/ui/pose-fusion/pkg/ruvector_cnn_wasm/ruvector_cnn_wasm.js
new file mode 100644
index 00000000..f899cf7b
--- /dev/null
+++ b/ui/pose-fusion/pkg/ruvector_cnn_wasm/ruvector_cnn_wasm.js
@@ -0,0 +1,802 @@
+/**
+ * Configuration for CNN embedder
+ *
+ * NOTE(review): wasm-bindgen generated glue (see build.sh / wasm-pack) — do
+ * not hand-edit; regenerate instead. `__wbg_ptr` is this object's pointer
+ * into WASM linear memory; 0 means the object has been freed/consumed.
+ */
+export class EmbedderConfig {
+ // Detach the JS wrapper from its WASM pointer (ownership transfer) so the
+ // FinalizationRegistry will not free the same pointer again.
+ __destroy_into_raw() {
+ const ptr = this.__wbg_ptr;
+ this.__wbg_ptr = 0;
+ EmbedderConfigFinalization.unregister(this);
+ return ptr;
+ }
+ // Explicitly release the Rust-side allocation.
+ free() {
+ const ptr = this.__destroy_into_raw();
+ wasm.__wbg_embedderconfig_free(ptr, 0);
+ }
+ constructor() {
+ const ret = wasm.embedderconfig_new();
+ this.__wbg_ptr = ret >>> 0;
+ EmbedderConfigFinalization.register(this, this.__wbg_ptr, this);
+ return this;
+ }
+ /**
+ * Output embedding dimension
+ * @returns {number}
+ */
+ get embedding_dim() {
+ const ret = wasm.__wbg_get_embedderconfig_embedding_dim(this.__wbg_ptr);
+ return ret >>> 0;
+ }
+ /**
+ * Input image size (square)
+ * @returns {number}
+ */
+ get input_size() {
+ const ret = wasm.__wbg_get_embedderconfig_input_size(this.__wbg_ptr);
+ return ret >>> 0;
+ }
+ /**
+ * Whether to L2 normalize embeddings
+ * @returns {boolean}
+ */
+ get normalize() {
+ const ret = wasm.__wbg_get_embedderconfig_normalize(this.__wbg_ptr);
+ return ret !== 0;
+ }
+ /**
+ * Output embedding dimension
+ * @param {number} arg0
+ */
+ set embedding_dim(arg0) {
+ wasm.__wbg_set_embedderconfig_embedding_dim(this.__wbg_ptr, arg0);
+ }
+ /**
+ * Input image size (square)
+ * @param {number} arg0
+ */
+ set input_size(arg0) {
+ wasm.__wbg_set_embedderconfig_input_size(this.__wbg_ptr, arg0);
+ }
+ /**
+ * Whether to L2 normalize embeddings
+ * @param {boolean} arg0
+ */
+ set normalize(arg0) {
+ wasm.__wbg_set_embedderconfig_normalize(this.__wbg_ptr, arg0);
+ }
+}
+// Support `using cfg = new EmbedderConfig()` where the runtime has Symbol.dispose.
+if (Symbol.dispose) EmbedderConfig.prototype[Symbol.dispose] = EmbedderConfig.prototype.free;
+
+/**
+ * Layer operations for building custom networks
+ *
+ * NOTE(review): wasm-bindgen generated glue — static-only class; instances are
+ * never constructed, so free()/__destroy_into_raw are effectively unused.
+ */
+export class LayerOps {
+ __destroy_into_raw() {
+ const ptr = this.__wbg_ptr;
+ this.__wbg_ptr = 0;
+ LayerOpsFinalization.unregister(this);
+ return ptr;
+ }
+ free() {
+ const ptr = this.__destroy_into_raw();
+ wasm.__wbg_layerops_free(ptr, 0);
+ }
+ /**
+ * Apply batch normalization (returns new array)
+ * @param {Float32Array} input
+ * @param {Float32Array} gamma
+ * @param {Float32Array} beta
+ * @param {Float32Array} mean
+ * @param {Float32Array} _var
+ * @param {number} epsilon
+ * @returns {Float32Array}
+ */
+ static batch_norm(input, gamma, beta, mean, _var, epsilon) {
+ try {
+ // Reserve 16 bytes of shadow stack for the (ptr, len) multi-value return.
+ const retptr = wasm.__wbindgen_add_to_stack_pointer(-16);
+ const ptr0 = passArrayF32ToWasm0(input, wasm.__wbindgen_export2);
+ const len0 = WASM_VECTOR_LEN;
+ const ptr1 = passArrayF32ToWasm0(gamma, wasm.__wbindgen_export2);
+ const len1 = WASM_VECTOR_LEN;
+ const ptr2 = passArrayF32ToWasm0(beta, wasm.__wbindgen_export2);
+ const len2 = WASM_VECTOR_LEN;
+ const ptr3 = passArrayF32ToWasm0(mean, wasm.__wbindgen_export2);
+ const len3 = WASM_VECTOR_LEN;
+ const ptr4 = passArrayF32ToWasm0(_var, wasm.__wbindgen_export2);
+ const len4 = WASM_VECTOR_LEN;
+ wasm.layerops_batch_norm(retptr, ptr0, len0, ptr1, len1, ptr2, len2, ptr3, len3, ptr4, len4, epsilon);
+ var r0 = getDataViewMemory0().getInt32(retptr + 4 * 0, true);
+ var r1 = getDataViewMemory0().getInt32(retptr + 4 * 1, true);
+ // Copy the result out of WASM memory, then free the WASM-side buffer.
+ var v6 = getArrayF32FromWasm0(r0, r1).slice();
+ wasm.__wbindgen_export(r0, r1 * 4, 4);
+ return v6;
+ } finally {
+ wasm.__wbindgen_add_to_stack_pointer(16);
+ }
+ }
+ /**
+ * Apply global average pooling
+ * Returns one value per channel
+ * @param {Float32Array} input
+ * @param {number} height
+ * @param {number} width
+ * @param {number} channels
+ * @returns {Float32Array}
+ */
+ static global_avg_pool(input, height, width, channels) {
+ try {
+ const retptr = wasm.__wbindgen_add_to_stack_pointer(-16);
+ const ptr0 = passArrayF32ToWasm0(input, wasm.__wbindgen_export2);
+ const len0 = WASM_VECTOR_LEN;
+ wasm.layerops_global_avg_pool(retptr, ptr0, len0, height, width, channels);
+ var r0 = getDataViewMemory0().getInt32(retptr + 4 * 0, true);
+ var r1 = getDataViewMemory0().getInt32(retptr + 4 * 1, true);
+ var v2 = getArrayF32FromWasm0(r0, r1).slice();
+ wasm.__wbindgen_export(r0, r1 * 4, 4);
+ return v2;
+ } finally {
+ wasm.__wbindgen_add_to_stack_pointer(16);
+ }
+ }
+}
+if (Symbol.dispose) LayerOps.prototype[Symbol.dispose] = LayerOps.prototype.free;
+
+/**
+ * SIMD-optimized operations
+ *
+ * NOTE(review): wasm-bindgen generated glue — static-only vector math helpers.
+ * Each call copies the input into WASM memory, runs the kernel, copies the
+ * result back out, and frees the WASM-side buffers.
+ */
+export class SimdOps {
+ __destroy_into_raw() {
+ const ptr = this.__wbg_ptr;
+ this.__wbg_ptr = 0;
+ SimdOpsFinalization.unregister(this);
+ return ptr;
+ }
+ free() {
+ const ptr = this.__destroy_into_raw();
+ wasm.__wbg_simdops_free(ptr, 0);
+ }
+ /**
+ * Dot product of two vectors
+ * @param {Float32Array} a
+ * @param {Float32Array} b
+ * @returns {number}
+ */
+ static dot_product(a, b) {
+ const ptr0 = passArrayF32ToWasm0(a, wasm.__wbindgen_export2);
+ const len0 = WASM_VECTOR_LEN;
+ const ptr1 = passArrayF32ToWasm0(b, wasm.__wbindgen_export2);
+ const len1 = WASM_VECTOR_LEN;
+ const ret = wasm.simdops_dot_product(ptr0, len0, ptr1, len1);
+ return ret;
+ }
+ /**
+ * L2 normalize a vector (returns new array)
+ * @param {Float32Array} data
+ * @returns {Float32Array}
+ */
+ static l2_normalize(data) {
+ try {
+ const retptr = wasm.__wbindgen_add_to_stack_pointer(-16);
+ const ptr0 = passArrayF32ToWasm0(data, wasm.__wbindgen_export2);
+ const len0 = WASM_VECTOR_LEN;
+ wasm.simdops_l2_normalize(retptr, ptr0, len0);
+ var r0 = getDataViewMemory0().getInt32(retptr + 4 * 0, true);
+ var r1 = getDataViewMemory0().getInt32(retptr + 4 * 1, true);
+ var v2 = getArrayF32FromWasm0(r0, r1).slice();
+ wasm.__wbindgen_export(r0, r1 * 4, 4);
+ return v2;
+ } finally {
+ wasm.__wbindgen_add_to_stack_pointer(16);
+ }
+ }
+ /**
+ * ReLU activation (returns new array)
+ * @param {Float32Array} data
+ * @returns {Float32Array}
+ */
+ static relu(data) {
+ try {
+ const retptr = wasm.__wbindgen_add_to_stack_pointer(-16);
+ const ptr0 = passArrayF32ToWasm0(data, wasm.__wbindgen_export2);
+ const len0 = WASM_VECTOR_LEN;
+ wasm.simdops_relu(retptr, ptr0, len0);
+ var r0 = getDataViewMemory0().getInt32(retptr + 4 * 0, true);
+ var r1 = getDataViewMemory0().getInt32(retptr + 4 * 1, true);
+ var v2 = getArrayF32FromWasm0(r0, r1).slice();
+ wasm.__wbindgen_export(r0, r1 * 4, 4);
+ return v2;
+ } finally {
+ wasm.__wbindgen_add_to_stack_pointer(16);
+ }
+ }
+ /**
+ * ReLU6 activation (returns new array)
+ * @param {Float32Array} data
+ * @returns {Float32Array}
+ */
+ static relu6(data) {
+ try {
+ const retptr = wasm.__wbindgen_add_to_stack_pointer(-16);
+ const ptr0 = passArrayF32ToWasm0(data, wasm.__wbindgen_export2);
+ const len0 = WASM_VECTOR_LEN;
+ wasm.simdops_relu6(retptr, ptr0, len0);
+ var r0 = getDataViewMemory0().getInt32(retptr + 4 * 0, true);
+ var r1 = getDataViewMemory0().getInt32(retptr + 4 * 1, true);
+ var v2 = getArrayF32FromWasm0(r0, r1).slice();
+ wasm.__wbindgen_export(r0, r1 * 4, 4);
+ return v2;
+ } finally {
+ wasm.__wbindgen_add_to_stack_pointer(16);
+ }
+ }
+}
+if (Symbol.dispose) SimdOps.prototype[Symbol.dispose] = SimdOps.prototype.free;
+
+/**
+ * WASM CNN Embedder for image feature extraction
+ *
+ * NOTE(review): wasm-bindgen generated glue. Methods returning multiple values
+ * use the shadow-stack retptr pattern; a non-zero error slot means the Rust
+ * side returned Err, which is rethrown as a JS exception via takeObject().
+ */
+export class WasmCnnEmbedder {
+ __destroy_into_raw() {
+ const ptr = this.__wbg_ptr;
+ this.__wbg_ptr = 0;
+ WasmCnnEmbedderFinalization.unregister(this);
+ return ptr;
+ }
+ free() {
+ const ptr = this.__destroy_into_raw();
+ wasm.__wbg_wasmcnnembedder_free(ptr, 0);
+ }
+ /**
+ * Compute cosine similarity between two embeddings
+ * @param {Float32Array} a
+ * @param {Float32Array} b
+ * @returns {number}
+ */
+ cosine_similarity(a, b) {
+ try {
+ const retptr = wasm.__wbindgen_add_to_stack_pointer(-16);
+ const ptr0 = passArrayF32ToWasm0(a, wasm.__wbindgen_export2);
+ const len0 = WASM_VECTOR_LEN;
+ const ptr1 = passArrayF32ToWasm0(b, wasm.__wbindgen_export2);
+ const len1 = WASM_VECTOR_LEN;
+ wasm.wasmcnnembedder_cosine_similarity(retptr, this.__wbg_ptr, ptr0, len0, ptr1, len1);
+ var r0 = getDataViewMemory0().getFloat32(retptr + 4 * 0, true);
+ var r1 = getDataViewMemory0().getInt32(retptr + 4 * 1, true);
+ var r2 = getDataViewMemory0().getInt32(retptr + 4 * 2, true);
+ // r2 != 0 signals a Rust-side error; r1 holds the heap index of the Error.
+ if (r2) {
+ throw takeObject(r1);
+ }
+ return r0;
+ } finally {
+ wasm.__wbindgen_add_to_stack_pointer(16);
+ }
+ }
+ /**
+ * Get the embedding dimension
+ * @returns {number}
+ */
+ get embedding_dim() {
+ const ret = wasm.wasmcnnembedder_embedding_dim(this.__wbg_ptr);
+ return ret >>> 0;
+ }
+ /**
+ * Extract embedding from image data (RGB format, row-major)
+ * @param {Uint8Array} image_data
+ * @param {number} width
+ * @param {number} height
+ * @returns {Float32Array}
+ */
+ extract(image_data, width, height) {
+ try {
+ const retptr = wasm.__wbindgen_add_to_stack_pointer(-16);
+ const ptr0 = passArray8ToWasm0(image_data, wasm.__wbindgen_export2);
+ const len0 = WASM_VECTOR_LEN;
+ wasm.wasmcnnembedder_extract(retptr, this.__wbg_ptr, ptr0, len0, width, height);
+ var r0 = getDataViewMemory0().getInt32(retptr + 4 * 0, true);
+ var r1 = getDataViewMemory0().getInt32(retptr + 4 * 1, true);
+ var r2 = getDataViewMemory0().getInt32(retptr + 4 * 2, true);
+ var r3 = getDataViewMemory0().getInt32(retptr + 4 * 3, true);
+ if (r3) {
+ throw takeObject(r2);
+ }
+ // Copy the embedding out of WASM memory, then free the WASM-side vector.
+ var v2 = getArrayF32FromWasm0(r0, r1).slice();
+ wasm.__wbindgen_export(r0, r1 * 4, 4);
+ return v2;
+ } finally {
+ wasm.__wbindgen_add_to_stack_pointer(16);
+ }
+ }
+ /**
+ * Create a new CNN embedder
+ * @param {EmbedderConfig | null} [config]
+ */
+ constructor(config) {
+ try {
+ const retptr = wasm.__wbindgen_add_to_stack_pointer(-16);
+ let ptr0 = 0;
+ if (!isLikeNone(config)) {
+ _assertClass(config, EmbedderConfig);
+ // Constructor consumes the config: its pointer is taken over by Rust.
+ ptr0 = config.__destroy_into_raw();
+ }
+ wasm.wasmcnnembedder_new(retptr, ptr0);
+ var r0 = getDataViewMemory0().getInt32(retptr + 4 * 0, true);
+ var r1 = getDataViewMemory0().getInt32(retptr + 4 * 1, true);
+ var r2 = getDataViewMemory0().getInt32(retptr + 4 * 2, true);
+ if (r2) {
+ throw takeObject(r1);
+ }
+ this.__wbg_ptr = r0 >>> 0;
+ WasmCnnEmbedderFinalization.register(this, this.__wbg_ptr, this);
+ return this;
+ } finally {
+ wasm.__wbindgen_add_to_stack_pointer(16);
+ }
+ }
+}
+if (Symbol.dispose) WasmCnnEmbedder.prototype[Symbol.dispose] = WasmCnnEmbedder.prototype.free;
+
+/**
+ * InfoNCE loss for contrastive learning (SimCLR style)
+ *
+ * NOTE(review): wasm-bindgen generated glue — regenerate rather than hand-edit.
+ */
+export class WasmInfoNCELoss {
+ __destroy_into_raw() {
+ const ptr = this.__wbg_ptr;
+ this.__wbg_ptr = 0;
+ WasmInfoNCELossFinalization.unregister(this);
+ return ptr;
+ }
+ free() {
+ const ptr = this.__destroy_into_raw();
+ wasm.__wbg_wasminfonceloss_free(ptr, 0);
+ }
+ /**
+ * Compute loss for a batch of embedding pairs
+ * embeddings: [2N, D] flattened where (i, i+N) are positive pairs
+ * @param {Float32Array} embeddings
+ * @param {number} batch_size
+ * @param {number} dim
+ * @returns {number}
+ */
+ forward(embeddings, batch_size, dim) {
+ try {
+ const retptr = wasm.__wbindgen_add_to_stack_pointer(-16);
+ const ptr0 = passArrayF32ToWasm0(embeddings, wasm.__wbindgen_export2);
+ const len0 = WASM_VECTOR_LEN;
+ wasm.wasminfonceloss_forward(retptr, this.__wbg_ptr, ptr0, len0, batch_size, dim);
+ var r0 = getDataViewMemory0().getFloat32(retptr + 4 * 0, true);
+ var r1 = getDataViewMemory0().getInt32(retptr + 4 * 1, true);
+ var r2 = getDataViewMemory0().getInt32(retptr + 4 * 2, true);
+ // Non-zero error slot: rethrow the Rust error as a JS exception.
+ if (r2) {
+ throw takeObject(r1);
+ }
+ return r0;
+ } finally {
+ wasm.__wbindgen_add_to_stack_pointer(16);
+ }
+ }
+ /**
+ * Create new InfoNCE loss with temperature parameter
+ * @param {number} temperature
+ */
+ constructor(temperature) {
+ const ret = wasm.wasminfonceloss_new(temperature);
+ this.__wbg_ptr = ret >>> 0;
+ WasmInfoNCELossFinalization.register(this, this.__wbg_ptr, this);
+ return this;
+ }
+ /**
+ * Get the temperature parameter
+ * @returns {number}
+ */
+ get temperature() {
+ const ret = wasm.wasminfonceloss_temperature(this.__wbg_ptr);
+ return ret;
+ }
+}
+if (Symbol.dispose) WasmInfoNCELoss.prototype[Symbol.dispose] = WasmInfoNCELoss.prototype.free;
+
+/**
+ * Triplet loss for metric learning
+ *
+ * NOTE(review): wasm-bindgen generated glue — regenerate rather than hand-edit.
+ */
+export class WasmTripletLoss {
+ __destroy_into_raw() {
+ const ptr = this.__wbg_ptr;
+ this.__wbg_ptr = 0;
+ WasmTripletLossFinalization.unregister(this);
+ return ptr;
+ }
+ free() {
+ const ptr = this.__destroy_into_raw();
+ wasm.__wbg_wasmtripletloss_free(ptr, 0);
+ }
+ /**
+ * Compute loss for a batch of triplets
+ * @param {Float32Array} anchors
+ * @param {Float32Array} positives
+ * @param {Float32Array} negatives
+ * @param {number} dim
+ * @returns {number}
+ */
+ forward(anchors, positives, negatives, dim) {
+ try {
+ const retptr = wasm.__wbindgen_add_to_stack_pointer(-16);
+ const ptr0 = passArrayF32ToWasm0(anchors, wasm.__wbindgen_export2);
+ const len0 = WASM_VECTOR_LEN;
+ const ptr1 = passArrayF32ToWasm0(positives, wasm.__wbindgen_export2);
+ const len1 = WASM_VECTOR_LEN;
+ const ptr2 = passArrayF32ToWasm0(negatives, wasm.__wbindgen_export2);
+ const len2 = WASM_VECTOR_LEN;
+ wasm.wasmtripletloss_forward(retptr, this.__wbg_ptr, ptr0, len0, ptr1, len1, ptr2, len2, dim);
+ var r0 = getDataViewMemory0().getFloat32(retptr + 4 * 0, true);
+ var r1 = getDataViewMemory0().getInt32(retptr + 4 * 1, true);
+ var r2 = getDataViewMemory0().getInt32(retptr + 4 * 2, true);
+ if (r2) {
+ throw takeObject(r1);
+ }
+ return r0;
+ } finally {
+ wasm.__wbindgen_add_to_stack_pointer(16);
+ }
+ }
+ /**
+ * Compute loss for a single triplet
+ * @param {Float32Array} anchor
+ * @param {Float32Array} positive
+ * @param {Float32Array} negative
+ * @returns {number}
+ */
+ forward_single(anchor, positive, negative) {
+ try {
+ const retptr = wasm.__wbindgen_add_to_stack_pointer(-16);
+ const ptr0 = passArrayF32ToWasm0(anchor, wasm.__wbindgen_export2);
+ const len0 = WASM_VECTOR_LEN;
+ const ptr1 = passArrayF32ToWasm0(positive, wasm.__wbindgen_export2);
+ const len1 = WASM_VECTOR_LEN;
+ const ptr2 = passArrayF32ToWasm0(negative, wasm.__wbindgen_export2);
+ const len2 = WASM_VECTOR_LEN;
+ wasm.wasmtripletloss_forward_single(retptr, this.__wbg_ptr, ptr0, len0, ptr1, len1, ptr2, len2);
+ var r0 = getDataViewMemory0().getFloat32(retptr + 4 * 0, true);
+ var r1 = getDataViewMemory0().getInt32(retptr + 4 * 1, true);
+ var r2 = getDataViewMemory0().getInt32(retptr + 4 * 2, true);
+ if (r2) {
+ throw takeObject(r1);
+ }
+ return r0;
+ } finally {
+ wasm.__wbindgen_add_to_stack_pointer(16);
+ }
+ }
+ /**
+ * Get the margin parameter
+ * @returns {number}
+ */
+ get margin() {
+ const ret = wasm.wasmtripletloss_margin(this.__wbg_ptr);
+ return ret;
+ }
+ /**
+ * Create new triplet loss with margin
+ * @param {number} margin
+ */
+ constructor(margin) {
+ const ret = wasm.wasmtripletloss_new(margin);
+ this.__wbg_ptr = ret >>> 0;
+ WasmTripletLossFinalization.register(this, this.__wbg_ptr, this);
+ return this;
+ }
+}
+if (Symbol.dispose) WasmTripletLoss.prototype[Symbol.dispose] = WasmTripletLoss.prototype.free;
+
+/**
+ * Initialize panic hook for better error messages
+ */
+export function init() {
+ wasm.init();
+}
+
+// Build the import object handed to WebAssembly instantiation: the JS
+// functions the WASM module may call back into (throw, console.error,
+// Error/stack capture for the panic hook, and heap-object management).
+function __wbg_get_imports() {
+ const import0 = {
+ __proto__: null,
+ __wbg___wbindgen_throw_39bc967c0e5a9b58: function(arg0, arg1) {
+ throw new Error(getStringFromWasm0(arg0, arg1));
+ },
+ __wbg_error_a6fa202b58aa1cd3: function(arg0, arg1) {
+ let deferred0_0;
+ let deferred0_1;
+ try {
+ deferred0_0 = arg0;
+ deferred0_1 = arg1;
+ console.error(getStringFromWasm0(arg0, arg1));
+ } finally {
+ // Free the WASM-side string after logging it.
+ wasm.__wbindgen_export(deferred0_0, deferred0_1, 1);
+ }
+ },
+ __wbg_new_227d7c05414eb861: function() {
+ const ret = new Error();
+ return addHeapObject(ret);
+ },
+ __wbg_stack_3b0d974bbf31e44f: function(arg0, arg1) {
+ const ret = getObject(arg1).stack;
+ const ptr1 = passStringToWasm0(ret, wasm.__wbindgen_export2, wasm.__wbindgen_export3);
+ const len1 = WASM_VECTOR_LEN;
+ getDataViewMemory0().setInt32(arg0 + 4 * 1, len1, true);
+ getDataViewMemory0().setInt32(arg0 + 4 * 0, ptr1, true);
+ },
+ __wbindgen_cast_0000000000000001: function(arg0, arg1) {
+ // Cast intrinsic for `Ref(String) -> Externref`.
+ const ret = getStringFromWasm0(arg0, arg1);
+ return addHeapObject(ret);
+ },
+ __wbindgen_object_drop_ref: function(arg0) {
+ takeObject(arg0);
+ },
+ };
+ return {
+ __proto__: null,
+ "./ruvector_cnn_wasm_bg.js": import0,
+ };
+}
+
+// Per-class finalizers: free the Rust allocation when a wrapper is GC'd
+// without an explicit free(). No-op stubs where FinalizationRegistry is
+// unavailable (older runtimes).
+const EmbedderConfigFinalization = (typeof FinalizationRegistry === 'undefined')
+ ? { register: () => {}, unregister: () => {} }
+ : new FinalizationRegistry(ptr => wasm.__wbg_embedderconfig_free(ptr >>> 0, 1));
+const LayerOpsFinalization = (typeof FinalizationRegistry === 'undefined')
+ ? { register: () => {}, unregister: () => {} }
+ : new FinalizationRegistry(ptr => wasm.__wbg_layerops_free(ptr >>> 0, 1));
+const SimdOpsFinalization = (typeof FinalizationRegistry === 'undefined')
+ ? { register: () => {}, unregister: () => {} }
+ : new FinalizationRegistry(ptr => wasm.__wbg_simdops_free(ptr >>> 0, 1));
+const WasmCnnEmbedderFinalization = (typeof FinalizationRegistry === 'undefined')
+ ? { register: () => {}, unregister: () => {} }
+ : new FinalizationRegistry(ptr => wasm.__wbg_wasmcnnembedder_free(ptr >>> 0, 1));
+const WasmInfoNCELossFinalization = (typeof FinalizationRegistry === 'undefined')
+ ? { register: () => {}, unregister: () => {} }
+ : new FinalizationRegistry(ptr => wasm.__wbg_wasminfonceloss_free(ptr >>> 0, 1));
+const WasmTripletLossFinalization = (typeof FinalizationRegistry === 'undefined')
+ ? { register: () => {}, unregister: () => {} }
+ : new FinalizationRegistry(ptr => wasm.__wbg_wasmtripletloss_free(ptr >>> 0, 1));
+
+// Store a JS value in the heap table and return its index. Free slots form a
+// linked free list: an empty slot holds the index of the next free slot.
+function addHeapObject(obj) {
+ if (heap_next === heap.length) heap.push(heap.length + 1);
+ const idx = heap_next;
+ heap_next = heap[idx];
+
+ heap[idx] = obj;
+ return idx;
+}
+
+function _assertClass(instance, klass) {
+ if (!(instance instanceof klass)) {
+ throw new Error(`expected instance of ${klass.name}`);
+ }
+}
+
+// Return a heap slot to the free list. Indices below 1028 (the 1024-slot base
+// area plus the undefined/null/true/false singletons) are never recycled.
+function dropObject(idx) {
+ if (idx < 1028) return;
+ heap[idx] = heap_next;
+ heap_next = idx;
+}
+
+function getArrayF32FromWasm0(ptr, len) {
+ ptr = ptr >>> 0;
+ return getFloat32ArrayMemory0().subarray(ptr / 4, ptr / 4 + len);
+}
+
+// Cached typed views over wasm.memory; rebuilt whenever memory growth detaches
+// the old ArrayBuffer.
+let cachedDataViewMemory0 = null;
+function getDataViewMemory0() {
+ if (cachedDataViewMemory0 === null || cachedDataViewMemory0.buffer.detached === true || (cachedDataViewMemory0.buffer.detached === undefined && cachedDataViewMemory0.buffer !== wasm.memory.buffer)) {
+ cachedDataViewMemory0 = new DataView(wasm.memory.buffer);
+ }
+ return cachedDataViewMemory0;
+}
+
+let cachedFloat32ArrayMemory0 = null;
+function getFloat32ArrayMemory0() {
+ if (cachedFloat32ArrayMemory0 === null || cachedFloat32ArrayMemory0.byteLength === 0) {
+ cachedFloat32ArrayMemory0 = new Float32Array(wasm.memory.buffer);
+ }
+ return cachedFloat32ArrayMemory0;
+}
+
+function getStringFromWasm0(ptr, len) {
+ ptr = ptr >>> 0;
+ return decodeText(ptr, len);
+}
+
+let cachedUint8ArrayMemory0 = null;
+function getUint8ArrayMemory0() {
+ if (cachedUint8ArrayMemory0 === null || cachedUint8ArrayMemory0.byteLength === 0) {
+ cachedUint8ArrayMemory0 = new Uint8Array(wasm.memory.buffer);
+ }
+ return cachedUint8ArrayMemory0;
+}
+
+function getObject(idx) { return heap[idx]; }
+
+// JS-object heap shared with WASM; slots 1024-1027 hold the interned
+// undefined/null/true/false singletons.
+let heap = new Array(1024).fill(undefined);
+heap.push(undefined, null, true, false);
+
+let heap_next = heap.length;
+
+function isLikeNone(x) {
+ return x === undefined || x === null;
+}
+
+// Copy a Uint8Array into freshly malloc'd WASM memory; the length is reported
+// out-of-band via the WASM_VECTOR_LEN global (wasm-bindgen convention).
+function passArray8ToWasm0(arg, malloc) {
+ const ptr = malloc(arg.length * 1, 1) >>> 0;
+ getUint8ArrayMemory0().set(arg, ptr / 1);
+ WASM_VECTOR_LEN = arg.length;
+ return ptr;
+}
+
+function passArrayF32ToWasm0(arg, malloc) {
+ const ptr = malloc(arg.length * 4, 4) >>> 0;
+ getFloat32ArrayMemory0().set(arg, ptr / 4);
+ WASM_VECTOR_LEN = arg.length;
+ return ptr;
+}
+
+// Encode a JS string as UTF-8 into WASM memory. Fast path: copy ASCII bytes
+// directly; on the first non-ASCII char, realloc to the 3-bytes-per-char
+// worst case and finish with TextEncoder.encodeInto.
+function passStringToWasm0(arg, malloc, realloc) {
+ if (realloc === undefined) {
+ const buf = cachedTextEncoder.encode(arg);
+ const ptr = malloc(buf.length, 1) >>> 0;
+ getUint8ArrayMemory0().subarray(ptr, ptr + buf.length).set(buf);
+ WASM_VECTOR_LEN = buf.length;
+ return ptr;
+ }
+
+ let len = arg.length;
+ let ptr = malloc(len, 1) >>> 0;
+
+ const mem = getUint8ArrayMemory0();
+
+ let offset = 0;
+
+ for (; offset < len; offset++) {
+ const code = arg.charCodeAt(offset);
+ if (code > 0x7F) break;
+ mem[ptr + offset] = code;
+ }
+ if (offset !== len) {
+ if (offset !== 0) {
+ arg = arg.slice(offset);
+ }
+ ptr = realloc(ptr, len, len = offset + arg.length * 3, 1) >>> 0;
+ const view = getUint8ArrayMemory0().subarray(ptr + offset, ptr + len);
+ const ret = cachedTextEncoder.encodeInto(arg, view);
+
+ offset += ret.written;
+ // Shrink the allocation to the bytes actually written.
+ ptr = realloc(ptr, len, offset, 1) >>> 0;
+ }
+
+ WASM_VECTOR_LEN = offset;
+ return ptr;
+}
+
+// Read a heap object and release its slot (ownership transfer to JS).
+function takeObject(idx) {
+ const ret = getObject(idx);
+ dropObject(idx);
+ return ret;
+}
+
+let cachedTextDecoder = new TextDecoder('utf-8', { ignoreBOM: true, fatal: true });
+cachedTextDecoder.decode();
+// Workaround: recreate the decoder before its cumulative input approaches
+// 2 GiB (NOTE(review): presumably a Safari TextDecoder limit — the constant
+// name says so; confirm upstream).
+const MAX_SAFARI_DECODE_BYTES = 2146435072;
+let numBytesDecoded = 0;
+function decodeText(ptr, len) {
+ numBytesDecoded += len;
+ if (numBytesDecoded >= MAX_SAFARI_DECODE_BYTES) {
+ cachedTextDecoder = new TextDecoder('utf-8', { ignoreBOM: true, fatal: true });
+ cachedTextDecoder.decode();
+ numBytesDecoded = len;
+ }
+ return cachedTextDecoder.decode(getUint8ArrayMemory0().subarray(ptr, ptr + len));
+}
+
+const cachedTextEncoder = new TextEncoder();
+
+// Polyfill encodeInto for runtimes that lack it: encode then copy.
+if (!('encodeInto' in cachedTextEncoder)) {
+ cachedTextEncoder.encodeInto = function (arg, view) {
+ const buf = cachedTextEncoder.encode(arg);
+ view.set(buf);
+ return {
+ read: arg.length,
+ written: buf.length
+ };
+ };
+}
+
+// Length side-channel for the passArray*/passString helpers above.
+let WASM_VECTOR_LEN = 0;
+
+let wasmModule, wasm;
+// Final step of both init paths: capture the instance exports, invalidate the
+// cached memory views, and run the module's start function.
+function __wbg_finalize_init(instance, module) {
+ wasm = instance.exports;
+ wasmModule = module;
+ cachedDataViewMemory0 = null;
+ cachedFloat32ArrayMemory0 = null;
+ cachedUint8ArrayMemory0 = null;
+ wasm.__wbindgen_start();
+ return wasm;
+}
+
+// Instantiate from a Response (streaming when possible, with a fallback for
+// servers that don't send the application/wasm MIME type), from raw bytes, or
+// from a precompiled WebAssembly.Module.
+async function __wbg_load(module, imports) {
+ if (typeof Response === 'function' && module instanceof Response) {
+ if (typeof WebAssembly.instantiateStreaming === 'function') {
+ try {
+ return await WebAssembly.instantiateStreaming(module, imports);
+ } catch (e) {
+ const validResponse = module.ok && expectedResponseType(module.type);
+
+ if (validResponse && module.headers.get('Content-Type') !== 'application/wasm') {
+ console.warn("`WebAssembly.instantiateStreaming` failed because your server does not serve Wasm with `application/wasm` MIME type. Falling back to `WebAssembly.instantiate` which is slower. Original error:\n", e);
+
+ } else { throw e; }
+ }
+ }
+
+ const bytes = await module.arrayBuffer();
+ return await WebAssembly.instantiate(bytes, imports);
+ } else {
+ const instance = await WebAssembly.instantiate(module, imports);
+
+ if (instance instanceof WebAssembly.Instance) {
+ return { instance, module };
+ } else {
+ return instance;
+ }
+ }
+
+ function expectedResponseType(type) {
+ switch (type) {
+ case 'basic': case 'cors': case 'default': return true;
+ }
+ return false;
+ }
+}
+
+// Synchronous initialization from bytes or a compiled module. Idempotent:
+// returns the existing exports if already initialized.
+function initSync(module) {
+ if (wasm !== undefined) return wasm;
+
+
+ if (module !== undefined) {
+ if (Object.getPrototypeOf(module) === Object.prototype) {
+ ({module} = module)
+ } else {
+ console.warn('using deprecated parameters for `initSync()`; pass a single object instead')
+ }
+ }
+
+ const imports = __wbg_get_imports();
+ if (!(module instanceof WebAssembly.Module)) {
+ module = new WebAssembly.Module(module);
+ }
+ const instance = new WebAssembly.Instance(module, imports);
+ return __wbg_finalize_init(instance, module);
+}
+
+// Async initialization (default export). With no argument, fetches the .wasm
+// file next to this module. Idempotent like initSync.
+async function __wbg_init(module_or_path) {
+ if (wasm !== undefined) return wasm;
+
+
+ if (module_or_path !== undefined) {
+ if (Object.getPrototypeOf(module_or_path) === Object.prototype) {
+ ({module_or_path} = module_or_path)
+ } else {
+ console.warn('using deprecated parameters for the initialization function; pass a single object instead')
+ }
+ }
+
+ if (module_or_path === undefined) {
+ module_or_path = new URL('ruvector_cnn_wasm_bg.wasm', import.meta.url);
+ }
+ const imports = __wbg_get_imports();
+
+ if (typeof module_or_path === 'string' || (typeof Request === 'function' && module_or_path instanceof Request) || (typeof URL === 'function' && module_or_path instanceof URL)) {
+ module_or_path = fetch(module_or_path);
+ }
+
+ const { instance, module } = await __wbg_load(await module_or_path, imports);
+
+ return __wbg_finalize_init(instance, module);
+}
+
+export { initSync, __wbg_init as default };
diff --git a/ui/pose-fusion/pkg/ruvector_cnn_wasm/ruvector_cnn_wasm_bg.wasm b/ui/pose-fusion/pkg/ruvector_cnn_wasm/ruvector_cnn_wasm_bg.wasm
new file mode 100644
index 00000000..a1a54ee2
Binary files /dev/null and b/ui/pose-fusion/pkg/ruvector_cnn_wasm/ruvector_cnn_wasm_bg.wasm differ