@@ -9290,6 +9395,7 @@ def get_local_ip():
if (name === 'brain') loadBrainPage();
if (name === 'security') { loadSecurityPage(); loadSecurityPosture(); }
if (name === 'channels') loadChannelsPage();
+ if (name === 'context') loadContextInspector();
if (name === 'logs') { if (!logStream || logStream.readyState === EventSource.CLOSED) startLogStream(); loadLogs(); }
}
@@ -10449,6 +10555,74 @@ def get_local_ip():
}
}
+async function loadContextInspector() {
+ // Populate the Context Inspector panel from GET /api/context-inspector.
+ // NOTE(review): the HTML markup inside the string literals below appears to
+ // have been stripped during extraction — several literals are split across
+ // lines and no longer parse as written. Restore the original markup (spans/
+ // divs) from the upstream commit before applying this hunk; the surrounding
+ // logic is reviewed as-is.
+ try {
+ var data = await fetch('/api/context-inspector').then(function(r){return r.json();});
+ if (data.error) throw new Error(data.error);
+
+ var s = data.summary || {};
+ document.getElementById('ctx-total-agents').textContent = s.totalAgents || 0;
+
+ var avgEl = document.getElementById('ctx-avg-coverage');
+ var avg = s.avgCoverage || 0;
+ avgEl.textContent = avg + '%';
+ // Green >=70, amber >=40, red otherwise — same thresholds as per-agent colouring.
+ avgEl.style.color = avg >= 70 ? '#4ade80' : avg >= 40 ? '#f0c040' : '#f87171';
+
+ document.getElementById('ctx-total-warnings').textContent = s.totalWarnings || 0;
+ document.getElementById('ctx-files-found').textContent = (s.contextFilesFound || 0) + ' / ' + (data.contextFiles || []).length;
+
+ // Context files list — one row per known workspace file, ✅/❌ by existence
+ var filesHtml = '';
+ (data.contextFiles || []).forEach(function(f) {
+ var icon = f.exists ? '✅' : '❌';
+ var clr = f.exists ? 'var(--text-primary)' : '#888';
+ var size = f.exists ? '
(' + f.sizeKB + ' KB)' : '
missing';
+ filesHtml += '
' +
+ '' + icon + '' + f.name + '' + size + '
';
+ });
+ var memCt = s.memoryFileCount || 0;
+ filesHtml += '
memory/ — ' + memCt + ' file' + (memCt !== 1 ? 's' : '') + '
';
+ document.getElementById('ctx-files-list').innerHTML = filesHtml || '
No workspace files found';
+
+ // Lint warnings — severity mapped to colour/emoji (error/warn/info)
+ var lintHtml = '';
+ (data.lintWarnings || []).forEach(function(w) {
+ var sev = w.severity || 'info';
+ var sevColor = sev === 'error' ? '#f87171' : sev === 'warn' ? '#f0c040' : '#60a0ff';
+ var sevIcon = sev === 'error' ? '🔴' : sev === 'warn' ? '🟡' : '🔵';
+ lintHtml += '
' +
+ '' + sevIcon + '' +
+ '' + w.message + '' +
+ '
';
+ });
+ document.getElementById('ctx-lint-list').innerHTML = lintHtml || '
✓ No lint warnings
';
+
+ // Agent tree — sessions sorted by recency; depth > 0 entries are sub-agents
+ // and get an indent proportional to their depth.
+ var agentHtml = '';
+ (data.agents || []).sort(function(a, b) { return (b.lastActiveMs || 0) - (a.lastActiveMs || 0); }).forEach(function(a) {
+ var cov = a.coverageScore || 0;
+ var covColor = cov >= 70 ? '#4ade80' : cov >= 40 ? '#f0c040' : '#f87171';
+ var indent = (a.depth || 0) * 20;
+ var tag = a.depth > 0 ? '
sub' : '';
+ var missing = (a.missingContextFiles || []).length > 0 ?
+ '
missing: ' + a.missingContextFiles.join(', ') + '' : '';
+ // NOTE(review): the .replace(...) below is visibly truncated — presumably an
+ // HTML-escape (e.g. .replace(/</g, '&lt;')) plus a 200-char ellipsis check.
+ // Reconstruct from history; the backend already caps the snippet at 200 chars,
+ // so a "> 200" ellipsis test here would never fire — confirm intent.
+ var task = a.spawnTaskSnippet ? '
' + a.spawnTaskSnippet.replace(/ 200 ? '…' : '') + '
' : '';
+ agentHtml += '
' +
+ '
' +
+ tag +
+ '' + a.displayName + '' +
+ missing +
+ '' + cov + '%' +
+ '
' + task + '
';
+ });
+ document.getElementById('ctx-agent-list').innerHTML = agentHtml || '
No sessions found
';
+
+ document.getElementById('ctx-refresh-time').textContent = 'Updated ' + new Date().toLocaleTimeString();
+ } catch(e) {
+ document.getElementById('ctx-agent-list').innerHTML = '
Error: ' + e.message + '
';
+ }
+}
+
async function loadSubAgentsPage(silent) {
try {
var data = await fetch('/api/subagents').then(function(r){return r.json();});
@@ -15919,6 +16093,7 @@ def _gw_invoke_docker(tool, args=None, token=None):
bp_security = _Blueprint('security', __name__)
bp_usage = _Blueprint('usage', __name__)
bp_version = _Blueprint('version', __name__)
+bp_context = _Blueprint('context', __name__)
# ─────────────────────────────────────────────────────────────────────────────
# ── Version check & self-update routes ────────────────────────────────────────
@@ -23275,6 +23450,220 @@ def api_automation_analysis():
})
+# ── Context Inspector (GH #9) ─────────────────────────────────────────
+
+@bp_context.route('/api/context-inspector')
+def api_context_inspector():
+ """Context Inspector: shows context inheritance, coverage scores, and lint warnings
+ for multi-agent workflows. Reads workspace files + session transcripts.
+
+ Returns 200 with the payload built by _build_context_inspector_data(), or a
+ 500 JSON body shaped like the success payload ({error, agents, lintWarnings,
+ summary}) so the frontend can render a degraded view instead of crashing.
+ """
+ try:
+ result = _build_context_inspector_data()
+ return jsonify(result)
+ # Broad catch is deliberate: a read-only dashboard endpoint should report
+ # the failure in-band rather than surface a traceback page.
+ except Exception as e:
+ return jsonify({'error': str(e), 'agents': [], 'lintWarnings': [], 'summary': {}}), 500
+
+
+def _build_context_inspector_data():
+ """Analyse workspace context files and session transcripts to produce the
+ Context Inspector payload.
+
+ Returns:
+ {
+ agents: [{sessionId, displayName, depth, parentId, contextFiles,
+ coverageScore, lintWarnings, spawnTaskSnippet, tokensIn}],
+ lintWarnings: [{sessionId, message, severity}],
+ summary: {totalAgents, avgCoverage, totalWarnings, contextFilesFound},
+ contextFiles: [{name, sizeKB, exists}],
+ generatedAt: ISO string,
+ }
+ """
+ import math
+
+ workspace = WORKSPACE or os.path.expanduser('~')
+ sessions_dir = SESSIONS_DIR or os.path.expanduser('~/.openclaw/agents/main/sessions')
+
+ # ── 1. Discover workspace context files ──────────────────────────────
+ KNOWN_CONTEXT_FILES = [
+ 'SOUL.md', 'AGENTS.md', 'MEMORY.md', 'USER.md', 'IDENTITY.md',
+ 'HEARTBEAT.md', 'CODING.md', 'TOOLS.md',
+ ]
+ context_files_info = []
+ existing_context_files = set()
+ for fname in KNOWN_CONTEXT_FILES:
+ fpath = os.path.join(workspace, fname)
+ exists = os.path.isfile(fpath)
+ size_kb = 0.0
+ if exists:
+ try:
+ size_kb = round(os.path.getsize(fpath) / 1024, 1)
+ existing_context_files.add(fname.lower())
+ except OSError:
+ pass
+ context_files_info.append({'name': fname, 'sizeKB': size_kb, 'exists': exists})
+
+ # Also check memory/ subdirectory
+ mem_dir = os.path.join(workspace, 'memory')
+ memory_file_count = 0
+ if os.path.isdir(mem_dir):
+ try:
+ memory_file_count = sum(1 for f in os.listdir(mem_dir) if f.endswith('.md'))
+ except OSError:
+ pass
+
+ # ── 2. Parse sessions.json to build agent tree ──────────────────────
+ index_path = os.path.join(sessions_dir, 'sessions.json')
+ sessions_raw = []
+ try:
+ with open(index_path) as f:
+ idx = json.load(f)
+ sessions_raw = list(idx.values()) if isinstance(idx, dict) else idx
+ except (OSError, json.JSONDecodeError, TypeError):
+ pass
+
+ # Limit to 50 most recent to keep response fast
+ sessions_raw = sorted(sessions_raw, key=lambda s: s.get('lastActiveMs', 0), reverse=True)[:50]
+
+ # ── 3. For each session read the first few lines to extract spawn task ─
+ def _extract_spawn_task(sess_id):
+ """Return first user message text (truncated) — this is the task the agent got."""
+ fpath = os.path.join(sessions_dir, sess_id + '.jsonl')
+ if not os.path.isfile(fpath):
+ return ''
+ try:
+ with open(fpath) as f:
+ for line in f:
+ try:
+ obj = json.loads(line.strip())
+ except (json.JSONDecodeError, ValueError):
+ continue
+ if obj.get('type') == 'message':
+ msg = obj.get('message', {})
+ if msg.get('role') == 'user':
+ content = msg.get('content', '')
+ if isinstance(content, str):
+ return content[:300]
+ if isinstance(content, list):
+ for block in content:
+ if isinstance(block, dict) and block.get('type') == 'text':
+ return block.get('text', '')[:300]
+ except OSError:
+ pass
+ return ''
+
+ def _compute_coverage_score(sess, task_text):
+ """Heuristic 0-100 coverage score.
+
+ Checks:
+ - Is there a task description at all? +20
+ - SOUL.md mentioned / present in workspace? +20
+ - AGENTS.md / MEMORY.md present? +15 each
+ - Task length ≥ 50 chars (enough context)? +15
+ - memory/ has recent files? +15
+ """
+ score = 0
+ if task_text:
+ score += 20
+ txt_lower = task_text.lower()
+ if 'soul' in txt_lower or 'soul.md' in existing_context_files:
+ score += 20
+ if 'agents.md' in existing_context_files:
+ score += 15
+ if 'memory.md' in existing_context_files:
+ score += 15
+ if len(task_text) >= 50:
+ score += 15
+ if memory_file_count > 0:
+ score += 15
+ return min(score, 100)
+
+ def _lint_task(sess_id, sess, task_text):
+ """Return list of lint warning strings for this agent's spawn context."""
+ warnings = []
+ txt_lower = task_text.lower()
+ # Warn if task mentions user-specific data but no memory files
+ user_data_hints = ['vivek', 'user', 'my ', "i'm", 'password', 'email', 'phone']
+ if any(h in txt_lower for h in user_data_hints) and 'user.md' not in existing_context_files:
+ warnings.append({'severity': 'warn', 'message': 'Task references user data but USER.md not found in workspace'})
+ # Warn if sub-agent task is very short (context starvation risk)
+ depth = sess.get('depth', 0) or 0
+ if depth > 0 and len(task_text) < 50:
+ warnings.append({'severity': 'error', 'message': f'Sub-agent (depth {depth}) has a very short task — possible context starvation (<50 chars)'})
+ # Warn if no SOUL.md
+ if 'soul.md' not in existing_context_files:
+ warnings.append({'severity': 'warn', 'message': 'SOUL.md not found — agent identity/persona context is missing'})
+ # Warn if no MEMORY.md
+ if 'memory.md' not in existing_context_files:
+ warnings.append({'severity': 'info', 'message': 'MEMORY.md not found — long-term memory context unavailable'})
+ return warnings
+
+ # ── 4. Build agent list ───────────────────────────────────────────────
+ agents = []
+ all_lint_warnings = []
+
+ for sess in sessions_raw:
+ sess_id = sess.get('sessionId') or sess.get('key', '')
+ if not sess_id:
+ continue
+
+ display = sess.get('displayName') or sess_id[:16]
+ depth = int(sess.get('depth', 0) or 0)
+ parent_id = sess.get('spawnedBy') or sess.get('parentKey') or None
+ tokens_in = sess.get('inputTokens') or sess.get('totalTokens', 0) or 0
+
+ task_text = _extract_spawn_task(sess_id)
+ coverage = _compute_coverage_score(sess, task_text)
+ lint = _lint_task(sess_id, sess, task_text)
+
+ # Collect files referenced in the task text (simple heuristic)
+ referenced = [f for f in KNOWN_CONTEXT_FILES if f.lower() in task_text.lower()]
+ missing = [f for f in referenced if f.lower() not in existing_context_files]
+
+ agent_entry = {
+ 'sessionId': sess_id,
+ 'displayName': display,
+ 'depth': depth,
+ 'parentId': parent_id,
+ 'coverageScore': coverage,
+ 'lintWarnings': lint,
+ 'spawnTaskSnippet': task_text[:200] if task_text else '',
+ 'referencedContextFiles': referenced,
+ 'missingContextFiles': missing,
+ 'tokensIn': tokens_in,
+ 'lastActiveMs': sess.get('lastActiveMs', 0),
+ 'model': sess.get('model') or sess.get('modelRef', 'unknown'),
+ }
+ agents.append(agent_entry)
+
+ for w in lint:
+ all_lint_warnings.append({'sessionId': sess_id, 'displayName': display, **w})
+
+ # Deduplicate global lint warnings (same message across sessions)
+ seen_msgs = set()
+ deduped_warnings = []
+ for w in all_lint_warnings:
+ key = w['message']
+ if key not in seen_msgs:
+ seen_msgs.add(key)
+ deduped_warnings.append(w)
+
+ avg_coverage = round(sum(a['coverageScore'] for a in agents) / len(agents), 1) if agents else 0
+
+ return {
+ 'agents': agents,
+ 'lintWarnings': deduped_warnings,
+ 'summary': {
+ 'totalAgents': len(agents),
+ 'avgCoverage': avg_coverage,
+ 'totalWarnings': len(all_lint_warnings),
+ 'contextFilesFound': len(existing_context_files),
+ 'memoryFileCount': memory_file_count,
+ },
+ 'contextFiles': context_files_info,
+ 'generatedAt': datetime.now(timezone.utc).isoformat(),
+ }
+
+
# ── Data Helpers ────────────────────────────────────────────────────────
def _get_sessions():
diff --git a/tests/test_api.py b/tests/test_api.py
index 0bc61e0..63461d8 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -471,3 +471,47 @@ def test_memory_analytics_files_have_status(self, api, base_url):
for f in d["files"]:
assert_keys(f, "path", "sizeBytes", "sizeKB", "estTokens", "status")
assert f["status"] in ("ok", "warning", "critical")
+
+
+class TestContextInspector:
+ """Tests for Context Inspector (GH #9).
+
+ Exercises GET /api/context-inspector through the shared `api`/`base_url`
+ fixtures — presumably a live server instance provided by conftest; verify.
+ Assertions are schema-level only (keys/types/ranges), since agent and
+ workspace contents vary per environment.
+ """
+
+ def test_context_inspector_returns_200(self, api, base_url):
+ """Context Inspector endpoint returns 200."""
+ d = assert_ok(get(api, base_url, "/api/context-inspector"))
+ assert_keys(d, "agents", "lintWarnings", "summary", "contextFiles", "generatedAt")
+
+ def test_context_inspector_summary_fields(self, api, base_url):
+ """Summary contains all required fields."""
+ d = assert_ok(get(api, base_url, "/api/context-inspector"))
+ s = d["summary"]
+ assert_keys(s, "totalAgents", "avgCoverage", "totalWarnings", "contextFilesFound", "memoryFileCount")
+ assert isinstance(s["totalAgents"], int)
+ assert isinstance(s["avgCoverage"], (int, float))
+ assert 0 <= s["avgCoverage"] <= 100
+
+ def test_context_inspector_context_files(self, api, base_url):
+ """contextFiles list covers all known workspace files."""
+ d = assert_ok(get(api, base_url, "/api/context-inspector"))
+ files = d["contextFiles"]
+ assert len(files) >= 5, "Expected at least 5 known context files"
+ for f in files:
+ assert_keys(f, "name", "sizeKB", "exists")
+ assert isinstance(f["exists"], bool)
+
+ def test_context_inspector_agent_fields(self, api, base_url):
+ """Each agent entry has required fields with valid values."""
+ d = assert_ok(get(api, base_url, "/api/context-inspector"))
+ # Only the first 10 agents are checked to keep the test fast; the
+ # backend already caps the list at 50 sessions.
+ for agent in d["agents"][:10]:
+ assert_keys(agent, "sessionId", "displayName", "depth", "coverageScore",
+ "lintWarnings", "spawnTaskSnippet")
+ assert 0 <= agent["coverageScore"] <= 100, "Coverage score out of range"
+ assert isinstance(agent["depth"], int)
+ assert isinstance(agent["lintWarnings"], list)
+
+ def test_context_inspector_lint_warnings_format(self, api, base_url):
+ """Lint warnings have severity and message."""
+ d = assert_ok(get(api, base_url, "/api/context-inspector"))
+ for w in d["lintWarnings"]:
+ assert_keys(w, "severity", "message")
+ assert w["severity"] in ("info", "warn", "error"), f"Unknown severity: {w['severity']}"